id (stringlengths 1 to 8) | text (stringlengths 6 to 1.05M) | dataset_id (stringclasses, 1 value)
|---|---|---|
119793 | from typing import List
import datasets
# Citation, taken from https://github.com/microsoft/CodeXGLUE
_DEFAULT_CITATION = """@article{CodeXGLUE,
title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
year={2020},}"""
class Child:
_DESCRIPTION = None
_FEATURES = None
_CITATION = None
SPLITS = {"train": datasets.Split.TRAIN}
_SUPERVISED_KEYS = None
def __init__(self, info):
self.info = info
def homepage(self):
return self.info["project_url"]
def _info(self):
# This is the description that will appear on the datasets page.
return datasets.DatasetInfo(
description=self.info["description"] + "\n\n" + self._DESCRIPTION,
features=datasets.Features(self._FEATURES),
homepage=self.homepage(),
citation=self._CITATION or _DEFAULT_CITATION,
supervised_keys=self._SUPERVISED_KEYS,
)
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
SPLITS = self.SPLITS
_URL = self.info["raw_url"]
urls_to_download = {}
for split in SPLITS:
if split not in urls_to_download:
urls_to_download[split] = {}
for key, url in self.generate_urls(split):
if not url.startswith("http"):
url = _URL + "/" + url
urls_to_download[split][key] = url
downloaded_files = {}
for k, v in urls_to_download.items():
downloaded_files[k] = dl_manager.download_and_extract(v)
return [
datasets.SplitGenerator(
name=SPLITS[k],
gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
)
for k in SPLITS
]
def check_empty(self, entries):
all_empty = all([v == "" for v in entries.values()])
all_non_empty = all([v != "" for v in entries.values()])
if not all_non_empty and not all_empty:
raise RuntimeError("Parallel data files should have the same number of lines.")
return all_empty
class TrainValidTestChild(Child):
SPLITS = {
"train": datasets.Split.TRAIN,
"valid": datasets.Split.VALIDATION,
"test": datasets.Split.TEST,
}
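# A minimal, hypothetical subclass sketch showing how this template is meant to be filled
# in. The task name, feature keys and file name below are illustrative only and do not
# correspond to a real CodeXGLUE task defined in this file.
class ExampleTask(TrainValidTestChild):
    _DESCRIPTION = "Toy example task."
    _FEATURES = {
        "id": datasets.Value("int32"),
        "code": datasets.Value("string"),
        "label": datasets.Value("string"),
    }
    def generate_urls(self, split_name):
        # one (key, url) pair per file needed for this split; relative urls are
        # prefixed with the raw_url by _split_generators above
        yield "data", "{}.jsonl".format(split_name)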
| StarcoderdataPython |
298654 | #! /usr/bin/env python
from .triplet import TripletLoss, HardTripletLoss, FullTripletLoss
from .cross_entropy import LabelSmoothCrossEntropyLoss
| StarcoderdataPython |
4970597 | <filename>RecoLuminosity/LumiDB/python/lumiQTWidget.py<gh_stars>1-10
import sys,os
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from PyQt4 import QtGui, QtCore
class LumiCanvas(FigureCanvas):
"""this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, fig=None):
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class ApplicationWindow(QtGui.QMainWindow):
'''
main evt loop
'''
def __init__(self,fig=None):
self.qApp=QtGui.QApplication(sys.argv)#every PyQt4 application must create an application object
QtGui.QMainWindow.__init__(self)
self.setWindowTitle('lumi plot')
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.main_widget = QtGui.QWidget(self)
l = QtGui.QVBoxLayout(self.main_widget)
sc = LumiCanvas(self.main_widget,fig=fig)
bn = QtGui.QPushButton("Save to File",self.main_widget)
bn.clicked.connect(lambda:self.saveAs(fig))
l.addWidget(sc)
l.addWidget(bn)
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
def saveAs(self,fig):
filename=QtGui.QFileDialog.getSaveFileName(self,"Save plot as file","","(*.png)")
if filename == "": return
fig.savefig(filename,format="PNG")
def fileQuit(self):
self.close()
def closeEvent(self, ce):
self.fileQuit()
def destroy(self):
sys.exit(self.qApp.exec_())
if __name__ == "__main__":
from numpy import arange, sin, pi
from matplotlib.figure import Figure
fig=Figure(figsize=(7.2,5.4),dpi=120)#create fig
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
ax=fig.add_subplot(111)
ax.plot(t,s)
aw=ApplicationWindow(fig=fig)
aw.show()
aw.destroy()
| StarcoderdataPython |
230233 | <filename>server/config.py
import os
DEBUG = False
TOKEN_SECRET = os.environ.get('SECRET_KEY') or 'JWT_SECRET'
MYSQL_DATABASE_USER = os.environ.get('MYSQL_DATABASE_USER') or 'user'
MYSQL_DATABASE_PASSWORD = os.environ.get('MYSQL_DATABASE_PASSWORD') or 'password'
MYSQL_DATABASE_DB = os.environ.get('MYSQL_DATABASE_DB') or 'auth_example'
MYSQL_DATABASE_HOST = os.environ.get('MYSQL_DATABASE_HOST') or 'localhost' | StarcoderdataPython |
1898067 | #!/usr/bin/env python
import sys
from . import parser, printer, rewriter
#----------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
# process cmd-line switches
f = open(sys.argv[-1], "r")
s = f.read()
f.close()
# parse
ast = parser.parse(s)
# lower
ast2 = rewriter.Rewriter().transform(ast)
# tune
# pretty-print
text = printer.Printer().pp(ast2, '')
print(text)
f = open(sys.argv[-1] + '.c', 'w')
f.write(text)
f.close()
| StarcoderdataPython |
6602893 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from transform_finder import build_transform
import torch
import torchvision as tv
from utils.converters import PilToNumpy, NumpyToTensor
CIFAR_MEAN = [125.3/255, 123.0/255, 113.9/255]
CIFAR_STD = [63.0/255, 62.1/255, 66.7/255]
#This is in RGB order since that is the standard for PIL
IM_MEAN = [0.485, 0.456, 0.406]
IM_STD = [0.229, 0.224, 0.225]
def read_corruption_csv(filename):
with open(filename) as f:
lines = [l.rstrip() for l in f.readlines()]
corruptions = []
for line in lines:
vals = line.split(",")
if not vals:
continue
corruptions.extend([(vals[0], float(v)) for v in vals[1:]])
return corruptions
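# For reference, a hypothetical CSV line such as "brightness,0.5,1.0" is parsed by
# read_corruption_csv into [("brightness", 0.5), ("brightness", 1.0)]: one
# (name, severity) tuple per severity value listed after the corruption name.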
@torch.no_grad()
def test_c_bar(
model,
dataset_type,
dataset_path,
batch_size,
corruption_string=None,
loader_kwargs={},
logger=None,
calculate_averages=True,
distributed=False,
num_gpus=1
):
assert dataset_type in ['imagenet', 'cifar'],\
"Only ImageNet and CIFAR-10 are supported."
if corruption_string is None:
corruption_filename = 'imagenet_c_bar.csv' if dataset_type=='imagenet'\
else 'cifar10_c_bar.csv'
corruptions = read_corruption_csv(corruption_filename)
else:
corruptions = [(c.split("-")[0], float(c.split("-")[1])) for c in corruption_string.split("--")]
results = {}
for name, severity in corruptions:
if dataset_type=='imagenet':
transform = tv.transforms.Compose([
tv.transforms.Resize(256),
tv.transforms.CenterCrop(224),
PilToNumpy(),
build_transform(name=name, severity=severity, dataset_type=dataset_type),
NumpyToTensor(),
tv.transforms.Normalize(IM_MEAN, IM_STD)
])
path = os.path.join(dataset_path, 'val')
dataset = tv.datasets.ImageFolder(path, transform=transform)
elif dataset_type=='cifar':
transform = tv.transforms.Compose([
PilToNumpy(),
build_transform(name=name, severity=severity, dataset_type=dataset_type),
NumpyToTensor(),
tv.transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
])
dataset = tv.datasets.CIFAR10(dataset_path, train=False, download=False, transform=transform)
sampler = torch.utils.data.distributed.DistributedSampler(dataset)\
if distributed and num_gpus > 1 else None
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
drop_last=False,
**loader_kwargs
)
num_correct = 0
for curr_iter, (inputs, labels) in enumerate(loader):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
preds = model(inputs)
correct = torch.sum(torch.argmax(preds, dim=1)==labels)
if distributed and num_gpus > 1:
torch.distributed.all_reduce(correct)
num_correct += correct.item()
err = 100 * (1 - num_correct / len(dataset))
corruption_string = "{}-{:.2f}".format(name, severity)
if logger:
logger.info("Top1 Error for {}: {:.2f}".format(corruption_string, err))
results[corruption_string] = err
if calculate_averages:
import numpy as np
unique_corruption_names = list(set([c.split("-")[0] for c in results]))
avg_errs = {"{}-avg".format(u) : np.mean([results[c] for c in results if c.split("-")[0]==u])
for u in unique_corruption_names}
overall_err = np.mean(list(results.values()))
results.update(avg_errs)
results['overall-avg'] = overall_err
if logger:
for k,v in avg_errs.items():
logger.info("Top1 Error for {}: {:.2f}".format(k,v))
logger.info("Average Top1 Error: {}".format(overall_err))
return results
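# Hypothetical invocation (the model object and paths are placeholders; the string
# 'brightness-0.5' follows the "name-severity" format parsed above, with multiple
# corruptions joined by "--" when corruption_string is supplied):
# results = test_c_bar(model, dataset_type='cifar', dataset_path='./data',
#                      batch_size=128, corruption_string='brightness-0.5')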
| StarcoderdataPython |
5111888 | """
API support for endpoints located at API_ROOT/poe
"""
from .utils import urlforversion
class POEMixin(object):
__doc__ = __doc__
def poe_confirm(self, email, transaction_id, confirmation_token, result):
"""Allows confirmation of client side verification (javscript widget)
Arguments:
email (str): the email address that was verified
transaction_id: the transaction_id provided by the javascript
widget
confirmation_token: the confirmation_token provided by the
javascript widget
result: the verification result provided by the javascript widget
Returns:
A ``dict``
See Also:
https://developers.neverbounce.com/v4.0/reference#widget-poe-confirm
"""
endpoint = urlforversion(self.api_version, 'poe', 'confirm')
params = dict(email=email,
transaction_id=transaction_id,
confirmation_token=confirmation_token,
result=result)
resp = self._make_request('GET', endpoint, params=params)
self._check_response(resp)
return resp.json()
| StarcoderdataPython |
8039489 | #!/usr/bin/env python3
"""
https://www.reddit.com/r/dailyprogrammer/comments/3r7wxz/20151102_challenge_239_easy_a_game_of_threes/
Given a starting number, play the game of threes, printing your steps.
For every number, add 0, 1, or -1 to make it divisible by 3, then divide by 3.
Continue until you reach 1.
"""
def gameOfThrees(starting):
num = starting
while num > 1:
choice = [0, -1, 1][num % 3]
print("{} {}".format(num, choice))
num += choice
num //= 3
print(1)
gameOfThrees(100)
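# Worked example, traced from the code above: the call gameOfThrees(100) prints
# 100 -1
# 33 0
# 11 1
# 4 -1
# 1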
| StarcoderdataPython |
6596227 | <reponame>jakeogh/anormbookmarker
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
class ConflictingAliasError(ValueError):
'''
    An Alias can't be created because it conflicts with an existing Alias to a different Word.
'''
pass
class ConflictingWordMisSpellingError(ValueError):
'''
    A WordMisSpelling can't be created because it conflicts with an existing WordMisSpelling to a different Word.
'''
pass
class ConflictingWordError(ValueError):
'''
    An Alias or WordMisSpelling can't be created because it conflicts with an existing Word.
'''
pass
class MissingWordError(ValueError):
'''
    An Alias or WordMisSpelling can't be created because it references a non-existing Word.
'''
pass
| StarcoderdataPython |
9661912 | <gh_stars>0
from flask import jsonify, make_response
from wowstash.library.jsonrpc import wallet
from wowstash.models import Transaction
from wowstash.factory import db
# @app.errorhandler(404)
def not_found(error):
return make_response(jsonify({
'error': 'Page not found'
}), 404)
# @app.cli.command('initdb')
def init_db():
db.create_all()
# @app.cli.command('send_transfers')
def send_transfers():
txes = Transaction.query.all()
for i in txes:
print(i)
# tx = wallet.transfer(
# 0, current_user.subaddress_index, address, amount
# )
| StarcoderdataPython |
3352999 | """
basic.py
A basic calculator.
"""
from calc.keyboard import Keyboard
from calc.screen import Screen
from calc.memory import Memory
from calc.handler import Handler
class BasicCalculator():
vendor = "Python"
model = "basic"
# BasicCalculator HAS A Keyboard
keyboard = Keyboard()
# BasicCalculator HAS A Screen
screen = Screen()
# BasicCalculator HAS A Memory
memory = Memory()
# BasicCalculator HAS A Handler
handler = Handler()
def get_expression(self):
return self.keyboard.get_input("Expression: ")
def evaluate_expression(self, expression):
operator, operand_1, operand_2, error = self.handler.handle(expression)
if error:
self.screen.print("Error", error)
self.memory.write({"error": error})
exit(1)
else:
return operator, operand_1, operand_2
def print(self, operator, operand_1, operand_2, result):
self.screen.print("Operator", operator)
self.screen.print("Operand 1", operand_1)
self.screen.print("Operand 2", operand_2)
self.screen.print("Result", result)
def write(self, operator, operand_1, operand_2, result):
operation = {
"operator": operator,
"operand_1": operand_1,
"operand_2": operand_2,
"result": result
}
self.memory.write(operation)
def calculate(self, operator, operand_1, operand_2):
if operator == "+":
result = self.sum(operand_1, operand_2)
elif operator == "-":
result = self.subtract(operand_1, operand_2)
elif operator == "*":
result = self.multiply(operand_1, operand_2)
elif operator == "/":
result = self.divide(operand_1, operand_2)
else:
self.screen.print("Error", "invalid operator")
self.memory.write({"error": "invalid operator"})
exit(1)
self.print(operator, operand_1, operand_2, result)
self.write(operator, operand_1, operand_2, result)
def sum(self, operand_1, operand_2):
return operand_1 + operand_2
def subtract(self, operand_1, operand_2):
return operand_1 - operand_2
def multiply(self, operand_1, operand_2):
return operand_1 * operand_2
def divide(self, operand_1, operand_2):
if operand_2 == 0:
self.screen.print("Error", "can not divide by zero")
self.memory.write({"error": "can not divide by zero"})
exit()
else:
return int(operand_1 / operand_2)
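# A minimal usage sketch (not part of the original module). It assumes the calc.keyboard,
# calc.screen, calc.memory and calc.handler helpers behave as the calls above suggest,
# e.g. Handler.handle returning (operator, operand_1, operand_2, error).
if __name__ == "__main__":
    calculator = BasicCalculator()
    expression = calculator.get_expression()
    operator, operand_1, operand_2 = calculator.evaluate_expression(expression)
    calculator.calculate(operator, operand_1, operand_2)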
| StarcoderdataPython |
8061440 | <reponame>dgaston/ddbio-ngsflow
"""
.. module:: gatk
:platform: Unix, OSX
:synopsis: A wrapper module for calling GATK utilities.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import pipeline
def diagnosetargets(job, config, name, samples, input_bam):
"""Run GATK's DiagnoseTargets against the supplied region
:param config: The configuration dictionary.
:type config: dict.
    :param name: sample name.
    :type name: str.
:param samples: samples dictionary.
:type samples: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The DiagnoseTargets output vcf file name.
"""
diagnose_targets_vcf = "{}.diagnosetargets.vcf".format(name)
missing_intervals = "{}.missing.intervals".format(name)
logfile = "{}.diagnose_targets.log".format(name)
command = ["{}".format(config['gatk']['bin']),
"-T",
"DiagnoseTargets",
"-R",
"{}".format(config['reference']),
"-L",
"{}".format(samples[name]['regions']),
"--coverage_status_threshold",
"{}".format(config['coverage_loci_threshold']),
"--bad_mate_status_threshold",
"{}".format(config['bad_mate_threshold']),
"--minimum_coverage",
"{}".format(config['coverage_threshold']),
"--quality_status_threshold",
"{}".format(config['quality_loci_threshold']),
"-I",
"{}".format(input_bam),
"-o",
"{}".format(diagnose_targets_vcf),
"--missing_intervals",
"{}".format(missing_intervals)]
job.fileStore.logToMaster("GATK DiagnoseTargets Command: {}\n".format(command))
pipeline.run_and_log_command(" ".join(command), logfile)
return diagnose_targets_vcf
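# For reference, the configuration dictionary read by diagnosetargets above is expected to
# contain at least the following keys (placeholder values, drawn from the lookups in the
# function body; the other wrappers below read additional keys such as 'gatk-annotate',
# 'dbsnp', 'indel1'/'indel2' and 'picard-dedup'):
# config = {
#     "gatk": {"bin": "..."},
#     "reference": "...",
#     "coverage_loci_threshold": "...",
#     "bad_mate_threshold": "...",
#     "coverage_threshold": "...",
#     "quality_loci_threshold": "...",
# }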
def diagnose_pooled_targets(job, config, name, regions, samples, input_bam1, input_bam2):
"""Run GATK's DiagnoseTargets against the supplied region
:param config: The configuration dictionary.
:type config: dict.
    :param name: sample name.
    :type name: str.
:param regions: regions dictionary key name and tag.
:type regions: str.
:param samples: samples dictionary.
:type samples: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The DiagnoseTargets output vcf file name.
"""
diagnose_targets_vcf = "{}_{}.diagnosetargets.vcf".format(name, regions)
missing_intervals = "{}_{}.missing.intervals".format(name, regions)
logfile = "{}.{}.diagnose_targets.log".format(name, regions)
command = ["{}".format(config['gatk']['bin']),
"-T",
"DiagnoseTargets",
"-R",
"{}".format(config['reference']),
"-L",
"{}".format(samples[name][regions]),
"--coverage_status_threshold",
"{}".format(config['coverage_loci_threshold']),
"--bad_mate_status_threshold",
"{}".format(config['bad_mate_threshold']),
"--minimum_coverage",
"{}".format(config['coverage_threshold']),
"--quality_status_threshold",
"{}".format(config['quality_loci_threshold']),
"-I",
"{}".format(input_bam1),
"-I",
"{}".format(input_bam2),
"-o",
"{}".format(diagnose_targets_vcf),
"--missing_intervals",
"{}".format(missing_intervals)]
job.fileStore.logToMaster("GATK DiagnoseTargets Command: {}\n".format(command))
pipeline.run_and_log_command(" ".join(command), logfile)
return diagnose_targets_vcf
def annotate_vcf(job, config, name, input_vcf, input_bam):
"""Run GATK's VariantAnnotation on the specified VCF
:param config: The configuration dictionary.
:type config: dict.
    :param name: sample name.
    :type name: str.
:param input_vcf: The input_vcf file name to process.
:type input_vcf: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output vcf file name.
"""
output_vcf = "{}.annotated.vcf".format(name)
annotation_logfile = "{}.variantannotation.log".format(name)
annotation_command = ["{}".format(config['gatk-annotate']['bin']),
"-T",
"VariantAnnotator",
"-R",
"{}".format(config['reference']),
"-nt",
"{}".format(config['gatk-annotate']['num_cores']),
"--group",
"StandardAnnotation",
"--dbsnp",
"{}".format(config['dbsnp']),
"-I",
"{}".format(input_bam),
"--variant",
"{}".format(input_vcf),
"-L",
"{}".format(input_vcf),
"-o",
"{}".format(output_vcf)]
job.fileStore.logToMaster("GATK VariantAnnotator Command: {}\n".format(annotation_command))
pipeline.run_and_log_command(" ".join(annotation_command), annotation_logfile)
return output_vcf
def filter_variants(job, config, name, input_vcf):
"""Run GATK's VariantFilter on the specified VCF
:param config: The configuration dictionary.
:type config: dict.
    :param name: sample name.
    :type name: str.
:param input_vcf: The input_vcf file name to process.
:type input_vcf: str.
:returns: str -- The output vcf file name.
"""
output_vcf = "{}.filtered.vcf".format(name)
filter_log = "{}.variantfiltration.log".format(name)
filter_command = ["{}".format(config['gatk-filter']['bin']),
"-T",
"VariantFiltration",
"-R",
"{}".format(config['reference']),
"--filterExpression",
"'MQ0 > {}'".format(config['mq0_threshold']),
"--filterName",
"'HighMQ0'",
"--filterExpression",
"'DP < {}'".format(config['coverage_threshold']),
"--filterName",
"'LowDepth'",
"--filterExpression",
"'QUAL < {}'".format(config['var_qual_threshold']),
"--filterName",
"'LowQual'",
"--filterExpression",
"'MQ < {}'".format(config['map_qual_threshold']),
"--filterName",
"'LowMappingQual'",
"--variant",
"{}".format(input_vcf),
"-o",
"{}".format(output_vcf)]
job.fileStore.logToMaster("GATK VariantFiltration Command: {}\n".format(filter_command))
pipeline.run_and_log_command(" ".join(filter_command), filter_log)
return output_vcf
def mark_duplicates(job, config, name, input_bam):
"""Run Picard MarkDuplicates
:param config: The configuration dictionary.
:type config: dict.
    :param name: sample name.
    :type name: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output bam file name.
"""
job.fileStore.logToMaster("Running MarkDuplicates for sample: {}".format(name))
metrics_file = "{}.dedup.metrics".format(name)
output_bam = "{}.dedup.sorted.bam".format(name)
logfile = "{}.markduplicates.log".format(name)
command = ["{}".format(config['picard-dedup']['bin']),
"MarkDuplicates",
"CREATE_INDEX=true",
"METRICS_FILE={}".format(metrics_file),
"INPUT={}".format(input_bam),
"OUTPUT={}".format(output_bam)]
job.fileStore.logToMaster("Picard MarkDuplicates Command: {}\n".format(command))
pipeline.run_and_log_command(" ".join(command), logfile)
return output_bam
def add_or_replace_readgroups(job, config, name, input_bam):
"""Run Picard's AddOrReplaceReadGroups on the specified BAM
:param config: The configuration dictionary.
:type config: dict.
    :param name: sample name.
    :type name: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output bam file name.
"""
job.fileStore.logToMaster("Running AddOrReplaceReadGroups in sample: {}".format(name))
output_bam = "{}.rg.sorted.bam".format(name)
logfile = "{}.addreadgroups.log".format(name)
index_log = "{}.buildindex.log".format(name)
command = ["{}".format(config['picard-add']['bin']),
"AddOrReplaceReadGroups",
"INPUT={}".format(input_bam),
"OUTPUT={}".format(output_bam),
"RGID={}".format(name),
"RGSM={}".format(name),
"RGLB={}".format(name),
"RGPL=illumina",
"RGPU=miseq"]
command2 = ["{}".format(config['picard-add']['bin']),
"BuildBamIndex",
"INPUT={}".format(output_bam)]
job.fileStore.logToMaster("GATK AddOrReplaceReadGroupsCommand Command: {}\n".format(command))
pipeline.run_and_log_command(" ".join(command), logfile)
job.fileStore.logToMaster("GATK BuildBamIndex Command: {}\n".format(command2))
pipeline.run_and_log_command(" ".join(command2), index_log)
return output_bam
def realign_target_creator(job, config, name, input_bam):
"""Run GATK TargetCreator on the specified BAM to identify targets for realignment
:param config: The configuration dictionary.
:type config: dict.
    :param name: sample name.
    :type name: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The file name of the targets file.
"""
targets = "{}.targets.intervals".format(name)
targets_log = "{}.targetcreation.log".format(name)
command = ["{}".format(config['gatk-realign']['bin']),
"-T",
"RealignerTargetCreator",
"-R",
"{}".format(config['reference']),
"-I",
"{}".format(input_bam),
"-o",
"{}".format(targets),
"-known",
"{}".format(config['indel1']),
"-known",
"{}".format(config['indel2']),
"-nt",
"{}".format(config['gatk-realign']['num_cores'])
]
job.fileStore.logToMaster("GATK RealignerTargetCreator Command: {}\n".format(command))
pipeline.run_and_log_command(" ".join(command), targets_log)
return targets
def realign_indels(job, config, name, input_bam, targets):
"""Run GATK Indel Realignment on the specified BAM
:param config: The configuration dictionary.
:type config: dict.
    :param name: sample name.
    :type name: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:param targets: The file name of targets to realign.
:type targets: str.
:returns: str -- The output bam file name.
"""
output_bam = "{}.realigned.sorted.bam".format(name)
realign_log = "{}.realignindels.log".format(name)
command = ["{}".format(config['gatk-realign']['bin']),
"-T",
"IndelRealigner",
"-R",
"{}".format(config['reference']),
"-I",
"{}".format(input_bam),
"-known",
"{}".format(config['indel1']),
"-known",
"{}".format(config['indel2']),
"-targetIntervals",
"{}".format(targets),
"--read_filter",
"NotPrimaryAlignment",
"-o",
"{}".format(output_bam)]
job.fileStore.logToMaster("GATK IndelRealigner Command: {}\n".format(command))
pipeline.run_and_log_command(" ".join(command), realign_log)
return output_bam
def recalibrator(job, config, name, input_bam):
"""Run GATK Recalibrator on the specified BAM
:param config: The configuration dictionary.
:type config: dict.
    :param name: sample name.
    :type name: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output bam file name.
"""
output_bam = "{}.recalibrated.sorted.bam".format(name)
recal_config = "{}.recal".format(name)
recal_log = "{}.recalibrate.log".format(name)
print_log = "{}.printrecalibrated.log".format(name)
cp_log = "{}.copy.log".format(name)
# Calculate covariates
recal_commands = ["{}".format(config['gatk-recal']['bin']),
"-T",
"BaseRecalibrator",
"-R",
"{}".format(config['reference']),
"-I",
"{}".format(input_bam),
"-o",
"{}".format(recal_config),
"--knownSites",
"{}".format(config['dbsnp']),
"-nct",
"{}".format(config['gatk-recal']['num_cores'])]
# Print recalibrated BAM
print_reads_command = ["{}".format(config['gatk-recal']['bin']),
"-T",
"PrintReads",
"-R",
"{}".format(config['reference']),
"-I",
"{}".format(input_bam),
"-o",
"{}".format(output_bam),
"-BQSR",
"{}".format(recal_config),
"-nct",
"{}".format(config['gatk-recal']['num_cores'])]
# Copy index to alternative name
cp_command = ["cp",
"{}.recalibrated.sorted.bai".format(name),
"{}.recalibrated.sorted.bam.bai".format(name)]
job.fileStore.logToMaster("GATK BaseRecalibrator Command: {}\n".format(recal_commands))
pipeline.run_and_log_command(" ".join(recal_commands), recal_log)
job.fileStore.logToMaster("GATK PrintReads Command: {}\n".format(print_reads_command))
pipeline.run_and_log_command(" ".join(print_reads_command), print_log)
job.fileStore.logToMaster("GATK Copy Command: {}\n".format(cp_command))
pipeline.run_and_log_command(" ".join(cp_command), cp_log)
return output_bam
def merge_sam(job, config, name, input_bams):
"""Run Picard MergeSamFiles
:param config: The configuration dictionary.
:type config: dict.
    :param name: sample name.
    :type name: str.
:param input_bams: The list of input_bam files to merge.
:type input_bams: str.
:returns: str -- The output bam file name.
"""
output_sam = "{}.merged.sorted.bam".format(name)
logfile = "{}.mergesam.log".format(name)
bam_string = " I=".join(input_bams)
command = ["{}".format(config['picard-merge']['bin']),
"MergeSamFiles",
"I={}".format(bam_string),
"O={}".format(output_sam),
"USE_THREADING=True"]
job.fileStore.logToMaster("Picard MergeSam Command: {}\n".format(command))
pipeline.run_and_log_command(" ".join(command), logfile)
return output_sam
| StarcoderdataPython |
1929286 | import os
import numpy as np
import math
from GPy.util import datasets as dat
class vertex:
    def __init__(self, name, id, parents=None, children=None, meta=None):
        self.name = name
        self.id = id
        # use fresh containers so that vertices do not share mutable default arguments
        self.parents = parents if parents is not None else []
        self.children = children if children is not None else []
        self.meta = meta if meta is not None else {}
def __str__(self):
return self.name + '(' + str(self.id) + ').'
class tree:
def __init__(self):
self.vertices = []
self.vertices.append(vertex(name='root', id=0))
def __str__(self):
index = self.find_root()
return self.branch_str(index)
def branch_str(self, index, indent=''):
out = indent + str(self.vertices[index]) + '\n'
for child in self.vertices[index].children:
out+=self.branch_str(child, indent+' ')
return out
def find_children(self):
"""Take a tree and set the children according to the parents.
Takes a tree structure which lists the parents of each vertex
and computes the children for each vertex and places them in."""
for i in range(len(self.vertices)):
self.vertices[i].children = []
for i in range(len(self.vertices)):
for parent in self.vertices[i].parents:
if i not in self.vertices[parent].children:
self.vertices[parent].children.append(i)
def find_parents(self):
"""Take a tree and set the parents according to the children
Takes a tree structure which lists the children of each vertex
and computes the parents for each vertex and places them in."""
for i in range(len(self.vertices)):
self.vertices[i].parents = []
for i in range(len(self.vertices)):
for child in self.vertices[i].children:
if i not in self.vertices[child].parents:
self.vertices[child].parents.append(i)
def find_root(self):
"""Finds the index of the root node of the tree."""
self.find_parents()
index = 0
while len(self.vertices[index].parents)>0:
index = self.vertices[index].parents[0]
return index
def get_index_by_id(self, id):
"""Give the index associated with a given vertex id."""
for i in range(len(self.vertices)):
if self.vertices[i].id == id:
return i
raise ValueError('Reverse look up of id failed.')
def get_index_by_name(self, name):
"""Give the index associated with a given vertex name."""
for i in range(len(self.vertices)):
if self.vertices[i].name == name:
return i
raise ValueError('Reverse look up of name failed.')
def order_vertices(self):
"""Order vertices in the graph such that parents always have a lower index than children."""
        ordered = False
        while not ordered:
            # assume sorted until a parent with a higher index than its child forces another pass
            ordered = True
            for i in range(len(self.vertices)):
                for parent in self.vertices[i].parents:
                    if parent > i:
                        ordered = False
                        self.swap_vertices(i, parent)
def swap_vertices(self, i, j):
"""
Swap two vertices in the tree structure array.
swap_vertex swaps the location of two vertices in a tree structure array.
:param tree: the tree for which two vertices are to be swapped.
:param i: the index of the first vertex to be swapped.
:param j: the index of the second vertex to be swapped.
:rval tree: the tree structure with the two vertex locations swapped.
"""
store_vertex_i = self.vertices[i]
store_vertex_j = self.vertices[j]
self.vertices[j] = store_vertex_i
self.vertices[i] = store_vertex_j
for k in range(len(self.vertices)):
for swap_list in [self.vertices[k].children, self.vertices[k].parents]:
if i in swap_list:
swap_list[swap_list.index(i)] = -1
if j in swap_list:
swap_list[swap_list.index(j)] = i
if -1 in swap_list:
swap_list[swap_list.index(-1)] = j
def rotation_matrix(xangle, yangle, zangle, order='zxy', degrees=False):
"""
Compute the rotation matrix for an angle in each direction.
This is a helper function for computing the rotation matrix for a given set of angles in a given order.
:param xangle: rotation for x-axis.
:param yangle: rotation for y-axis.
:param zangle: rotation for z-axis.
:param order: the order for the rotations.
"""
if degrees:
xangle = math.radians(xangle)
yangle = math.radians(yangle)
zangle = math.radians(zangle)
# Here we assume we rotate z, then x then y.
c1 = math.cos(xangle) # The x angle
c2 = math.cos(yangle) # The y angle
c3 = math.cos(zangle) # the z angle
s1 = math.sin(xangle)
s2 = math.sin(yangle)
s3 = math.sin(zangle)
# see http://en.wikipedia.org/wiki/Rotation_matrix for
# additional info.
if order=='zxy':
rot_mat = np.array([[c2*c3-s1*s2*s3, c2*s3+s1*s2*c3, -s2*c1],[-c1*s3, c1*c3, s1],[s2*c3+c2*s1*s3, s2*s3-c2*s1*c3, c2*c1]])
else:
rot_mat = np.eye(3)
for i in range(len(order)):
if order[i]=='x':
rot_mat = np.dot(np.array([[1, 0, 0], [0, c1, s1], [0, -s1, c1]]),rot_mat)
elif order[i] == 'y':
rot_mat = np.dot(np.array([[c2, 0, -s2], [0, 1, 0], [s2, 0, c2]]),rot_mat)
elif order[i] == 'z':
rot_mat = np.dot(np.array([[c3, s3, 0], [-s3, c3, 0], [0, 0, 1]]),rot_mat)
return rot_mat
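# A quick sanity check of the convention used above (row vectors multiplied on the left,
# as in np.dot(offset, rot) elsewhere in this module): a 90 degree rotation about z maps
# the x axis onto the y axis.
# rot = rotation_matrix(0., 0., 90., order='zxy', degrees=True)
# np.dot(np.array([1., 0., 0.]), rot)  # -> approximately [0., 1., 0.]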
# Motion capture data routines.
class skeleton(tree):
def __init__(self):
tree.__init__(self)
def connection_matrix(self):
connection = np.zeros((len(self.vertices), len(self.vertices)), dtype=bool)
for i in range(len(self.vertices)):
for j in range(len(self.vertices[i].children)):
connection[i, self.vertices[i].children[j]] = True
return connection
def to_xyz(self, channels):
raise NotImplementedError("this needs to be implemented to use the skeleton class")
def finalize(self):
"""After loading in a skeleton ensure parents are correct, vertex orders are correct and rotation matrices are correct."""
self.find_parents()
self.order_vertices()
self.set_rotation_matrices()
def smooth_angle_channels(self, channels):
"""Remove discontinuities in angle channels so that they don't cause artifacts in algorithms that rely on the smoothness of the functions."""
for vertex in self.vertices:
for col in vertex.meta['rot_ind']:
if col:
for k in range(1, channels.shape[0]):
diff=channels[k, col]-channels[k-1, col]
if abs(diff+360.)<abs(diff):
channels[k:, col]=channels[k:, col]+360.
elif abs(diff-360.)<abs(diff):
channels[k:, col]=channels[k:, col]-360.
# class bvh_skeleton(skeleton):
# def __init__(self):
# skeleton.__init__(self)
# def to_xyz(self, channels):
class acclaim_skeleton(skeleton):
def __init__(self, file_name=None):
skeleton.__init__(self)
self.documentation = []
self.angle = 'deg'
self.length = 1.0
self.mass = 1.0
self.type = 'acclaim'
self.vertices[0] = vertex(name='root', id=0,
parents = [0], children=[],
meta = {'orientation': [],
'axis': [0., 0., 0.],
'axis_order': [],
'C': np.eye(3),
'Cinv': np.eye(3),
'channels': [],
'bodymass': [],
'confmass': [],
'order': [],
'rot_ind': [],
'pos_ind': [],
'limits': [],
'xyz': np.array([0., 0., 0.]),
'rot': np.eye(3)})
if file_name:
self.load_skel(file_name)
def to_xyz(self, channels):
rot_val = list(self.vertices[0].meta['orientation'])
for i in range(len(self.vertices[0].meta['rot_ind'])):
rind = self.vertices[0].meta['rot_ind'][i]
if rind != -1:
rot_val[i] += channels[rind]
self.vertices[0].meta['rot'] = rotation_matrix(rot_val[0],
rot_val[1],
rot_val[2],
self.vertices[0].meta['axis_order'],
degrees=True)
# vertex based store of the xyz location
self.vertices[0].meta['xyz'] = list(self.vertices[0].meta['offset'])
for i in range(len(self.vertices[0].meta['pos_ind'])):
pind = self.vertices[0].meta['pos_ind'][i]
if pind != -1:
self.vertices[0].meta['xyz'][i] += channels[pind]
for i in range(len(self.vertices[0].children)):
ind = self.vertices[0].children[i]
self.get_child_xyz(ind, channels)
xyz = []
for vertex in self.vertices:
xyz.append(vertex.meta['xyz'])
return np.array(xyz)
def get_child_xyz(self, ind, channels):
parent = self.vertices[ind].parents[0]
children = self.vertices[ind].children
rot_val = np.zeros(3)
for j in range(len(self.vertices[ind].meta['rot_ind'])):
rind = self.vertices[ind].meta['rot_ind'][j]
if rind != -1:
rot_val[j] = channels[rind]
else:
rot_val[j] = 0
tdof = rotation_matrix(rot_val[0], rot_val[1], rot_val[2],
self.vertices[ind].meta['order'],
degrees=True)
torient = rotation_matrix(self.vertices[ind].meta['axis'][0],
self.vertices[ind].meta['axis'][1],
self.vertices[ind].meta['axis'][2],
self.vertices[ind].meta['axis_order'],
degrees=True)
torient_inv = rotation_matrix(-self.vertices[ind].meta['axis'][0],
-self.vertices[ind].meta['axis'][1],
-self.vertices[ind].meta['axis'][2],
self.vertices[ind].meta['axis_order'][::-1],
degrees=True)
self.vertices[ind].meta['rot'] = np.dot(np.dot(np.dot(torient_inv,tdof),torient),self.vertices[parent].meta['rot'])
self.vertices[ind].meta['xyz'] = self.vertices[parent].meta['xyz'] + np.dot(self.vertices[ind].meta['offset'],self.vertices[ind].meta['rot'])
for i in range(len(children)):
cind = children[i]
self.get_child_xyz(cind, channels)
def load_channels(self, file_name):
fid=open(file_name, 'r')
channels = self.read_channels(fid)
fid.close()
return channels
def save_channels(self, file_name, channels):
with open(file_name,'w') as fid:
self.writ_channels(fid, channels)
fid.close()
def load_skel(self, file_name):
"""
Loads an ASF file into a skeleton structure.
:param file_name: The file name to load in.
"""
fid = open(file_name, 'r')
self.read_skel(fid)
fid.close()
self.name = file_name
def read_bonedata(self, fid):
"""Read bone data from an acclaim skeleton file stream."""
bone_count = 0
lin = self.read_line(fid)
while lin[0]!=':':
parts = lin.split()
if parts[0] == 'begin':
bone_count += 1
self.vertices.append(vertex(name = '', id=np.NaN,
meta={'name': [],
'id': [],
'offset': [],
'orientation': [],
'axis': [0., 0., 0.],
'axis_order': [],
'C': np.eye(3),
'Cinv': np.eye(3),
'channels': [],
'bodymass': [],
'confmass': [],
'order': [],
'rot_ind': [],
'pos_ind': [],
'limits': [],
'xyz': np.array([0., 0., 0.]),
'rot': np.eye(3)}))
lin = self.read_line(fid)
elif parts[0]=='id':
self.vertices[bone_count].id = int(parts[1])
lin = self.read_line(fid)
self.vertices[bone_count].children = []
elif parts[0]=='name':
self.vertices[bone_count].name = parts[1]
lin = self.read_line(fid)
elif parts[0]=='direction':
direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])])
lin = self.read_line(fid)
elif parts[0]=='length':
lgth = float(parts[1])
lin = self.read_line(fid)
elif parts[0]=='axis':
self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]),
float(parts[2]),
float(parts[3])])
# order is reversed compared to bvh
self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower()
lin = self.read_line(fid)
elif parts[0]=='dof':
order = []
for i in range(1, len(parts)):
if parts[i]== 'rx':
chan = 'Xrotation'
order.append('x')
elif parts[i] =='ry':
chan = 'Yrotation'
order.append('y')
elif parts[i] == 'rz':
chan = 'Zrotation'
order.append('z')
elif parts[i] == 'tx':
chan = 'Xposition'
elif parts[i] == 'ty':
chan = 'Yposition'
elif parts[i] == 'tz':
chan = 'Zposition'
elif parts[i] == 'l':
chan = 'length'
self.vertices[bone_count].meta['channels'].append(chan)
# order is reversed compared to bvh
self.vertices[bone_count].meta['order'] = order[::-1]
lin = self.read_line(fid)
elif parts[0]=='limits':
self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]]
lin = self.read_line(fid)
while lin !='end':
parts = lin.split()
self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])])
lin = self.read_line(fid)
self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits'])
elif parts[0]=='end':
self.vertices[bone_count].meta['offset'] = direction*lgth
lin = self.read_line(fid)
return lin
def read_channels(self, fid):
"""Read channels from an acclaim file."""
bones = [[] for i in self.vertices]
num_channels = 0
for vertex in self.vertices:
num_channels = num_channels + len(vertex.meta['channels'])
lin = self.read_line(fid)
while lin != ':DEGREES':
lin = self.read_line(fid)
if lin == '':
raise ValueError('Could not find :DEGREES in ' + fid.name)
counter = 0
lin = self.read_line(fid)
while lin:
parts = lin.split()
if len(parts)==1:
frame_no = int(parts[0])
if frame_no:
counter += 1
if counter != frame_no:
raise ValueError('Unexpected frame number.')
else:
raise ValueError('Single bone name ...')
else:
ind = self.get_index_by_name(parts[0])
bones[ind].append(np.array([float(channel) for channel in parts[1:]]))
lin = self.read_line(fid)
num_frames = counter
channels = np.zeros((num_frames, num_channels))
end_val = 0
for i in range(len(self.vertices)):
vertex = self.vertices[i]
if len(vertex.meta['channels'])>0:
start_val = end_val
end_val = end_val + len(vertex.meta['channels'])
for j in range(num_frames):
channels[j, start_val:end_val] = bones[i][j]
self.resolve_indices(i, start_val)
self.smooth_angle_channels(channels)
return channels
def writ_channels(self, fid, channels):
fid.write('#!OML:ASF \n')
fid.write(':FULLY-SPECIFIED\n')
fid.write(':DEGREES\n')
num_frames = channels.shape[0]
for i_frame in range(num_frames):
fid.write(str(i_frame+1)+'\n')
offset = 0
for vertex in self.vertices:
fid.write(vertex.name+' '+ ' '.join([str(v) for v in channels[i_frame,offset:offset+len(vertex.meta['channels'])]])+'\n')
offset += len(vertex.meta['channels'])
def read_documentation(self, fid):
"""Read documentation from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ':':
self.documentation.append(lin)
lin = self.read_line(fid)
return lin
def read_hierarchy(self, fid):
"""Read hierarchy information from acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin != 'end':
parts = lin.split()
if lin != 'begin':
ind = self.get_index_by_name(parts[0])
for i in range(1, len(parts)):
self.vertices[ind].children.append(self.get_index_by_name(parts[i]))
lin = self.read_line(fid)
lin = self.read_line(fid)
return lin
def read_line(self, fid):
"""Read a line from a file string and check it isn't either empty or commented before returning."""
lin = '#'
while lin[0] == '#':
lin = fid.readline().strip()
if lin == '':
return lin
return lin
def read_root(self, fid):
"""Read the root node from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ':':
parts = lin.split()
if parts[0]=='order':
order = []
for i in range(1, len(parts)):
if parts[i].lower()=='rx':
chan = 'Xrotation'
order.append('x')
elif parts[i].lower()=='ry':
chan = 'Yrotation'
order.append('y')
elif parts[i].lower()=='rz':
chan = 'Zrotation'
order.append('z')
elif parts[i].lower()=='tx':
chan = 'Xposition'
elif parts[i].lower()=='ty':
chan = 'Yposition'
elif parts[i].lower()=='tz':
chan = 'Zposition'
elif parts[i].lower()=='l':
chan = 'length'
self.vertices[0].meta['channels'].append(chan)
# order is reversed compared to bvh
self.vertices[0].meta['order'] = order[::-1]
elif parts[0]=='axis':
# order is reversed compared to bvh
self.vertices[0].meta['axis_order'] = parts[1][::-1].lower()
elif parts[0]=='position':
self.vertices[0].meta['offset'] = [float(parts[1]),
float(parts[2]),
float(parts[3])]
elif parts[0]=='orientation':
self.vertices[0].meta['orientation'] = [float(parts[1]),
float(parts[2]),
float(parts[3])]
lin = self.read_line(fid)
return lin
def read_skel(self, fid):
"""Loads an acclaim skeleton format from a file stream."""
lin = self.read_line(fid)
while lin:
if lin[0]==':':
if lin[1:]== 'name':
lin = self.read_line(fid)
self.name = lin
elif lin[1:]=='units':
lin = self.read_units(fid)
elif lin[1:]=='documentation':
lin = self.read_documentation(fid)
elif lin[1:]=='root':
lin = self.read_root(fid)
elif lin[1:]=='bonedata':
lin = self.read_bonedata(fid)
elif lin[1:]=='hierarchy':
lin = self.read_hierarchy(fid)
elif lin[1:8]=='version':
lin = self.read_line(fid)
continue
else:
if not lin:
self.finalize()
return
lin = self.read_line(fid)
else:
raise ValueError('Unrecognised file format')
self.finalize()
def read_units(self, fid):
"""Read units from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ':':
parts = lin.split()
if parts[0]=='mass':
self.mass = float(parts[1])
elif parts[0]=='length':
self.length = float(parts[1])
elif parts[0]=='angle':
self.angle = parts[1]
lin = self.read_line(fid)
return lin
def resolve_indices(self, index, start_val):
"""Get indices for the skeleton from the channels when loading in channel data."""
channels = self.vertices[index].meta['channels']
base_channel = start_val
rot_ind = -np.ones(3, dtype=int)
pos_ind = -np.ones(3, dtype=int)
for i in range(len(channels)):
if channels[i]== 'Xrotation':
rot_ind[0] = base_channel + i
elif channels[i]=='Yrotation':
rot_ind[1] = base_channel + i
elif channels[i]=='Zrotation':
rot_ind[2] = base_channel + i
elif channels[i]=='Xposition':
pos_ind[0] = base_channel + i
elif channels[i]=='Yposition':
pos_ind[1] = base_channel + i
elif channels[i]=='Zposition':
pos_ind[2] = base_channel + i
self.vertices[index].meta['rot_ind'] = list(rot_ind)
self.vertices[index].meta['pos_ind'] = list(pos_ind)
def set_rotation_matrices(self):
"""Set the meta information at each vertex to contain the correct matrices C and Cinv as prescribed by the rotations and rotation orders."""
for i in range(len(self.vertices)):
self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0],
self.vertices[i].meta['axis'][1],
self.vertices[i].meta['axis'][2],
self.vertices[i].meta['axis_order'],
degrees=True)
# Todo: invert this by applying angle operations in reverse order
self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C'])
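# A hypothetical end-to-end sketch of how this class is typically used (file names are
# illustrative; the .asf file holds the skeleton, the .amc file the per-frame channels):
# skel = acclaim_skeleton('subject.asf')
# channels = skel.load_channels('subject.amc')
# xyz_frame0 = skel.to_xyz(channels[0, :])  # (num_vertices, 3) marker positions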
# Utilities for loading in x,y,z data.
def load_text_data(dataset, directory, centre=True):
"""Load in a data set of marker points from the Ohio State University C3D motion capture files (http://accad.osu.edu/research/mocap/mocap_data.htm)."""
points, point_names = parse_text(os.path.join(directory, dataset + '.txt'))[0:2]
# Remove markers where there is a NaN
    present_index = [i for i in range(points[0].shape[1]) if not (np.any(np.isnan(points[0][:, i])) or np.any(np.isnan(points[1][:, i])) or np.any(np.isnan(points[2][:, i])))]
point_names = point_names[present_index]
for i in range(3):
points[i] = points[i][:, present_index]
if centre:
points[i] = (points[i].T - points[i].mean(axis=1)).T
# Concatanate the X, Y and Z markers together
Y = np.concatenate((points[0], points[1], points[2]), axis=1)
Y = Y/400.
connect = read_connections(os.path.join(directory, 'connections.txt'), point_names)
return Y, connect
def parse_text(file_name):
"""Parse data from Ohio State University text mocap files (http://accad.osu.edu/research/mocap/mocap_data.htm)."""
# Read the header
fid = open(file_name, 'r')
point_names = np.array(fid.readline().split())[2:-1:3]
fid.close()
for i in range(len(point_names)):
point_names[i] = point_names[i][0:-2]
# Read the matrix data
S = np.loadtxt(file_name, skiprows=1)
    field = S[:, 0].astype(np.uint)
times = S[:, 1]
S = S[:, 2:]
# Set the -9999.99 markers to be not present
S[S==-9999.99] = np.NaN
# Store x, y and z in different arrays
points = []
points.append(S[:, 0:-1:3])
points.append(S[:, 1:-1:3])
points.append(S[:, 2:-1:3])
return points, point_names, times
def read_connections(file_name, point_names):
"""Read a file detailing which markers should be connected to which for motion capture data."""
connections = []
fid = open(file_name, 'r')
line=fid.readline()
while(line):
connections.append(np.array(line.split(',')))
connections[-1][0] = connections[-1][0].strip()
connections[-1][1] = connections[-1][1].strip()
line = fid.readline()
connect = np.zeros((len(point_names), len(point_names)),dtype=bool)
for i in range(len(point_names)):
for j in range(len(point_names)):
for k in range(len(connections)):
if connections[k][0] == point_names[i] and connections[k][1] == point_names[j]:
connect[i,j]=True
connect[j,i]=True
break
return connect
skel = acclaim_skeleton()
| StarcoderdataPython |
1820069 | import pygame
import neat
import time
import os
import random
pygame.font.init()
GEN = 0
WIN_WIDTH = 500
WIN_HEIGHT = 800
# load images and makes them 2 times bigger using scale2x
BIRD_IMGS = [pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bird1.png'))),
pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bird2.png'))),
pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bird3.png')))]
PIPE_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'pipe.png')))
BASE_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'base.png')))
BG_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bg.png')))
STAT_FONT = pygame.font.SysFont('comicsans', 50)
class Bird:
IMGS = BIRD_IMGS # So that it can be accessed using self.
MAX_ROTATION = 25 # How much bird will tilt
ROT_VEL = 20 # How much we rotate on each frame
ANIMATION_TIME = 5 # How long we show each animation
def __init__(self, x, y):
self.x = x
self.y = y
self.tilt = 0
self.tick_count = 0
self.vel = 0
self.height = self.y
self.img_count = 0
self.img = self.IMGS[0]
def jump(self):
self.vel = -10.5
self.tick_count = 0
self.height = self.y
def move(self):
self.tick_count += 1
# d is being used to calculate the displacement in each jump depending on how long we are moving for
d = self.vel*self.tick_count + 1.5*self.tick_count**2
if d >= 16:
# if we are moving down more than 16
d = 16
if d < 0:
d -= 2
self.y += d
if d < 0 or self.y < self.height + 50:
if self.tilt < self.MAX_ROTATION:
self.tilt = self.MAX_ROTATION
else:
if self.tilt > -90:
self.tilt -= self.ROT_VEL
def draw(self, win):
self.img_count += 1
if self.img_count < self.ANIMATION_TIME:
self.img = self.IMGS[0]
elif self.img_count < self.ANIMATION_TIME*2:
self.img = self.IMGS[1]
elif self.img_count < self.ANIMATION_TIME*3:
self.img = self.IMGS[2]
elif self.img_count < self.ANIMATION_TIME*4:
self.img = self.IMGS[1]
elif self.img_count == self.ANIMATION_TIME*4 + 1:
self.img = self.IMGS[0]
self.img_count = 0
if self.tilt <= -80:
self.img = self.IMGS[1]
self.img_count = self.ANIMATION_TIME*2
rotated_image = pygame.transform.rotate(self.img, self.tilt)
new_rect = rotated_image.get_rect(center=self.img.get_rect(topleft=(self.x, self.y)).center)
win.blit(rotated_image, new_rect.topleft)
def get_mask(self):
return pygame.mask.from_surface(self.img)
class Pipe:
GAP = 200
VEL = 5
def __init__(self, x):
self.x = x
self.height = 0
self.top = 0
self.bottom = 0
self.PIPE_TOP = pygame.transform.flip(PIPE_IMG, False, True)
self.PIPE_BOTTOM = PIPE_IMG
self.passed = False
self.set_height()
def set_height(self):
self.height = random.randrange(50, 450)
self.top = self.height - self.PIPE_TOP.get_height()
self.bottom = self.height + self.GAP
def move(self):
self.x -= self.VEL
def draw(self, win):
win.blit(self.PIPE_TOP, (self.x, self.top))
win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))
def collide(self, bird):
bird_mask = bird.get_mask()
top_mask = pygame.mask.from_surface(self.PIPE_TOP)
bot_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
top_offset = (self.x - bird.x, self.top - round(bird.y))
bot_offset = (self.x - bird.x, self.bottom - round(bird.y))
t_point = bird_mask.overlap(top_mask, top_offset)
b_point = bird_mask.overlap(bot_mask, bot_offset)
if t_point or b_point:
return True
return False
class Base:
# We use 2 images which keep replacing one another
VEL = 5
WIDTH = BASE_IMG.get_width()
IMG = BASE_IMG
def __init__(self, y):
self.y = y
self.x1 = 0
self.x2 = self.WIDTH
def move(self):
self.x1 -= self.VEL
self.x2 -= self.VEL
if self.x1 + self.WIDTH < 0:
self.x1 = self.x2 + self.WIDTH
if self.x2 + self.WIDTH < 0:
self.x2 = self.x1 + self.WIDTH
def draw(self, win):
win.blit(self.IMG, (self.x1, self.y))
win.blit(self.IMG, (self.x2, self.y))
def draw_window(win, birds, pipes, base, score, gen):
win.blit(BG_IMG, (0, 0))
for pipe in pipes:
pipe.draw(win)
text = STAT_FONT.render('Score: ' + str(score), 1, (255, 255, 255))
win.blit(text, (WIN_WIDTH - 10 - text.get_width(), 10))
text = STAT_FONT.render('Gen: ' + str(gen), 1, (255, 255, 255))
win.blit(text, (10, 10))
base.draw(win)
for bird in birds:
bird.draw(win)
pygame.display.update()
# Fitness Function
def main(genomes, config):
global GEN
GEN += 1
neural_nets = [] # Neural Network for each bird
ge = [] # Genome tracker
birds = []
# genomes is a list of tuples that has (id, genome obj)
for _, g in genomes:
net = neat.nn.FeedForwardNetwork.create(g, config)
neural_nets.append(net)
birds.append(Bird(230, 350))
g.fitness = 0
ge.append(g)
base = Base(730)
pipes = [Pipe(600)]
win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
clock = pygame.time.Clock()
score = 0
run = True
while run:
clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.quit()
quit()
# Find out which pipe is coming next
pipe_index = 0
if len(birds) > 0:
if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():
pipe_index = 1
else:
# If all birds of current generation are dead then restart game
break
for x, bird in enumerate(birds):
bird.move()
ge[x].fitness += 0.1 # Encourage bird to keep going
output = neural_nets[x].activate((bird.y, abs(bird.y - pipes[pipe_index].height), abs(bird.y - pipes[pipe_index].bottom)))
# Decide whether to jump or not with the output neuron
if output[0] > 0.5:
bird.jump()
add_pipe = False
to_remove = []
for pipe in pipes:
for x, bird in enumerate(birds):
if pipe.collide(bird):
# If a bird hits a pipe then reduce fitness
# ge[x].fitness -= 1
birds.pop(x)
neural_nets.pop(x)
ge.pop(x)
if not pipe.passed and pipe.x < bird.x:
pipe.passed = True
add_pipe = True
if pipe.x + pipe.PIPE_TOP.get_width() < 0:
to_remove.append(pipe)
pipe.move()
if add_pipe:
score += 1
for g in ge:
# If bird passes through pipe then increase fitness score by 5
g.fitness += 5
pipes.append(Pipe(600))
for pipe in to_remove:
pipes.remove(pipe)
for x, bird in enumerate(birds):
if bird.y + bird.img.get_height() >= 730 or bird.y < 0:
birds.pop(x)
neural_nets.pop(x)
ge.pop(x)
base.move()
draw_window(win, birds, pipes, base, score, GEN)
'''
Neural Network Setup:
- Inputs: Bird Y, Top Pipe, Bottom Pipe
- Outputs: Jump or not
- Activation Function: tanh
- Population Size (To start with): 20
- Fitness Function: How far the bird goes
- Max generations: 50
'''
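# A hedged sketch of the matching config.txt excerpt in neat-python's format; only the
# values implied by the comment above are shown, and the threshold is a placeholder.
# Everything else a real config file needs (mutation rates, compatibility coefficients,
# species and stagnation settings, ...) is omitted here:
# [NEAT]
# fitness_criterion     = max
# fitness_threshold     = 100
# pop_size              = 20
# reset_on_extinction   = False
#
# [DefaultGenome]
# num_inputs            = 3
# num_hidden            = 0
# num_outputs           = 1
# activation_default    = tanh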
def run(config_path):
config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
# Create a population
pop = neat.Population(config)
pop.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
# We run the fitness function for 50 generations
winner = pop.run(main, 50)
if __name__ == '__main__':
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config.txt')
run(config_path)
| StarcoderdataPython |
1621189 | <reponame>timsque/deep-histopath
# ------------------------------------------------------------------------
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
# To get around renderer issue on macOS going from Matplotlib image to NumPy image.
import matplotlib
matplotlib.use('Agg')
import colorsys
import math
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
import os
from PIL import Image, ImageDraw, ImageFont
from enum import Enum
from deephistopath.wsi import util
from deephistopath.wsi import filter
from deephistopath.wsi import slide
from deephistopath.wsi.util import Time
TISSUE_HIGH_THRESH = 80
TISSUE_LOW_THRESH = 10
ROW_TILE_SIZE = 1024
COL_TILE_SIZE = 1024
NUM_TOP_TILES = 1000
DISPLAY_TILE_SUMMARY_LABELS = False
TILE_LABEL_TEXT_SIZE = 10
LABEL_ALL_TILES_IN_TOP_TILE_SUMMARY = False
BORDER_ALL_TILES_IN_TOP_TILE_SUMMARY = False
TILE_BORDER_SIZE = 2 # The size of the colored rectangular border around summary tiles.
HIGH_COLOR = (0, 255, 0)
MEDIUM_COLOR = (255, 255, 0)
LOW_COLOR = (255, 165, 0)
NONE_COLOR = (255, 0, 0)
FADED_THRESH_COLOR = (128, 255, 128)
FADED_MEDIUM_COLOR = (255, 255, 128)
FADED_LOW_COLOR = (255, 210, 128)
FADED_NONE_COLOR = (255, 128, 128)
FONT_PATH = "/usr/share/fonts/truetype/liberation/LiberationMono-Regular.ttf" #"/Library/Fonts/Arial Bold.ttf"
SUMMARY_TITLE_FONT_PATH = "/usr/share/fonts/truetype/liberation/LiberationMono-Regular.ttf" #"/Library/Fonts/Courier New Bold.ttf"
SUMMARY_TITLE_TEXT_COLOR = (0, 0, 0)
SUMMARY_TITLE_TEXT_SIZE = 24
SUMMARY_TILE_TEXT_COLOR = (255, 255, 255)
TILE_TEXT_COLOR = (0, 0, 0)
TILE_TEXT_SIZE = 36
TILE_TEXT_BACKGROUND_COLOR = (255, 255, 255)
TILE_TEXT_W_BORDER = 5
TILE_TEXT_H_BORDER = 4
HSV_PURPLE = 270
HSV_PINK = 330
def get_num_tiles(rows, cols, row_tile_size, col_tile_size):
"""
Obtain the number of vertical and horizontal tiles that an image can be divided into given a row tile size and
a column tile size.
Args:
rows: Number of rows.
cols: Number of columns.
row_tile_size: Number of pixels in a tile row.
col_tile_size: Number of pixels in a tile column.
Returns:
Tuple consisting of the number of vertical tiles and the number of horizontal tiles that the image can be divided
into given the row tile size and the column tile size.
"""
num_row_tiles = math.ceil(rows / row_tile_size)
num_col_tiles = math.ceil(cols / col_tile_size)
return num_row_tiles, num_col_tiles
def get_tile_indices(rows, cols, row_tile_size, col_tile_size):
"""
Obtain a list of tile coordinates (starting row, ending row, starting column, ending column, row number, column number).
Args:
rows: Number of rows.
cols: Number of columns.
row_tile_size: Number of pixels in a tile row.
col_tile_size: Number of pixels in a tile column.
Returns:
List of tuples representing tile coordinates consisting of starting row, ending row,
starting column, ending column, row number, column number.
"""
indices = list()
num_row_tiles, num_col_tiles = get_num_tiles(rows, cols, row_tile_size, col_tile_size)
for r in range(0, num_row_tiles):
start_r = r * row_tile_size
end_r = ((r + 1) * row_tile_size) if (r < num_row_tiles - 1) else rows
for c in range(0, num_col_tiles):
start_c = c * col_tile_size
end_c = ((c + 1) * col_tile_size) if (c < num_col_tiles - 1) else cols
indices.append((start_r, end_r, start_c, end_c, r + 1, c + 1))
return indices
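# Worked example, traced from the two helpers above: for a 5 x 7 image with 3 x 3 tiles,
# get_num_tiles gives (2, 3) and get_tile_indices returns
# [(0, 3, 0, 3, 1, 1), (0, 3, 3, 6, 1, 2), (0, 3, 6, 7, 1, 3),
#  (3, 5, 0, 3, 2, 1), (3, 5, 3, 6, 2, 2), (3, 5, 6, 7, 2, 3)],
# i.e. the last tile in each row/column is simply truncated at the image border.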
def create_summary_pil_img(np_img, title_area_height, row_tile_size, col_tile_size, num_row_tiles, num_col_tiles):
"""
Create a PIL summary image including top title area and right side and bottom padding.
Args:
np_img: Image as a NumPy array.
title_area_height: Height of the title area at the top of the summary image.
row_tile_size: The tile size in rows.
col_tile_size: The tile size in columns.
num_row_tiles: The number of row tiles.
num_col_tiles: The number of column tiles.
Returns:
Summary image as a PIL image. This image contains the image data specified by the np_img input and also has
potentially a top title area and right side and bottom padding.
"""
r = row_tile_size * num_row_tiles + title_area_height
c = col_tile_size * num_col_tiles
summary_img = np.zeros([r, c, np_img.shape[2]], dtype=np.uint8)
# add gray edges so that tile text does not get cut off
summary_img.fill(120)
# color title area white
summary_img[0:title_area_height, 0:summary_img.shape[1]].fill(255)
summary_img[title_area_height:np_img.shape[0] + title_area_height, 0:np_img.shape[1]] = np_img
summary = util.np_to_pil(summary_img)
return summary
def generate_tile_summaries(tile_sum, np_img, display=True, save_summary=False):
"""
Generate summary images/thumbnails showing a 'heatmap' representation of the tissue segmentation of all tiles.
Args:
tile_sum: TileSummary object.
np_img: Image as a NumPy array.
display: If True, display tile summary to screen.
save_summary: If True, save tile summary images.
"""
z = 300 # height of area at top of summary slide
slide_num = tile_sum.slide_num
rows = tile_sum.scaled_h
cols = tile_sum.scaled_w
row_tile_size = tile_sum.scaled_tile_h
col_tile_size = tile_sum.scaled_tile_w
num_row_tiles, num_col_tiles = get_num_tiles(rows, cols, row_tile_size, col_tile_size)
summary = create_summary_pil_img(np_img, z, row_tile_size, col_tile_size, num_row_tiles, num_col_tiles)
draw = ImageDraw.Draw(summary)
original_img_path = slide.get_training_image_path(slide_num)
np_orig = slide.open_image_np(original_img_path)
summary_orig = create_summary_pil_img(np_orig, z, row_tile_size, col_tile_size, num_row_tiles, num_col_tiles)
draw_orig = ImageDraw.Draw(summary_orig)
for t in tile_sum.tiles:
border_color = tile_border_color(t.tissue_percentage)
tile_border(draw, t.r_s + z, t.r_e + z, t.c_s, t.c_e, border_color)
tile_border(draw_orig, t.r_s + z, t.r_e + z, t.c_s, t.c_e, border_color)
summary_txt = summary_title(tile_sum) + "\n" + summary_stats(tile_sum)
summary_font = ImageFont.truetype(SUMMARY_TITLE_FONT_PATH, size=SUMMARY_TITLE_TEXT_SIZE)
draw.text((5, 5), summary_txt, SUMMARY_TITLE_TEXT_COLOR, font=summary_font)
draw_orig.text((5, 5), summary_txt, SUMMARY_TITLE_TEXT_COLOR, font=summary_font)
if DISPLAY_TILE_SUMMARY_LABELS:
count = 0
for t in tile_sum.tiles:
count += 1
label = "R%d\nC%d" % (t.r, t.c)
font = ImageFont.truetype(FONT_PATH, size=TILE_LABEL_TEXT_SIZE)
# drop shadow behind text
draw.text(((t.c_s + 3), (t.r_s + 3 + z)), label, (0, 0, 0), font=font)
draw_orig.text(((t.c_s + 3), (t.r_s + 3 + z)), label, (0, 0, 0), font=font)
draw.text(((t.c_s + 2), (t.r_s + 2 + z)), label, SUMMARY_TILE_TEXT_COLOR, font=font)
draw_orig.text(((t.c_s + 2), (t.r_s + 2 + z)), label, SUMMARY_TILE_TEXT_COLOR, font=font)
if display:
summary.show()
summary_orig.show()
if save_summary:
save_tile_summary_image(summary, slide_num)
save_tile_summary_on_original_image(summary_orig, slide_num)
def generate_top_tile_summaries(tile_sum, np_img, display=True, save_summary=False, show_top_stats=True,
label_all_tiles=LABEL_ALL_TILES_IN_TOP_TILE_SUMMARY,
border_all_tiles=BORDER_ALL_TILES_IN_TOP_TILE_SUMMARY):
"""
Generate summary images/thumbnails showing the top tiles ranked by score.
Args:
tile_sum: TileSummary object.
np_img: Image as a NumPy array.
display: If True, display top tiles to screen.
save_summary: If True, save top tiles images.
show_top_stats: If True, append top tile score stats to image.
label_all_tiles: If True, label all tiles. If False, label only top tiles.
"""
z = 300 # height of area at top of summary slide
slide_num = tile_sum.slide_num
rows = tile_sum.scaled_h
cols = tile_sum.scaled_w
row_tile_size = tile_sum.scaled_tile_h
col_tile_size = tile_sum.scaled_tile_w
num_row_tiles, num_col_tiles = get_num_tiles(rows, cols, row_tile_size, col_tile_size)
summary = create_summary_pil_img(np_img, z, row_tile_size, col_tile_size, num_row_tiles, num_col_tiles)
draw = ImageDraw.Draw(summary)
original_img_path = slide.get_training_image_path(slide_num)
np_orig = slide.open_image_np(original_img_path)
summary_orig = create_summary_pil_img(np_orig, z, row_tile_size, col_tile_size, num_row_tiles, num_col_tiles)
draw_orig = ImageDraw.Draw(summary_orig)
if border_all_tiles:
for t in tile_sum.tiles:
border_color = faded_tile_border_color(t.tissue_percentage)
tile_border(draw, t.r_s + z, t.r_e + z, t.c_s, t.c_e, border_color, border_size=1)
tile_border(draw_orig, t.r_s + z, t.r_e + z, t.c_s, t.c_e, border_color, border_size=1)
tbs = TILE_BORDER_SIZE
top_tiles = tile_sum.top_tiles()
for t in top_tiles:
border_color = tile_border_color(t.tissue_percentage)
tile_border(draw, t.r_s + z, t.r_e + z, t.c_s, t.c_e, border_color)
tile_border(draw_orig, t.r_s + z, t.r_e + z, t.c_s, t.c_e, border_color)
if border_all_tiles:
tile_border(draw, t.r_s + z + tbs, t.r_e + z - tbs, t.c_s + tbs, t.c_e - tbs, (0, 0, 0))
tile_border(draw_orig, t.r_s + z + tbs, t.r_e + z - tbs, t.c_s + tbs, t.c_e - tbs, (0, 0, 0))
summary_title = "Slide %03d Top Tile Summary:" % slide_num
summary_txt = summary_title + "\n" + summary_stats(tile_sum)
summary_font = ImageFont.truetype(SUMMARY_TITLE_FONT_PATH, size=SUMMARY_TITLE_TEXT_SIZE)
draw.text((5, 5), summary_txt, SUMMARY_TITLE_TEXT_COLOR, font=summary_font)
draw_orig.text((5, 5), summary_txt, SUMMARY_TITLE_TEXT_COLOR, font=summary_font)
tiles_to_label = tile_sum.tiles if label_all_tiles else top_tiles
h_offset = TILE_BORDER_SIZE + 2
v_offset = TILE_BORDER_SIZE
h_ds_offset = TILE_BORDER_SIZE + 3
v_ds_offset = TILE_BORDER_SIZE + 1
for t in tiles_to_label:
label = "R%d\nC%d" % (t.r, t.c)
font = ImageFont.truetype(FONT_PATH, size=TILE_LABEL_TEXT_SIZE)
# drop shadow behind text
draw.text(((t.c_s + h_ds_offset), (t.r_s + v_ds_offset + z)), label, (0, 0, 0), font=font)
draw_orig.text(((t.c_s + h_ds_offset), (t.r_s + v_ds_offset + z)), label, (0, 0, 0), font=font)
draw.text(((t.c_s + h_offset), (t.r_s + v_offset + z)), label, SUMMARY_TILE_TEXT_COLOR, font=font)
draw_orig.text(((t.c_s + h_offset), (t.r_s + v_offset + z)), label, SUMMARY_TILE_TEXT_COLOR, font=font)
if show_top_stats:
summary = add_tile_stats_to_top_tile_summary(summary, top_tiles, z)
summary_orig = add_tile_stats_to_top_tile_summary(summary_orig, top_tiles, z)
if display:
summary.show()
summary_orig.show()
if save_summary:
save_top_tiles_image(summary, slide_num)
save_top_tiles_on_original_image(summary_orig, slide_num)
def add_tile_stats_to_top_tile_summary(pil_img, tiles, z):
np_sum = util.pil_to_np_rgb(pil_img)
sum_r, sum_c, sum_ch = np_sum.shape
np_stats = np_tile_stat_img(tiles)
st_r, st_c, _ = np_stats.shape
combo_c = sum_c + st_c
combo_r = max(sum_r, st_r + z)
combo = np.zeros([combo_r, combo_c, sum_ch], dtype=np.uint8)
combo.fill(255)
combo[0:sum_r, 0:sum_c] = np_sum
combo[z:st_r + z, sum_c:sum_c + st_c] = np_stats
result = util.np_to_pil(combo)
return result
def np_tile_stat_img(tiles):
"""
Generate tile scoring statistics for a list of tiles and return the result as a NumPy array image.
Args:
tiles: List of tiles (such as top tiles)
Returns:
Tile scoring statistics converted into an NumPy array image.
"""
tt = sorted(tiles, key=lambda t: (t.r, t.c), reverse=False)
tile_stats = "Tile Score Statistics:\n"
count = 0
for t in tt:
if count > 0:
tile_stats += "\n"
count += 1
tup = (t.r, t.c, t.rank, t.tissue_percentage, t.color_factor, t.s_and_v_factor, t.quantity_factor, t.score)
tile_stats += "R%03d C%03d #%003d TP:%6.2f%% CF:%4.0f SVF:%4.2f QF:%4.2f S:%0.4f" % tup
np_stats = np_text(tile_stats, font_path=SUMMARY_TITLE_FONT_PATH, font_size=14)
return np_stats
def tile_border_color(tissue_percentage):
"""
Obtain the corresponding tile border color for a particular tile tissue percentage.
Args:
tissue_percentage: The tile tissue percentage
Returns:
The tile border color corresponding to the tile tissue percentage.
"""
if tissue_percentage >= TISSUE_HIGH_THRESH:
border_color = HIGH_COLOR
elif (tissue_percentage >= TISSUE_LOW_THRESH) and (tissue_percentage < TISSUE_HIGH_THRESH):
border_color = MEDIUM_COLOR
elif (tissue_percentage > 0) and (tissue_percentage < TISSUE_LOW_THRESH):
border_color = LOW_COLOR
else:
border_color = NONE_COLOR
return border_color
def faded_tile_border_color(tissue_percentage):
"""
Obtain the corresponding faded tile border color for a particular tile tissue percentage.
Args:
tissue_percentage: The tile tissue percentage
Returns:
The faded tile border color corresponding to the tile tissue percentage.
"""
if tissue_percentage >= TISSUE_HIGH_THRESH:
border_color = FADED_THRESH_COLOR
elif (tissue_percentage >= TISSUE_LOW_THRESH) and (tissue_percentage < TISSUE_HIGH_THRESH):
border_color = FADED_MEDIUM_COLOR
elif (tissue_percentage > 0) and (tissue_percentage < TISSUE_LOW_THRESH):
border_color = FADED_LOW_COLOR
else:
border_color = FADED_NONE_COLOR
return border_color
def summary_title(tile_summary):
"""
Obtain tile summary title.
Args:
tile_summary: TileSummary object.
Returns:
The tile summary title.
"""
return "Slide %03d Tile Summary:" % tile_summary.slide_num
def summary_stats(tile_summary):
"""
Obtain various stats about the slide tiles.
Args:
tile_summary: TileSummary object.
Returns:
Various stats about the slide tiles as a string.
"""
return "Original Dimensions: %dx%d\n" % (tile_summary.orig_w, tile_summary.orig_h) + \
"Original Tile Size: %dx%d\n" % (tile_summary.orig_tile_w, tile_summary.orig_tile_h) + \
"Scale Factor: 1/%dx\n" % tile_summary.scale_factor + \
"Scaled Dimensions: %dx%d\n" % (tile_summary.scaled_w, tile_summary.scaled_h) + \
"Scaled Tile Size: %dx%d\n" % (tile_summary.scaled_tile_w, tile_summary.scaled_tile_w) + \
"Total Mask: %3.2f%%, Total Tissue: %3.2f%%\n" % (
tile_summary.mask_percentage(), tile_summary.tissue_percentage) + \
"Tiles: %dx%d = %d\n" % (tile_summary.num_col_tiles, tile_summary.num_row_tiles, tile_summary.count) + \
" %5d (%5.2f%%) tiles >=%d%% tissue\n" % (
tile_summary.high, tile_summary.high / tile_summary.count * 100, TISSUE_HIGH_THRESH) + \
" %5d (%5.2f%%) tiles >=%d%% and <%d%% tissue\n" % (
tile_summary.medium, tile_summary.medium / tile_summary.count * 100, TISSUE_LOW_THRESH,
TISSUE_HIGH_THRESH) + \
" %5d (%5.2f%%) tiles >0%% and <%d%% tissue\n" % (
tile_summary.low, tile_summary.low / tile_summary.count * 100, TISSUE_LOW_THRESH) + \
" %5d (%5.2f%%) tiles =0%% tissue" % (tile_summary.none, tile_summary.none / tile_summary.count * 100)
def tile_border(draw, r_s, r_e, c_s, c_e, color, border_size=TILE_BORDER_SIZE):
"""
Draw a border around a tile with width TILE_BORDER_SIZE.
Args:
draw: Draw object for drawing on PIL image.
r_s: Row starting pixel.
r_e: Row ending pixel.
c_s: Column starting pixel.
c_e: Column ending pixel.
color: Color of the border.
border_size: Width of tile border in pixels.
"""
for x in range(0, border_size):
draw.rectangle([(c_s + x, r_s + x), (c_e - 1 - x, r_e - 1 - x)], outline=color)
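# Note: the border is drawn as border_size concentric one-pixel rectangles, which yields a
# solid outline of the requested thickness without filling the tile interior.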
def save_tile_summary_image(pil_img, slide_num):
"""
Save a tile summary image and thumbnail to the file system.
Args:
pil_img: Image as a PIL Image.
slide_num: The slide number.
"""
t = Time()
filepath = slide.get_tile_summary_image_path(slide_num)
pil_img.save(filepath)
print("%-20s | Time: %-14s Name: %s" % ("Save Tile Sum", str(t.elapsed()), filepath))
t = Time()
thumbnail_filepath = slide.get_tile_summary_thumbnail_path(slide_num)
slide.save_thumbnail(pil_img, slide.THUMBNAIL_SIZE, thumbnail_filepath)
print("%-20s | Time: %-14s Name: %s" % ("Save Tile Sum Thumb", str(t.elapsed()), thumbnail_filepath))
def save_top_tiles_image(pil_img, slide_num):
"""
Save a top tiles image and thumbnail to the file system.
Args:
pil_img: Image as a PIL Image.
slide_num: The slide number.
"""
t = Time()
filepath = slide.get_top_tiles_image_path(slide_num)
pil_img.save(filepath)
print("%-20s | Time: %-14s Name: %s" % ("Save Top Tiles Image", str(t.elapsed()), filepath))
t = Time()
thumbnail_filepath = slide.get_top_tiles_thumbnail_path(slide_num)
slide.save_thumbnail(pil_img, slide.THUMBNAIL_SIZE, thumbnail_filepath)
print("%-20s | Time: %-14s Name: %s" % ("Save Top Tiles Thumb", str(t.elapsed()), thumbnail_filepath))
def save_tile_summary_on_original_image(pil_img, slide_num):
"""
Save a tile summary on original image and thumbnail to the file system.
Args:
pil_img: Image as a PIL Image.
slide_num: The slide number.
"""
t = Time()
filepath = slide.get_tile_summary_on_original_image_path(slide_num)
pil_img.save(filepath)
print("%-20s | Time: %-14s Name: %s" % ("Save Tile Sum Orig", str(t.elapsed()), filepath))
t = Time()
thumbnail_filepath = slide.get_tile_summary_on_original_thumbnail_path(slide_num)
slide.save_thumbnail(pil_img, slide.THUMBNAIL_SIZE, thumbnail_filepath)
print(
"%-20s | Time: %-14s Name: %s" % ("Save Tile Sum Orig T", str(t.elapsed()), thumbnail_filepath))
def save_top_tiles_on_original_image(pil_img, slide_num):
"""
Save a top tiles on original image and thumbnail to the file system.
Args:
pil_img: Image as a PIL Image.
slide_num: The slide number.
"""
t = Time()
filepath = slide.get_top_tiles_on_original_image_path(slide_num)
pil_img.save(filepath)
print("%-20s | Time: %-14s Name: %s" % ("Save Top Orig", str(t.elapsed()), filepath))
t = Time()
thumbnail_filepath = slide.get_top_tiles_on_original_thumbnail_path(slide_num)
slide.save_thumbnail(pil_img, slide.THUMBNAIL_SIZE, thumbnail_filepath)
print(
"%-20s | Time: %-14s Name: %s" % ("Save Top Orig Thumb", str(t.elapsed()), thumbnail_filepath))
def summary_and_tiles(slide_num, display=True, save_summary=False, save_data=True, save_top_tiles=True):
"""
Generate tile summary and top tiles for slide.
Args:
slide_num: The slide number.
display: If True, display tile summary to screen.
save_summary: If True, save tile summary images.
save_data: If True, save tile data to csv file.
save_top_tiles: If True, save top tiles to files.
"""
img_path = slide.get_filter_image_result(slide_num)
np_img = slide.open_image_np(img_path)
tile_sum = score_tiles(slide_num, np_img)
if save_data:
save_tile_data(tile_sum)
generate_tile_summaries(tile_sum, np_img, display=display, save_summary=save_summary)
generate_top_tile_summaries(tile_sum, np_img, display=display, save_summary=save_summary)
if save_top_tiles:
#modified below to save more tiles above low tissue threshold
for tile in tile_sum.top_tiles():
if tile.tissue_percentage > TISSUE_LOW_THRESH:
tile.save_tile()
return tile_sum
def save_tile_data(tile_summary):
"""
Save tile data to csv file.
  Args:
    tile_summary: TileSummary object.
"""
time = Time()
csv = summary_title(tile_summary) + "\n" + summary_stats(tile_summary)
csv += "\n\n\nTile Num,Row,Column,Tissue %,Tissue Quantity,Col Start,Row Start,Col End,Row End,Col Size,Row Size," + \
"Original Col Start,Original Row Start,Original Col End,Original Row End,Original Col Size,Original Row Size," + \
"Color Factor,S and V Factor,Quantity Factor,Score\n"
for t in tile_summary.tiles:
line = "%d,%d,%d,%4.2f,%s,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%4.0f,%4.2f,%4.2f,%0.4f\n" % (
t.tile_num, t.r, t.c, t.tissue_percentage, t.tissue_quantity().name, t.c_s, t.r_s, t.c_e, t.r_e, t.c_e - t.c_s,
t.r_e - t.r_s, t.o_c_s, t.o_r_s, t.o_c_e, t.o_r_e, t.o_c_e - t.o_c_s, t.o_r_e - t.o_r_s, t.color_factor,
t.s_and_v_factor, t.quantity_factor, t.score)
csv += line
data_path = slide.get_tile_data_path(tile_summary.slide_num)
csv_file = open(data_path, "w")
csv_file.write(csv)
csv_file.close()
print("%-20s | Time: %-14s Name: %s" % ("Save Tile Data", str(time.elapsed()), data_path))
def tile_to_pil_tile(tile):
"""
Convert tile information into the corresponding tile as a PIL image read from the whole-slide image file.
Args:
tile: Tile object.
Return:
Tile as a PIL image.
"""
t = tile
slide_filepath = slide.get_training_slide_path(t.slide_num)
s = slide.open_slide(slide_filepath)
x, y = t.o_c_s, t.o_r_s
w, h = t.o_c_e - t.o_c_s, t.o_r_e - t.o_r_s
tile_region = s.read_region((x, y), 0, (w, h))
# RGBA to RGB
pil_img = tile_region.convert("RGB")
return pil_img
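# Note: read_region is called at level 0 (full resolution), so the returned tile has the
# original o_c_e - o_c_s by o_r_e - o_r_s pixel dimensions from the whole-slide image.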
def tile_to_np_tile(tile):
"""
Convert tile information into the corresponding tile as a NumPy image read from the whole-slide image file.
Args:
tile: Tile object.
Return:
Tile as a NumPy image.
"""
pil_img = tile_to_pil_tile(tile)
np_img = util.pil_to_np_rgb(pil_img)
return np_img
def save_display_tile(tile, save=True, display=False):
"""
Save and/or display a tile image.
Args:
tile: Tile object.
save: If True, save tile image.
    display: If True, display tile image.
"""
tile_pil_img = tile_to_pil_tile(tile)
if save:
t = Time()
img_path = slide.get_tile_image_path(tile)
dir = os.path.dirname(img_path)
if not os.path.exists(dir):
os.makedirs(dir)
tile_pil_img.save(img_path)
print("%-20s | Time: %-14s Name: %s" % ("Save Tile", str(t.elapsed()), img_path))
if display:
tile_pil_img.show()
def score_tiles(slide_num, np_img=None, dimensions=None, small_tile_in_tile=False):
"""
Score all tiles for a slide and return the results in a TileSummary object.
Args:
slide_num: The slide number.
np_img: Optional image as a NumPy array.
dimensions: Optional tuple consisting of (original width, original height, new width, new height). Used for dynamic
tile retrieval.
small_tile_in_tile: If True, include the small NumPy image in the Tile objects.
Returns:
TileSummary object which includes a list of Tile objects containing information about each tile.
"""
if dimensions is None:
img_path = slide.get_filter_image_result(slide_num)
o_w, o_h, w, h = slide.parse_dimensions_from_image_filename(img_path)
else:
o_w, o_h, w, h = dimensions
if np_img is None:
np_img = slide.open_image_np(img_path)
row_tile_size = round(ROW_TILE_SIZE / slide.SCALE_FACTOR) # use round?
col_tile_size = round(COL_TILE_SIZE / slide.SCALE_FACTOR) # use round?
num_row_tiles, num_col_tiles = get_num_tiles(h, w, row_tile_size, col_tile_size)
tile_sum = TileSummary(slide_num=slide_num,
orig_w=o_w,
orig_h=o_h,
orig_tile_w=COL_TILE_SIZE,
orig_tile_h=ROW_TILE_SIZE,
scaled_w=w,
scaled_h=h,
scaled_tile_w=col_tile_size,
scaled_tile_h=row_tile_size,
tissue_percentage=filter.tissue_percent(np_img),
num_col_tiles=num_col_tiles,
num_row_tiles=num_row_tiles)
count = 0
high = 0
medium = 0
low = 0
none = 0
tile_indices = get_tile_indices(h, w, row_tile_size, col_tile_size)
for t in tile_indices:
count += 1 # tile_num
r_s, r_e, c_s, c_e, r, c = t
np_tile = np_img[r_s:r_e, c_s:c_e]
t_p = filter.tissue_percent(np_tile)
amount = tissue_quantity(t_p)
if amount == TissueQuantity.HIGH:
high += 1
elif amount == TissueQuantity.MEDIUM:
medium += 1
elif amount == TissueQuantity.LOW:
low += 1
elif amount == TissueQuantity.NONE:
none += 1
o_c_s, o_r_s = slide.small_to_large_mapping((c_s, r_s), (o_w, o_h))
o_c_e, o_r_e = slide.small_to_large_mapping((c_e, r_e), (o_w, o_h))
# pixel adjustment in case tile dimension too large (for example, 1025 instead of 1024)
if (o_c_e - o_c_s) > COL_TILE_SIZE:
o_c_e -= 1
if (o_r_e - o_r_s) > ROW_TILE_SIZE:
o_r_e -= 1
score, color_factor, s_and_v_factor, quantity_factor = score_tile(np_tile, t_p, slide_num, r, c)
np_scaled_tile = np_tile if small_tile_in_tile else None
tile = Tile(tile_sum, slide_num, np_scaled_tile, count, r, c, r_s, r_e, c_s, c_e, o_r_s, o_r_e, o_c_s,
o_c_e, t_p, color_factor, s_and_v_factor, quantity_factor, score)
tile_sum.tiles.append(tile)
tile_sum.count = count
tile_sum.high = high
tile_sum.medium = medium
tile_sum.low = low
tile_sum.none = none
tiles_by_score = tile_sum.tiles_by_score()
rank = 0
for t in tiles_by_score:
rank += 1
t.rank = rank
return tile_sum
def score_tile(np_tile, tissue_percent, slide_num, row, col):
"""
Score tile based on tissue percentage, color factor, saturation/value factor, and tissue quantity factor.
Args:
np_tile: Tile as NumPy array.
tissue_percent: The percentage of the tile judged to be tissue.
slide_num: Slide number.
row: Tile row.
col: Tile column.
Returns tuple consisting of score, color factor, saturation/value factor, and tissue quantity factor.
"""
color_factor = hsv_purple_pink_factor(np_tile)
s_and_v_factor = hsv_saturation_and_value_factor(np_tile)
amount = tissue_quantity(tissue_percent)
quantity_factor = tissue_quantity_factor(amount)
combined_factor = color_factor * s_and_v_factor * quantity_factor
score = (tissue_percent ** 2) * np.log(1 + combined_factor) / 1000.0
# scale score to between 0 and 1
score = 1.0 - (10.0 / (10.0 + score))
return score, color_factor, s_and_v_factor, quantity_factor
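# Worked example with hypothetical values: tissue_percent=90, color_factor=30,
# s_and_v_factor=1.0 and quantity_factor=1.0 give combined_factor=30, so the raw score is
# 90**2 * log(31) / 1000 ~= 27.8, which the 1 - 10/(10 + score) mapping scales to ~0.74.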
def tissue_quantity_factor(amount):
"""
Obtain a scoring factor based on the quantity of tissue in a tile.
Args:
amount: Tissue amount as a TissueQuantity enum value.
Returns:
Scoring factor based on the tile tissue quantity.
"""
if amount == TissueQuantity.HIGH:
quantity_factor = 1.0
elif amount == TissueQuantity.MEDIUM:
quantity_factor = 0.2
elif amount == TissueQuantity.LOW:
quantity_factor = 0.1
else:
quantity_factor = 0.0
return quantity_factor
def tissue_quantity(tissue_percentage):
"""
Obtain TissueQuantity enum member (HIGH, MEDIUM, LOW, or NONE) for corresponding tissue percentage.
Args:
tissue_percentage: The tile tissue percentage.
Returns:
TissueQuantity enum member (HIGH, MEDIUM, LOW, or NONE).
"""
if tissue_percentage >= TISSUE_HIGH_THRESH:
return TissueQuantity.HIGH
elif (tissue_percentage >= TISSUE_LOW_THRESH) and (tissue_percentage < TISSUE_HIGH_THRESH):
return TissueQuantity.MEDIUM
elif (tissue_percentage > 0) and (tissue_percentage < TISSUE_LOW_THRESH):
return TissueQuantity.LOW
else:
return TissueQuantity.NONE
def image_list_to_tiles(image_num_list, display=False, save_summary=True, save_data=True, save_top_tiles=True):
"""
Generate tile summaries and tiles for a list of images.
Args:
image_num_list: List of image numbers.
display: If True, display tile summary images to screen.
save_summary: If True, save tile summary images.
save_data: If True, save tile data to csv file.
save_top_tiles: If True, save top tiles to files.
"""
tile_summaries_dict = dict()
for slide_num in image_num_list:
tile_summary = summary_and_tiles(slide_num, display, save_summary, save_data, save_top_tiles)
tile_summaries_dict[slide_num] = tile_summary
return image_num_list, tile_summaries_dict
def image_range_to_tiles(start_ind, end_ind, display=False, save_summary=True, save_data=True, save_top_tiles=True):
"""
Generate tile summaries and tiles for a range of images.
Args:
start_ind: Starting index (inclusive).
end_ind: Ending index (inclusive).
display: If True, display tile summary images to screen.
save_summary: If True, save tile summary images.
save_data: If True, save tile data to csv file.
save_top_tiles: If True, save top tiles to files.
"""
image_num_list = list()
tile_summaries_dict = dict()
for slide_num in range(start_ind, end_ind + 1):
tile_summary = summary_and_tiles(slide_num, display, save_summary, save_data, save_top_tiles)
image_num_list.append(slide_num)
tile_summaries_dict[slide_num] = tile_summary
return image_num_list, tile_summaries_dict
def singleprocess_filtered_images_to_tiles(display=False, save_summary=True, save_data=True, save_top_tiles=True,
html=True, image_num_list=None):
"""
Generate tile summaries and tiles for training images using a single process.
Args:
display: If True, display tile summary images to screen.
save_summary: If True, save tile summary images.
save_data: If True, save tile data to csv file.
save_top_tiles: If True, save top tiles to files.
html: If True, generate HTML page to display tiled images
image_num_list: Optionally specify a list of image slide numbers.
"""
t = Time()
print("Generating tile summaries\n")
if image_num_list is not None:
image_num_list, tile_summaries_dict = image_list_to_tiles(image_num_list, display, save_summary, save_data,
save_top_tiles)
else:
num_training_slides = slide.get_num_training_slides()
image_num_list, tile_summaries_dict = image_range_to_tiles(1, num_training_slides, display, save_summary, save_data,
save_top_tiles)
print("Time to generate tile summaries: %s\n" % str(t.elapsed()))
if html:
generate_tiled_html_result(image_num_list, tile_summaries_dict, save_data)
def multiprocess_filtered_images_to_tiles(display=False, save_summary=True, save_data=True, save_top_tiles=True,
html=True, image_num_list=None):
"""
Generate tile summaries and tiles for all training images using multiple processes (one process per core).
Args:
display: If True, display images to screen (multiprocessed display not recommended).
save_summary: If True, save tile summary images.
save_data: If True, save tile data to csv file.
save_top_tiles: If True, save top tiles to files.
html: If True, generate HTML page to display tiled images.
image_num_list: Optionally specify a list of image slide numbers.
"""
timer = Time()
print("Generating tile summaries (multiprocess)\n")
if save_summary and not os.path.exists(slide.TILE_SUMMARY_DIR):
os.makedirs(slide.TILE_SUMMARY_DIR)
# how many processes to use
num_processes = multiprocessing.cpu_count()
pool = multiprocessing.Pool(num_processes)
if image_num_list is not None:
num_train_images = len(image_num_list)
else:
num_train_images = slide.get_num_training_slides()
if num_processes > num_train_images:
num_processes = num_train_images
images_per_process = num_train_images / num_processes
print("Number of processes: " + str(num_processes))
print("Number of training images: " + str(num_train_images))
tasks = []
for num_process in range(1, num_processes + 1):
start_index = (num_process - 1) * images_per_process + 1
end_index = num_process * images_per_process
start_index = int(start_index)
end_index = int(end_index)
if image_num_list is not None:
sublist = image_num_list[start_index - 1:end_index]
tasks.append((sublist, display, save_summary, save_data, save_top_tiles))
print("Task #" + str(num_process) + ": Process slides " + str(sublist))
else:
tasks.append((start_index, end_index, display, save_summary, save_data, save_top_tiles))
if start_index == end_index:
print("Task #" + str(num_process) + ": Process slide " + str(start_index))
else:
print("Task #" + str(num_process) + ": Process slides " + str(start_index) + " to " + str(end_index))
# start tasks
results = []
for t in tasks:
if image_num_list is not None:
results.append(pool.apply_async(image_list_to_tiles, t))
else:
results.append(pool.apply_async(image_range_to_tiles, t))
slide_nums = list()
tile_summaries_dict = dict()
for result in results:
image_nums, tile_summaries = result.get()
slide_nums.extend(image_nums)
tile_summaries_dict.update(tile_summaries)
print("Done tiling slides: %s" % image_nums)
if html:
generate_tiled_html_result(slide_nums, tile_summaries_dict, save_data)
print("Time to generate tile previews (multiprocess): %s\n" % str(timer.elapsed()))
def image_row(slide_num, tile_summary, data_link):
"""
Generate HTML for viewing a tiled image.
Args:
slide_num: The slide number.
tile_summary: TileSummary object.
data_link: If True, add link to tile data csv file.
Returns:
HTML table row for viewing a tiled image.
"""
orig_img = slide.get_training_image_path(slide_num)
orig_thumb = slide.get_training_thumbnail_path(slide_num)
filt_img = slide.get_filter_image_result(slide_num)
filt_thumb = slide.get_filter_thumbnail_result(slide_num)
sum_img = slide.get_tile_summary_image_path(slide_num)
sum_thumb = slide.get_tile_summary_thumbnail_path(slide_num)
osum_img = slide.get_tile_summary_on_original_image_path(slide_num)
osum_thumb = slide.get_tile_summary_on_original_thumbnail_path(slide_num)
top_img = slide.get_top_tiles_image_path(slide_num)
top_thumb = slide.get_top_tiles_thumbnail_path(slide_num)
otop_img = slide.get_top_tiles_on_original_image_path(slide_num)
otop_thumb = slide.get_top_tiles_on_original_thumbnail_path(slide_num)
html = " <tr>\n" + \
" <td style=\"vertical-align: top\">\n" + \
" <a target=\"_blank\" href=\"%s\">S%03d Original<br/>\n" % (orig_img, slide_num) + \
" <img src=\"%s\" />\n" % (orig_thumb) + \
" </a>\n" + \
" </td>\n" + \
" <td style=\"vertical-align: top\">\n" + \
" <a target=\"_blank\" href=\"%s\">S%03d Filtered<br/>\n" % (filt_img, slide_num) + \
" <img src=\"%s\" />\n" % (filt_thumb) + \
" </a>\n" + \
" </td>\n"
html += " <td style=\"vertical-align: top\">\n" + \
" <a target=\"_blank\" href=\"%s\">S%03d Tiles<br/>\n" % (sum_img, slide_num) + \
" <img src=\"%s\" />\n" % (sum_thumb) + \
" </a>\n" + \
" </td>\n"
html += " <td style=\"vertical-align: top\">\n" + \
" <a target=\"_blank\" href=\"%s\">S%03d Tiles<br/>\n" % (osum_img, slide_num) + \
" <img src=\"%s\" />\n" % (osum_thumb) + \
" </a>\n" + \
" </td>\n"
html += " <td style=\"vertical-align: top\">\n"
if data_link:
html += " <div style=\"white-space: nowrap;\">S%03d Tile Summary\n" % slide_num + \
" (<a target=\"_blank\" href=\"%s\">Data</a>)</div>\n" % slide.get_tile_data_path(slide_num)
else:
html += " <div style=\"white-space: nowrap;\">S%03d Tile Summary</div>\n" % slide_num
html += " <div style=\"font-size: smaller; white-space: nowrap;\">\n" + \
" %s\n" % summary_stats(tile_summary).replace("\n", "<br/>\n ") + \
" </div>\n" + \
" </td>\n"
html += " <td style=\"vertical-align: top\">\n" + \
" <a target=\"_blank\" href=\"%s\">S%03d Top Tiles<br/>\n" % (top_img, slide_num) + \
" <img src=\"%s\" />\n" % (top_thumb) + \
" </a>\n" + \
" </td>\n"
html += " <td style=\"vertical-align: top\">\n" + \
" <a target=\"_blank\" href=\"%s\">S%03d Top Tiles<br/>\n" % (otop_img, slide_num) + \
" <img src=\"%s\" />\n" % (otop_thumb) + \
" </a>\n" + \
" </td>\n"
top_tiles = tile_summary.top_tiles()
num_tiles = len(top_tiles)
score_num = 0
for t in top_tiles:
score_num += 1
t.tile_num = score_num
# sort top tiles by rows and columns to make them easier to locate on HTML page
top_tiles = sorted(top_tiles, key=lambda t: (t.r, t.c), reverse=False)
html += " <td style=\"vertical-align: top\">\n" + \
" <div style=\"white-space: nowrap;\">S%03d Top %d Tile Scores</div>\n" % (slide_num, num_tiles) + \
" <div style=\"font-size: smaller; white-space: nowrap;\">\n"
html += " <table>\n"
MAX_TILES_PER_ROW = 15
num_cols = math.ceil(num_tiles / MAX_TILES_PER_ROW)
num_rows = num_tiles if num_tiles < MAX_TILES_PER_ROW else MAX_TILES_PER_ROW
for row in range(num_rows):
html += " <tr>\n"
for col in range(num_cols):
html += " <td style=\"border: none;\">"
tile_num = row + (col * num_rows) + 1
if tile_num <= num_tiles:
t = top_tiles[tile_num - 1]
label = "R%03d C%03d %0.4f (#%02d)" % (t.r, t.c, t.score, t.tile_num)
tile_img_path = slide.get_tile_image_path(t)
html += "<a target=\"_blank\" href=\"%s\">%s</a>" % (tile_img_path, label)
else:
html += " "
html += "</td>\n"
html += " </tr>\n"
html += " </table>\n"
html += " </div>\n"
html += " </td>\n"
html += " </tr>\n"
return html
def generate_tiled_html_result(slide_nums, tile_summaries_dict, data_link):
"""
Generate HTML to view the tiled images.
Args:
slide_nums: List of slide numbers.
tile_summaries_dict: Dictionary of TileSummary objects keyed by slide number.
data_link: If True, add link to tile data csv file.
"""
slide_nums = sorted(slide_nums)
if not slide.TILE_SUMMARY_PAGINATE:
html = ""
html += filter.html_header("Tiles")
html += " <table>\n"
for slide_num in slide_nums:
      html += image_row(slide_num, tile_summaries_dict[slide_num], data_link)
html += " </table>\n"
html += filter.html_footer()
text_file = open(os.path.join(slide.TILE_SUMMARY_HTML_DIR, "tiles.html"), "w")
text_file.write(html)
text_file.close()
else:
total_len = len(slide_nums)
page_size = slide.TILE_SUMMARY_PAGINATION_SIZE
num_pages = math.ceil(total_len / page_size)
for page_num in range(1, num_pages + 1):
start_index = (page_num - 1) * page_size
end_index = (page_num * page_size) if (page_num < num_pages) else total_len
page_slide_nums = slide_nums[start_index:end_index]
html = ""
html += filter.html_header("Tiles, Page %d" % page_num)
html += " <div style=\"font-size: 20px\">"
if page_num > 1:
if page_num == 2:
html += "<a href=\"tiles.html\"><</a> "
else:
html += "<a href=\"tiles-%d.html\"><</a> " % (page_num - 1)
html += "Page %d" % page_num
if page_num < num_pages:
html += " <a href=\"tiles-%d.html\">></a> " % (page_num + 1)
html += "</div>\n"
html += " <table>\n"
for slide_num in page_slide_nums:
tile_summary = tile_summaries_dict[slide_num]
html += image_row(slide_num, tile_summary, data_link)
html += " </table>\n"
html += filter.html_footer()
if page_num == 1:
text_file = open(os.path.join(slide.TILE_SUMMARY_HTML_DIR, "tiles.html"), "w")
else:
text_file = open(os.path.join(slide.TILE_SUMMARY_HTML_DIR, "tiles-%d.html" % page_num), "w")
text_file.write(html)
text_file.close()
def np_hsv_hue_histogram(h):
"""
Create Matplotlib histogram of hue values for an HSV image and return the histogram as a NumPy array image.
Args:
h: Hue values as a 1-dimensional int NumPy array (scaled 0 to 360)
Returns:
Matplotlib histogram of hue values converted to a NumPy array image.
"""
figure = plt.figure()
canvas = figure.canvas
_, _, patches = plt.hist(h, bins=360)
plt.title("HSV Hue Histogram, mean=%3.1f, std=%3.1f" % (np.mean(h), np.std(h)))
bin_num = 0
for patch in patches:
rgb_color = colorsys.hsv_to_rgb(bin_num / 360.0, 1, 1)
patch.set_facecolor(rgb_color)
bin_num += 1
canvas.draw()
w, h = canvas.get_width_height()
  np_hist = np.frombuffer(canvas.get_renderer().tostring_rgb(), dtype=np.uint8).reshape(h, w, 3)
plt.close(figure)
util.np_info(np_hist)
return np_hist
def np_histogram(data, title, bins="auto"):
"""
Create Matplotlib histogram and return it as a NumPy array image.
Args:
data: Data to plot in the histogram.
title: Title of the histogram.
bins: Number of histogram bins, "auto" by default.
Returns:
Matplotlib histogram as a NumPy array image.
"""
figure = plt.figure()
canvas = figure.canvas
plt.hist(data, bins=bins)
plt.title(title)
canvas.draw()
w, h = canvas.get_width_height()
  np_hist = np.frombuffer(canvas.get_renderer().tostring_rgb(), dtype=np.uint8).reshape(h, w, 3)
plt.close(figure)
util.np_info(np_hist)
return np_hist
def np_hsv_saturation_histogram(s):
"""
Create Matplotlib histogram of saturation values for an HSV image and return the histogram as a NumPy array image.
Args:
s: Saturation values as a 1-dimensional float NumPy array
Returns:
Matplotlib histogram of saturation values converted to a NumPy array image.
"""
title = "HSV Saturation Histogram, mean=%.2f, std=%.2f" % (np.mean(s), np.std(s))
return np_histogram(s, title)
def np_hsv_value_histogram(v):
"""
Create Matplotlib histogram of value values for an HSV image and return the histogram as a NumPy array image.
Args:
v: Value values as a 1-dimensional float NumPy array
Returns:
    Matplotlib histogram of value values converted to a NumPy array image.
"""
title = "HSV Value Histogram, mean=%.2f, std=%.2f" % (np.mean(v), np.std(v))
return np_histogram(v, title)
def np_rgb_channel_histogram(rgb, ch_num, ch_name):
"""
Create Matplotlib histogram of an RGB channel for an RGB image and return the histogram as a NumPy array image.
Args:
rgb: Image as RGB NumPy array.
ch_num: Which channel (0=red, 1=green, 2=blue)
ch_name: Channel name ("R", "G", "B")
Returns:
Matplotlib histogram of RGB channel converted to a NumPy array image.
"""
ch = rgb[:, :, ch_num]
ch = ch.flatten()
title = "RGB %s Histogram, mean=%.2f, std=%.2f" % (ch_name, np.mean(ch), np.std(ch))
return np_histogram(ch, title, bins=256)
def np_rgb_r_histogram(rgb):
"""
Obtain RGB R channel histogram as a NumPy array image.
Args:
rgb: Image as RGB NumPy array.
Returns:
Histogram of RGB R channel as a NumPy array image.
"""
hist = np_rgb_channel_histogram(rgb, 0, "R")
return hist
def np_rgb_g_histogram(rgb):
"""
Obtain RGB G channel histogram as a NumPy array image.
Args:
rgb: Image as RGB NumPy array.
Returns:
Histogram of RGB G channel as a NumPy array image.
"""
hist = np_rgb_channel_histogram(rgb, 1, "G")
return hist
def np_rgb_b_histogram(rgb):
"""
Obtain RGB B channel histogram as a NumPy array image.
Args:
rgb: Image as RGB NumPy array.
Returns:
Histogram of RGB B channel as a NumPy array image.
"""
hist = np_rgb_channel_histogram(rgb, 2, "B")
return hist
def pil_hue_histogram(h):
"""
Create Matplotlib histogram of hue values for an HSV image and return the histogram as a PIL image.
Args:
h: Hue values as a 1-dimensional int NumPy array (scaled 0 to 360)
Returns:
Matplotlib histogram of hue values converted to a PIL image.
"""
np_hist = np_hsv_hue_histogram(h)
pil_hist = util.np_to_pil(np_hist)
return pil_hist
def display_image_with_hsv_hue_histogram(np_rgb, text=None, scale_up=False):
"""
Display an image with its corresponding hue histogram.
Args:
np_rgb: RGB image tile as a NumPy array
text: Optional text to display above image
scale_up: If True, scale up image to display by slide.SCALE_FACTOR
"""
hsv = filter.filter_rgb_to_hsv(np_rgb)
h = filter.filter_hsv_to_h(hsv)
np_hist = np_hsv_hue_histogram(h)
hist_r, hist_c, _ = np_hist.shape
if scale_up:
np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=1)
np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=0)
img_r, img_c, img_ch = np_rgb.shape
if text is not None:
np_t = np_text(text)
t_r, t_c, _ = np_t.shape
t_i_c = max(t_c, img_c)
t_i_r = t_r + img_r
t_i = np.zeros([t_i_r, t_i_c, img_ch], dtype=np.uint8)
t_i.fill(255)
t_i[0:t_r, 0:t_c] = np_t
t_i[t_r:t_r + img_r, 0:img_c] = np_rgb
np_rgb = t_i # for simplicity assign title+image to image
img_r, img_c, img_ch = np_rgb.shape
r = max(img_r, hist_r)
c = img_c + hist_c
combo = np.zeros([r, c, img_ch], dtype=np.uint8)
combo.fill(255)
combo[0:img_r, 0:img_c] = np_rgb
combo[0:hist_r, img_c:c] = np_hist
pil_combo = util.np_to_pil(combo)
pil_combo.show()
def display_image(np_rgb, text=None, scale_up=False):
"""
Display an image with optional text above image.
Args:
np_rgb: RGB image tile as a NumPy array
text: Optional text to display above image
scale_up: If True, scale up image to display by slide.SCALE_FACTOR
"""
if scale_up:
np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=1)
np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=0)
img_r, img_c, img_ch = np_rgb.shape
if text is not None:
np_t = np_text(text)
t_r, t_c, _ = np_t.shape
t_i_c = max(t_c, img_c)
t_i_r = t_r + img_r
t_i = np.zeros([t_i_r, t_i_c, img_ch], dtype=np.uint8)
t_i.fill(255)
t_i[0:t_r, 0:t_c] = np_t
t_i[t_r:t_r + img_r, 0:img_c] = np_rgb
np_rgb = t_i
pil_img = util.np_to_pil(np_rgb)
pil_img.show()
def display_image_with_hsv_histograms(np_rgb, text=None, scale_up=False):
"""
Display an image with its corresponding HSV hue, saturation, and value histograms.
Args:
np_rgb: RGB image tile as a NumPy array
text: Optional text to display above image
scale_up: If True, scale up image to display by slide.SCALE_FACTOR
"""
hsv = filter.filter_rgb_to_hsv(np_rgb)
np_h = np_hsv_hue_histogram(filter.filter_hsv_to_h(hsv))
np_s = np_hsv_saturation_histogram(filter.filter_hsv_to_s(hsv))
np_v = np_hsv_value_histogram(filter.filter_hsv_to_v(hsv))
h_r, h_c, _ = np_h.shape
s_r, s_c, _ = np_s.shape
v_r, v_c, _ = np_v.shape
if scale_up:
np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=1)
np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=0)
img_r, img_c, img_ch = np_rgb.shape
if text is not None:
np_t = np_text(text)
t_r, t_c, _ = np_t.shape
t_i_c = max(t_c, img_c)
t_i_r = t_r + img_r
t_i = np.zeros([t_i_r, t_i_c, img_ch], dtype=np.uint8)
t_i.fill(255)
t_i[0:t_r, 0:t_c] = np_t
t_i[t_r:t_r + img_r, 0:img_c] = np_rgb
np_rgb = t_i # for simplicity assign title+image to image
img_r, img_c, img_ch = np_rgb.shape
hists_c = max(h_c, s_c, v_c)
hists_r = h_r + s_r + v_r
hists = np.zeros([hists_r, hists_c, img_ch], dtype=np.uint8)
hists[0:h_r, 0:h_c] = np_h
hists[h_r:h_r + s_r, 0:s_c] = np_s
hists[h_r + s_r:h_r + s_r + v_r, 0:v_c] = np_v
r = max(img_r, hists_r)
c = img_c + hists_c
combo = np.zeros([r, c, img_ch], dtype=np.uint8)
combo.fill(255)
combo[0:img_r, 0:img_c] = np_rgb
combo[0:hists_r, img_c:c] = hists
pil_combo = util.np_to_pil(combo)
pil_combo.show()
def display_image_with_rgb_histograms(np_rgb, text=None, scale_up=False):
"""
Display an image with its corresponding RGB histograms.
Args:
np_rgb: RGB image tile as a NumPy array
text: Optional text to display above image
scale_up: If True, scale up image to display by slide.SCALE_FACTOR
"""
np_r = np_rgb_r_histogram(np_rgb)
np_g = np_rgb_g_histogram(np_rgb)
np_b = np_rgb_b_histogram(np_rgb)
r_r, r_c, _ = np_r.shape
g_r, g_c, _ = np_g.shape
b_r, b_c, _ = np_b.shape
if scale_up:
np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=1)
np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=0)
img_r, img_c, img_ch = np_rgb.shape
if text is not None:
np_t = np_text(text)
t_r, t_c, _ = np_t.shape
t_i_c = max(t_c, img_c)
t_i_r = t_r + img_r
t_i = np.zeros([t_i_r, t_i_c, img_ch], dtype=np.uint8)
t_i.fill(255)
t_i[0:t_r, 0:t_c] = np_t
t_i[t_r:t_r + img_r, 0:img_c] = np_rgb
np_rgb = t_i # for simplicity assign title+image to image
img_r, img_c, img_ch = np_rgb.shape
hists_c = max(r_c, g_c, b_c)
hists_r = r_r + g_r + b_r
hists = np.zeros([hists_r, hists_c, img_ch], dtype=np.uint8)
hists[0:r_r, 0:r_c] = np_r
hists[r_r:r_r + g_r, 0:g_c] = np_g
hists[r_r + g_r:r_r + g_r + b_r, 0:b_c] = np_b
r = max(img_r, hists_r)
c = img_c + hists_c
combo = np.zeros([r, c, img_ch], dtype=np.uint8)
combo.fill(255)
combo[0:img_r, 0:img_c] = np_rgb
combo[0:hists_r, img_c:c] = hists
pil_combo = util.np_to_pil(combo)
pil_combo.show()
def pil_text(text, w_border=TILE_TEXT_W_BORDER, h_border=TILE_TEXT_H_BORDER, font_path=FONT_PATH,
font_size=TILE_TEXT_SIZE, text_color=TILE_TEXT_COLOR, background=TILE_TEXT_BACKGROUND_COLOR):
"""
Obtain a PIL image representation of text.
Args:
text: The text to convert to an image.
w_border: Tile text width border (left and right).
h_border: Tile text height border (top and bottom).
font_path: Path to font.
font_size: Size of font.
text_color: Tile text color.
background: Tile background color.
Returns:
PIL image representing the text.
"""
font = ImageFont.truetype(font_path, font_size)
x, y = ImageDraw.Draw(Image.new("RGB", (1, 1), background)).textsize(text, font)
image = Image.new("RGB", (x + 2 * w_border, y + 2 * h_border), background)
draw = ImageDraw.Draw(image)
draw.text((w_border, h_border), text, text_color, font=font)
return image
def np_text(text, w_border=TILE_TEXT_W_BORDER, h_border=TILE_TEXT_H_BORDER, font_path=FONT_PATH,
font_size=TILE_TEXT_SIZE, text_color=TILE_TEXT_COLOR, background=TILE_TEXT_BACKGROUND_COLOR):
"""
Obtain a NumPy array image representation of text.
Args:
text: The text to convert to an image.
w_border: Tile text width border (left and right).
h_border: Tile text height border (top and bottom).
font_path: Path to font.
font_size: Size of font.
text_color: Tile text color.
background: Tile background color.
Returns:
NumPy array representing the text.
"""
pil_img = pil_text(text, w_border, h_border, font_path, font_size,
text_color, background)
np_img = util.pil_to_np_rgb(pil_img)
return np_img
def display_tile(tile, rgb_histograms=True, hsv_histograms=True):
"""
Display a tile with its corresponding RGB and HSV histograms.
Args:
tile: The Tile object.
rgb_histograms: If True, display RGB histograms.
hsv_histograms: If True, display HSV histograms.
"""
text = "S%03d R%03d C%03d\n" % (tile.slide_num, tile.r, tile.c)
text += "Score:%4.2f Tissue:%5.2f%% CF:%2.0f SVF:%4.2f QF:%4.2f\n" % (
tile.score, tile.tissue_percentage, tile.color_factor, tile.s_and_v_factor, tile.quantity_factor)
text += "Rank #%d of %d" % (tile.rank, tile.tile_summary.num_tiles())
np_scaled_tile = tile.get_np_scaled_tile()
if np_scaled_tile is not None:
small_text = text + "\n \nSmall Tile (%d x %d)" % (np_scaled_tile.shape[1], np_scaled_tile.shape[0])
if rgb_histograms and hsv_histograms:
display_image_with_rgb_and_hsv_histograms(np_scaled_tile, small_text, scale_up=True)
elif rgb_histograms:
display_image_with_rgb_histograms(np_scaled_tile, small_text, scale_up=True)
elif hsv_histograms:
display_image_with_hsv_histograms(np_scaled_tile, small_text, scale_up=True)
else:
display_image(np_scaled_tile, small_text, scale_up=True)
np_tile = tile.get_np_tile()
text += " based on small tile\n \nLarge Tile (%d x %d)" % (np_tile.shape[1], np_tile.shape[0])
if rgb_histograms and hsv_histograms:
display_image_with_rgb_and_hsv_histograms(np_tile, text)
elif rgb_histograms:
display_image_with_rgb_histograms(np_tile, text)
elif hsv_histograms:
display_image_with_hsv_histograms(np_tile, text)
else:
display_image(np_tile, text)
def display_image_with_rgb_and_hsv_histograms(np_rgb, text=None, scale_up=False):
"""
Display a tile with its corresponding RGB and HSV histograms.
Args:
np_rgb: RGB image tile as a NumPy array
text: Optional text to display above image
scale_up: If True, scale up image to display by slide.SCALE_FACTOR
"""
hsv = filter.filter_rgb_to_hsv(np_rgb)
np_r = np_rgb_r_histogram(np_rgb)
np_g = np_rgb_g_histogram(np_rgb)
np_b = np_rgb_b_histogram(np_rgb)
np_h = np_hsv_hue_histogram(filter.filter_hsv_to_h(hsv))
np_s = np_hsv_saturation_histogram(filter.filter_hsv_to_s(hsv))
np_v = np_hsv_value_histogram(filter.filter_hsv_to_v(hsv))
r_r, r_c, _ = np_r.shape
g_r, g_c, _ = np_g.shape
b_r, b_c, _ = np_b.shape
h_r, h_c, _ = np_h.shape
s_r, s_c, _ = np_s.shape
v_r, v_c, _ = np_v.shape
if scale_up:
np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=1)
np_rgb = np.repeat(np_rgb, slide.SCALE_FACTOR, axis=0)
img_r, img_c, img_ch = np_rgb.shape
if text is not None:
np_t = np_text(text)
t_r, t_c, _ = np_t.shape
t_i_c = max(t_c, img_c)
t_i_r = t_r + img_r
t_i = np.zeros([t_i_r, t_i_c, img_ch], dtype=np.uint8)
t_i.fill(255)
t_i[0:t_r, 0:t_c] = np_t
t_i[t_r:t_r + img_r, 0:img_c] = np_rgb
np_rgb = t_i # for simplicity assign title+image to image
img_r, img_c, img_ch = np_rgb.shape
rgb_hists_c = max(r_c, g_c, b_c)
rgb_hists_r = r_r + g_r + b_r
rgb_hists = np.zeros([rgb_hists_r, rgb_hists_c, img_ch], dtype=np.uint8)
rgb_hists[0:r_r, 0:r_c] = np_r
rgb_hists[r_r:r_r + g_r, 0:g_c] = np_g
rgb_hists[r_r + g_r:r_r + g_r + b_r, 0:b_c] = np_b
hsv_hists_c = max(h_c, s_c, v_c)
hsv_hists_r = h_r + s_r + v_r
hsv_hists = np.zeros([hsv_hists_r, hsv_hists_c, img_ch], dtype=np.uint8)
hsv_hists[0:h_r, 0:h_c] = np_h
hsv_hists[h_r:h_r + s_r, 0:s_c] = np_s
hsv_hists[h_r + s_r:h_r + s_r + v_r, 0:v_c] = np_v
r = max(img_r, rgb_hists_r, hsv_hists_r)
c = img_c + rgb_hists_c + hsv_hists_c
combo = np.zeros([r, c, img_ch], dtype=np.uint8)
combo.fill(255)
combo[0:img_r, 0:img_c] = np_rgb
combo[0:rgb_hists_r, img_c:img_c + rgb_hists_c] = rgb_hists
combo[0:hsv_hists_r, img_c + rgb_hists_c:c] = hsv_hists
pil_combo = util.np_to_pil(combo)
pil_combo.show()
def rgb_to_hues(rgb):
"""
Convert RGB NumPy array to 1-dimensional array of hue values (HSV H values in degrees).
Args:
rgb: RGB image as a NumPy array
Returns:
1-dimensional array of hue values in degrees
"""
hsv = filter.filter_rgb_to_hsv(rgb, display_np_info=False)
h = filter.filter_hsv_to_h(hsv, display_np_info=False)
return h
def hsv_saturation_and_value_factor(rgb):
"""
Function to reduce scores of tiles with narrow HSV saturations and values since saturation and value standard
deviations should be relatively broad if the tile contains significant tissue.
Example of a blurred tile that should not be ranked as a top tile:
../data/tiles_png/006/TUPAC-TR-006-tile-r58-c3-x2048-y58369-w1024-h1024.png
Args:
rgb: RGB image as a NumPy array
Returns:
Saturation and value factor, where 1 is no effect and less than 1 means the standard deviations of saturation and
value are relatively small.
"""
hsv = filter.filter_rgb_to_hsv(rgb, display_np_info=False)
s = filter.filter_hsv_to_s(hsv)
v = filter.filter_hsv_to_v(hsv)
s_std = np.std(s)
v_std = np.std(v)
if s_std < 0.05 and v_std < 0.05:
factor = 0.4
elif s_std < 0.05:
factor = 0.7
elif v_std < 0.05:
factor = 0.7
else:
factor = 1
factor = factor ** 2
return factor
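# Note: the factor is squared before being returned, so a tile with both narrow saturation
# and narrow value is penalized by 0.4**2 = 0.16 and a tile with only one narrow channel by
# 0.7**2 = 0.49.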
def hsv_purple_deviation(hsv_hues):
"""
Obtain the deviation from the HSV hue for purple.
Args:
hsv_hues: NumPy array of HSV hue values.
Returns:
The HSV purple deviation.
"""
purple_deviation = np.sqrt(np.mean(np.abs(hsv_hues - HSV_PURPLE) ** 2))
return purple_deviation
def hsv_pink_deviation(hsv_hues):
"""
Obtain the deviation from the HSV hue for pink.
Args:
hsv_hues: NumPy array of HSV hue values.
Returns:
The HSV pink deviation.
"""
pink_deviation = np.sqrt(np.mean(np.abs(hsv_hues - HSV_PINK) ** 2))
return pink_deviation
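# Note: both deviations are root-mean-square distances of the hue samples from the reference
# hues HSV_PURPLE (270 degrees) and HSV_PINK (330 degrees).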
def hsv_purple_pink_factor(rgb):
"""
Compute scoring factor based on purple and pink HSV hue deviations and degree to which a narrowed hue color range
average is purple versus pink.
Args:
    rgb: Image as a NumPy array.
Returns:
Factor that favors purple (hematoxylin stained) tissue over pink (eosin stained) tissue.
"""
hues = rgb_to_hues(rgb)
hues = hues[hues >= 260] # exclude hues under 260
hues = hues[hues <= 340] # exclude hues over 340
if len(hues) == 0:
return 0 # if no hues between 260 and 340, then not purple or pink
pu_dev = hsv_purple_deviation(hues)
pi_dev = hsv_pink_deviation(hues)
avg_factor = (340 - np.average(hues)) ** 2
if pu_dev == 0: # avoid divide by zero if tile has no tissue
return 0
factor = pi_dev / pu_dev * avg_factor
return factor
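# Example of the intended behavior: hues clustered near 270 (purple) make pu_dev small and
# pi_dev large, so pi_dev / pu_dev > 1 and the (340 - average hue)**2 term is large, boosting
# the factor; hues clustered near 330 (pink) have the opposite effect.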
def hsv_purple_vs_pink_average_factor(rgb, tissue_percentage):
"""
Function to favor purple (hematoxylin) over pink (eosin) staining based on the distance of the HSV hue average
from purple and pink.
Args:
rgb: Image as RGB NumPy array
tissue_percentage: Amount of tissue on the tile
Returns:
Factor, where >1 to boost purple slide scores, <1 to reduce pink slide scores, or 1 no effect.
"""
factor = 1
# only applies to slides with a high quantity of tissue
if tissue_percentage < TISSUE_HIGH_THRESH:
return factor
hues = rgb_to_hues(rgb)
hues = hues[hues >= 200] # Remove hues under 200
if len(hues) == 0:
return factor
avg = np.average(hues)
# pil_hue_histogram(hues).show()
pu = HSV_PURPLE - avg
pi = HSV_PINK - avg
pupi = pu + pi
# print("Av: %4d, Pu: %4d, Pi: %4d, PuPi: %4d" % (avg, pu, pi, pupi))
# Av: 250, Pu: 20, Pi: 80, PuPi: 100
# Av: 260, Pu: 10, Pi: 70, PuPi: 80
# Av: 270, Pu: 0, Pi: 60, PuPi: 60 ** PURPLE
# Av: 280, Pu: -10, Pi: 50, PuPi: 40
# Av: 290, Pu: -20, Pi: 40, PuPi: 20
# Av: 300, Pu: -30, Pi: 30, PuPi: 0
# Av: 310, Pu: -40, Pi: 20, PuPi: -20
# Av: 320, Pu: -50, Pi: 10, PuPi: -40
# Av: 330, Pu: -60, Pi: 0, PuPi: -60 ** PINK
# Av: 340, Pu: -70, Pi: -10, PuPi: -80
# Av: 350, Pu: -80, Pi: -20, PuPi: -100
if pupi > 30:
factor *= 1.2
if pupi < -30:
factor *= .8
if pupi > 0:
factor *= 1.2
if pupi > 50:
factor *= 1.2
if pupi < -60:
factor *= .8
return factor
class TileSummary:
"""
Class for tile summary information.
"""
slide_num = None
orig_w = None
orig_h = None
orig_tile_w = None
orig_tile_h = None
scale_factor = slide.SCALE_FACTOR
scaled_w = None
scaled_h = None
scaled_tile_w = None
scaled_tile_h = None
mask_percentage = None
num_row_tiles = None
num_col_tiles = None
count = 0
high = 0
medium = 0
low = 0
none = 0
def __init__(self, slide_num, orig_w, orig_h, orig_tile_w, orig_tile_h, scaled_w, scaled_h, scaled_tile_w,
scaled_tile_h, tissue_percentage, num_col_tiles, num_row_tiles):
self.slide_num = slide_num
self.orig_w = orig_w
self.orig_h = orig_h
self.orig_tile_w = orig_tile_w
self.orig_tile_h = orig_tile_h
self.scaled_w = scaled_w
self.scaled_h = scaled_h
self.scaled_tile_w = scaled_tile_w
self.scaled_tile_h = scaled_tile_h
self.tissue_percentage = tissue_percentage
self.num_col_tiles = num_col_tiles
self.num_row_tiles = num_row_tiles
self.tiles = []
def __str__(self):
return summary_title(self) + "\n" + summary_stats(self)
def mask_percentage(self):
"""
Obtain the percentage of the slide that is masked.
Returns:
The amount of the slide that is masked as a percentage.
"""
return 100 - self.tissue_percentage
def num_tiles(self):
"""
Retrieve the total number of tiles.
Returns:
The total number of tiles (number of rows * number of columns).
"""
return self.num_row_tiles * self.num_col_tiles
def tiles_by_tissue_percentage(self):
"""
Retrieve the tiles ranked by tissue percentage.
Returns:
List of the tiles ranked by tissue percentage.
"""
sorted_list = sorted(self.tiles, key=lambda t: t.tissue_percentage, reverse=True)
return sorted_list
def tiles_by_score(self):
"""
Retrieve the tiles ranked by score.
Returns:
List of the tiles ranked by score.
"""
sorted_list = sorted(self.tiles, key=lambda t: t.score, reverse=True)
return sorted_list
def top_tiles(self):
"""
Retrieve the top-scoring tiles.
Returns:
List of the top-scoring tiles.
"""
sorted_tiles = self.tiles_by_score()
top_tiles = sorted_tiles[:NUM_TOP_TILES]
return top_tiles
def get_tile(self, row, col):
"""
Retrieve tile by row and column.
Args:
row: The row
col: The column
Returns:
Corresponding Tile object.
"""
tile_index = (row - 1) * self.num_col_tiles + (col - 1)
tile = self.tiles[tile_index]
return tile
def display_summaries(self):
"""
Display summary images.
"""
summary_and_tiles(self.slide_num, display=True, save_summary=False, save_data=False, save_top_tiles=False)
class Tile:
"""
Class for information about a tile.
"""
def __init__(self, tile_summary, slide_num, np_scaled_tile, tile_num, r, c, r_s, r_e, c_s, c_e, o_r_s, o_r_e, o_c_s,
o_c_e, t_p, color_factor, s_and_v_factor, quantity_factor, score):
self.tile_summary = tile_summary
self.slide_num = slide_num
self.np_scaled_tile = np_scaled_tile
self.tile_num = tile_num
self.r = r
self.c = c
self.r_s = r_s
self.r_e = r_e
self.c_s = c_s
self.c_e = c_e
self.o_r_s = o_r_s
self.o_r_e = o_r_e
self.o_c_s = o_c_s
self.o_c_e = o_c_e
self.tissue_percentage = t_p
self.color_factor = color_factor
self.s_and_v_factor = s_and_v_factor
self.quantity_factor = quantity_factor
self.score = score
def __str__(self):
return "[Tile #%d, Row #%d, Column #%d, Tissue %4.2f%%, Score %0.4f]" % (
self.tile_num, self.r, self.c, self.tissue_percentage, self.score)
def __repr__(self):
return "\n" + self.__str__()
def mask_percentage(self):
return 100 - self.tissue_percentage
def tissue_quantity(self):
return tissue_quantity(self.tissue_percentage)
def get_pil_tile(self):
return tile_to_pil_tile(self)
def get_np_tile(self):
return tile_to_np_tile(self)
def save_tile(self):
save_display_tile(self, save=True, display=False)
def display_tile(self):
save_display_tile(self, save=False, display=True)
def display_with_histograms(self):
display_tile(self, rgb_histograms=True, hsv_histograms=True)
def get_np_scaled_tile(self):
return self.np_scaled_tile
def get_pil_scaled_tile(self):
return util.np_to_pil(self.np_scaled_tile)
class TissueQuantity(Enum):
NONE = 0
LOW = 1
MEDIUM = 2
HIGH = 3
def dynamic_tiles(slide_num, small_tile_in_tile=False):
"""
Generate tile summary with top tiles using original WSI training slide without intermediate image files saved to
file system.
Args:
slide_num: The slide number.
small_tile_in_tile: If True, include the small NumPy image in the Tile objects.
Returns:
TileSummary object with list of top Tile objects. The actual tile images are not retrieved until the
Tile get_tile() methods are called.
"""
np_img, large_w, large_h, small_w, small_h = slide.slide_to_scaled_np_image(slide_num)
filt_np_img = filter.apply_image_filters(np_img)
tile_summary = score_tiles(slide_num, filt_np_img, (large_w, large_h, small_w, small_h), small_tile_in_tile)
return tile_summary
def dynamic_tile(slide_num, row, col, small_tile_in_tile=False):
"""
Generate a single tile dynamically based on slide number, row, and column. If more than one tile needs to be
retrieved dynamically, dynamic_tiles() should be used.
Args:
slide_num: The slide number.
row: The row.
col: The column.
small_tile_in_tile: If True, include the small NumPy image in the Tile objects.
Returns:
    Tile object for the specified row and column.
"""
tile_summary = dynamic_tiles(slide_num, small_tile_in_tile)
tile = tile_summary.get_tile(row, col)
return tile
# if __name__ == "__main__":
# tile = dynamic_tile(2, 29, 16, True)
# tile.display_with_histograms()
# singleprocess_filtered_images_to_tiles()
# multiprocess_filtered_images_to_tiles()
| StarcoderdataPython |
8128162 | from os import system
f_op=('1. evaluation', '2. gradiens', '3. tangent plane', '4. integral', '5. linear integral', '6. surface integral', '7. indefinite integral', '8. limit', '9. Taylor series', '10. max or min', '11. plot', '0. exit')
F_op=('1. evaluation', '2. divergence', '3. curl', '4. path integral', '5. flux integral', '6. green theorem', '7. stokes theorem', '8. gauss theorem', '0. exit')
cart = ('x', 'y', 'z')
print('Hello there!\nThis is a user interface for the calculus module.\n')
#returns an array in Rn
def get_array(n):
print('insert the value of x0')
x0 = []
for i in range(n):
b=float(input(f'{cart[i]} = '))
x0.append(b)
return tuple(x0)
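# Example: get_array(2) prompts for x and y and returns a tuple such as (1.0, 2.5).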
#handles the recursive menu and the loop escape
def r_menu():
c = int(input('press 0 to exit or 9 to return to main menu\n'))
if c == 0:
print('Bye')
return -1
if c == 9:
menu()
#menu
def menu():
    print('Choose an option:')
    print('1. scalar function')
print('2. vectorial field')
a = int(input())
fp=open('exe.py', 'w')
fp.write('from calculus import*\n')
if a == 1:
print('which operation do you wish to execute?')
for i in f_op:
print(i)
b = int(input())
g=input('function=')
n=int(input('n='))
fp.write(f'f = function({g}, {n})\n')
if b == 1:
x0=get_array(n)
fp.write(f'x0={x0}\nf.eval(x0).prnt() \n')
if b == 2:
fp.write('f.grad().prnt()')
if b == 3:
if n != 2:
print('invalid option')
r_menu()
if n == 2:
x0=get_array(n)
fp.write(f'x0={x0}\nf.t_pl(x0).prnt()\n')
if b == 4:
if n == 1:
print('insert interval')
inf=int(input('a='))
sup=int(input('b='))
fp.write(f's=set_1(Interval({inf}, {sup}))\nprint(f.integral(s))')
if n == 2:
t=input('is the set y or x simple?')
pol=input('is the set in polar coordinates?')
infx=input('f(x) inf=')
supx=input('f(x) sup=')
inf=input('a=')
sup=input('b=')
fp.write(f's=set_2({t}, Interval({infx}, {supx}), Interval({inf}, {sup}), 2, {pol})\n')
fp.write('f.integral(s)')
if n == 3:
t=input('what is the axis of reference?')
pol=input('is the set in polar coordinates?')
infz=input('f(z)=')
supz=input('f(z)=')
print('insert the 2-dimensional set')
t1=input('is the set y or x simple?')
pol1=input('is the set in polar coordinates?')
infx=input('f(x)=')
supx=input('f(x)=')
inf=input('a=')
sup=input('b=')
fp.write(f'set=set_2({t1}, Interval({infx}, {supx}), Interval({inf}, {sup}), 2, {pol1})\ns=set_3({t}, Interval({infz}, {supz}), set, 3, {pol})\nprint(f.integral(s))')
if b == 5:
print('insert gamma function')
if n == 2:
f1=input('f1(x)=')
f2=input('f2(x)=')
inf=input('lower bound=')
sup=input('upper bound=')
fp.write(f'gamma=field_2(function({f1}, 1), function({f2}, 1), 2, set_1(Interval({inf}, {sup})))\nprint(f.l_integral(gamma))')
if n == 3:
f1=input('f1(x)=')
f2=input('f2(x)=')
f3=input('f3(x)=')
inf=input('lower bound=')
sup=input('upper bound=')
fp.write(f'gamma=field(function({f1}, 1), function({f2}, 1), function({f3}, 1), 3, set_1(Interval({inf}, {sup})))\nprint(simplify(f.l_integral(gamma)))')
else :
print('invalid option')
r_menu()
if b == 6:
print('insert sigma function')
t=input('axis of reference=')
s=input('function g(x, y)=')
print('insert set')
t1=input('is the set x or y simple?')
pol=input('is the set in polar coordinates?')
infx=input('f1(x)=')
supx=input('f2(x)=')
inf=input('a=')
sup=input('b=')
vec=input('is the normal vector positive or negative?')
fp.write(f"sigma=sigma({t}, {s}, set_2({t1}, Interval({infx}, {supx}), Interval({inf}, {sup}), 2, {pol}), False, '{vec}')\nprint(simplify(f.s_integral(sigma)))")
if b == 7:
var=input('insert variable')
fp.write(f'i=f.i_integral({var})\nprint(i.f)')
if b == 8:
var=input('variable=')
x0=input('x0=')
fp.write(f'print(f.lim({var}, {x0}))')
if b == 9:
var=input('variable=')
x0=input('x0=')
degr=input('degree=')
fp.write(f't=f.taylor({var}, {x0}, {degr})\nprint(t.f)')
if b == 10:
x0=get_array(n)
fp.write(f'f.max_min({x0})')
if b == 11:
print('insert interval')
l=input('lower bound=')
            u=input('upper bound=')
fp.write(f'f.plot(set_1(Interval({l}, {u})))')
if b == 0:
r_menu()
if a == 2:
for i in F_op:
print(i)
b = int(input())
n=int(input('dimension='))
if n == 2:
f1=input('f1=')
f2=input('f2=')
fp.write(f'F=field_2(function({f1}, 2), function({f2}, 2))\n')
if n == 3:
f1=input('f1=')
f2=input('f2=')
f3=input('f3=')
fp.write(f'F=field(function({f1}, 3), function({f2}, 3), function({f3}, 3))\n')
if b == 1:
x0=get_array(n)
fp.write(f'F.eval({x0}).prnt()')
if b == 2:
if n == 2:
print('invalid option')
r_menu()
if n == 3:
fp.write(f'F.div().prnt()')
if b == 3:
if n == 2:
print('invalid option')
r_menu()
if n == 3:
fp.write(f'F.curl().prnt()')
if b == 4:
print('insert gamma function')
if n == 2:
g1=input('g1=')
g2=input('g2=')
inf=input('lower bound=')
sup=input('upper bound=')
fp.write(f'gamma=field_2(function({g1}, 2), function({g2}, 2), 2, set_1(Interval({inf}, {sup})))\n')
if n == 3:
g1=input('g1=')
g2=input('g2=')
g3=input('g3=')
inf=input('lower bound=')
sup=input('upper bound=')
fp.write(f'gamma=field(function({g1}, 3), function({g2}, 3), function({g3}, 3), 3, set_1(Interval({inf}, {sup})))\n')
fp.write('print(F.p_integral(gamma))')
if b == 5:
if n == 3:
print('insert sigma function')
t=input('axis of reference=')
s=input('function g(x, y)=')
print('insert set')
t1=input('is the set x or y simple?')
pol=input('is the set in polar coordinates?')
infx=input('f1(x)=')
supx=input('f2(x)=')
inf=input('a=')
sup=input('b=')
vec=input('is the normal vector positive or negative?')
fp.write(f"sigma=sigma({t}, {s}, set_2({t1}, Interval({infx}, {supx}), Interval({inf}, {sup})), {pol},'{vec}')\n")
fp.write(f'print(F.flux(sigma))')
else:
print('invalid option')
r_menu()
if b == 6:
if n == 2:
print('insert set')
t=input('is the set x or y simple?')
pol=input('is the set in polar coordinates?')
infx=input('f1(x)=')
supx=input('f2(x)=')
inf=input('a=')
sup=input('b=')
fp.write(f'set=set_2({t}, Interval({infx}, {supx}), Interval({inf}, {sup}), 2, {pol})\n')
fp.write('print(F.green(set))')
else :
print('invalid option')
r_menu()
if b == 7:
if n == 3:
print('insert sigma function')
t=input('axis of reference=')
s=input('function g(x, y)=')
print('insert set')
t1=input('is the set x or y simple?')
pol=input('is the set in polar coordinates?')
infx=input('f1(x)=')
supx=input('f2(x)=')
inf=input('a=')
sup=input('b=')
vec=input('is the normal vector positive or negative?')
fp.write(f"sigma=sigma({t}, {s}, set_2({t1}, Interval({infx}, {supx}), Interval({inf}, {sup})), {pol},'{vec}')\n")
fp.write(f'print(F.stokes(sigma))')
else:
print('invalid option')
r_menu()
if b == 8:
if n == 3:
print('insert set\n')
t=input('what is the axis of reference?')
pol=input('is the set in polar coordinates?')
infz=input('f(z)=')
supz=input('f(z)=')
print('insert the 2-dimensional set')
t1=input('is the set y or x simple?')
pol1=input('is the set in polar coordinates?')
infx=input('f(x)=')
supx=input('f(x)=')
inf=input('a=')
sup=input('b=')
fp.write(f'set2=set_2({t1}, Interval({infx}, {supx}), Interval({inf}, {sup}), 2, {pol1})\ns=set_3({t}, Interval({infz}, {supz}), set2, 3, {pol})\n')
fp.write('print(F.gauss(s))')
else :
print('invalid option')
r_menu()
if b == 9:
system('python plotting.py')
if b == 0:
r_menu()
if a != 1 and a != 2:
r_menu()
fp.close()
system('python3 ~/Desktop/python/calculus/exe.py')
r_menu()
menu()
| StarcoderdataPython |
1675990 | import argparse
import sys
import os
import traceback
from biokbase.CompressionBasedDistance.Client import _read_inifile, ServerError as CBDServerError
from biokbase.CompressionBasedDistance.Helpers import get_config, parse_input_file, start_job
desc1 = '''
NAME
cbd-buildmatrixlocal -- build a distance matrix to compare microbiota samples
on local system
SYNOPSIS
'''
desc2 = '''
DESCRIPTION
Build a distance matrix from a set of sequence files for microbiota
comparisons. Compression based distance uses the relative compression
of combined and individual datasets to quantify overlaps between
microbial communities. The job started to build the distance matrix is
run on the local system.
See cbd-buildmatrix for a complete description of all arguments.
'''
desc3 = '''
EXAMPLES
Build a distance matrix for a set of sequence files where the format is
determined by the file extension:
> cbd-buildmatrixlocal mystudy.list
Build a distance matrix for a set of fastq sequence files:
> cbd-buildmatrixlocal --format fastq mystudy.list
SEE ALSO
cbd-buildmatrix
cbd-getmatrix
cbd-filtermatrix
cbd-plotmatrix
AUTHORS
<NAME>
'''
if __name__ == "__main__":
# Parse options.
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, prog='cbd-buildmatrixlocal', epilog=desc3)
parser.add_argument('inputPath', help='path to file with list of input sequence files', action='store', default=None)
parser.add_argument('-f', '--format', help='format of input sequence files', action='store', dest='format', default=None)
parser.add_argument('-s', '--scale', help='scale for distance matrix values', action='store', dest='scale', default='std')
parser.add_argument('-t', '--trim', help='trim sequence reads to the specified length', action='store', dest='sequenceLen', type=int, default=0)
parser.add_argument('--min-reads', help='minimum number of reads each sequence file must contain', action='store', dest='minReads', type=int, default=0)
parser.add_argument('--max-reads', help='maximum number of reads to process from each sequence file', action='store', dest='maxReads', type=int, default=0)
parser.add_argument('--extreme', help='use extreme compression (slower but hopefully better compression ratio)', action='store_true', dest='extreme', default=False)
parser.add_argument('-e', '--show-error', help='show detailed information for an exception', action='store_true', dest='showError', default=False)
usage = parser.format_usage()
parser.description = desc1 + ' ' + usage + desc2
parser.usage = argparse.SUPPRESS
args = parser.parse_args()
# Create input parameters for build_matrix() function.
input = dict()
input['scale'] = args.scale
input['sequence_length'] = args.sequenceLen
input['min_reads'] = args.minReads
input['max_reads'] = args.maxReads
if args.extreme:
input['extreme'] = 1
else:
input['extreme'] = 0
input['node_ids'] = list()
input['file_paths'] = list()
# Get an authentication token for the current user.
auth = _read_inifile()
# Parse the input file with the list of sequence files.
(fileList, extensions, numMissingFiles) = parse_input_file(args.inputPath)
if numMissingFiles > 0:
exit(1)
# Set the format based on the sequence file extension if the format argument was not specified.
if args.format is None:
if len(extensions) == 1:
input['format'] = extensions.keys()[0]
else:
print "The format of the sequence files could not be determined. Set the format with the --format argument."
exit(1)
else:
input['format'] = args.format
# For each file, add it to the list.
for filename in fileList:
input['file_paths'].append(filename)
# Submit a job to build the distance matrix.
try:
jobid = start_job(get_config(None), auth, input)
except Exception as e:
print 'Error starting job: '+e.message
if args.showError:
traceback.print_exc(file=sys.stdout)
exit(1)
print "Job '%s' submitted" %(jobid)
exit(0)
| StarcoderdataPython |
36373 | <reponame>nw13slx/thyme
import logging
import numpy as np
from glob import glob
from os.path import getmtime, isfile
from os import remove
from thyme import Trajectory
from thyme.parsers.monty import read_pattern, read_table_pattern
from thyme.routines.folders import find_folders, find_folders_matching
from thyme._key import *
from thyme.parsers.lammps_pizza_log import log as lammps_log
from thyme.parsers.lammps_pizza_dump import *
fl_num = r"([+-]?\d+.\d+[eE]?[+-]?\d*)"
sfl_num = r"\s+([+-]?\d+.\d+[eE]?[+-]?\d*)"
snum = r"\s+([+-]?\d+)"
nc_fl_num = r"[+-]?\d+.\d+[eE]?[+-]?\d*"
head_str = """ITEM: TIMESTEP
{timestep}
ITEM: NUMBER OF ATOMS
{natom}
ITEM: BOX BOUNDS pp pp pp
0 {lx}
0 {ly}
0 {lz}
ITEM: ATOMS id type x y z {type_str}"""
def write(name, trj, color_key="", spe2num={}):
if isfile(name):
remove(name)
keys = [POSITION]
type_str = ""
for key in trj.per_frame_attrs:
if key == FORCE:
type_str += " fx fy fz"
keys += [FORCE]
elif key == VELOCITY:
type_str += " vx vy vz"
keys += [VELOCITY]
elif key == color_key:
type_str += " q"
keys += [color_key]
fout = open(name, "w+")
for i in range(trj.nframes):
frame = trj.get_frame(i)
cell = frame[CELL]
off_dia_sum = np.sum(np.abs(cell)) - np.trace(np.abs(cell))
if off_dia_sum > 0:
raise NotImplementedError()
natom = frame[NATOMS]
hs = head_str.format(
lx=cell[0, 0],
ly=cell[1, 1],
lz=cell[2, 2],
timestep=i,
natom=natom,
type_str=type_str,
)
species = np.unique(frame[SPECIES])
base = len(spe2num)
if base == 0:
base = 1
spe2num.update(
{spe: i + base for i, spe in enumerate(species) if spe not in spe2num}
)
string = f"{hs}"
for j in range(natom):
string += f"\n{j+1} {spe2num[frame[SPECIES][j]]} "
for key in keys:
string += " " + " ".join([f"{value}" for value in frame[key][j]])
print(string, file=fout)
logging.info(f"write {name}")
fout.close()
logging.info(f"spe2num {spe2num}")
def from_file(filename):
data = dump(filename, 0)
data.read_all(allow_overlap=True)
col_id = data.names["id"]
col_type = data.names["type"]
x_id = data.names["x"]
y_id = data.names["y"]
z_id = data.names["z"]
if "fx" in data.names:
fx_id = data.names["fx"]
fy_id = data.names["fy"]
fz_id = data.names["fz"]
remaining_names = [
(i, name)
for i, name in enumerate(data.names)
if name not in ["id", "type", "x", "y", "z", "fx", "fy", "fz"]
]
list_trj = []
for i in range(data.nsnaps):
if i % 1000 == 0:
logging.info(f"{i} / {data.nsnaps}")
snap = data.snaps[i]
cols = np.vstack(snap.atoms)
ids = np.argsort(cols[:, col_id])
species = cols[:, col_type][ids]
pos = np.hstack(
(
cols[:, x_id].reshape([-1, 1]),
cols[:, y_id].reshape([-1, 1]),
cols[:, z_id].reshape([-1, 1]),
)
)
lx = snap.xhi - snap.xlo
ly = snap.yhi - snap.ylo
lz = snap.zhi - snap.zlo
d = {
CELL: np.diag([lx, ly, lz]).reshape([1, 3, 3]),
POSITION: pos[ids].reshape([1, -1, 3]),
SPECIES: species,
PER_FRAME_ATTRS: [POSITION, CELL],
FIXED_ATTRS: [SPECIES, NATOMS],
}
if "fx" in data.names:
force = np.hstack(
(
cols[:, fx_id].reshape([-1, 1]),
cols[:, fy_id].reshape([-1, 1]),
cols[:, fz_id].reshape([-1, 1]),
)
)[ids]
d.update({FORCE: force.reshape([1, -1, 3])})
d[PER_FRAME_ATTRS] += [FORCE]
d.update({name: cols[:, i].reshape([1, -1]) for i, name in remaining_names})
d[PER_FRAME_ATTRS] += [name for i, name in remaining_names]
_trj = Trajectory.from_dict(d)
list_trj += [_trj]
trj = Trajectory.stack(list_trj)
return trj
def read_log(filename):
l = lammps_log(filename, 0)
l.next()
data = np.array(l.data)
return l.names, data
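# Illustrative usage sketch (added for clarity; not part of the original module and the
# file names below are placeholders):
#
#     trj = from_file("dump.lammpstrj")        # parse a LAMMPS dump into a Trajectory
#     write("rewritten.lammpstrj", trj)        # write the Trajectory back out in dump format
#     names, data = read_log("log.lammps")     # thermo column names and values from a log file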
| StarcoderdataPython |
3477511 | <filename>roses/rosebiology/apps.py
from django.apps import AppConfig
class RosebiologyConfig(AppConfig):
name = 'rosebiology'
| StarcoderdataPython |
38123 | <filename>snaps/openstack/tests/create_user_tests.py<gh_stars>0
# Copyright (c) 2017 Cable Television Laboratories, Inc. ("CableLabs")
# and others. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import uuid
from snaps.openstack.create_user import OpenStackUser, UserSettings
from snaps.openstack.tests.os_source_file_test import OSComponentTestCase
from snaps.openstack.utils import keystone_utils
__author__ = 'spisarski'
class UserSettingsUnitTests(unittest.TestCase):
"""
Tests the construction of the UserSettings class
"""
def test_no_params(self):
with self.assertRaises(Exception):
UserSettings()
def test_empty_config(self):
with self.assertRaises(Exception):
UserSettings(**dict())
def test_name_only(self):
with self.assertRaises(Exception):
UserSettings(name='foo')
def test_config_with_name_only(self):
with self.assertRaises(Exception):
UserSettings(**{'name': 'foo'})
def test_name_pass_enabled_str(self):
with self.assertRaises(Exception):
UserSettings(name='foo', password='<PASSWORD>', enabled='true')
def test_config_with_name_pass_enabled_str(self):
with self.assertRaises(Exception):
UserSettings(
**{'name': 'foo', 'password': '<PASSWORD>', 'enabled': 'true'})
def test_name_pass_only(self):
settings = UserSettings(name='foo', password='<PASSWORD>')
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.password)
self.assertIsNone(settings.project_name)
self.assertIsNone(settings.email)
self.assertTrue(settings.enabled)
def test_config_with_name_pass_only(self):
settings = UserSettings(**{'name': 'foo', 'password': '<PASSWORD>'})
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.password)
self.assertIsNone(settings.project_name)
self.assertIsNone(settings.email)
self.assertTrue(settings.enabled)
def test_all(self):
settings = UserSettings(name='foo', password='<PASSWORD>',
project_name='proj-foo', email='<EMAIL>',
enabled=False)
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.password)
self.assertEqual('proj-foo', settings.project_name)
self.assertEqual('<EMAIL>', settings.email)
self.assertFalse(settings.enabled)
def test_config_all(self):
settings = UserSettings(**{'name': 'foo', 'password': '<PASSWORD>',
'project_name': 'proj-foo',
'email': '<EMAIL>',
'enabled': False})
self.assertEqual('foo', settings.name)
self.assertEqual('bar', settings.password)
self.assertEqual('proj-foo', settings.project_name)
self.assertEqual('<EMAIL>', settings.email)
self.assertFalse(settings.enabled)
class CreateUserSuccessTests(OSComponentTestCase):
"""
    Test for the OpenStackUser class defined in create_user.py
"""
def setUp(self):
"""
        Instantiates the UserSettings object and Keystone client that are
        used to create an OpenStack user
"""
guid = str(uuid.uuid4())[:-19]
guid = self.__class__.__name__ + '-' + guid
self.user_settings = UserSettings(
name=guid + '-name',
password=<PASSWORD> + <PASSWORD>',
roles={'admin': self.os_creds.project_name},
domain_name=self.os_creds.user_domain_name)
self.keystone = keystone_utils.keystone_client(self.os_creds)
# Initialize for cleanup
self.user_creator = None
def tearDown(self):
"""
        Cleans up the user created during the test
"""
if self.user_creator:
self.user_creator.clean()
def test_create_user(self):
"""
Tests the creation of an OpenStack user.
"""
self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
created_user = self.user_creator.create()
self.assertIsNotNone(created_user)
retrieved_user = keystone_utils.get_user(self.keystone,
self.user_settings.name)
self.assertIsNotNone(retrieved_user)
self.assertEqual(created_user, retrieved_user)
def test_create_user_2x(self):
"""
Tests the creation of an OpenStack user twice to ensure it only creates
one.
"""
self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
created_user = self.user_creator.create()
self.assertIsNotNone(created_user)
retrieved_user = keystone_utils.get_user(self.keystone,
self.user_settings.name)
self.assertIsNotNone(retrieved_user)
self.assertEqual(created_user, retrieved_user)
# Create user for the second time to ensure it is the same
user2 = OpenStackUser(self.os_creds, self.user_settings).create()
self.assertEqual(retrieved_user, user2)
def test_create_delete_user(self):
"""
Tests the creation of an OpenStack user then delete.
"""
        # Create user
self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
created_user = self.user_creator.create()
self.assertIsNotNone(created_user)
keystone_utils.delete_user(self.keystone, created_user)
# Delete user
self.user_creator.clean()
self.assertIsNone(self.user_creator.get_user())
def test_create_admin_user(self):
"""
Tests the creation of an OpenStack user.
"""
self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
created_user = self.user_creator.create()
self.assertIsNotNone(created_user)
retrieved_user = keystone_utils.get_user(self.keystone,
self.user_settings.name)
self.assertIsNotNone(retrieved_user)
self.assertEqual(created_user, retrieved_user)
role = keystone_utils.get_role_by_name(self.keystone, 'admin')
self.assertIsNotNone(role)
os_proj = keystone_utils.get_project(
keystone=self.keystone, project_name=self.os_creds.project_name)
user_roles = keystone_utils.get_roles_by_user(
self.keystone, retrieved_user, os_proj)
self.assertIsNotNone(user_roles)
self.assertEqual(1, len(user_roles))
self.assertEqual(role.id, user_roles[0].id)
| StarcoderdataPython |
1996208 | from marshmallow import fields
from .exceptions import UnsupportedValueError
def handle_length(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Length``, setting the
values appropriately for ``fields.List``, ``fields.Nested``, and
``fields.String``.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
Raises:
UnsupportedValueError: Raised if the `field` is something other than
`fields.List`, `fields.Nested`, or `fields.String`
"""
if isinstance(field, fields.String):
minKey = "minLength"
maxKey = "maxLength"
elif isinstance(field, (fields.List, fields.Nested)):
minKey = "minItems"
maxKey = "maxItems"
else:
raise UnsupportedValueError(
"In order to set the Length validator for JSON "
"schema, the field must be either a List, Nested or a String"
)
if validator.min:
schema[minKey] = validator.min
if validator.max:
schema[maxKey] = validator.max
if validator.equal:
schema[minKey] = validator.equal
schema[maxKey] = validator.equal
return schema
def handle_one_of(schema, field, validator, parent_schema):
"""Adds the validation logic for ``marshmallow.validate.OneOf`` by setting
the JSONSchema `enum` property to the allowed choices in the validator.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.OneOf): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: New JSON Schema that has been post processed and
altered.
"""
schema["enum"] = list(validator.choices)
schema["enumNames"] = list(validator.labels)
return schema
def handle_equal(schema, field, validator, parent_schema):
"""Adds the validation logic for ``marshmallow.validate.Equal`` by setting
the JSONSchema `enum` property to value of the validator.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Equal): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: New JSON Schema that has been post processed and
altered.
"""
# Deliberately using `enum` instead of `const` for increased compatibility.
#
# https://json-schema.org/understanding-json-schema/reference/generic.html#constant-values
# It should be noted that const is merely syntactic sugar for an enum with a single element [...]
schema["enum"] = [validator.comparable]
return schema
def handle_range(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Range``, setting the
values appropriately ``fields.Number`` and it's subclasses.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Range): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: New JSON Schema that has been post processed and
altered.
Raises:
UnsupportedValueError: Raised if the `field` is not an instance of
`fields.Number`.
"""
if not isinstance(field, fields.Number):
raise UnsupportedValueError(
"'Range' validator for non-number fields is not supported"
)
if validator.min is not None:
# marshmallow 2 includes minimum by default
# marshmallow 3 supports "min_inclusive"
min_inclusive = getattr(validator, "min_inclusive", True)
if min_inclusive:
schema["minimum"] = validator.min
else:
schema["exclusiveMinimum"] = validator.min
if validator.max is not None:
# marshmallow 2 includes maximum by default
# marshmallow 3 supports "max_inclusive"
max_inclusive = getattr(validator, "max_inclusive", True)
if max_inclusive:
schema["maximum"] = validator.max
else:
schema["exclusiveMaximum"] = validator.max
return schema
def handle_regexp(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Regexp``, setting the
values appropriately ``fields.String`` and it's subclasses.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Regexp): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: New JSON Schema that has been post processed and
altered.
Raises:
UnsupportedValueError: Raised if the `field` is not an instance of
`fields.String`.
"""
if not isinstance(field, fields.String):
raise UnsupportedValueError(
"'Regexp' validator for non-string fields is not supported"
)
schema["pattern"] = validator.regex.pattern
return schema
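# Illustrative usage sketch (added for clarity; not part of the original module). It shows
# how one of these post-processors augments a pre-built JSON schema fragment:
#
#     from marshmallow import Schema, fields, validate
#     field = fields.Integer()
#     validator = validate.Range(min=0, max=120)
#     handle_range({"type": "integer"}, field, validator, Schema())
#     # -> {"type": "integer", "minimum": 0, "maximum": 120}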
| StarcoderdataPython |
9600090 | # -*- coding: utf-8 -*-
from ..errors import JsonRpcBatchSizeError
from ..errors import handle_middleware_exceptions
from ..request import JussiJSONRPCRequest
from ..typedefs import HTTPRequest
from ..validators import limit_broadcast_transaction_request
@handle_middleware_exceptions
async def check_limits(request: HTTPRequest) -> None:
# pylint: disable=no-member
if request.method == 'POST':
jsonrpc_request = request.json
if isinstance(jsonrpc_request, JussiJSONRPCRequest):
limit_broadcast_transaction_request(jsonrpc_request,
limits=request.app.config.limits)
elif isinstance(jsonrpc_request, list):
if len(jsonrpc_request) > request.app.config.jsonrpc_batch_size_limit:
raise JsonRpcBatchSizeError(jrpc_batch_size=len(jsonrpc_request),
jrpc_batch_size_limit=request.app.config.jsonrpc_batch_size_limit)
for single_jsonrpc_request in jsonrpc_request:
limit_broadcast_transaction_request(single_jsonrpc_request,
limits=request.app.config.limits)
| StarcoderdataPython |
4911254 | <reponame>pombredanne/ruffus<filename>ruffus/test/test_follows_mkdir.py
#!/usr/bin/env python
from __future__ import print_function
import unittest
from ruffus import follows, pipeline_run, Pipeline, mkdir
import sys
"""
test_follows_mkdir.py
"""
import os
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0])) + "/"
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
directories = [os.path.abspath(tempdir + 'a'), tempdir + 'b']
@follows(mkdir(tempdir), mkdir(directories), mkdir(tempdir + 'c'), mkdir(tempdir + 'd', tempdir + 'e'), mkdir(tempdir + 'e'))
def task_which_makes_directories():
pass
class Test_task_mkdir(unittest.TestCase):
def setUp(self):
"""
"""
pass
def tearDown(self):
"""
delete directories
"""
for d in 'abcde':
fullpath = os.path.join(os.path.dirname(__file__), tempdir, d)
os.rmdir(fullpath)
os.rmdir(tempdir)
def test_mkdir(self):
pipeline_run(multiprocess=10, verbose=0, pipeline="main")
for d in 'abcde':
fullpath = os.path.join(os.path.dirname(__file__), tempdir, d)
self.assertTrue(os.path.exists(fullpath))
def test_newstyle_mkdir(self):
test_pipeline = Pipeline("test")
test_pipeline.follows(task_which_makes_directories, mkdir(directories), mkdir(
tempdir + 'c'), mkdir(tempdir + 'd', tempdir + 'e'), mkdir(tempdir + 'e'))
test_pipeline.run(multiprocess=10, verbose=0)
for d in 'abcde':
fullpath = os.path.join(os.path.dirname(__file__), tempdir, d)
self.assertTrue(os.path.exists(fullpath))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
381728 | <reponame>mayi140611/mayiutils_n1<gh_stars>0
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: read_config_file.py
@time: 2019-08-06 13:41
"""
def read_config_file(fp: str, mode='r', encoding='utf8', prefix='#') -> dict:
"""
    Read a text file, skip blank lines and lines starting with `prefix`, and return a dict
    :param fp: path of the config file
:param mode:
:param encoding:
:param prefix:
:return:
"""
with open(fp, mode, encoding=encoding) as f:
ll = f.readlines()
ll = [i for i in ll if all([i.strip(), i.startswith(prefix) == False])]
params = {i.split('=')[0].strip(): i.split('=')[1].strip() for i in ll}
print(params)
return params
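# Illustrative usage sketch (added for clarity; 'config.ini' is a placeholder path).
# The file is expected to contain "key = value" lines, for example:
#     # database settings
#     host = localhost
#     port = 5432
#
# if __name__ == '__main__':
#     params = read_config_file('config.ini')
#     # -> {'host': 'localhost', 'port': '5432'}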
| StarcoderdataPython |
45263 | class FormClosedEventArgs(EventArgs):
"""
Provides data for the System.Windows.Forms.Form.FormClosed event.
FormClosedEventArgs(closeReason: CloseReason)
"""
@staticmethod
def __new__(self, closeReason):
""" __new__(cls: type,closeReason: CloseReason) """
pass
CloseReason = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates why the form was closed.
Get: CloseReason(self: FormClosedEventArgs) -> CloseReason
"""
| StarcoderdataPython |
6465121 | <reponame>iklasky/timemachines<gh_stars>100-1000
from timemachines.skaters.suc.successorinclusion import using_successor
if using_successor:
from successor.skaters.scalarskaters.allscalarskaters import SCALAR_SKATERS
SUCCESSOR_SKATERS = SCALAR_SKATERS
else:
SUCCESSOR_SKATERS = [] | StarcoderdataPython |
4969961 | <reponame>smallwater94/yt-concat<filename>yt_concate/pipeline/steps/download_captions.py<gh_stars>0
# Download captions
from youtube_transcript_api import YouTubeTranscriptApi
from yt_concate.pipeline.steps.step import Step
import pickle
class DownloadCaptions(Step):
def process(self, transporter, inputs, utils):
        print('Downloading captions')
for yto in transporter:
if utils.caption_file_exists(yto):
                print('Already downloaded:', yto.v_id)
continue
try:
                # srt is the list of dicts returned by .get_transcript()
srt = YouTubeTranscriptApi.get_transcript(yto.v_id)
except:
                print('Not English or no captions, URL: ', yto.url)
continue
            print('Writing', yto.url)
with open(yto.get_caption_filepath(), 'wb', ) as f:
pickle.dump(srt, f)
return transporter
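# Note (assumption based on youtube_transcript_api's documented behaviour): each pickled
# caption file stores the list of dicts returned by get_transcript(), e.g.
#     [{'text': 'hello', 'start': 0.0, 'duration': 1.5}, ...]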
| StarcoderdataPython |
1748531 | from abaqusConstants import *
class DamageEvolution:
"""The DamageEvolution object specifies material properties to define the evolution of
damage.
Notes
-----
This object can be accessed by:
.. code-block:: python
import material
mdb.models[name].materials[name].ductileDamageInitiation.damageEvolution
mdb.models[name].materials[name].fldDamageInitiation.damageEvolution
mdb.models[name].materials[name].flsdDamageInitiation.damageEvolution
mdb.models[name].materials[name].hashinDamageInitiation.damageEvolution
mdb.models[name].materials[name].johnsonCookDamageInitiation.damageEvolution
mdb.models[name].materials[name].maxeDamageInitiation.damageEvolution
mdb.models[name].materials[name].maxpeDamageInitiation.damageEvolution
mdb.models[name].materials[name].maxpsDamageInitiation.damageEvolution
mdb.models[name].materials[name].maxsDamageInitiation.damageEvolution
mdb.models[name].materials[name].mkDamageInitiation.damageEvolution
mdb.models[name].materials[name].msfldDamageInitiation.damageEvolution
mdb.models[name].materials[name].quadeDamageInitiation.damageEvolution
mdb.models[name].materials[name].quadsDamageInitiation.damageEvolution
mdb.models[name].materials[name].shearDamageInitiation.damageEvolution
import odbMaterial
session.odbs[name].materials[name].ductileDamageInitiation.damageEvolution
session.odbs[name].materials[name].fldDamageInitiation.damageEvolution
session.odbs[name].materials[name].flsdDamageInitiation.damageEvolution
session.odbs[name].materials[name].hashinDamageInitiation.damageEvolution
session.odbs[name].materials[name].johnsonCookDamageInitiation.damageEvolution
session.odbs[name].materials[name].maxeDamageInitiation.damageEvolution
session.odbs[name].materials[name].maxpeDamageInitiation.damageEvolution
session.odbs[name].materials[name].maxpsDamageInitiation.damageEvolution
session.odbs[name].materials[name].maxsDamageInitiation.damageEvolution
session.odbs[name].materials[name].mkDamageInitiation.damageEvolution
session.odbs[name].materials[name].msfldDamageInitiation.damageEvolution
session.odbs[name].materials[name].quadeDamageInitiation.damageEvolution
session.odbs[name].materials[name].quadsDamageInitiation.damageEvolution
session.odbs[name].materials[name].shearDamageInitiation.damageEvolution
The table data for this object are:
- If *type*=DISPLACEMENT, and *softening*=LINEAR, and *mixedModeBehavior*=MODE_INDEPENDENT, the table data specify the following:
- Equivalent total or Plastic displacement at failure, measured from the time of damage initiation.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ENERGY, and *softening*=LINEAR, and *mixedModeBehavior*=MODE_INDEPENDENT, the table data specify the following:
- Fracture energy.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=DISPLACEMENT, and *softening*=LINEAR, and *mixedModeBehavior*=TABULAR, the table data specify the following:
- Total displacement at failure, measured from the time of damage initiation.
- Appropriate mode mix ratio.
- Appropriate mode mix ratio (if relevant, for three-dimensional problems with anisotropic shear behavior).
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ENERGY, and *softening*=LINEAR, and *mixedModeBehavior*=TABULAR, the table data specify the following:
- Fracture energy.
- Appropriate mode mix ratio.
- Appropriate mode mix ratio (if relevant, for three-dimensional problems with anisotropic shear behavior).
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=DISPLACEMENT, and *softening*=EXPONENTIAL, and *mixedModeBehavior*=MODE_INDEPENDENT, the table data specify the following:
- Equivalent total or Plastic displacement at failure, measured from the time of damage initiation.
- Exponential law parameter.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ENERGY, and *softening*=EXPONENTIAL, and *mixedModeBehavior*=MODE_INDEPENDENT, the table data specify the following:
- Fracture energy.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=DISPLACEMENT, and *softening*=EXPONENTIAL, and *mixedModeBehavior*=TABULAR, the table data specify the following:
- Total displacement at failure, measured from the time of damage initiation.
- Exponential law parameter.
- Appropriate mode mix ratio.
- Appropriate mode mix ratio (if relevant, for three-dimensional problems with anisotropic shear behavior).
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ENERGY, and *softening*=EXPONENTIAL, and *mixedModeBehavior*=TABULAR, the table data specify the following:
- Fracture energy.
- Appropriate mode mix ratio.
- Appropriate mode mix ratio (if relevant, for three-dimensional problems with anisotropic shear behavior).
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=DISPLACEMENT, and *softening*=TABULAR, and *mixedModeBehavior*=MODE_INDEPENDENT, the table data specify the following:
- Damage variable.
- Equivalent total or Plastic displacement, measured from the time of damage initiation.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=DISPLACEMENT, and *softening*=TABULAR, and *mixedModeBehavior*=TABULAR, the table data specify the following:
- Damage variable.
- Equivalent total or Plastic displacement, measured from the time of damage initiation.
- Appropriate mode mix ratio.
- Appropriate mode mix ratio (if relevant, for three-dimensional problems with anisotropic shear behavior).
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ENERGY, and *softening*=LINEAR or EXPONENTIAL, and *mixedModeBehavior*=POWER_LAW or BK, the table data specify the following:
- Normal mode fracture energy.
- Shear mode fracture energy for failure in the first shear direction.
- Shear mode fracture energy for failure in the second shear direction.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ENERGY, *softening*=LINEAR and constructor for [DamageInitiation](https://help.3ds.com/2022/english/DSSIMULIA_Established/SIMACAEKERRefMap/simaker-c-damageinitiationpyc.htm?ContextScope=all)=HashinDamageInitiation the table data specify the following:
- Fiber tensile fracture energy.
- Fiber compressive fracture energy.
- Matrix tensile fracture energy.
- Matrix compressive fracture energy.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
The corresponding analysis keywords are:
- DAMAGE EVOLUTION
"""
def __init__(self, type: SymbolicConstant, table: tuple, degradation: SymbolicConstant = MAXIMUM,
temperatureDependency: Boolean = OFF, dependencies: int = 0,
mixedModeBehavior: SymbolicConstant = MODE_INDEPENDENT,
modeMixRatio: SymbolicConstant = ENERGY, power: float = None,
softening: SymbolicConstant = LINEAR):
"""This method creates a DamageEvolution object.
Notes
-----
This function can be accessed by:
.. code-block:: python
            mdb.models[name].materials[name].ductileDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].fldDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].flsdDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].hashinDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].johnsonCookDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].maxeDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].maxpeDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].maxpsDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].maxsDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].mkDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].msfldDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].quadeDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].quadsDamageInitiation.DamageEvolution
            mdb.models[name].materials[name].shearDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].ductileDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].fldDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].flsdDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].hashinDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].johnsonCookDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].maxeDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].maxpeDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].maxpsDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].maxsDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].mkDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].msfldDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].quadeDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].quadsDamageInitiation.DamageEvolution
            session.odbs[name].materials[name].shearDamageInitiation.DamageEvolution
Parameters
----------
type
A SymbolicConstant specifying the type of damage evolution. Possible values are
DISPLACEMENT and ENERGY.
table
A sequence of sequences of Floats specifying the items described below.
degradation
A SymbolicConstant specifying the degradation. Possible values are MAXIMUM and
MULTIPLICATIVE. The default value is MAXIMUM.
temperatureDependency
A Boolean specifying whether the data depend on temperature. The default value is OFF.
dependencies
An Int specifying the number of field variable dependencies. The default value is 0.
mixedModeBehavior
A SymbolicConstant specifying the mixed mode behavior. Possible values are
MODE_INDEPENDENT, TABULAR, POWER_LAW, and BK. The default value is MODE_INDEPENDENT.
modeMixRatio
A SymbolicConstant specifying the mode mix ratio. Possible values are ENERGY and
TRACTION. The default value is ENERGY.
power
None or a Float specifying the exponent in the power law or the Benzeggagh-Kenane
criterion that defines the variation of fracture energy with mode mix for cohesive
elements. The default value is None.
softening
A SymbolicConstant specifying the softening. Possible values are LINEAR, EXPONENTIAL,
and TABULAR. The default value is LINEAR.
Returns
-------
A DamageEvolution object.
Raises
------
RangeError
"""
pass
def setValues(self):
"""This method modifies the DamageEvolution object.
Raises
------
RangeError
"""
pass
| StarcoderdataPython |
3544309 | import pysmurf
import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import os
import seaborn as sns
import glob
S = pysmurf.SmurfControl(make_logfile=False,
epics_root='test_epics',
cfg_file='/usr/local/controls/Applications/'+\
'smurf/pysmurf/pysmurf/cfg_files/'+\
'experiment_fp28_smurfsrv04.cfg',
no_dir=True)
datadir = '/data/smurf_data/20190216/1550347814/outputs'
datafiles = glob.glob(os.path.join(datadir, '*.dat'))
#datafile = os.path.join(datadir, '1550348586.dat')
for datafile in datafiles:
t, d, m = S.read_stream_data(datafile)
# Extract useful values
dirname, filename = os.path.split(datafile)
timestamp = filename.split('.')[0]
# Channels with IV curves
ivch = np.array([16,32,64,165,171,179,197,203,213,222,256,389,395,398,415,421,427,447])
d = d[m[2][ivch]]
# Take PCA
pca = PCA(svd_solver='full')
pca.fit(d.T)
d2 = pca.transform(d.T).T
coeff = pca.components_.T
fig, ax = plt.subplots(6,3, figsize=(12,12), sharex=True)
for i in np.arange(18):
y = i // 3
x = i % 3
ax[y,x].plot(d2[i])
ax[y,x].set_title('Mode {}'.format(i))
if y == 5:
ax[y,x].set_xlabel('Sample')
if x == 0:
ax[y,x].set_ylabel('Amp')
fig.suptitle(timestamp)
plt.savefig(os.path.join(dirname, '{}_modes.png'.format(timestamp)),
bbox_inches='tight')
plt.close()
fig, ax = plt.subplots(2,1, figsize=(5,7));
sns.heatmap(coeff, cmap='RdBu', vmin=-1, vmax=1, ax=ax[0])
ax[0].set_xlabel('Mode')
ax[0].set_ylabel('Channel')
ax[1].plot(pca.explained_variance_ratio_, '.')
ax[1].set_xlabel('Channel')
ax[1].set_ylabel('Variance ratio')
fig.suptitle(timestamp)
plt.savefig(os.path.join(dirname, '{}_amplitudes.png'.format(timestamp)),
bbox_inches='tight')
plt.close()
#cm = plt.get_cmap('viridis')
#n_mode = 5
#for i, ch in enumerate(ivch):
# plt.figure()
# plt.plot(d[i]-np.mean(d[i]), 'k')
# plt.title(ch)
# for j in np.arange(n_mode):
# plt.plot(coeff[j,i]*d2[i], color=cm(j/n_mode), alpha=.5,
# label='Mode {}'.format(j))
# plt.legend()
# plt.savefig(os.path.join(dirname, '{}_ch{:03}.png'.format(timestamp, ch)),
# bbox_inches='tight')
# plt.close()
| StarcoderdataPython |
8008664 | <gh_stars>10-100
""" Process images with trained model """
from __future__ import print_function
import argparse
import os
from utilities.input_correction import Correction
__author__ = '<NAME>'
CHECKPOINT_PATH = './checkpoints/'
OUTPUT_PATH = './results/'
BATCH_SIZE = 128
CHECKPOINT_NAME = 'checkpoint.best.hdf5'
def main():
""" parse parameters from command line and start processing images """
parser = argparse.ArgumentParser()
add_arg = parser.add_argument
add_arg('-i', dest='input', type=str, required=True, help='directory of images')
add_arg('-s', dest='shape', type=int, required=True, nargs='+', help='width, height, channel of image')
add_arg('-o', dest='output', type=str, default=OUTPUT_PATH, help='directory to store processed images, default %s' % OUTPUT_PATH)
add_arg('-b', dest='batch', type=int, default=BATCH_SIZE, help='batch size, default %s' % BATCH_SIZE)
add_arg('--checkpoint-path', dest='path', type=str, default=CHECKPOINT_PATH, help='path to save checkpoint files, default %s' % CHECKPOINT_PATH)
add_arg('--checkpoint-name', dest='name', type=str, default=CHECKPOINT_NAME, help='the name of checkpoint file, default %s' % CHECKPOINT_NAME)
add_arg('--cpu-only', dest='cpu', action='store_true', help='whether use cpu only or not, default False')
args = parser.parse_args()
if args.cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = ''
assert len(args.shape) == 3
corr = Correction().correct
params = {
'img_shape': tuple(args.shape),
'img_dir': corr(args.input),
'res_dir': corr(args.output),
'checkpoint_path': corr(args.path),
'checkpoint_name': args.name,
'batch_size': args.batch
}
assert os.path.exists(params['checkpoint_path'])
print('Image directory: %s' % params['img_dir'])
print('Output directory: %s' % params['res_dir'])
print('Checkpoint path: %s' % params['checkpoint_path'])
print('Checkpoint name: %s' % params['checkpoint_name'])
print('Shape of image: %s' %(params['img_shape'],))
print('Batch size: %s' % params['batch_size'])
print('Running on %s' % ('CPU' if args.cpu else 'GPU'))
if not os.path.exists(params['res_dir']):
os.makedirs(params['res_dir'])
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from enhancer import Enhancer
enhancer = Enhancer(**params)
enhancer.load_data(process=True)
enhancer.load_model()
try:
enhancer.process()
except (KeyboardInterrupt, SystemExit):
print('Abort!')
if __name__ == '__main__':
main()
| StarcoderdataPython |
4960265 | import pytest
from .fixtures import *
@pytest.mark.parametrize(
"original_df",
[
make_table(cols=1, astype="pandas"),
make_table(sorted_datetime_index, cols=1, astype="pandas"),
make_table(sorted_string_index, cols=1, astype="pandas"),
],
ids=["int index", "datetime index", "string index"],
)
def test_pandas_series_io(original_df, store):
# Arrange
    original_df = original_df.squeeze()
partition_size = get_partition_size(original_df,
NUMBER_OF_PARTITIONS)
store.write_table(TABLE_NAME,
original_df,
partition_size=partition_size)
# Act
df = store.read_pandas(TABLE_NAME)
# Assert
assert df.equals(original_df)
| StarcoderdataPython |
167944 | from django.db import models
# Create your models here.
class QuesModel(models.Model):
question = models.CharField(max_length=200, null=True)
op1 = models.CharField(max_length=200, null=True)
op2 = models.CharField(max_length=200, null=True)
op3 = models.CharField(max_length=200, null=True)
op4 = models.CharField(max_length=200, null=True)
ans = models.CharField(max_length=200, null=True)
def __str__(self):
return self.question
| StarcoderdataPython |
5076763 | from numbers import Number
class Coordinates2D(list):
def __add__(self, other):
# print('--- ADD ---')
return self.__class__([self[0] + other[0], self[1] + other[1]])
def __iadd__(self, other):
# print('--- IADD ---')
return self.__class__([self[0] + other[0], self[1] + other[1]])
def __mul__(self, other):
# print('--- MUL ---')
if not isinstance(other, Number):
return super().__mul__(other)
return self.__class__([self[0] * other, self[1] * other])
def __rmul__(self, other):
# print('--- RMUL ---')
if not isinstance(other, Number):
return super().__rmul__(other)
return self.__class__([self[0] * other, self[1] * other])
def set_value(self, value):
assert len(value) == 2
self[0], self[1] = value
@property
def x(self):
return self[0]
@property
def y(self):
return self[1]
@classmethod
def from_instruction(cls, instruction):
"""Only accepts compass directions"""
direction, value = instruction[0], int(instruction[1:])
if direction == 'N':
coord = cls([0, value])
elif direction == 'S':
coord = cls([0, -value])
elif direction == 'E':
coord = cls([value, 0])
elif direction == 'W':
coord = cls([-value, 0])
else:
raise ValueError()
return coord
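# Illustrative usage sketch (added for clarity; not part of the original module):
if __name__ == "__main__":
    a = Coordinates2D([1, 2])
    b = Coordinates2D.from_instruction("N3")  # compass move: 3 units north -> [0, 3]
    print(a + b)     # [1, 5]
    print(2 * a)     # [2, 4]
    a.set_value([7, 8])
    print(a.x, a.y)  # 7 8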
| StarcoderdataPython |
1951860 | from django import template
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
import time
register = template.Library()
@register.simple_tag
def get_monitoring_table(data):
context = {}
context["data"] = data
context["empty_data_holder"] = "<b></b>"
return render_to_string("custom/shared/monitoring_table.html", context)
return ''
| StarcoderdataPython |
5028558 | """
desispec.skymag
===============
Utility function to compute the sky magnitude per arcmin2 based from the measured sky model
of an exposure and a static model of the instrument throughput.
"""
import os,sys
import numpy as np
import fitsio
from astropy import units, constants
from desiutil.log import get_logger
from speclite import filters
from desispec.io import read_sky,findfile,specprod_root,read_average_flux_calibration
from desispec.calibfinder import findcalibfile
average_calibrations = dict()
decam_filters = None
# only read once per process
def _get_average_calibration(filename) :
"""
    Use a dictionary referenced by a global variable
to keep a copy of the calibration
instead of reading it at each function call.
"""
global average_calibrations
if not filename in average_calibrations :
average_calibrations[filename] = read_average_flux_calibration(filename)
return average_calibrations[filename]
# only read once per process
def _get_decam_filters() :
global decam_filters
if decam_filters is None :
log = get_logger()
log.info("read decam filters")
decam_filters = filters.load_filters("decam2014-g", "decam2014-r", "decam2014-z")
return decam_filters
# AR grz-band sky mag / arcsec2 from sky-....fits files
# AR now using work-in-progress throughput
# AR still provides a better agreement with GFAs than previous method
def compute_skymag(night, expid, specprod_dir=None):
"""
Computes the sky magnitude for a given exposure. Uses the sky model
and apply a fixed calibration for which the fiber aperture loss
is well understood.
Args:
night: int, YYYYMMDD
expid: int, exposure id
specprod_dir: str, optional, specify the production directory.
default is $DESI_SPECTRO_REDUX/$SPECPROD
returns (gmag,rmag,zmag) AB magnitudes per arcsec2, tuple with 3 float values
"""
log=get_logger()
# AR/DK DESI spectra wavelengths
wmin, wmax, wdelta = 3600, 9824, 0.8
fullwave = np.round(np.arange(wmin, wmax + wdelta, wdelta), 1)
cslice = {"b": slice(0, 2751), "r": slice(2700, 5026), "z": slice(4900, 7781)}
# AR (wmin,wmax) to "stich" all three cameras
wstich = {"b": (wmin, 5780), "r": (5780, 7570), "z": (7570, 9824)}
if specprod_dir is None :
specprod_dir = specprod_root()
# AR looking for a petal with brz sky and ivar>0
sky_spectra = []
for spec in range(10) :
sky = np.zeros(fullwave.shape)
ok = True
for camera in ["b","r","z"] :
camspec="{}{}".format(camera,spec)
filename = findfile("sky",night=night,expid=expid,camera=camspec,specprod_dir=specprod_dir)
if not os.path.isfile(filename) :
log.warning("skipping {}-{:08d}-{} : missing {}".format(night,expid,spec,filename))
ok = False
break
fiber=0
skyivar=fitsio.read(filename,"IVAR")[fiber]
if np.all(skyivar==0) :
log.warning("skipping {}-{:08d}-{} : ivar=0 for {}".format(night,expid,spec,filename))
ok=False
break
skyflux=fitsio.read(filename,0)[fiber]
skywave=fitsio.read(filename,"WAVELENGTH")
header=fitsio.read_header(filename)
exptime=header["EXPTIME"]
            # for now we use a fixed calibration as used in DESI-6043, for which the fiber aperture loss is known
cal_filename="{}/spec/fluxcalib/fluxcalibnight-{}-20201216.fits".format(os.environ["DESI_SPECTRO_CALIB"],camera)
# apply the correction from
fiber_acceptance_for_point_sources = 0.60 # see DESI-6043
mean_fiber_diameter_arcsec = 1.52 # see DESI-6043
fiber_area_arcsec = np.pi*(mean_fiber_diameter_arcsec/2)**2
acal = _get_average_calibration(cal_filename)
flux = np.interp(fullwave[cslice[camera]], skywave, skyflux)
sky[cslice[camera]] = flux / exptime / acal.value() * fiber_acceptance_for_point_sources / fiber_area_arcsec * 1e-17 # ergs/s/cm2/A/arcsec2
if not ok : continue # to next spectrograph
sky_spectra.append(sky)
if len(sky_spectra)==0 : return (99.,99.,99.)
if len(sky_spectra)==1 :
sky = sky_spectra[0]
else :
sky = np.mean(np.array(sky_spectra),axis=0) # mean over petals/spectrographs
# AR integrate over the DECam grz-bands
filts = _get_decam_filters()
# AR zero-padding spectrum so that it covers the DECam grz passbands
# AR looping through filters while waiting issue to be solved (https://github.com/desihub/speclite/issues/64)
sky_pad, fullwave_pad = sky.copy(), fullwave.copy()
for i in range(len(filts)):
sky_pad, fullwave_pad = filts[i].pad_spectrum(sky_pad, fullwave_pad, method="zero")
mags = filts.get_ab_magnitudes(sky_pad * units.erg / (units.cm ** 2 * units.s * units.angstrom),fullwave_pad * units.angstrom).as_array()[0]
return mags # AB mags for flux per arcsec2
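# Illustrative usage sketch (added for clarity; the night/expid values are placeholders and a
# DESI production environment with the corresponding sky files is assumed):
#
#     gmag, rmag, zmag = compute_skymag(night=20201216, expid=67890)
#     print("sky brightness [AB mag/arcsec2]: g={:.2f} r={:.2f} z={:.2f}".format(gmag, rmag, zmag))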
| StarcoderdataPython |
9755292 | <filename>system_desing.py
# -- --------------------------------------------------------------------------------------------------- -- #
# -- project: A python project for algorithmic trading in FXCM -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- script: requirements.txt : text file with the required libraries for the project -- #
# -- author: YOUR GITHUB USER NAME -- #
# -- license: MIT License -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- Template repository: https://github.com/IFFranciscoME/trading-project -- #
# -- --------------------------------------------------------------------------------------------------- -- #
from cmath import nan
import pandas as pd
import numpy as np
#import data as dt
import functions as fn
import ta
#dt.con.is_connected()
#Criterio 1
#mid OHLC de 30 min de EUR/USD
#Agosto 2021 a Ene 2022
#data_ohlc = dt.fxcm_ohlc('BTC/USD', 'H4' , '2018-01-31 00:00:00', '2021-12-31 23:59:59')
data_ohlc = pd.read_csv("BTCUSD.csv", index_col="Date")
# Visualizar
#len(data_ohlc)
#data_ohlc.head(5)
#data_ohlc.tail(5)
#descripcion
#data_ohlc.describe()
#data_ohlc.info()
#split into training, validation and test sets
train_ohlc = data_ohlc.loc['31/01/2018' :'31/01/2020']
#train_ohlc.head(5)
#train_ohlc.tail(5)
val_ohlc = data_ohlc.loc['01/02/2021' : '31/03/2021']
#val_ohlc.head(5)
#val_ohlc.tail(5)
#val_ohlc.describe()
test_ohlc = data_ohlc.loc['01/04/2021' : '31/12/2021']
#test_ohlc.head(5)
#test_ohlc.tail(5)
#test_ohlc.describe()
def proceso_completo(cierre, open, comision,\
short_length, long_length, take_profit, stop_loss, capital):
short_ema = fn.ema(serie = cierre, length = short_length)
long_ema = fn.ema(serie = cierre, length = long_length)
señales = fn.signals(short_ema=short_ema, long_ema=long_ema, serie=cierre)
señales_index = fn.signal_index(lista = señales)
operaciones = fn.operations(lista = señales_index, precios=open)
rend_operacion = fn.open_price_profit(lista = operaciones)
rendimiento = fn.profit(lista = rend_operacion, comision = comision,\
take_profit = take_profit, stop_loss = stop_loss)
flujo = fn.capital_flow(lista = rendimiento, capital = capital)
resultados = [señales, señales_index, operaciones, rend_operacion, rendimiento, flujo]
return resultados
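# Illustrative usage sketch (added for clarity; the column names "Open"/"Close" and the
# parameter values are assumptions, chosen only to show the expected call signature):
#
#     resultados = proceso_completo(cierre=train_ohlc["Close"], open=train_ohlc["Open"],
#                                   comision=0.00125, short_length=10, long_length=30,
#                                   take_profit=0.05, stop_loss=0.03, capital=100_000)
#     señales, señales_index, operaciones, rend_operacion, rendimiento, flujo = resultados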
| StarcoderdataPython |
3361601 |
import luigi
import subprocess
from os.path import join, dirname, basename
from ..utils.cap_task import CapTask
from ..config import PipelineConfig
from ..utils.conda import CondaPackage
from ..databases.taxonomic_db import TaxonomicDB
from ..preprocessing.clean_reads import CleanReads
class KrakenUniq(CapTask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pkg = CondaPackage(
package="krakenuniq==0.5.8",
executable="krakenuniq",
channel="bioconda",
config_filename=self.config_filename,
)
self.config = PipelineConfig(self.config_filename)
self.out_dir = self.config.out_dir
self.db = TaxonomicDB(config_filename=self.config_filename)
self.reads = CleanReads(
sample_name=self.sample_name,
pe1=self.pe1,
pe2=self.pe2,
config_filename=self.config_filename
)
@classmethod
def _module_name(cls):
return 'krakenuniq'
def requires(self):
return self.pkg, self.db, self.reads
@classmethod
def version(cls):
return 'v0.1.0'
@classmethod
def dependencies(cls):
return ['krakenuniq==0.5.8', TaxonomicDB, CleanReads]
def output(self):
return {
'report': self.get_target('report', 'tsv'),
'read_assignments': self.get_target('read_assignments', 'tsv'),
}
def _run(self):
report_path = self.output()['report'].path
        read_assignments = self.output()['read_assignments'].path
cmd = (
f'{self.pkg.bin} '
f'--report-file {report_path} '
'--gzip-compressed '
'--fastq-input '
f'--threads {self.cores} '
'--paired '
'--preload '
f'--db {self.db.krakenuniq_db} '
f'{self.reads.output()["clean_reads"][0].path} '
f'{self.reads.output()["clean_reads"][1].path} '
f'> {read_assignments}'
)
self.run_cmd(cmd)
| StarcoderdataPython |
1871174 | """
hnn_geppetto.py
Initialise Geppetto, this class contains methods to connect the application with the Geppetto based UI
"""
import logging
import os
import sys
from contextlib import redirect_stdout
from jupyter_geppetto import synchronization
from . import nwb_data_manager
from .nwb_model_interpreter.nwb_reader import NWBReader
class NWBGeppetto(): # pragma: no cover
def __init__(self):
# use to decide whether or not to update the canvas in the front end
logging.debug("Initializing the original model")
synchronization.context = { 'nwb_geppetto': self }
def get_data(self):
with redirect_stdout(sys.__stdout__):
return {
"metadata": {},
"isDocker": os.path.isfile('/.dockerenv'),
"currentFolder": os.getcwd()
}
def set_nwb_file(self, nwbfilename):
main = __import__('__main__')
import pynwb
main.nwbfilename = nwb_data_manager.get_file_path(nwbfilename)
main.pynwb = pynwb
self.nwb_reader = NWBReader(main.nwbfilename)
main.nwb_reader = self.nwb_reader
main.nwbfile = self.nwb_reader.nwbfile
def main(nwbfilename): # pragma: no cover
logging.info("Initialising NWB UI")
geppetto = NWBGeppetto()
geppetto.set_nwb_file(nwbfilename)
main = __import__('__main__')
from nwbwidgets import nwb2widget
main.nwb2widget = nwb2widget
main.show = lambda: nwb2widget(main.nwbfile)
logging.info("NWB UI initialised") | StarcoderdataPython |
9622338 | <gh_stars>1-10
#!/usr/bin/env python3
import os
import sys
import math
import pdb
from local import *
import time
import torch
import numpy as np
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torchbiomed.transforms as biotransforms
import torchbiomed.loss as bioloss
import torchbiomed.utils as utils
import SimpleITK as sitk
import shutil
import setproctitle
import vnet
import DataManager as DM
import promise12
import make_graph
from functools import reduce
import operator
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv3d') != -1:
nn.init.kaiming_normal_(m.weight)
m.bias.data.zero_()
def datestr():
now = time.gmtime()
return '{}{:02}{:02}_{:02}{:02}'.format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
def save_checkpoint(state, path, prefix, filename='checkpoint.pth.tar'):
prefix_save = os.path.join(path, prefix)
name = prefix_save + '_' + filename
torch.save(state, name)
def inference(params, args, loader, model):
src = params['ModelParams']['dirInfer']
dst = params['ModelParams']['dirResult']
model.eval()
# assume single GPU / batch size 1
for batch_idx, data in enumerate(loader):
data, id = data
id = id[0]
itk_img = sitk.ReadImage(os.path.join(src, id))
origin = np.array(list(reversed(itk_img.GetOrigin())))
spacing = np.array(list(reversed(itk_img.GetSpacing())))
# pdb.set_trace()
_, _, z, y, x = data.shape # need to subset shape of 3-d. by Chao.
# convert names to batch tensor
if args.cuda:
            data = data.pin_memory()
data = data.cuda()
with torch.no_grad():
data = Variable(data)
output = model(data)
_, output = output.max(1)
output = output.view((x, y, z))
# pdb.set_trace()
output = output.cpu()
print("save {}".format(id))
utils.save_updated_image(output, os.path.join(dst, id + "_predicted.mhd"), origin, spacing)
# performing post-train test:
# train.py --resume <model checkpoint> --i <input directory (*.mhd)> --save <output directory>
def noop(x):
return x
def main(params, args):
best_prec1 = 100. # accuracy? by Chao
args.cuda = not args.no_cuda and torch.cuda.is_available()
resultDir = 'results/vnet.base.{}'.format(datestr())
nll = True
if args.dice:
nll = False
weight_decay = args.weight_decay
setproctitle.setproctitle(resultDir)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
print("build vnet")
model = vnet.VNet(elu=False, nll=nll)
batch_size = args.batchSz
torch.cuda.set_device(0) # why do I have to add this line? It seems the below line is useless to apply GPU devices. By Chao.
model = nn.parallel.DataParallel(model, device_ids=[0])
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.evaluate, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
model.apply(weights_init)
if nll:
train = train_nll
test = test_nll
else:
train = train_dice
test = test_dice
print(' + Number of params: {}'.format(
sum([p.data.nelement() for p in model.parameters()])))
if args.cuda:
model = model.cuda()
if os.path.exists(resultDir):
shutil.rmtree(resultDir)
os.makedirs(resultDir, exist_ok=True)
# transform
trainTransform = transforms.Compose([
transforms.ToTensor()
])
testTransform = transforms.Compose([
transforms.ToTensor()
])
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
print("\nloading training set")
dataManagerTrain = DM.DataManager(params['ModelParams']['dirTrain'],
params['ModelParams']['dirResult'],
params['DataManagerParams'])
dataManagerTrain.loadTrainingData() # required
numpyImages = dataManagerTrain.getNumpyImages()
numpyGT = dataManagerTrain.getNumpyGT()
trainSet = promise12.PROMISE12(mode='train', images=numpyImages, GT=numpyGT, transform=trainTransform)
trainLoader = DataLoader(trainSet, batch_size=batch_size, shuffle=True, **kwargs)
print("\nloading test set")
dataManagerTest = DM.DataManager(params['ModelParams']['dirTest'],
params['ModelParams']['dirResult'],
params['DataManagerParams'])
dataManagerTest.loadTestingData() # required
numpyImages = dataManagerTest.getNumpyImages()
numpyGT = dataManagerTest.getNumpyGT()
testSet = promise12.PROMISE12(mode='test', images=numpyImages, GT=numpyGT, transform=testTransform)
testLoader = DataLoader(testSet, batch_size=batch_size, shuffle=True, **kwargs)
if args.opt == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=1e-1,
momentum=0.99, weight_decay=weight_decay)
elif args.opt == 'adam':
optimizer = optim.Adam(model.parameters(), weight_decay=weight_decay)
elif args.opt == 'rmsprop':
optimizer = optim.RMSprop(model.parameters(), weight_decay=weight_decay)
trainF = open(os.path.join(resultDir, 'train.csv'), 'w')
testF = open(os.path.join(resultDir, 'test.csv'), 'w')
for epoch in range(1, args.nEpochs + 1):
adjust_opt(args.opt, optimizer, epoch)
train(args, epoch, model, trainLoader, optimizer, trainF)
testDice = test(args, epoch, model, testLoader, optimizer, testF) # err is accuracy??? by Chao.
save_checkpoint({'epoch': epoch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1}, path=resultDir, prefix="vnet")
os.system('./plot.py {} {} &'.format(len(trainLoader), resultDir))
trainF.close()
testF.close()
# inference, i.e. output predicted mask for test data in .mhd
if params['ModelParams']['dirInfer'] != '':
print("loading inference data")
dataManagerInfer = DM.DataManager(params['ModelParams']['dirInfer'],
params['ModelParams']['dirResult'],
params['DataManagerParams'])
dataManagerInfer.loadInferData() # required. Create .loadInferData??? by Chao.
numpyImages = dataManagerInfer.getNumpyImages()
inferSet = promise12.PROMISE12(mode='infer', images=numpyImages, GT=None, transform=testTransform)
inferLoader = DataLoader(inferSet, batch_size=batch_size, shuffle=True, **kwargs)
inference(params, args, inferLoader, model)
# def train_nll(args, epoch, model, trainLoader, optimizer, trainF):
# model.train()
# nProcessed = 0
# nTrain = len(trainLoader.dataset)
# for batch_idx, output in enumerate(trainLoader):
# data, target, id = output
# if args.cuda:
# data, target = data.cuda(), target.cuda()
# data, target = Variable(data), Variable(target)
# optimizer.zero_grad()
# output = model(data)
# target = target.view(target.numel())
# loss = F.nll_loss(output, target)
# dice_loss = bioloss.dice_error(output, target)
# # make_graph.save('/tmp/t.dot', loss.creator); assert(False)
# loss.backward()
# optimizer.step()
# nProcessed += len(data)
# pred = output.data.max(1)[1] # get the index of the max log-probability
# incorrect = pred.ne(target.data).cpu().sum()
# err = 100.*incorrect/target.numel()
# partialEpoch = epoch + batch_idx / len(trainLoader) - 1
# print('Train Epoch: {:.2f} [{}/{} ({:.0f}%)]\tLoss: {:.4f}\tError: {:.3f}\t Dice: {:.6f}'.format(
# partialEpoch, nProcessed, nTrain, 100. * batch_idx / len(trainLoader),
# loss.data[0], err, dice_loss))
#
# trainF.write('{},{},{}\n'.format(partialEpoch, loss.data[0], err))
# trainF.flush()
#
# def test_nll(args, epoch, model, testLoader, optimizer, testF):
# model.eval()
# test_loss = 0
# dice_loss = 0
# incorrect = 0
# numel = 0
# for data, target in testLoader:
# if args.cuda:
# data, target = data.cuda(), target.cuda()
# data, target = Variable(data, volatile=True), Variable(target)
# target = target.view(target.numel())
# numel += target.numel()
# output = model(data)
# test_loss += F.nll_loss(output, target, weight=weights).data[0]
# dice_loss += bioloss.dice_error(output, target)
# pred = output.data.max(1)[1] # get the index of the max log-probability
# incorrect += pred.ne(target.data).cpu().sum()
#
# test_loss /= len(testLoader) # loss function already averages over batch size
# dice_loss /= len(testLoader)
# err = 100.*incorrect/numel
# print('\nTest set: Average loss: {:.4f}, Error: {}/{} ({:.3f}%) Dice: {:.6f}\n'.format(
# test_loss, incorrect, numel, err, dice_loss))
#
# testF.write('{},{},{}\n'.format(epoch, test_loss, err))
# testF.flush()
# return err
def train_dice(args, epoch, model, trainLoader, optimizer, trainF):
model.train()
nProcessed = 0
nTrain = len(trainLoader.dataset)
for batch_idx, output in enumerate(trainLoader):
data, target, id = output
# print("training with {}".format(id[0]))
target = target[0,:,:,:].view(-1) # right? added by Chao.
if args.cuda:
data, target = data.cuda(), target.cuda()
data = Variable(data)
target = Variable(target)
# data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
# pdb.set_trace()
loss = bioloss.dice_loss(output, target)
# make_graph.save('/tmp/t.dot', loss.creator); assert(False)
loss.backward()
optimizer.step()
nProcessed += len(data)
err = 100.*(1. - loss.data[0]) # loss.data[0] is dice coefficient? By Chao.
# partialEpoch = epoch + batch_idx / len(trainLoader) - 1
# print('Train Epoch: {:.2f} [{}/{} ({:.0f}%)]\tLoss: {:.8f}\tError: {:.8f}'.format(
# partialEpoch, nProcessed, nTrain, 100. * batch_idx / len(trainLoader),
# loss.data[0], err))
print('\nFor trainning: Epoch: {} \tdice_coefficient: {:.4f}\tError: {:.4f}\n'.format(
epoch, loss.data[0], err))
# trainF.write('{},{},{}\n'.format(partialEpoch, loss.data[0], err))
trainF.write('{},{},{}\n'.format(epoch, loss.data[0], err))
trainF.flush()
def test_dice(args, epoch, model, testLoader, optimizer, testF):
model.eval()
test_dice = 0
incorrect = 0
for batch_idx, output in enumerate(testLoader):
data, target, id = output
# print("testing with {}".format(id[0]))
target = target[0,:,:,:].view(-1) # right? added by Chao.
if args.cuda:
data, target = data.cuda(), target.cuda()
data = Variable(data)
target = Variable(target)
output = model(data)
dice = bioloss.dice_loss(output, target).data[0]
test_dice += dice
incorrect += (1. - dice)
nTotal = len(testLoader)
test_dice /= nTotal # loss function already averages over batch size
err = 100.*incorrect/nTotal
# print('\nTest set: Average Dice Coeff: {:.4f}, Error: {}/{} ({:.0f}%)\n'.format(
# test_loss, incorrect, nTotal, err))
#
# testF.write('{},{},{}\n'.format(epoch, test_loss, err))
print('\nFor testing: Epoch:{}\tAverage Dice Coeff: {:.4f}\tError:{:.4f}\n'.format(epoch, test_dice, err))
testF.write('{},{},{}\n'.format(epoch, test_dice, err))
testF.flush()
return test_dice
def adjust_opt(optAlg, optimizer, epoch):
if optAlg == 'sgd':
if epoch < 150:
lr = 1e-1
elif epoch == 150:
lr = 1e-2
elif epoch == 225:
lr = 1e-3
else:
return
for param_group in optimizer.param_groups:
param_group['lr'] = lr
| StarcoderdataPython |
5007670 | from threading import Thread
def func1(length):
sum_f1 = 0
for x in range(0, length):
sum_f1 += x
print('Sum is {}'.format(sum_f1))
def func2(length):
""" Computes the sum of squares"""
sum_f2 = 0
for x in range(0, length):
sum_f2 += x * x
print('Sum of squares is {}'.format(sum_f2))
def func3(length):
""" Computes the sum of cubes"""
sum_f3 = 0
for x in range(0, length):
sum_f3 += x ** 3
print('Sum of cubes is {}'.format(sum_f3))
# Threading part
def do_threading():
length = 3
thread_simple = Thread(target=func1, args=(length,))
thread_square = Thread(target=func2, args=(length,))
thread_cube = Thread(target=func3, args=(length,))
# Start Execution
thread_simple.start()
thread_square.start()
thread_cube.start()
# Call the joint function
thread_simple.join()
thread_square.join()
thread_cube.join()
do_threading()
| StarcoderdataPython |
3572009 | class Solution:
def XXX(self, n: int) -> str:
if n==1:
return '1'
elif n==2:
return '11'
x=self.XXX(n-1)
y=''
count=1
for i in range(len(x)):
if i<len(x)-1 and x[i+1]==x[i]:
count+=1
else:
y+=str(count)
y+=str(x[i])
count=1
return y
| StarcoderdataPython |
5022471 | # -*- coding: utf-8 -*-
# Created by restran on 2017/9/15
from __future__ import unicode_literals, absolute_import
"""
Shadow cipher
Decrypt the ciphertext 8842101220480224404014224202480122 to obtain the flag; the flag is 8 uppercase letters.
There are 7 zeros; splitting on them yields 8 groups. Summing the digits of each group gives 8 numbers, each the position of a letter in the 26-letter alphabet.
88421 0 122 0 48 0 2244 0 4 0 142242 0 248 0 122
23 5 12 12 4 15 14 5
"""
import string
def decode(data):
data = data.strip()
split_list = data.split('0')
data = [sum([int(t) for t in item]) for item in split_list]
result = ''
for i in data:
result += string.ascii_uppercase[i - 1]
return result
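# Worked example (follows directly from the splitting/summing steps above):
# decode('8842101220480224404014224202480122') -> 'WELLDONE'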
def main():
d = '8842101220480224404014224202480122'
r = decode(d)
print(r)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1863754 | import bisect
import collections
from tenet.util.log import pmsg
#-----------------------------------------------------------------------------
# analysis.py -- Trace Analysis
#-----------------------------------------------------------------------------
#
# This file should contain logic to further process, augment, optimize or
# annotate Tenet traces when a binary analysis framework such as IDA /
# Binary Ninja is available to a trace reader.
#
# As of now (v0.2) the only added analysis we do is to try and map
# ASLR'd trace addresses to executable opened in the database.
#
# In the future, I imagine this file will be used to indexing events
# such as function calls, returns, entry and exit to unmapped regions,
# service pointer annotations, and much more.
#
class TraceAnalysis(object):
"""
A high level, debugger-like interface for querying Tenet traces.
"""
def __init__(self, trace, dctx):
self._dctx = dctx
self._trace = trace
self._remapped_regions = []
self._unmapped_entry_points = []
self.slide = None
self._analyze()
#-------------------------------------------------------------------------
# Public
#-------------------------------------------------------------------------
def rebase_pointer(self, address):
"""
Return a rebased version of the given address, if one exists.
"""
for m1, m2 in self._remapped_regions:
#print(f"m1 start: {m1[0]:08X} address: {address:08X} m1 end: {m1[1]:08X}")
#print(f"m2 start: {m2[0]:08X} address: {address:08X} m2 end: {m2[1]:08X}")
if m1[0] <= address <= m1[1]:
return address + (m2[0] - m1[0])
if m2[0] <= address <= m2[1]:
return address - (m2[0] - m1[0])
return address
def get_prev_mapped_idx(self, idx):
"""
Return the previous idx to fall within a mapped code region.
"""
index = bisect.bisect_right(self._unmapped_entry_points, idx) - 1
try:
return self._unmapped_entry_points[index]
except IndexError:
return -1
#-------------------------------------------------------------------------
# Analysis
#-------------------------------------------------------------------------
def _analyze(self):
"""
Analyze the trace against the binary loaded by the disassembler.
"""
self._analyze_aslr()
self._analyze_unmapped()
def _analyze_aslr(self):
"""
Analyze trace execution to resolve ASLR mappings against the disassembler.
"""
dctx, trace = self._dctx, self._trace
# get *all* of the instruction addresses from disassembler
instruction_addresses = dctx.get_instruction_addresses()
#
# bucket the instruction addresses from the disassembler
# based on non-aslr'd bits (lower 12 bits, 0xFFF)
#
binary_buckets = collections.defaultdict(list)
for address in instruction_addresses:
bits = address & 0xFFF
binary_buckets[bits].append(address)
# get the set of unique, executed addresses from the trace
trace_addresses = trace.ip_addrs
#
# scan the executed addresses from the trace, and discard
# any that cannot be bucketed by the non ASLR-d bits that
# match the open executable
#
trace_buckets = collections.defaultdict(list)
for executed_address in trace_addresses:
bits = executed_address & 0xFFF
if bits not in binary_buckets:
continue
trace_buckets[bits].append(executed_address)
#
# this is where things get a little bit interesting. we compute the
# distance between addresses in the trace and disassembler buckets
#
# the distance that appears most frequently is likely to be the ASLR
# slide to align the disassembler imagebase and trace addresses
#
slide_buckets = collections.defaultdict(list)
for bits, bin_addresses in binary_buckets.items():
for executed_address in trace_buckets[bits]:
for disas_address in bin_addresses:
distance = disas_address - executed_address
slide_buckets[distance].append(executed_address)
# basically the executable 'range' of the open binary
disas_low_address = instruction_addresses[0]
disas_high_address = instruction_addresses[-1]
# convert to set for O(1) lookup in following loop
instruction_addresses = set(instruction_addresses)
#
# loop through all the slide buckets, from the most frequent distance
# (ASLR slide) to least frequent. the goal now is to sanity check the
# ranges to find one that seems to couple tightly with the disassembler
#
for k in sorted(slide_buckets, key=lambda k: len(slide_buckets[k]), reverse=True):
expected = len(slide_buckets[k])
#
# TODO: uh, if it's getting this small, I don't feel comfortable
# selecting an ASLR slide. the user might be loading a tiny trace
# with literally 'less than 10' unique instructions (?) that
# would map to the database
#
if expected < 10:
continue
hit, seen = 0, 0
for address in trace_addresses:
# add the ASLR slide for this bucket to a traced address
rebased_address = address + k
# the rebased address seems like it falls within the disassembler ranges
if disas_low_address <= rebased_address < disas_high_address:
seen += 1
# but does the address *actually* exist in the disassembler?
if rebased_address in instruction_addresses:
hit += 1
#
# the first *high* hit ratio is almost certainly the correct
# ASLR, practically speaking this should probably be 1.00, but
# I lowered it a bit to give a bit of flexibility.
#
# NOTE/TODO: a lower 'hit' ratio *could* occur if a lot of
# undefined instruction addresses in the disassembler get
# executed in the trace. this could be packed code / malware,
# in which case we will have to perform more aggressive analysis
#
if (hit / seen) > 0.95:
#print(f"ASLR Slide: {k:08X} Quality: {hit/seen:0.2f} (h {hit} s {seen} e {expected})")
slide = k
break
#
# if we do not break from the loop, we failed to find an adequate
# slide, which is very bad.
#
# NOTE/TODO: uh what do we do if we fail the ASLR slide?
#
else:
self.slide = None
return False
#
# TODO: err, lol this is all kind of dirty. should probably refactor
# and clean up this whole 'remapped_regions' stuff.
#
m1 = [disas_low_address, disas_high_address]
if slide < 0:
m2 = [m1[0] - slide, m1[1] - slide]
else:
m2 = [m1[0] + slide, m1[1] + slide]
self.slide = slide
self._remapped_regions.append((m1, m2))
return True
def _analyze_unmapped(self):
"""
Analyze trace execution to identify entry/exit to unmapped segments.
"""
if self.slide is None:
return
# alias for readability and speed
trace, ips = self._trace, self._trace.ip_addrs
lower_mapped, upper_mapped = self._remapped_regions[0][1]
#
# for speed, pull out the 'compressed' ip indexes that matched mapped
# (known) addresses within the disassembler context
#
mapped_ips = set()
for i, address in enumerate(ips):
if lower_mapped <= address <= upper_mapped:
mapped_ips.add(i)
last_good_idx = 0
unmapped_entries = []
# loop through each segment in the trace
for seg in trace.segments:
seg_ips = seg.ips
seg_base = seg.base_idx
# loop through each executed instruction in this segment
for relative_idx in range(0, seg.length):
compressed_ip = seg_ips[relative_idx]
# the current instruction is in an unmapped region
if compressed_ip not in mapped_ips:
# if we were in a known/mapped region previously, then save it
if last_good_idx:
unmapped_entries.append(last_good_idx)
last_good_idx = 0
# if we are in a good / mapped region, update our current idx
else:
last_good_idx = seg_base + relative_idx
#print(f" - Unmapped Entry Points: {len(unmapped_entries)}")
self._unmapped_entry_points = unmapped_entries
| StarcoderdataPython |
273157 | import torch
import torchvision.utils as vutils
import glob, os
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--save_image", type=bool, default=False)
parser.add_argument("--save_video", type=bool, default=False)
def get_files():
files = glob.glob("*.pt")
files = [(file, int(file.split('.')[0].split('-')[-1])) for file in files]
files.sort(key=lambda f: f[1])
files = [f[0] for f in files]
return files
if __name__ == "__main__":
args = parser.parse_args()
img_list = []
files = get_files()
for file in files:
module = torch.jit.load(file, map_location=torch.device("cpu"))
images = list(module.parameters())[0].detach().cpu().transpose(2, 3)
img_list.append(vutils.make_grid(images, padding=2, normalize=True))
if args.save_image:
# save fake images to directory
directory = os.path.splitext(file)[0]
if not os.path.exists(directory):
os.mkdir(directory)
images = [images[i].reshape(3, 64, 64) for i in range(64)]
for i, image in enumerate(images):
vutils.save_image(image,
directory + '/' + str(i) + '.png',
normalize=True)
if args.save_video:
fig = plt.figure(figsize=(8, 8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)]
for i in img_list]
ani = animation.ArtistAnimation(fig,
ims,
interval=1000,
repeat_delay=1000,
blit=True)
writer = animation.writers['ffmpeg']
writer = writer(fps=1, metadata=dict(artist='Me'), bitrate=1800)
ani.save("dcgan.mp4", writer)
| StarcoderdataPython |
3459625 | from sanic import Sanic
from sanic.response import json
from imageboard.danbooru import Danbooru
app = Sanic("ihaboard-scrapper")
app.config.FORWARDED_HOST = "temporary_string"
def to_real_bool(string):
bool_map = {
"0": False,
"1": True,
"true": True,
"false": False,
"y": True,
"n": False,
"yes": True,
"no": False,
0: False,
1: True,
True: True,
False: False,
}
if isinstance(string, str):
string = string.lower()
return bool_map.get(string, False)
@app.get("/danbooru")
async def danbooru_requests(request):
params = request.args
tags = params.get("search", "")
do_random_search = to_real_bool(params.get("random", "0"))
tags = [t for t in tags.split("+")]
dbi = Danbooru(False)
if do_random_search:
results = await dbi.random_search(tags)
else:
results = await dbi.search(tags)
await dbi.shutoff()
return json(results, ensure_ascii=False, encode_html_chars=True, escape_forward_slashes=False, indent=4)
@app.get("/safebooru")
async def safebooru_request(request):
params = request.args
tags = params.get("search", "")
do_random_search = to_real_bool(params.get("random", "0"))
tags = [t for t in tags.split("+")]
dbi = Danbooru(True)
if do_random_search:
results = await dbi.random_search(tags)
else:
results = await dbi.search(tags)
await dbi.shutoff()
return json(results, ensure_ascii=False, encode_html_chars=True, escape_forward_slashes=False, indent=4)
if __name__ == "__main__":
app.run("127.0.0.1", 6969, debug=True, access_log=True, auto_reload=True)
| StarcoderdataPython |
11227114 | #!/usr/bin/env python
# coding: utf-8
import sys
sys.path.append('../')
# Config
from utils.misc import *
from config.UnityML_Agent import *
# Environment
from unityagents import UnityEnvironment
# Agent
from agent.DDPG import Agent
from agent.ExperienceReplay import ReplayBuffer
# Hyperparameter optimizer
import optuna
# Initialize environment object
params = HYPERPARAMS['Reacher']
env = UnityEnvironment(file_name='{:s}_Linux/{:s}.x86_64'.format(params['env_name'],params['env_name']))
# Get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# Reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# Get environment parameter
number_of_agents = len(env_info.agents)
action_size = brain.vector_action_space_size
state_size = len(env_info.vector_observations[0])
print('Number of agents : ', number_of_agents)
print('Number of actions : ', action_size)
print('Dimension of state space : ', state_size)
# Save results to csv file
log_filename = 'hyperparameter_optimization'
hyperscores = []
def train_agent(actor_learning_rate, critic_learning_rate, fc_units, thau, batch_size):
# Set tunable parameters
params['actor_hidden_layers'] = [int(fc_units), int(fc_units/2)]
params['critic_hidden_layers'] = [int(fc_units), int(fc_units/2)]
params['actor_learning_rate'] = actor_learning_rate
params['critic_learning_rate'] = critic_learning_rate
params['thau'] = thau
params['batch_size'] = int(batch_size)
# Create agent instance
print("Created agent with following hyperparameter values:")
pprint.pprint(params)
# Initialize agent
agent = Agent(state_size=state_size, action_size=action_size, param=params, seed=0)
# Initialize replay buffer
memory = ReplayBuffer(action_size, params['replay_size'], params['batch_size'], seed=0)
update_interval = params['update_interval']
replay_start = params['replay_initial']
""" Training loop """
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=params['scores_window_size']) # last (window_size) scores
filemeta = "{:s}_{:s}_{:.1E}_{:.1E}_{:d}_{:.1E}_{:d}_solved{:d}"
for i_episode in range(1, params['train_episodes']+1):
# Reset the environment
env_info = env.reset(train_mode=True)[brain_name]
agent.reset()
# Capture the current state
state = env_info.vector_observations[0]
# Reset score collector
score = 0
# One episode loop
step = 0
done = False
while not done:
# Action selection
action = agent.act(state)
# Take action and get rewards and new state
env_info = env.step(action)[brain_name]
next_state = env_info.vector_observations[0]
reward = env_info.rewards[0]
done = env_info.local_done[0] # if next is terminal state
# Store experience
memory.push(state, action, reward, next_state, done)
# Update Q-Learning
step += 1
if (step % update_interval) == 0 and len(memory) > replay_start:
                # Recall experiences (miniBatch)
experiences = memory.recall()
# Train agent
agent.learn(experiences)
# State transition
state = next_state
# Update total score
score += reward
# Push to score list
scores_window.append(score)
scores.append([score, np.mean(scores_window), np.std(scores_window)])
# Print episode summary
print('\r#TRAIN Episode:{}, Score:{:.2f}, Average Score:{:.2f}'.format(i_episode, score, np.mean(scores_window)), end="")
if i_episode % 100 == 0:
print('\r#TRAIN Episode:{}, Score:{:.2f}, Average Score:{:.2f}'.format(i_episode, score, np.mean(scores_window)))
if np.mean(scores_window) >= params['stop_scores']:
            print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-params['scores_window_size'], np.mean(scores_window)))
break
""" End of the Training """
print('\n')
# Filename string
filename = filemeta.format( params['env_name'],agent.name, \
params['actor_learning_rate'], \
params['critic_learning_rate'], \
fc_units,params['thau'], \
params['batch_size'], i_episode-100)
agent.export_network('./models/{:s}'.format(filename))
# Export scores to csv file
df = pandas.DataFrame(scores,columns=['scores','average_scores','std'])
df.to_csv('./scores/{:s}.csv'.format(filename), sep=',',index=False)
hyperscores.append([params['actor_learning_rate'], params['critic_learning_rate'], fc_units, params['thau'], params['batch_size'], np.mean(scores_window), i_episode-params['scores_window_size']])
    log_df = pandas.DataFrame(hyperscores,columns=['actor_learning_rate', 'critic_learning_rate', 'fc_units', 'thau', 'batch_size', 'mean_score', 'i_episode'])
log_df.to_csv('scores/{:s}.csv'.format(log_filename))
return (params['stop_scores']-np.mean(scores_window))
def objective(trial):
# Optuna objective function
actor_learning_rate = trial.suggest_categorical('actor_learning_rate', [1e-4, 5e-4, 1e-3])
critic_learning_rate = trial.suggest_categorical('critic_learning_rate', [1e-4, 5e-4, 1e-3])
fc_units = trial.suggest_categorical('fc_units', [64, 128, 256])
thau = 1e-3 #trial.suggest_categorical('thau', [1e-3, 2e-3])
batch_size = trial.suggest_categorical('batch_size', [128, 256])
return train_agent(actor_learning_rate, critic_learning_rate, fc_units, thau, batch_size)
# Create a new Optuna study object.
study = optuna.create_study()
# Invoke optimization of the objective function.
study.optimize(objective , n_trials=300, n_jobs=1)
# Print and Save result to .csv file
print('Best value: {} (params: {})\n'.format(study.best_value, study.best_params))
# Close the environment
env.close()
| StarcoderdataPython |
31475 | # -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
from addonpayments.utils import GenerationUtils
class TestGenerationUtils:
def test_generate_hash(self):
"""
Test Hash generation success case.
"""
test_string = '20120926112654.thestore.ORD453-11.00.Successful.3737468273643.79347'
secret = 'mysecret'
expected_result = '368df010076481d47a21e777871012b62b976339'
result = GenerationUtils.generate_hash(test_string, secret)
assert expected_result == result
def test_generate_timestamp(self):
"""
Test timestamp generation. Hard to test this in a meaningful way. Checking length and valid characters.
"""
result = GenerationUtils().generate_timestamp()
match = re.match(r'([0-9]{14})', result)
assert match
def test_generate_order_id(self):
"""
Test order Id generation. Hard to test this in a meaningful way. Checking length and valid characters.
"""
result = GenerationUtils().generate_order_id()
match = re.match(r'[A-Za-z0-9-_]{32}', result)
assert match
| StarcoderdataPython |
3472865 |
import numpy as _np
import pandas as _pd
import matplotlib.pyplot as _plt
from scipy import fftpack as _fftpack
from scipy.signal import welch as _welch
# from scipy.signal.spectral import _spectral_helper
# from johnspythonlibrary2 import Plot as _plot
# from johnspythonlibrary2.Plot import subTitle as _subTitle, finalizeFigure as _finalizeFigure, finalizeSubplot as _finalizeSubplot
from johnspythonlibrary2.Process.Misc import check_dims as _check_dims
from johnspythonlibrary2.Process.Spectral import fft as _fft
from johnspythonlibrary2.Process.Spectral import calcPhaseDifference as _calcPhaseDifference
import xarray as _xr
from scipy.stats import binned_statistic as _binned_statistic
from scipy.optimize import minimize as _minimize
###############################################################################
#%% Dispersion plots
def dispersion_plot(video_data_1D, nperseg_dim1=1000, dim2='theta', dim2_final='m', vmin=None, vmax=None, plot=True, f_units='Hz'):
"""
Calculates a dispersion plot from a 1D video dataset
Parameters
----------
video_data_1D : xarray.core.dataarray.DataArray
1D video data. dims = ['t', spatial (e.g. theta or r)]. Time must be first.
nperseg_dim1 : int or None
int - Welch FFT averaging is applied to the time data where nperseg is the window size. The output will be real.
None - Standard FFT is applied to the time data (i.e. no windowing). The output will be complex.
dim2 : str
The name of the spatial dimension
dim2_final : str
The name of the spatial dimension after the FFT is applied
vmin : float
Lower limit of the colorbar scale
vmax : float
Upper limit of the colorbar scale
plot : bool
True causes the plot to be produced.
f_units : str
Name of the frequency units. (e.g. if t=t*1e3 is the input, then specify f_units='kHz'.)
Returns
-------
X_2D : xarray.core.dataarray.DataArray
Dipserion relationship. Values are real if nperseg_dim1 is a number. Complex if nperseg_dim1 is None.
"""
## Check dimensions
_check_dims(video_data_1D, dims=['t',dim2])
if video_data_1D.dims[0]!='t':
raise Exception("The first dimension needs to be time, 't'")
## FFT along dim2 (the spatial dimension)
if True:
# preliminary steps
dtheta = float(video_data_1D[dim2][1] -
video_data_1D[dim2][0]) / (2 * _np.pi)
m = _fftpack.fftfreq(len(video_data_1D[dim2]), d=dtheta)
# perform FFT
X = _np.fft.fft(video_data_1D, axis=1)
X = _xr.DataArray(X, dims=['t', dim2_final],
coords=[video_data_1D['t'], m]).sortby(dim2_final)
# return the results to the correct amplitude
N = len(video_data_1D[dim2])
X *= 1.0 / N # use 2.0/N only if you've trimmed the negative freqs
## FFT along time, t (dim1)
if True:
# preliminary steps
dt = float(X.t[1] - X.t[0])
# perform time-averaged (windowed) FFT if nperseg_dim1 is a number
if nperseg_dim1 is not None:
freq, X_2D = _welch( X.data, fs=1.0/dt, nperseg=nperseg_dim1,
noverlap=nperseg_dim1//2, return_onesided=True,
scaling='spectrum', axis=0)
# otherwise, perform standard fft
else:
freq = _fftpack.fftfreq(len(X['t']), d=dt)
X_2D = _np.fft.fft(X.data, axis=0)
N = len(video_data_1D['t'])
X_2D *= 1.0 / N # use 2.0/N only if you've trimmed the negative freqs
X_2D = _xr.DataArray(X_2D, dims=['f', dim2_final],
coords=[freq, X[dim2_final]]).sortby('f')
X_2D.attrs={'long_name':'Spectral density','units':'au'}
X_2D.f.attrs={'long_name':'FFT Frequency','units':f_units}
X_2D[dim2_final].attrs={'long_name': dim2_final,'units':''}
if plot==True:
# convert to absolute value and take log10 (for vetter visualization)
a=_np.log10(_np.abs(X_2D))
a.attrs={'long_name':'Spectral density','units':'au, log10'}
# set vmin and vmax (color scaling limits)
if type(vmin)==type(None):
vmin=float(a.min())
if type(vmax)==type(None):
vmax=float(a.max())#+0.5
# plot
fig, ax = _plt.subplots()
a.plot(ax=ax, vmin=vmin, vmax=vmax)
ax.set_title('dispersion plot')
return X_2D
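# Usage sketch added for illustration (synthetic data, not part of the original module).
def _example_dispersion_plot():
    """
    Build a synthetic 1D 'video' containing a single m=3 azimuthal mode rotating at
    20 kHz and pass it to dispersion_plot(); the mode should appear as a localized
    peak in the (f, m) plane. All values here are illustrative assumptions.
    """
    t = _np.linspace(0, 1e-3, 5000)
    theta = _np.linspace(0, 2 * _np.pi, 64, endpoint=False)
    T, TH = _np.meshgrid(t, theta, indexing='ij')
    signal = _np.cos(3 * TH - 2 * _np.pi * 20e3 * T)  # traveling wave: m=3 at 20 kHz
    da = _xr.DataArray(signal, dims=['t', 'theta'], coords=[t, theta])
    return dispersion_plot(da, nperseg_dim1=1000, dim2='theta', f_units='Hz')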
def dispersion_plot_2points(da1, da2, x_separation=1, nperseg=None, plot=True):
# https://scholar.colorado.edu/downloads/qj72p7185
# https://aip.scitation.org/doi/pdf/10.1063/1.2889424
# https://aip.scitation.org/doi/pdf/10.1063/1.331279
"""
filename='C:\\Users\\jwbrooks\\data\\marcels_thesis_data\\20A_5sccm_5mm_6.29.2019_7.07 PM.mat'
matData=jpl2.ReadWrite.mat_to_dict(filename)
t=matData['t'].reshape(-1)
da1=xr.DataArray(matData['s1'].reshape(-1), dims='t', coords=[t])
da2=xr.DataArray(matData['s4'].reshape(-1), dims='t', coords=[t])
x_separation=3e-3
"""
# check input
_check_dims(da1,'t')
_check_dims(da2,'t')
# parameters
nperseg=20000
N_k=50
N_f=1000
# initialize arrays
S=_np.zeros((N_k,N_f),dtype=float)
count=_np.zeros((N_k,N_f),dtype=int)
def calc_fft_and_k(x1,x2):
fft1=_fft(x1, plot=False).sortby('f')
fft2=_fft(x2, plot=False).sortby('f')
s=_np.real(0.5*(_np.conj(fft1)*fft1+_np.conj(fft2)*fft2))
phase_diff,_,_=_calcPhaseDifference(fft1, fft2, plot=False)
k=phase_diff/x_separation
# k_bins=_np.linspace(k.data.min(),k.data.max(),N_k+1)
# f_bins=_np.linspace(k.f.data.min(),k.f.data.max(),N_f+1)
return s, k
# calculate bin sizes
s,k=calc_fft_and_k(da1,da2)
k_bins=_np.linspace(k.data.min(),k.data.max(),N_k+1)
f_bins=_np.linspace(k.f.data.min(),k.f.data.max(),N_f+1)
# itegrate through each time window
segs=_np.arange(0,len(da1),nperseg)
for i,seg in enumerate(segs):
if len(da1[seg:seg+nperseg])<nperseg:
pass
else:
print(seg)
#
# fft1=fft(da1[seg:seg+nperseg], plot=False).sortby('f')
# fft2=fft(da2[seg:seg+nperseg], plot=False).sortby('f')
# s=_np.real(0.5*(_np.conj(fft1)*fft1+_np.conj(fft2)*fft2))
#
# phase_diff,_,_=calcPhaseDifference(fft1, fft2, plot=False)
# k=phase_diff/x_separation
#
# if i == 0:
# k_bins=_np.linspace(k.data.min(),k.data.max(),N_k+1)
# f_bins=_np.linspace(k.f.data.min(),k.f.data.max(),N_f+1)
#
s,k=calc_fft_and_k(da1[seg:seg+nperseg], da2[seg:seg+nperseg])
data=_pd.DataFrame()
data['f']=s.f.data
data['S']=s.data
data['k']=k.data
for a in range(N_k):
for b in range(N_f):
c=data.where((data['k']>k_bins[a])&(data['k']<k_bins[a+1])&(data['f']>f_bins[b])&(data['f']<f_bins[b+1])).dropna()
count[a,b]+=len(c)
S[a,b]=S[a,b]+c['S'].sum()
count[count==0]=1 # prevent divide by 0 issues
S=_xr.DataArray(S/count, dims=['k','f'],coords=[ (k_bins[1:]+k_bins[0:-1])/2, (f_bins[1:]+f_bins[0:-1])/2])
if plot==True:
fig,ax=_plt.subplots()
count=_xr.DataArray(count, dims=['k','f'],coords=[ (k_bins[1:]+k_bins[0:-1])/2, (f_bins[1:]+f_bins[0:-1])/2])
count.plot(ax=ax)
fig,ax=_plt.subplots()
_np.log10(S).plot(ax=ax)
return S
#%% binning
def _solve_for_bin_edges(numberBins=100):
return _np.linspace(-_np.pi, _np.pi, numberBins + 1)
def create_radial_mask(video, ri=0.9, ro=1.1, fillValue=_np.nan, plot=False):
"""
Calculate radial mask
Parameters
----------
video : xarray.core.dataarray.DataArray
the video
ri : float
inner radius of mask
ro : float
outer radius of mask
fillValue : int,float
Fill value for the masked region. 0 or np.nan is standard.
Returns
-------
mask : numpy.ndarray (2D)
Mask with 1s in the "keep" region and fillValue
in the "masked-out" region
Examples
--------
Example 1 ::
video = create_fake_video_data()
video, _ = scale_video_spatial_gaussian(video)
mask=create_radial_mask(video, plot=True)
"""
R, _ = calc_video_polar_coordinates(video)
mask = _np.ones(R.shape)
mask[(R > ro) | (R < ri)] = fillValue
if plot:
temp = _xr.DataArray(mask, dims=['y', 'x'],
coords=[video.y, video.x])
fig, ax = _plt.subplots()
temp.plot(ax=ax)
return mask
def calc_video_polar_coordinates(video, plot=False):
"""
Creates polar coordinates for the video
Example 1 ::
video = create_fake_video_data()
video, _ = scale_video_spatial_gaussian(video)
calc_video_polar_coordinates(video, plot=True)
"""
X, Y = _np.meshgrid(video.x, video.y)
R = _np.sqrt(X ** 2 + Y ** 2)
Theta = _np.arctan2(Y, X)
if plot:
X = _xr.DataArray(X, dims=['y', 'x'], coords=[video.y, video.x])
Y = _xr.DataArray(Y, dims=['y', 'x'], coords=[video.y, video.x])
R_temp = _xr.DataArray(R, dims=['y', 'x'], coords=[video.y, video.x])
Theta_temp = _xr.DataArray(Theta, dims=['y', 'x'],
coords=[video.y, video.x])
fig, ax = _plt.subplots(1, 4)
X.plot(ax=ax[0])
ax[0].set_title('X')
Y.plot(ax=ax[1])
ax[1].set_title('Y')
R_temp.plot(ax=ax[2])
ax[2].set_title('R')
Theta_temp.plot(ax=ax[3])
ax[3].set_title('Theta')
for i in range(4):
ax[i].set_aspect('equal')
return R, Theta
# azimuthal channel binning
def azimuthal_binning(video, numberBins, ri, ro, plot=False):
"""
Parameters
----------
video : xarray.core.dataarray.DataArray
the video
numberBins : int
Number of bins for binning. e.g. 100
ri : float
Inner radius for the azimuthal binning
ro : float
Outer radius for the azimuthal binning
plot : bool
Optional plots of results
Returns
-------
binned_data : xarray.core.dataarray.DataArray
2D binned video data with coordinates in theta and time.
Examples
--------
Example 1 ::
video = create_fake_video_data()
video, _ = scale_video_spatial_gaussian(video)
video = scale_video_amplitude(video, method='std')
azimuthal_binning(video, 100, ri=0.9, ro=1.1, plot=True)
"""
# binning subfunction
def binDataAndAverage(x, y, numberBins, plot=False):
"""
Bins data.
Parameters
----------
x : numpy.ndarray
independent variable
y : numpy.ndarray
dependent variable
numberBins : int
number of bins
plot : bool
Optional plot of results
Returns
-------
xarray.core.dataarray.DataArray
DataArray containing the binned results
Example
-------
Example 1::
x = np.linspace(0, 2 * np.pi, 1000) - np.pi
y = np.cos(x) + 1 * (np.random.rand(x.shape[0]) - 0.5)
numberBins = 100
bin_results = binDataAndAverage(x, y, numberBins, plot=True)
"""
bin_edges = _solve_for_bin_edges(numberBins)
# bin y(x) into discrete bins and average the values within each
y_binned, _, _ = _binned_statistic(x, y, bins=bin_edges,
statistic='mean')
x_bins = (bin_edges[:-1] + bin_edges[1:]) / 2
if plot:
da_raw = _xr.DataArray(y, dims=['x'], coords=[x]).sortby('x')
fig, ax = _plt.subplots()
da_raw.plot(ax=ax, label='raw data')
ax.plot(x_bins, y_binned, label='binned data',
marker='s', ms=3, linestyle='--')
ax.legend()
return _xr.DataArray(y_binned, dims='Theta', coords=[x_bins])
# create radial mask
R, Theta = calc_video_polar_coordinates(video)
mask = create_radial_mask(video, ri=ri, ro=ro)
# bin and average each time step in the data
binned_data = _np.zeros((video.t.shape[0], numberBins))
for i, t in enumerate(video.t.data):
unbinned_data = _pd.DataFrame()
unbinned_data['theta'] = Theta.reshape(-1)
unbinned_data['radius'] = R.reshape(-1)
unbinned_data['data'] = (video.sel(t=t).data * mask).reshape(-1)
unbinned_data = unbinned_data.dropna()
if i == 0 and plot:
plot2 = True
else:
plot2 = False
if i==0:
print('Average number of pixels per bin:',unbinned_data.shape[0]/numberBins)
out = binDataAndAverage(unbinned_data.theta.values,
unbinned_data.data.values,
numberBins, plot=plot2)
if i == 0:
number_of_NaNs = _np.isnan(out).sum()
if number_of_NaNs > 0:
print('NaNs encounted in binning: ', number_of_NaNs)
binned_data[i, :] = out
binned_data = _xr.DataArray(binned_data, dims=['t', 'theta'],
coords=[video.t.data.copy(), out.Theta])
if plot:
fig, ax = _plt.subplots()
binned_data.plot(ax=ax)
return binned_data
#%% Circular/annulus detection
def _circle(ax, xy=(0, 0), r=1, color='r', linestyle='-',
alpha=1, fill=False, label=''):
"""
Draws a circle on an AxesSubplot (ax) at origin=(xy) and radius=r
"""
circle1 = _plt.Circle(xy, r, color=color, alpha=alpha,
fill=fill, linestyle=linestyle)
ax.add_artist(circle1)
def scale_video_spatial_gaussian(video, guess=[], plot=False, verbose=False):
"""
Scale (center and normalize) the video's cartesian coordinates
using an annular Gaussian fit
Parameters
----------
video : xarray.core.dataarray.DataArray
the video
guess : list (empty or of 6 floats)
Guess values for the fit.
Default is an empty list, and a "reasonable" guess is used.
[amplitude, channel x center, channel y center,
channel radius, channel width, offset]
plot : bool
optional plot of the results
verbose : bool
optionally prints misc steps of the fit
Returns
-------
video : xarray.core.dataarray.DataArray
the video with coordinates scaled
fit_params : dict
Fit parameters
Examples
--------
Example 1 ::
video = create_fake_video_data()
video_scaled, params = scale_video_spatial_gaussian(video, plot=True,
verbose=True)
"""
# convert video to time averaged image
image = calc_video_time_average(video.copy())
# create Cartesian grid
X, Y = _np.meshgrid(image.x.data, image.y.data)
# annular Gaussian model, assumed form of the channel
def model(image, params):
a0, x0, y0, r0, sigma0, offset = params
def gaussian(a, r, sigma, R):
return a * _np.exp(-0.5 * ((R - r) / sigma) ** 2)
R0 = _np.sqrt((X - x0) ** 2 + (Y - y0) ** 2)
Z = gaussian(a0, r0, sigma0, R0) ** 1 + offset
return Z
# Generate a reasonable guess and guess image
if len(guess) < 6:
sh = image.shape
guess = [1, sh[1] // 2, sh[0] // 2, _np.min(sh) / 3, _np.min(sh) / 4, 4]
# Function that minimizes (i.e. fits) the parameters to the model
def min_func(params):
Z = model(image.data, params)
error = _np.abs((image.data - Z)).sum()
if verbose:
print('error = %.6f' % error)
return error
# perform fit
fit = _minimize(min_func, guess)
a0, x0, y0, r0, sigma0, offset = fit.x
fit_params = {'a0': a0, 'x0': x0, 'y0': y0, 'r0': r0,
'sigma0': sigma0, 'offset': offset}
# optional plot of results
if plot:
Z_fit = _xr.DataArray(model(image, fit.x),
dims=image.dims, coords=image.coords)
Z_guess = _xr.DataArray(model(image, guess),
dims=image.dims, coords=image.coords)
fig, ax = _plt.subplots(1, 2, sharey=True)
image.sel(x=x0, method='nearest').plot(ax=ax[0], label='data',
color='k')
Z_fit.sel(x=x0, method='nearest').plot(ax=ax[0], label='fit',
linestyle='--',
color='tab:blue')
ax[0].set_title('x=x0=%.1f' % x0)
image.sel(y=y0, method='nearest').plot(ax=ax[1], label='data',
color='k')
Z_fit.sel(y=y0, method='nearest').plot(ax=ax[1], label='fit',
linestyle='--',
color='tab:blue')
ax[1].set_title('y=y0=%.1f' % y0)
ax[0].legend()
ax[1].legend()
image['x'] = (image.x - x0) / r0
image['y'] = (image.y - y0) / r0
fig0, ax0 = _plt.subplots(1, 4)
ax0[0].imshow(image, origin='lower')
ax0[0].set_title('actual')
ax0[1].imshow(Z_guess, origin='lower')
ax0[1].set_title('guess')
ax0[2].imshow(Z_fit, origin='lower')
ax0[2].set_title('fit')
ax0[3].imshow(image, origin='lower')
ax0[3].set_title('actual with fit')
_circle(ax0[3], xy=(x0, y0), r=r0, fill=False, linestyle='--')
_circle(ax0[3], xy=(x0, y0), r=r0 + sigma0 * 1.5, fill=False)
_circle(ax0[3], xy=(x0, y0), r=r0 - sigma0 * 1.5, fill=False)
# apply correction to the video
video = video.copy()
video['x'] = (video.x - x0) / r0
video['y'] = (video.y - y0) / r0
return video, fit_params
#%% Video processing, misc
def calc_video_time_average(video, plot=False):
"""
calculate time averaged image
Examples
--------
Example 1 ::
video = create_fake_video_data()
video, _ = scale_video_spatial_gaussian(video)
mask = calc_video_time_average(video, plot=True)
"""
ave = video.mean(dim='t')
if plot:
fig, ax = _plt.subplots()
ave.plot(ax=ax)
ax.set_title('time average')
return ave
| StarcoderdataPython |
11348546 | <filename>orca_gazebo/scripts/reliable_odom.py
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Subscribe to /best_effort and republish /reliable."""
# This is a quick hack around https://github.com/ros-visualization/rqt/issues/187
import nav_msgs.msg
import rclpy
import rclpy.node
import rclpy.qos
# TODO Move to C++ as templated Node, params for QoS, param for overwrite stamp
class ReliableOdomNode(rclpy.node.Node):
def __init__(self):
super().__init__('reliable_odom')
self._sub = self.create_subscription(nav_msgs.msg.Odometry,
'best_effort',
self.callback,
rclpy.qos.qos_profile_sensor_data)
self._pub = self.create_publisher(nav_msgs.msg.Odometry,
'reliable',
rclpy.qos.qos_profile_services_default)
def callback(self, msg: nav_msgs.msg.Odometry) -> None:
# Overwrite stamp, allows node to be used with wall clock even during simulations
msg.header.stamp = self.get_clock().now().to_msg()
self._pub.publish(msg)
def main(args=None):
rclpy.init(args=args)
node = ReliableOdomNode()
try:
rclpy.spin(node)
except KeyboardInterrupt:
node.get_logger().info('ctrl-C detected, shutting down')
finally:
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| StarcoderdataPython |
4971704 | <reponame>kotetsu99/ai_music<filename>04-ai_camera.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import keras
import numpy as np
import cv2
import picamera
import picamera.array
import os, sys
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
# URL constant definitions
PLAYLIST_URL = 'https://music.youtube.com/watch?v=0cGBTbjvwuo&list=RDAMVM0cGBTbjvwuo'
WEB_DRIVER = '/usr/bin/chromedriver'
# Program execution time limit (minutes)
time_limit = 300
# Name of the target person
person = 'person'
# OpenCV object detection size definition
cv_width, cv_height = 100, 100
# OpenCV object detection threshold
minN = 4
# Face image size definition
img_width, img_height = 64, 64
# Path of the cascade xml file used for face detection
cascade_xml = "haarcascade_frontalface_alt.xml"
# Directory path of the training dataset
train_data_dir = 'dataset/02-face'
# Get the dataset subdirectory names (class names)
#classes = os.listdir(train_data_dir)
classes = ('others', 'person')
def main():
    # Environment setting (direct display output to localhost)
os.environ['DISPLAY'] = ':0'
print('クラス名リスト = ', classes)
# 学習済ファイルの確認
if len(sys.argv)==1:
print('使用法: python 本ファイル名.py 学習済ファイル名.h5')
sys.exit()
savefile = sys.argv[1]
    # Load the model
    model = keras.models.load_model(savefile)
    # Launch the browser
brws = setup_browser()
print('顔認識を開始')
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as stream:
            # Set the camera resolution to 320x320
            camera.resolution = (320, 320)
            # Set the camera framerate to 15 fps
            camera.framerate = 15
            # Set the white balance to fluorescent mode
            camera.awb_mode = 'fluorescent'
            # Start timing
            start_time = time.time()
            process_time = 0
            # Set the initial player state
            state = 'None'
            # Run face recognition until the time limit
while process_time < time_limit :
                # Reset the person-recognition flag
                person_flg = False
                # Check the player state
                state = check_player_state(brws)
                #print(state)
                # Store the frame in stream.array in BGR order
                camera.capture(stream, 'bgr', use_video_port=True)
                # Face recognition
                image, person_flg = detect_face(stream.array, model, person_flg)
                # Show the camera image in a window
                cv2.imshow('frame', image)
                # Quit the application when 'q' is pressed
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
                # If the target person's face was detected
                if (person_flg == True):
                    # If the player page is not open, navigate to it
                    if state == 'None':
                        brws.get(PLAYLIST_URL)
                        player = brws.find_element_by_tag_name('body')
                        print('再生')
                    # If the player page is already open, resume playback
                    elif state == 'Pause':
                        # Resume the player
                        player.send_keys(Keys.SPACE)
                        print('再生')
                # If the target person's face was not detected
                elif (person_flg == False):
                    # If the player is currently playing
                    if state == 'Play':
                        # Pause the player
                        player.send_keys(Keys.SPACE)
                        print('一時停止')
                # Reset the stream
                stream.seek(0)
                stream.truncate()
                # Compute the elapsed time (minutes)
process_time = (time.time() - start_time) / 60
#print('process_time = ', process_time, '[min]')
cv2.destroyAllWindows()
def detect_face(image, model, person_flg):
    # Convert to a grayscale image
    image_gs = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascade_xml)
    # Run face detection
    face_list=cascade.detectMultiScale(image_gs, scaleFactor=1.1, minNeighbors=minN,minSize=(cv_width, cv_height))
    # If at least one face was detected
if len(face_list) > 0:
for rect in face_list:
            # Extract the face image
face_img = image[rect[1]:rect[1]+rect[3],rect[0]:rect[0]+rect[2]]
if face_img.shape[0] < cv_width or face_img.shape[1] < cv_height:
#print("too small")
continue
            # Resize the face image to the defined size
            face_img = cv2.resize(face_img, (img_width, img_height))
            # Convert BGR->RGB and to float for Keras
            face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB).astype(np.float32)
            # Recognize the face image with the AI model
            name = predict_who(face_img, model)
            #print(name)
            # Draw a rectangle around the face
            cv2.rectangle(image, tuple(rect[0:2]), tuple(rect[0:2]+rect[2:4]), (0, 0, 255), thickness = 3)
            # Show the AI recognition result (person name) on the original image with the rectangle
            x, y, width, height = rect
            cv2.putText(image, name, (x, y + height + 40), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255) ,2)
            # Save the image
#cv2.imwrite(name + '.jpg', image)
if name == person :
person_flg = True
return image, person_flg
def predict_who(x, model):
    # Reshape the image data into a tensor
    x = np.expand_dims(x, axis=0)
    # Normalize here as well, since the training data was normalized
    x = x / 255
    pred = model.predict(x)[0]
    #print(pred)
    # Labels are: person = 1, others = 0.
result = pred[0]
print('顔認識:本人である確率=' + str(100 * result) + '[%]')
    # Judge as the person if the probability is above 50%, otherwise as someone else.
if result > 0.5:
name = classes[1]
else:
name = classes[0]
    # Return the predicted person
return name
def setup_browser():
    # Browser launch option settings
options = webdriver.ChromeOptions()
options.add_argument('--kiosk')
options.add_argument('--incognito')
options.add_argument('--disable-infobars')
options.add_argument('--disable-extensions')
    # Launch the browser
brws = webdriver.Chrome(WEB_DRIVER, chrome_options=options)
brws.set_page_load_timeout(90)
return brws
def check_player_state(brws):
if len(brws.find_elements_by_id('play-pause-button')) > 0 :
player_element = brws.find_element_by_id('play-pause-button')
title = player_element.get_attribute('title')
        # Update state according to the player's playback status
if title == '一時停止':
state = 'Play'
elif title == '再生':
state = 'Pause'
else:
        # Set state to 'None'
state = 'None'
return state
if __name__ == '__main__':
main()
| StarcoderdataPython |
5194424 | import sys
import HTTPRequester
import PayloadGenerator
requests_location = None
option = None
max_threads = None
delay = None
os = None
dt = 0
def present():
global option, os, dt
if option == 'so':
print '[+] Testing for Soap Injection: '
if option == 'pp':
print '[+] Testing for HTTP Parameter Pollution '
if option == 'ti':
print '[+] Testing for Template Injection '
if option == 'sr':
print '[+] Testing for Serializing/Deserializing Vulnerability '
if option == 'op':
print '[+] Testing for Oracle Padding Vulnerability '
if option == 'xxe':
print '[+] Testing for XML External Entity'
print '[+] FILE: ' + str(requests_location)
print '[+] Threads: ' + str(max_threads)
print '[+] Delay between requests: ' + str(delay)
print '-------------------------------------------------------------------------------------------------------'
def usage():
help = "You must use Burp Suite to get a file with all the requests you want to probe \n" \
"1) -f -> file location \n" \
"2) -o -> option. Use 'so' to test for soap injection or 'pp' to test for parameter polution \n" \
"3) -th -> quantity of threads" \
"4) -dl -> delay\n"
return help
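# Example invocation (hypothetical file name and values; flags match setOptions below):
#   python <script>.py -f requests.txt -o so -th 4 -dl 1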
def setOptions(arguments1):
global requests_location, option, max_threads, delay, os
arguments = ' '.join(arguments1)
requests_location = arguments.split("-f", 1)[1].split(" ")[1]
option = arguments.split("-o", 1)[1].split(" ")[1]
if '-th' not in arguments:
max_threads = 1
else:
max_threads = int(arguments.split("-th", 1)[1].split(" ")[1])
if '-dl' not in arguments:
delay = 0
else:
delay = arguments.split("-dl", 1)[1].split(" ")[1]
def get_params():
global requests_location, option
params = [requests_location, option]
return params
def main():
global requests_location, option, max_threads, delay, os, dt
try:
setOptions(sys.argv[1:])
except Exception as msg:
print usage()
sys.exit()
present()
HTTPRequester.set_file(requests_location)
HTTPRequester.set_params(option, max_threads, delay, os, int(dt))
HTTPRequester.HTTP_request()
main() | StarcoderdataPython |
38680 | <reponame>the-zebulan/CodeWars
from collections import Counter
from itertools import chain
def id_best_users(*args):
best_users = set.intersection(*(set(a) for a in args))
cnt = Counter(chain(*args))
users = {}
for k, v in cnt.iteritems():
if k in best_users:
users.setdefault(v, []).append(k)
return [[k, sorted(v)] for k, v in sorted(users.iteritems(), reverse=True)]
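# Illustrative example (hypothetical data): only users present in every list are kept,
# then grouped by their total count across all lists, highest first.
# id_best_users(['grace', 'john', 'ann'], ['ann', 'john'], ['john', 'ann', 'beth'])
# => [[3, ['ann', 'john']]]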
| StarcoderdataPython |
1634710 | <gh_stars>1-10
"""
MIT License
Copyright (c) 2020 Myer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import discord
from discord.ext import commands, menus
class Skywars(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.icon = "https://static.myer.wtf/hypixel/skywars.png"
@commands.group(aliases=["sw"], invoke_without_command=True)
@commands.max_concurrency(1, per=commands.BucketType.user)
async def skywars(self, ctx, query=None):
player = await ctx.bot.hypixel.player.get(ctx=ctx, query=query)
stats = (
self.get_stats_embed(player),
self.get_stats_embed(player, player.skywars.insane.solo),
self.get_stats_embed(player, player.skywars.insane.doubles),
self.get_stats_embed(player, player.skywars.normal.solo),
self.get_stats_embed(player, player.skywars.normal.doubles)
)
wlr = (
self.get_wlr_embed(player),
self.get_wlr_embed(player, player.skywars.insane.solo),
self.get_wlr_embed(player, player.skywars.insane.doubles),
self.get_wlr_embed(player, player.skywars.normal.solo),
self.get_wlr_embed(player, player.skywars.normal.doubles)
)
kdr = (
self.get_kdr_embed(player),
self.get_kdr_embed(player, player.skywars.insane.solo),
self.get_kdr_embed(player, player.skywars.insane.doubles),
self.get_kdr_embed(player, player.skywars.normal.solo),
self.get_kdr_embed(player, player.skywars.normal.doubles)
)
stats = SkywarsMenu(stats, wlr, kdr)
await stats.start(ctx)
@staticmethod
def get_description(mode):
if hasattr(mode, "games_played"):
return f"Winstreak: {mode.winstreak}\n" \
f"Games Played: {mode.games_played:,d}"
else:
return f"Winstreak: {mode.winstreak}"
def get_stats_embed(self, player, mode=None):
if not mode:
mode = player.skywars # overall stats
return discord.Embed(
color=player.skywars.prestige.color,
title=f"[{player.skywars.prestige.star}{self.bot.static.star}] {player.display}",
description=self.get_description(mode)
).add_field(
name="Kills",
value=f"{mode.kills.kills:,d}"
).add_field(
name="Deaths",
value=f"{mode.kills.deaths:,d}"
).add_field(
name="K/D",
value=mode.kills.ratio.ratio
).add_field(
name="Wins",
value=f"{mode.wins.wins:,d}"
).add_field(
name="Losses",
value=f"{mode.wins.losses:,d}"
).add_field(
name="W/L",
value=mode.wins.ratio.ratio
).set_author(
name=f"Currently Viewing: {mode}",
icon_url=self.icon
)
def get_wlr_embed(self, player, mode=None):
if not mode:
mode = player.skywars # overall
return discord.Embed(
color=player.skywars.prestige.color,
title=f"[{player.skywars.prestige.star}{self.bot.static.star}] {player.display}",
).add_field(
name="Wins",
value=f"{mode.wins.wins:,d}"
).add_field(
name="Losses",
value=f"{mode.wins.losses:,d}"
).add_field(
name="W/L",
value=mode.wins.ratio.ratio
).add_field(
name=f"To {mode.wins.ratio.next} WLR",
value=f"{mode.wins.ratio.increase():,d} needed"
).set_author(
name=f"Currently Viewing: {mode} WLR",
icon_url=self.icon
)
def get_kdr_embed(self, player, mode=None):
if not mode:
mode = player.skywars # overall
return discord.Embed(
color=player.skywars.prestige.color,
title=f"[{player.skywars.prestige.star}{self.bot.static.star}] {player.display}",
).add_field(
name="Kills",
value=f"{mode.kills.kills:,d}"
).add_field(
name="Deaths",
value=f"{mode.kills.deaths:,d}"
).add_field(
name="K/D",
value=mode.kills.ratio.ratio
).add_field(
name=f"To {mode.kills.ratio.next} KDR",
value=f"{mode.kills.ratio.increase():,d} needed"
).set_author(
name=f"Currently Viewing: {mode} KDR",
icon_url=self.icon
)
class SkywarsMenu(menus.Menu):
def __init__(self, stats, wlr, kdr):
super().__init__(timeout=300.0)
self.stats = stats
self.wlr = wlr
self.kdr = kdr
self.index = 0
self.display = stats # default display mode is stats
def increment_index(self):
if abs(self.index + 1) > len(self.display) - 1:
self.index = 0 # loop back
else:
self.index += 1
def decrement_index(self):
if abs(self.index - 1) > len(self.display) - 1:
self.index = 0 # loop back
else:
self.index -= 1
async def send_initial_message(self, ctx, channel):
return await ctx.reply(embed=self.display[self.index])
@menus.button("\u21A9")
async def on_first(self, payload):
return await self.message.edit(embed=self.display[0])
@menus.button("\u2B05")
async def on_arrow_backwards(self, payload):
self.decrement_index()
return await self.message.edit(embed=self.display[self.index])
@menus.button("\u23F9")
async def on_stop(self, payload):
self.stop()
@menus.button("\u27A1")
async def on_arrow_forward(self, payload):
self.increment_index()
return await self.message.edit(embed=self.display[self.index])
@menus.button("\u21AA")
async def on_arrow_last(self, payload):
return await self.message.edit(embed=self.display[-1])
@menus.button("<:stats:795017651277135883>")
async def on_stats(self, payload):
self.display = self.stats
return await self.message.edit(embed=self.display[self.index])
@menus.button("<:kdr:802191344377528401>")
async def on_kdr(self, payload):
self.display = self.kdr
return await self.message.edit(embed=self.display[self.index])
@menus.button("<:wlr:795017651726450758>")
async def on_wlr(self, payload):
self.display = self.wlr
return await self.message.edit(embed=self.display[self.index])
def setup(bot):
bot.add_cog(Skywars(bot))
print("COGS > Reloaded cogs.minecraft.hypixel.skywars")
| StarcoderdataPython |
304745 | <gh_stars>10-100
"""Communicates with databases using repository pattern and service patterns"""
__version__ = '0.27.0'
from dbdaora.cache import CacheType, TTLDaoraCache
from dbdaora.circuitbreaker import AsyncCircuitBreaker
from dbdaora.data_sources.fallback import FallbackDataSource
from dbdaora.data_sources.fallback.dict import DictFallbackDataSource
from dbdaora.data_sources.memory import MemoryDataSource
from dbdaora.data_sources.memory.dict import DictMemoryDataSource
from dbdaora.exceptions import EntityNotFoundError, InvalidGeoSpatialDataError
from dbdaora.geospatial.entity import GeoSpatialData, GeoSpatialEntity
from dbdaora.geospatial.factory import make_service as make_geospatial_service
from dbdaora.geospatial.query import GeoSpatialQuery
from dbdaora.geospatial.repositories import GeoSpatialRepository
from dbdaora.geospatial.service import GeoSpatialService
from dbdaora.hash.factory import make_service as make_hash_service
from dbdaora.hash.query import HashQuery, HashQueryMany
from dbdaora.hash.repositories import HashData, HashEntity, HashRepository
from dbdaora.hash.service import HashService
from dbdaora.hashring import HashRing
from dbdaora.keys import FallbackKey
from dbdaora.query import Query, QueryMany
from dbdaora.repository import MemoryRepository
from dbdaora.service import CACHE_ALREADY_NOT_FOUND, Service
from dbdaora.service.builder import build as build_service
from dbdaora.service.builder import build_cache
from dbdaora.sorted_set.entity import (
SortedSetData,
SortedSetDictEntity,
SortedSetEntity,
)
from dbdaora.sorted_set.factory import make_service as make_sorted_set_service
from dbdaora.sorted_set.query import SortedSetQuery
from dbdaora.sorted_set.repositories import SortedSetRepository
from dbdaora.boolean.factory import ( # noqa isort:skip
make_service as make_boolean_service,
)
from dbdaora.boolean.repositories import BooleanRepository # noqa isort:skip
from dbdaora.boolean.service import BooleanService # noqa isort:skip
try:
from dbdaora.data_sources.fallback.datastore import (
DatastoreDataSource,
KindKeyDatastoreDataSource,
)
from dbdaora.hash.repositories.datastore import DatastoreHashRepository
from dbdaora.sorted_set.repositories.datastore import (
DatastoreSortedSetRepository,
)
from dbdaora.hash.service.datastore import DatastoreHashService
from dbdaora.geospatial.service.datastore import DatastoreGeoSpatialService
from dbdaora.geospatial.repositories.datastore import (
DatastoreGeoSpatialRepository,
)
from dbdaora.sorted_set.service.datastore import DatastoreSortedSetService
from dbdaora.boolean.repositories.datastore import (
DatastoreBooleanRepository,
)
except ImportError:
DatastoreDataSource = None # type: ignore
KindKeyDatastoreDataSource = None # type: ignore
DatastoreHashRepository = None # type: ignore
DatastoreSortedSetRepository = None # type: ignore
DatastoreHashService = None # type: ignore
DatastoreGeoSpatialService = None # type: ignore
DatastoreGeoSpatialRepository = None # type: ignore
DatastoreSortedSetService = None # type: ignore
DatastoreBooleanRepository = None # type: ignore
try:
from dbdaora.data_sources.memory.aioredis import (
AioRedisDataSource,
ShardsAioRedisDataSource,
make as make_aioredis_data_source,
)
except ImportError:
AioRedisDataSource = None # type: ignore
ShardsAioRedisDataSource = None # type: ignore
make_aioredis_data_source = None # type: ignore
try:
from dbdaora.data_sources.fallback.mongodb import (
MongoDataSource,
Key as MongoKey,
CollectionKeyMongoDataSource,
)
from dbdaora.hash.repositories.mongodb import MongodbHashRepository
from dbdaora.hash.service.mongodb import MongoHashService
from dbdaora.geospatial.service.mongodb import MongoGeoSpatialService
from dbdaora.geospatial.repositories.mongodb import (
MongodbGeoSpatialRepository,
)
from dbdaora.sorted_set.service.mongodb import MongoSortedSetService
from dbdaora.sorted_set.repositories.mongodb import (
MongodbSortedSetRepository,
)
except ImportError:
MongoDataSource = None # type: ignore
CollectionKeyMongoDataSource = None # type: ignore
MongodbHashRepository = None # type: ignore
MongoGeoSpatialService = None # type: ignore
MongodbGeoSpatialRepository = None # type: ignore
MongoHashService = None # type: ignore
MongoSortedSetService = None # type: ignore
MongodbSortedSetRepository = None # type: ignore
__all__ = [
'MemoryRepository',
'HashRepository',
'HashQuery',
'HashData',
'SortedSetRepository',
'SortedSetQuery',
'SortedSetEntity',
'DictFallbackDataSource',
'HashService',
'AsyncCircuitBreaker',
'HashRing',
'FallbackDataSource',
'MemoryDataSource',
'DictMemoryDataSource',
'build_service',
'CacheType',
'Service',
'EntityNotFoundError',
'HashQueryMany',
'make_hash_service',
'SortedSetData',
'SortedSetDictEntity',
'TTLDaoraCache',
'build_cache',
'BooleanRepository',
'DatastoreBooleanRepository',
'BooleanService',
'Query',
'QueryMany',
'InvalidGeoSpatialDataError',
'GeoSpatialQuery',
'GeoSpatialEntity',
'GeoSpatialService',
'GeoSpatialRepository',
'make_geospatial_service',
'GeoSpatialData',
'make_sorted_set_service',
'CACHE_ALREADY_NOT_FOUND',
'FallbackKey',
'HashEntity',
'make_boolean_service',
]
if AioRedisDataSource:
__all__.append('AioRedisDataSource')
if ShardsAioRedisDataSource:
__all__.append('ShardsAioRedisDataSource')
if make_aioredis_data_source:
__all__.append('make_aioredis_data_source')
if DatastoreDataSource:
__all__.append('DatastoreDataSource')
if KindKeyDatastoreDataSource:
__all__.append('KindKeyDatastoreDataSource')
if DatastoreHashRepository:
__all__.append('DatastoreHashRepository')
if DatastoreSortedSetRepository:
__all__.append('DatastoreSortedSetRepository')
if MongoDataSource:
__all__.append('MongoDataSource')
if CollectionKeyMongoDataSource:
__all__.append('CollectionKeyMongoDataSource')
if MongodbHashRepository:
__all__.append('MongodbHashRepository')
if MongoHashService:
__all__.append('MongoHashService')
if DatastoreHashService:
__all__.append('DatastoreHashService')
if MongoKey:
__all__.append('MongoKey')
if MongodbGeoSpatialRepository:
__all__.append('MongodbGeoSpatialRepository')
if MongoGeoSpatialService:
__all__.append('MongoGeoSpatialService')
if DatastoreGeoSpatialRepository:
__all__.append('DatastoreGeoSpatialRepository')
if DatastoreGeoSpatialService:
__all__.append('DatastoreGeoSpatialService')
if DatastoreSortedSetService:
__all__.append('DatastoreSortedSetService')
if MongodbSortedSetRepository:
__all__.append('MongodbSortedSetRepository')
if MongoSortedSetService:
__all__.append('MongoSortedSetService')
if DatastoreBooleanRepository:
__all__.append('DatastoreBooleanRepository')
| StarcoderdataPython |
9722939 | import pyqrcode
from tkinter import *
import tkinter.ttk as ttk
from ttkthemes import ThemedTk
from PIL import Image,ImageTk
win = ThemedTk(theme="equilux")
win.title("QR Code Generator")
win.config(background="#181818")
def Generate():
text = entryl.get()
qr = pyqrcode.create(text)
file_name = "my qrcode"
save_path = r'C:\Users\Jeceey\Downloads\ '
name = save_path+file_name+'.png'
qr.png(name, scale=10)
image = Image.open(name)
image = image.resize((400, 400), Image.ANTIALIAS)
image = ImageTk.PhotoImage(image)
win.imagelabel.config(image=image)
    win.imagelabel.image = image  # keep a reference on the label so Tk does not garbage-collect the photo
text = ttk.Label(win, text= "Enter text or link :")
text.grid(row=0, column=0, padx=0, pady=3)
entryl = ttk.Entry(win, width=40)
entryl.grid(row=0, column=1, padx=3, pady=3)
button = ttk.Button(win, text="Generate", command=Generate)
button.grid(row=0, column=2, padx=3, pady=3)
show_qr = ttk.Label(win, text="QR Code :")
show_qr.grid(row=1, column=0, padx=3, pady=3)
win.imagelabel = ttk.Label(win, background='#181818')
win.imagelabel.grid(row=2, column=0, padx=3, pady=3, columnspan=3)
win.mainloop() | StarcoderdataPython |
11248934 | VISDOMWINDOWS = {}
def line_plot(viz, title, x, y):
if title in VISDOMWINDOWS:
window = VISDOMWINDOWS[title]
viz.line(X=[x], Y=[y], win=window, update='append', opts={'title': title})
else:
window = viz.line(X=[x], Y=[y], opts={'title': title})
VISDOMWINDOWS[title] = window
def scatter_plot(viz, title, x):
if title in VISDOMWINDOWS:
window = VISDOMWINDOWS[title]
viz.scatter(X=x, win=window, update='replace', opts={'title': title})
else:
window = viz.scatter(X=x, opts={'title': title})
VISDOMWINDOWS[title] = window
def images_plot(viz, title, x):
if title in VISDOMWINDOWS:
window = VISDOMWINDOWS[title]
viz.images(x, win=window, opts={'title': title})
else:
window = viz.images(x, opts={'caption': title})
VISDOMWINDOWS[title] = window | StarcoderdataPython |
6411375 | <filename>python_backend/models/bpnet/bodyposenet_client.py
import argparse
from functools import partial
import logging
import os
import sys
from attrdict import AttrDict
import numpy as np
from tqdm import tqdm
import tritonclient.grpc as grpcclient
import tritonclient.http as httpclient
from tritonclient.utils import InferenceServerException
from tritonclient.utils import triton_to_np_dtype
from tao_triton.python.types import Frame, UserData
from tao_triton.python.postprocessing.bodyposenet_processor import BodyPoseNetPostprocessor
from tao_triton.python.model.bodyposenet_model import BodyPoseNetModel
logger = logging.getLogger(__name__)
TRITON_MODEL_DICT = {
'bodyposenet': BodyPoseNetModel
}
POSTPROCESSOR_DICT = {
'bodyposenet': BodyPoseNetPostprocessor
}
def completion_callback(user_data, result, error):
"""Callback function used for async_stream_infer()."""
user_data._completed_requests.put((result, error))
def convert_http_metadata_config(_metadata, _config):
"""Convert to the http metadata to class Dict."""
_model_metadata = AttrDict(_metadata)
_model_config = AttrDict(_config)
return _model_metadata, _model_config
def requestGenerator(batched_image_data, input_name, output_name, dtype, protocol,
num_classes=0):
"""Generator for triton inference requests.
Args:
batch_image_data (np.ndarray): Numpy array of a batch of images.
input_name (str): Name of the input array
output_name (list(str)): Name of the model outputs
dtype: Tensor data type for Triton
        protocol (str): The protocol used to communicate between the Triton
server and TAO Toolkit client.
num_classes (int): The number of classes in the network.
Yields:
        inputs: list of Triton InferInput objects holding the batched image data
        outputs: list of Triton InferRequestedOutput objects for the requested model outputs
"""
if protocol == "grpc":
client = grpcclient
else:
client = httpclient
# Set the input data
inputs = [client.InferInput(input_name, batched_image_data.shape, dtype)]
inputs[0].set_data_from_numpy(batched_image_data)
outputs = [
client.InferRequestedOutput(
out_name, class_count=num_classes
) for out_name in output_name
]
yield inputs, outputs
def bodyposenet_predict(**FLAGS):
"""Sends image file path to client for inferences and returns postprocessed outputs
Raises:
Exception: If client creation fails
InferenceSeverException: If failed to retrieve model config, metadata or if inference is unsuccessful.
Returns:
dict: Contains key results which stores the keypoints detected for each image and skeleton edge names used for postprocessing
"""
log_level = "INFO"
if FLAGS['verbose']:
log_level = "DEBUG"
# Configure logging to get Maglev log messages.
logging.basicConfig(format='%(asctime)s [%(levelname)s] '
'%(name)s: %(message)s',
level=log_level)
if FLAGS['streaming'] and FLAGS['protocol'].lower() != "grpc":
raise Exception("Streaming is only allowed with gRPC protocol")
try:
if FLAGS['protocol'].lower() == "grpc":
# Create gRPC client for communicating with the server
triton_client = grpcclient.InferenceServerClient(
url=FLAGS['url'], verbose=FLAGS['verbose'])
else:
# Specify large enough concurrency to handle the
# the number of requests.
concurrency = 20 if FLAGS['async_set'] else 1
triton_client = httpclient.InferenceServerClient(
url=FLAGS['url'], verbose=FLAGS['verbose'], concurrency=concurrency)
except Exception as e:
print("client creation failed: " + str(e))
sys.exit(1)
# Make sure the model matches our requirements, and get some
# properties of the model that we need for preprocessing
try:
model_metadata = triton_client.get_model_metadata(
model_name=FLAGS['model_name'], model_version=FLAGS['model_version'])
except InferenceServerException as e:
print("failed to retrieve the metadata: " + str(e))
sys.exit(1)
try:
model_config = triton_client.get_model_config(
model_name=FLAGS['model_name'], model_version=FLAGS['model_version'])
except InferenceServerException as e:
print("failed to retrieve the config: " + str(e))
sys.exit(1)
if FLAGS['protocol'].lower() == "grpc":
model_config = model_config.config
else:
model_metadata, model_config = convert_http_metadata_config(
model_metadata, model_config)
triton_model = TRITON_MODEL_DICT[FLAGS['mode'].lower()].from_metadata(
model_metadata, model_config)
# Set target shape based on dimension format
if triton_model.data_format == 1: # NHWC:
target_shape = (triton_model.h, triton_model.w, triton_model.c)
elif triton_model.data_format == 2: # NCHW
target_shape = (triton_model.c, triton_model.h, triton_model.w)
npdtype = triton_to_np_dtype(triton_model.triton_dtype)
max_batch_size = triton_model.max_batch_size
frames = []
if os.path.isdir(FLAGS['image_filename']):
frames = [
Frame(os.path.join(FLAGS['image_filename'], f),
triton_model.data_format,
npdtype,
target_shape)
for f in os.listdir(FLAGS['image_filename'])
if os.path.isfile(os.path.join(FLAGS['image_filename'], f)) and
os.path.splitext(f)[-1] in [".jpg", ".jpeg", ".png"]
]
else:
frames = [
Frame(os.path.join(FLAGS['image_filename']),
triton_model.data_format,
npdtype,
target_shape)
]
# Send requests of FLAGS.batch_size images. If the number of
# images isn't an exact multiple of FLAGS.batch_size then just
# start over with the first images until the batch is filled.
requests = []
responses = []
result_filenames = []
request_ids = []
image_idx = 0
last_request = False
user_data = UserData()
args_postprocessor = [
FLAGS['batch_size'], frames, FLAGS['output_path'], triton_model.data_format
]
postprocessor = POSTPROCESSOR_DICT[FLAGS['mode'].lower()](
*args_postprocessor)
# Holds the handles to the ongoing HTTP async requests.
async_requests = []
sent_count = 0
if FLAGS['streaming']:
triton_client.start_stream(partial(completion_callback, user_data))
logger.info("Sending inference request for batches of data")
with tqdm(total=len(frames)) as pbar:
while not last_request:
input_filenames = []
repeated_image_data = []
for idx in range(FLAGS['batch_size']):
frame = frames[image_idx]
img = frame.load_image()
repeated_image_data.append(
triton_model.preprocess(
frame.as_numpy(img)
)
)
image_idx = (image_idx + 1) % len(frames)
if image_idx == 0:
last_request = True
if max_batch_size > 0:
batched_image_data = np.stack(repeated_image_data, axis=0)
else:
batched_image_data = repeated_image_data[0]
# Send request
try:
req_gen_args = [batched_image_data, triton_model.input_names,
triton_model.output_names, triton_model.triton_dtype,
FLAGS['protocol'].lower()]
req_gen_kwargs = {}
req_generator = requestGenerator(
*req_gen_args, **req_gen_kwargs)
for inputs, outputs in req_generator:
sent_count += 1
if FLAGS['streaming']:
triton_client.async_stream_infer(
FLAGS['model_name'],
inputs,
request_id=str(sent_count),
model_version=FLAGS['model_version'],
outputs=outputs)
elif FLAGS['async_set']:
if FLAGS['protocol'].lower() == "grpc":
triton_client.async_infer(
FLAGS['model_name'],
inputs,
partial(completion_callback, user_data),
request_id=str(sent_count),
model_version=FLAGS['model_version'],
outputs=outputs)
else:
async_requests.append(
triton_client.async_infer(
FLAGS['model_name'],
inputs,
request_id=str(sent_count),
model_version=FLAGS['model_version'],
outputs=outputs))
else:
responses.append(
triton_client.infer(FLAGS['model_name'],
inputs,
request_id=str(sent_count),
model_version=FLAGS['model_version'],
outputs=outputs))
except InferenceServerException as e:
print("inference failed: " + str(e))
if FLAGS['streaming']:
triton_client.stop_stream()
sys.exit(1)
pbar.update(FLAGS['batch_size'])
if FLAGS['streaming']:
triton_client.stop_stream()
if FLAGS['protocol'].lower() == "grpc":
if FLAGS['streaming'] or FLAGS['async_set']:
processed_count = 0
while processed_count < sent_count:
(results, error) = user_data._completed_requests.get()
processed_count += 1
if error is not None:
print("inference failed: " + str(error))
sys.exit(1)
responses.append(results)
else:
if FLAGS['async_set']:
# Collect results from the ongoing async requests
# for HTTP Async requests.
for async_request in async_requests:
responses.append(async_request.get_result())
results = {}
tensor_response = []
logger.info(
"Gathering responses from the server and post processing the inferenced outputs.")
processed_request = 0
with tqdm(total=len(frames)) as pbar:
while processed_request < sent_count:
response = responses[processed_request]
if FLAGS['protocol'].lower() == "grpc":
this_id = response.get_response().id
else:
this_id = response.get_response()["id"]
batch_results = postprocessor.apply(
response, this_id,
)
results = {**results, **{k: v.pop('results') for k,v in batch_results.items()}}
tensor_response.append(batch_results)
processed_request += 1
pbar.update(FLAGS['batch_size'])
logger.info("PASS")
final_results = {}
if FLAGS.get('return_tensor'):
final_results['tensor_response'] = tensor_response
final_results['results'] = results
final_results['skeleton_edge_names'] = postprocessor.params['skeleton_edge_names']
final_results['keypoints'] = postprocessor.params['keypoints']
return final_results
| StarcoderdataPython |
3225036 | #!/usr/bin/env python3
import csv
def read_employees(csv_file_location):
with open(csv_file_location) as file:
csv.register_dialect('empDialect', skipinitialspace=True, strict=True)
employee_file = csv.DictReader(open(csv_file_location), dialect = 'empDialect')
employee_list = []
for data in employee_file:
employee_list.append(data)
return employee_list
employee_list = read_employees('/home/student-02-7aae63c60bd7/data/employees.csv')
def process_data(employee_list):
department_list = []
for employee_data in employee_list:
department_list.append(employee_data['Department'])
department_data = {}
for department_name in set(department_list):
department_data[department_name] = department_list.count(department_name)
return department_data
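# Illustrative example (made-up rows, not read from the real employees.csv):
#   process_data([{'Department': 'IT'}, {'Department': 'IT'}, {'Department': 'HR'}])
#   returns {'IT': 2, 'HR': 1}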
dictionary = process_data(employee_list)
def write_report(dictionary, report_file):
with open(report_file, "w+") as f:
for k in sorted(dictionary):
f.write(str(k)+':'+str(dictionary[k])+'\n')
f.close()
write_report(dictionary, '/home/student-02-7aae63c60bd7/test_report.txt')
| StarcoderdataPython |
12840685 | <reponame>Roberto-Sartore/Python
"""Faça um programa que pergunte o preço de três produtos e informe qual produto você deve comprar,
sabendo que a decisão é sempre pelo mais barato"""
p1 = float(input('Digite a 1º valor R$ : ').replace(',', '.'))
p2 = float(input('Digite a 2º valor R$ : ').replace(',', '.'))
p3 = float(input('Digite a 3º valor R$ : ').replace(',', '.'))
if p1 < p2 and p1 < p3:
print ('O produto 1 é o mais barato!!')
elif p2 < p1 and p2 < p3:
print ('O produto 2 é o mais barato!!')
elif p3 < p1 and p3 < p2:
print ('O produto 3 é o mais barato!!')
#Se alguns numeros forem iguais
elif p1 == p2 and p1 and p2 < p3:
print ('O produto 1 e 2 são os mais baratos!!')
elif p1 == p3 and p1 and p3 < p2:
print ('O produto 1 e 3 são os mais baratos!!')
elif p2 == p3 and p2 and p3 < p1:
print ('O produto 2 e 3 são os mais baratos!!')
#Se todo os numero forem iguais
else:
print ('Todos os preços são iguais!!')
| StarcoderdataPython |
11309451 | <reponame>01-Meyitzade-01/TgKATILMA<filename>src/handlers/excepts.py
import pathlib, json # noqa: E401
import logging
from aiogram import Bot, types
CONFIG = json.load(open(pathlib.Path.cwd().joinpath("src/config.json")))
logger = logging.getLogger(__name__)
async def on_err(event: types.Update, exception: Exception):
bot = Bot.get_current()
# notifies the admins
for admin in CONFIG["ADMINS"]:
await bot.send_message(admin, f"{exception} ocurred on event: {event}")
# logs the error in the logger
logger.critical(f"<{exception}> ocurred on event: {event}")
return True
| StarcoderdataPython |
6678677 | """
Object's attributes cache.
"""
import json, traceback
from collections import OrderedDict, deque
from django.apps import apps
from django.conf import settings
from muddery.server.utils import utils
from muddery.server.utils.exception import MudderyError, ERR
from muddery.server.database.storage.memory_storage import MemoryStorage
def to_string(value):
# pack a value to a string.
data_type = type(value)
if value is None:
# inner types
str_value = json.dumps((value, data_type.__name__))
elif data_type in {str, int, float, bool, bytes}:
# inner types
str_value = json.dumps((value, data_type.__name__))
elif hasattr(value, "__iter__"):
# iterable value
if data_type in {dict, OrderedDict}:
str_value = json.dumps((dict((to_string(key), to_string(obj)) for key, obj in value.items()), data_type.__name__))
else:
try:
str_value = json.dumps((tuple(to_string(obj) for obj in value), data_type.__name__))
except Exception as e:
raise MudderyError(ERR.server_error, "The object could not be stored.")
else:
raise MudderyError(ERR.server_error, "The object can not store %s of %s." % (value, type(value)))
return str_value
def from_string(str_value):
# unpack a value from a string.
if str_value is None:
return None
try:
json_value, data_type = json.loads(str_value)
if data_type == "NoneType":
value = None
elif data_type in {"str", "int", "float", "bool", "bytes"}:
value = eval(data_type)(json_value)
elif data_type in {"dict", "OrderedDict"}:
value = eval(data_type)((from_string(key), from_string(item)) for key, item in json_value.items())
else:
value = eval(data_type)(from_string(item) for item in json_value)
except Exception as e:
raise MudderyError(ERR.server_error, "The object can not load %s." % str_value)
return value
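# Minimal round-trip sketch (values are illustrative, not part of the original module):
#   packed = to_string({"hp": 10, "skills": ("fire", "ice")})  # value is stored together with its type name
#   from_string(packed) == {"hp": 10, "skills": ("fire", "ice")}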
class BaseObjectStorage(object):
"""
The storage of object attributes.
"""
# data storage
storage_class = None
storage = None
@classmethod
def save(cls, obj_id, key, value):
"""
Set an attribute.
Args:
obj_id: (number) object's id.
key: (string) attribute's key.
value: (any) attribute's value.
"""
to_save = to_string(value)
cls.storage.save(obj_id, key, to_save)
@classmethod
def save_keys(cls, obj_id, value_dict):
"""
Set attributes.
Args:
obj_id: (number) object's id.
value_dict: (dict) a dict of key-values.
"""
if value_dict:
with cls.storage.atomic():
for key, value in value_dict.items():
cls.storage.save(obj_id, key, to_string(value))
@classmethod
def has(cls, obj_id, key):
"""
Check if the attribute exists.
Args:
obj_id: (number) object's id.
key: (string) attribute's key.
"""
return cls.storage.has(obj_id, key)
@classmethod
def load(cls, obj_id, key, *default):
"""
Get the value of an attribute.
Args:
obj_id: (number) object's id.
key: (string) attribute's key.
default: (any or none) default value.
Raises:
KeyError: If `raise_exception` is set and no matching Attribute
was found matching `key` and no default value set.
"""
try:
value = cls.storage.load(obj_id, key)
return from_string(value)
except KeyError as e:
if len(default) > 0:
return default[0]
else:
raise e
@classmethod
def load_obj(cls, obj_id):
"""
Get values of an object.
Args:
obj_id: (number) object's id.
"""
values = cls.storage.load_category(obj_id, {})
return {key: from_string(value) for key, value in values.items()}
@classmethod
def delete(cls, obj_id, key):
"""
delete an attribute of an object.
Args:
obj_id: (number) object's id.
key: (string) attribute's key.
"""
cls.storage.delete(obj_id, key)
@classmethod
def remove_obj(cls, obj_id):
"""
Remove an object's all attributes.
Args:
obj_id: (number) object's id.
"""
cls.storage.delete_category(obj_id)
@classmethod
def atomic(cls):
"""
Guarantee the atomic execution of a given block.
"""
return cls.storage.atomic()
class DBObjectStorage(BaseObjectStorage):
"""
The storage of object attributes.
"""
# data storage
storage_class = utils.class_from_path(settings.DATABASE_ACCESS_OBJECT)
storage = storage_class("object_states", "obj_id", "key", "value")
class MemoryObjectStorage(BaseObjectStorage):
"""
The storage of object attributes.
"""
# data storage
storage_class = MemoryStorage
storage = storage_class()
| StarcoderdataPython |
11301885 | def stop():
pass
| StarcoderdataPython |
1809588 | import unittest
import asyncio
from suds.client import Client
import pandas as pd
from vat_check import get_dataframe_from_file, get_unique_VAT_numbers
from vat_check import get_vat_registration
class Test_vat_check(unittest.TestCase):
excel_file = './test_examples/example.xlsx'
csv_file = './test_examples/example.csv'
not_supported_file = './test_examples/example.txt'
missing_file = './test_examples/404.file'
COUNTRY_CODE = 'GB'
SUDS_CLIENT = Client(
'http://ec.europa.eu/taxation_customs/vies/checkVatService.wsdl')
def test_get_dataframe_from_file(self):
df = get_dataframe_from_file(self.excel_file)
df_csv = get_dataframe_from_file(self.csv_file)
self.assertIsInstance(df, pd.DataFrame)
self.assertIsInstance(df_csv, pd.DataFrame)
with self.assertRaises(SystemExit) as cm:
get_dataframe_from_file(self.missing_file)
        self.assertEqual(cm.exception.code, 'File not found!')
with self.assertRaises(SystemExit) as cm:
get_dataframe_from_file(self.not_supported_file)
        self.assertEqual(cm.exception.code,
                         'file type is not supported. Operation cancelled.')  # noqa
def test_get_unique_VAT_numbers(self):
df = pd.read_excel(self.excel_file)
column_index = 0
series = get_unique_VAT_numbers(df, column_index)
self.assertIsInstance(series, pd.Series)
self.assertEqual(len(series), 19)
self.assertEqual(series[10], 297150384)
def test_get_vat_registration_success(self):
VALID_VAT_NUMBER = 297150384
loop = asyncio.get_event_loop()
success_result = loop.run_until_complete(get_vat_registration(
VALID_VAT_NUMBER,
self.COUNTRY_CODE,
self.SUDS_CLIENT))
loop.close()
self.assertIsInstance(success_result, dict)
self.assertEqual(success_result['status'], 'valid')
self.assertEqual(success_result['VAT'], VALID_VAT_NUMBER)
self.assertEqual(success_result['name'], 'Cardiff Food Store Limited')
self.assertEqual(success_result['address'],
'Unit 3 Hosking Industrial, Dumballs Road, Butetown, Cardiff') # noqa
self.assertEqual(success_result['postcode'], 'CF10 5FG')
def test_get_vat_registration_invalid(self):
INVALID_VAT_NUMBER = 1111111
loop = asyncio.new_event_loop()
invalid_result = loop.run_until_complete(get_vat_registration(
INVALID_VAT_NUMBER,
self.COUNTRY_CODE,
self.SUDS_CLIENT))
loop.close()
self.assertIsInstance(invalid_result, dict)
self.assertEqual(invalid_result['status'], 'VAT is not Valid')
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
11276783 | <reponame>beiyewp/lain-cli
from inspect import cleandoc
from urllib.parse import urlparse
from lain_cli.utils import (
RequestClientMixin,
context,
diff_dict,
ensure_str,
git,
rc,
tell_executor,
template_env,
)
def tell_webhook_client():
ctx = context()
obj = ctx.obj
config = obj['values'].get('webhook')
if not config:
return
clusters_to_notify = config.get('clusters') or set()
cluster = obj['cluster']
if clusters_to_notify and cluster not in clusters_to_notify:
return
hook_url = config['url']
pr = urlparse(hook_url)
if pr.netloc == 'open.feishu.cn':
return FeishuWebhook(hook_url)
raise NotImplementedError(f'webhook not implemented for {hook_url}')
class Webhook(RequestClientMixin):
endpoint = None
deploy_message_template = template_env.get_template('deploy-webhook-message.txt.j2')
k8s_secret_diff_template = template_env.get_template('k8s-secret-diff.txt.j2')
def __init__(self, endpoint=None):
self.endpoint = endpoint
def send_msg(self, msg):
raise NotImplementedError
def diff_k8s_secret(self, old, new):
secret_name = old['metadata']['name']
diff = diff_dict(old['data'], new['data'])
if not sum(len(l) for l in diff.values()):
# do not send notification on empty diff
return
ctx = context()
report = self.k8s_secret_diff_template.render(
secret_name=secret_name,
executor=tell_executor(),
cluster=ctx.obj['cluster'],
**diff,
)
return self.send_msg(report)
def send_deploy_message(self, stderr=None, rollback_revision=None):
ctx = context()
obj = ctx.obj
git_revision = obj.get('git_revision')
if git_revision:
res = git(
'log',
'-n',
'1',
'--pretty=format:%s',
git_revision,
check=False,
capture_output=True,
)
if rc(res):
commit_msg = ensure_str(res.stderr)
else:
commit_msg = ensure_str(res.stdout)
else:
commit_msg = 'N/A'
executor = tell_executor()
text = self.deploy_message_template.render(
executor=executor,
commit_msg=commit_msg,
stderr=stderr,
rollback_revision=rollback_revision,
**ctx.obj,
)
return self.send_msg(text)
class FeishuWebhook(Webhook):
def send_msg(self, msg):
payload = {
'msg_type': 'text',
'content': {
'text': cleandoc(msg),
},
}
return self.post(json=payload)
| StarcoderdataPython |
12312 | <filename>core/migrations/0010_wagtailsitepage_screenshot.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 23:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0009_wagtail112upgrade'),
]
operations = [
migrations.RenameField(
model_name='wagtailsitepage',
old_name='image_desktop',
new_name='site_screenshot',
),
migrations.RemoveField(
model_name='wagtailsitepage',
name='image_phone',
),
migrations.RemoveField(
model_name='wagtailsitepage',
name='image_tablet',
),
]
| StarcoderdataPython |
8148342 | class Configs():
years=['2018', '2017', '2016']
sleep_interval=0.01 | StarcoderdataPython |
8064101 | <reponame>Tomato1107/OpenQuadruped
import matplotlib.pyplot as plt
import numpy as np
from gen_suite import Path, Pose
# path parameters
contact_length = 30 # mm
dip_height = 20 # mm
dip_increment_0 = 10 # mm
dip_increment_1 = 5 # mm
waypoints = [Pose(0, 0, 0),
Pose(contact_length / 2, 0, 0),
Pose(contact_length / 4, dip_increment_0, 165),
Pose(-2 * contact_length / 3, dip_increment_1, -150),
Pose(-contact_length / 2, 0, 0),
Pose(0, 0, 0)]
path = Path(waypoints)
x, y = path.get_plot_values()
plt.axes().set_aspect('equal')
plt.title("Quadruped Foot Path: Quintic Hermite Splines")
plt.plot(x, y)
plt.plot([pose.x for pose in waypoints], [pose.y for pose in waypoints], 'ro')
plt.show()
| StarcoderdataPython |
356202 | <gh_stars>1-10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bns',
'0048_wealthidxperdistrict_wealthidxperhh_wealthidxperlandscape_wealthidxpervillage'),
]
operations = [
migrations.RunSQL(
"""
CREATE OR REPLACE VIEW bns_wbi_hh_diff_2015_2017 AS
WITH wbi_2015 AS (
SELECT upper(bns_wbi_hh.hh_id) AS hh_id,
bns_wbi_hh.wbi
FROM bns_wbi_hh
WHERE bns_wbi_hh.dataset_year::double precision = 2015::double precision
), wbi_2017 AS (
SELECT b.dataset_uuid_id,
a.answer_id,
upper(b.hh_id) AS hh_id,
b.wbi,
b.hh_type_control,
b.hh_type_org_benef,
b.hh_type_other_benef,
b.village,
b.district,
b.landscape,
g.geom
FROM bns_wbi_hh b
JOIN bns_answer a ON a.hh_id = b.hh_id AND a.dataset_uuid_id = b.dataset_uuid_id
LEFT JOIN bns_answergps g ON a.answer_id = g.answer_id
WHERE b.dataset_year::double precision = 2017::double precision
)
SELECT row_number() OVER () AS gid,
wbi_2017.dataset_uuid_id,
wbi_2015.hh_id,
wbi_2017.village,
wbi_2017.district,
wbi_2017.landscape,
wbi_2017.hh_type_control,
wbi_2017.hh_type_org_benef,
wbi_2017.hh_type_other_benef,
wbi_2015.wbi AS wbi_2015,
wbi_2017.wbi AS wbi_2017,
wbi_2017.wbi - wbi_2015.wbi AS wbi_diff,
wbi_2017.geom
FROM wbi_2015
JOIN wbi_2017 ON wbi_2015.hh_id = wbi_2017.hh_id;
""",
reverse_sql="DROP VIEW bns_wbi_hh_diff_2015_2017;"),
migrations.RunSQL(
"""
CREATE OR REPLACE VIEW bns_wbi_village_diff_2015_2017 AS
SELECT row_number() OVER () AS gid,
bns_wbi_hh_diff_2015_2017.dataset_uuid_id,
bns_wbi_hh_diff_2015_2017.village,
bns_wbi_hh_diff_2015_2017.district,
bns_wbi_hh_diff_2015_2017.landscape,
bns_wbi_hh_diff_2015_2017.hh_type_control,
avg(bns_wbi_hh_diff_2015_2017.wbi_2015) AS avg_wbi_2015,
avg(bns_wbi_hh_diff_2015_2017.wbi_2017) AS avg_wbi_2017,
avg(bns_wbi_hh_diff_2015_2017.wbi_diff) AS avg_wbi_diff,
st_centroid(st_collect(bns_wbi_hh_diff_2015_2017.geom)) AS geom
FROM bns_wbi_hh_diff_2015_2017
GROUP BY bns_wbi_hh_diff_2015_2017.dataset_uuid_id, bns_wbi_hh_diff_2015_2017.village, bns_wbi_hh_diff_2015_2017.district, bns_wbi_hh_diff_2015_2017.landscape, bns_wbi_hh_diff_2015_2017.hh_type_control
ORDER BY bns_wbi_hh_diff_2015_2017.dataset_uuid_id, bns_wbi_hh_diff_2015_2017.landscape, bns_wbi_hh_diff_2015_2017.district, bns_wbi_hh_diff_2015_2017.village, bns_wbi_hh_diff_2015_2017.hh_type_control;
""",
reverse_sql="DROP VIEW bns_wbi_village_diff_2015_2017;")
] | StarcoderdataPython |
3301006 | <reponame>SpartanPlume/MysqldbPythonWrapper<gh_stars>0
"""File loading all the constants for python use"""
import argparse
import json
CONSTANTS_PATH = "./constants.json"
with open(CONSTANTS_PATH) as f:
data = json.load(f)
constants = argparse.Namespace(**data)
| StarcoderdataPython |
# assignment 4 solution
from pylab import *  # numpy + matplotlib names used below (cos, sin, linspace, zeros, figure, ...)
def joints_to_hand(a1,a2,l1,l2):
Ex = l1 * cos(a1)
Ey = l1 * sin(a1)
Hx = Ex + (l2 * cos(a1+a2))
Hy = Ey + (l2 * sin(a1+a2))
return Ex,Ey,Hx,Hy
def minjerk(H1x,H1y,H2x,H2y,t,n):
"""
Given hand initial position H1x,H1y, final position H2x,H2y and movement duration t,
and the total number of desired sampled points n,
Calculates the hand path H over time T that satisfies minimum-jerk.
Flash, Tamar, and <NAME>. "The coordination of arm
movements: an experimentally confirmed mathematical model." The
journal of Neuroscience 5, no. 7 (1985): 1688-1703.
"""
T = linspace(0,t,n)
Hx = zeros(n)
Hy = zeros(n)
for i in range(n):
tau = T[i]/t
Hx[i] = H1x + ((H1x-H2x)*(15*(tau**4) - (6*tau**5) - (10*tau**3)))
Hy[i] = H1y + ((H1y-H2y)*(15*(tau**4) - (6*tau**5) - (10*tau**3)))
return T,Hx,Hy
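# Sanity-check sketch (illustrative numbers: a 10 cm reach over 0.5 s, sampled at 5 points):
#   T, Hx, Hy = minjerk(0.0, 0.0, 0.1, 0.0, 0.5, 5)
#   Hx[0] is 0.0 and Hx[-1] is 0.1, with the characteristic bell-shaped speed profile in between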
# Question 1
l1 = 0.34
l2 = 0.46
angs = array([30.0,60.0,90.0]) * pi/180
figure(figsize=(5,10))
for i in range(3):
for j in range(3):
a1 = angs[i]
a2 = angs[j]
subplot(2,1,1)
plot(a1*180/pi,a2*180/pi,'r+')
ex,ey,hx,hy = joints_to_hand(a1,a2,l1,l2)
subplot(2,1,2)
plot(hx,hy,'r+')
for k in range(20):
a1n = a1 + randn()*(sqrt(3)*pi/180)
a2n = a2 + randn()*(sqrt(3)*pi/180)
subplot(2,1,1)
plot(a1n*180/pi,a2n*180/pi,'b.')
ex,ey,hx,hy = joints_to_hand(a1n,a2n,l1,l2)
subplot(2,1,2)
plot(hx,hy,'b.')
subplot(2,1,1)
axis('equal')
xlabel('SHOULDER ANGLE (deg)')
ylabel('ELBOW ANGLE (deg)')
subplot(2,1,2)
axis('equal')
xlabel('HAND POSITION X (m)')
ylabel('HAND POSITION Y (m)')
# Question 2
def hand_to_joints(hx,hy,l1,l2):
"""
Given hand position H=(hx,hy) and link lengths l1,l2,
returns joint angles A=(a1,a2)
"""
a2 = arccos(((hx*hx)+(hy*hy)-(l1*l1)-(l2*l2))/(2.0*l1*l2))
a1 = arctan(hy/hx) - arctan((l2*sin(a2))/(l1+(l2*cos(a2))))
if a1 < 0:
a1 = a1 + pi
elif a1 > pi:
a1 = a1 - pi
return a1,a2
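# Round-trip sketch (illustrative angles, assuming the l1=0.34 m, l2=0.46 m link lengths used below):
#   ex, ey, hx, hy = joints_to_hand(pi/4, pi/3, 0.34, 0.46)
#   hand_to_joints(hx, hy, 0.34, 0.46) recovers approximately (pi/4, pi/3)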
# Question 3
l1 = 0.34
l2 = 0.46
angs = array([30.0,60.0,90.0]) * pi/180
figure(figsize=(5,10))
for i in range(3):
for j in range(3):
a1 = angs[i]
a2 = angs[j]
subplot(2,1,1)
plot(a1*180/pi,a2*180/pi,'r+')
ex,ey,hx,hy = joints_to_hand(a1,a2,l1,l2)
subplot(2,1,2)
plot(hx,hy,'r+')
for k in range(20):
hxn = hx + randn()*(sqrt(2)/100)
hyn = hy + randn()*(sqrt(2)/100)
a1n,a2n = hand_to_joints(hxn,hyn,l1,l2)
subplot(2,1,1)
plot(a1n*180/pi,a2n*180/pi,'b.')
subplot(2,1,2)
plot(hxn,hyn,'b.')
subplot(2,1,1)
axis('equal')
xlabel('SHOULDER ANGLE (deg)')
ylabel('ELBOW ANGLE (deg)')
title('JOINT SPACE')
subplot(2,1,2)
axis('equal')
xlabel('HAND POSITION X (m)')
ylabel('HAND POSITION Y (m)')
title('HAND SPACE')
# Question 4
l1,l2 = 0.34, 0.46
H1x,H1y = -0.20, -.55
movdist = 0.10
movtime = 0.50
npts = 20
ncirc = 8
angs = linspace(0,360,ncirc+1)*pi/180
angs = angs[0:-1]
figure(figsize=(5,10))
for i in range(ncirc):
H2x = H1x + movdist*cos(angs[i])
H2y = H1y + movdist*sin(angs[i])
T,Hx,Hy = minjerk(H1x,H1y,H2x,H2y,movtime,npts)
subplot(2,1,2)
plot(Hx,Hy,'.')
axis('equal')
A1 = zeros(npts)
A2 = zeros(npts)
for j in range(npts):
A1[j],A2[j] = hand_to_joints(Hx[j],Hy[j],l1,l2)
subplot(2,1,1)
plot(A1*180/pi,A2*180/pi,'.')
axis('equal')
subplot(2,1,1)
xlabel('SHOULDER ANGLE (deg)')
ylabel('ELBOW ANGLE (deg)')
title('JOINT SPACE')
subplot(2,1,2)
xlabel('HAND POS X (m)')
ylabel('HAND POS Y (m)')
title('HAND SPACE')
# Question 5
l1,l2 = 0.34, 0.46
A1s,A1e = 45*pi/180, 90*pi/180
movdist = 10*pi/180
movtime = 0.50
npts = 20
ncirc = 8
angs = linspace(0,360,ncirc+1)*pi/180
angs = angs[0:-1]
figure(figsize=(5,10))
for i in range(ncirc):
A2s = A1s + movdist*cos(angs[i])
A2e = A1e + movdist*sin(angs[i])
T,As,Ae = minjerk(A1s,A1e,A2s,A2e,movtime,npts)
subplot(2,1,1)
plot(As*180/pi,Ae*180/pi,'.')
axis('equal')
Hx = zeros(npts)
Hy = zeros(npts)
for j in range(npts):
ex,ey,Hx[j],Hy[j] = joints_to_hand(As[j],Ae[j],l1,l2)
subplot(2,1,2)
plot(Hx,Hy,'.')
axis('equal')
subplot(2,1,1)
xlabel('SHOULDER ANGLE (deg)')
ylabel('ELBOW ANGLE (deg)')
title('JOINT SPACE')
subplot(2,1,2)
xlabel('HAND POS X (m)')
ylabel('HAND POS Y (m)')
title('HAND SPACE')
| StarcoderdataPython |
3384465 | # -*- coding: utf-8 -*-
# Copyright: (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from contextlib import contextmanager
from ansible_collections.community.general.tests.unit.compat import unittest
from ansible_collections.community.general.tests.unit.compat.mock import call, patch
from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
from ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_realm_info
from itertools import count
from ansible.module_utils.six import StringIO
@contextmanager
def patch_keycloak_api(get_realm_info_by_id):
"""Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
Patches the `login` and `_post_json` methods
Keyword arguments are passed to the mock object that patches `_post_json`
No arguments are passed to the mock object that patches `login` because no tests require it
Example::
with patch_ipa(return_value={}) as (mock_login, mock_post):
...
"""
obj = keycloak_realm_info.KeycloakAPI
with patch.object(obj, 'get_realm_info_by_id', side_effect=get_realm_info_by_id) as mock_get_realm_info_by_id:
yield mock_get_realm_info_by_id
def get_response(object_with_future_response, method, get_id_call_count):
if callable(object_with_future_response):
return object_with_future_response()
if isinstance(object_with_future_response, dict):
return get_response(
object_with_future_response[method], method, get_id_call_count)
if isinstance(object_with_future_response, list):
call_number = next(get_id_call_count)
return get_response(
object_with_future_response[call_number], method, get_id_call_count)
return object_with_future_response
def build_mocked_request(get_id_user_count, response_dict):
def _mocked_requests(*args, **kwargs):
url = args[0]
method = kwargs['method']
future_response = response_dict.get(url, None)
return get_response(future_response, method, get_id_user_count)
return _mocked_requests
def create_wrapper(text_as_string):
"""Allow to mock many times a call to one address.
Without this function, the StringIO is empty for the second call.
"""
def _create_wrapper():
return StringIO(text_as_string)
return _create_wrapper
def mock_good_connection():
token_response = {
'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
return patch(
'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
side_effect=build_mocked_request(count(), token_response),
autospec=True
)
class TestKeycloakRealmRole(ModuleTestCase):
def setUp(self):
super(TestKeycloakRealmRole, self).setUp()
self.module = keycloak_realm_info
def test_get_public_info(self):
"""Get realm public info"""
module_args = {
'auth_keycloak_url': 'http://keycloak.url/auth',
'realm': 'my-realm',
}
return_value = [
None,
{
"realm": "my-realm",
"public_key": "<KEY>
"token-service": "https://auth.mock.com/auth/realms/my-realm/protocol/openid-connect",
"account-service": "https://auth.mock.com/auth/realms/my-realm/account",
"tokens-not-before": 0,
}
]
set_module_args(module_args)
# Run the module
with mock_good_connection():
with patch_keycloak_api(get_realm_info_by_id=return_value) \
as (mock_get_realm_info_by_id):
with self.assertRaises(AnsibleExitJson) as exec_info:
self.module.main()
self.assertEqual(len(mock_get_realm_info_by_id.mock_calls), 1)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
11345116 | <reponame>alexstaley/machine-learning<gh_stars>1-10
"""
<NAME> -- Student ID: 919519311
Assignment 2 -- February 2020
### HERE BEGINS THE Main.py FILE ###
This code creates a neural network of perceptron objects, runs the experiments
described in assignment 2, and displays the results in the prescribed format.
Parameters are set via global constants declared in the Perceptron.py file.
This includes the primary variables of each experiment in the assignment:
# NUM_HIDDEN_UNITS # Experiment 1 variable--default 100
# PARTIAL # Experiment 2 variable--default 1
# MOMENTUM # Experiment 3 variable--default 0
File paths are defined in this file, lines 22/23. Change as needed.
"""
from neuralNetwork.Experiment import *
# DEFINE FILE PATHS FOR TRAINING AND VALIDATION DATA HERE:
trainingFilepath = "./venv/mnist_train.csv"
validationFilepath = "./venv/mnist_test.csv"
print("\nImporting training file...")
trainingSet, trainingVector, trainingValues = parseData(trainingFilepath, PARTIAL)
print("Importing validation file...")
validationSet, validationVector, validationValues = parseData(validationFilepath, partial=1)
print("\nInitializing neural network...\n")
network = NeuralNetwork()
print("######## RUNNING EXPERIMENT: ########", end="\n\t\t")
print(NUM_HIDDEN_UNITS, "hidden units", end="\n\t\t")
print(PCT_DATA, "of training data", sep="% ")
print("\t\tMomentum =", MOMENTUM, '\n')
trainingAccuracy, validationAccuracy, confusionMatrix = runExperiment(
network, trainingSet, trainingVector, validationSet, validationVector, validationValues)
print("\nExperiment complete! Displaying results")
produceGraph(trainingAccuracy, validationAccuracy)
produceMatrix(confusionMatrix)
| StarcoderdataPython |
5046761 | from cone.app.browser.ajax import AjaxAction
from cone.app.browser.ajax import ajax_form_fiddle
from cone.app.browser.utils import format_traceback
from cone.app.browser.utils import make_url
from cone.app.model import AppSettings
from cone.tile import Tile
from cone.tile import render_tile
from cone.tile import tile
from plumber import Behavior
from plumber import default
from plumber import plumb
from pyramid.i18n import TranslationStringFactory
from pyramid.i18n import get_localizer
from pyramid.view import view_config
from webob import Response
_ = TranslationStringFactory('cone.app')
@view_config(name='settings_tab_content', xhr=True, permission='manage')
def settings_tab_content(model, request):
"""Used by jquerytools tabs plugin to get settings section content.
"""
try:
rendered = render_tile(model, request, 'content')
except Exception:
localizer = get_localizer(request)
error = localizer.translate(_('error', default='Error'))
rendered = '<div>{}: {}</div>'.format(error, format_traceback())
return Response('<div class="{}">{}</div>'.format(model.name, rendered))
@tile(name='content',
path='templates/settings.pt',
interface=AppSettings,
permission='manage')
class AppSettings(Tile):
@property
def tabs(self):
ret = list()
for val in self.model.values():
ret.append({
'title': val.metadata.title,
'target': make_url(
self.request,
node=val,
resource='settings_tab_content'),
})
return ret
class SettingsBehavior(Behavior):
"""Particular settings object form behavior.
"""
@plumb
def prepare(_next, self):
_next(self)
selector = '#form-{}'.format('-'.join(self.form.path))
ajax_form_fiddle(self.request, selector, 'replace')
@default
def next(self, request):
url = make_url(request.request, node=self.model)
selector = '.{}'.format(self.model.name)
return [AjaxAction(url, 'content', 'inner', selector)]
| StarcoderdataPython |
9720316 | from datetime import datetime, timedelta
from app.api.validations import (
MISSING_PARAMS, INVALID_PARAMS, MISSING_BODY, INVALID_TYPE
)
from app.models import Resource
from app.utils import get_error_code_from_status
def create_resource(client,
apikey,
name=None,
url=None,
category=None,
languages=None,
paid=None,
notes=None,
headers=None,
endpoint='/api/v1/resources'):
return client.post(endpoint,
json=[dict(
name="Some Name" if not name else name,
url=f"http://example.org/{str(datetime.now())}"
if not url else url,
category="New Category" if not category else category,
languages=[
"Python", "New Language"
] if not languages else languages,
paid=False if not paid else paid,
notes="Some notes" if not notes else notes)],
headers={'x-apikey': apikey} if not headers else headers)
def update_resource(client,
apikey,
name=None,
url=None,
category=None,
languages=None,
paid=None,
notes=None,
headers=None,
endpoint='/api/v1/resources/1'):
return client.put(endpoint,
json=dict(
name="New name" if not name else name,
url="https://new.url" if not url else url,
category="New Category" if not category else category,
languages=["New language"] if not languages else languages,
paid=False if not paid else paid,
notes="New notes" if not notes else notes),
headers={'x-apikey': apikey} if not headers else headers)
def set_resource_last_updated(updated_time=(datetime.now() + timedelta(days=-7)),
id=None, db=None):
if id is not None:
row = Resource.query.get(id)
row.created_at = updated_time
row.last_updated = updated_time
else:
q = Resource.query
for row in q:
row.created_at = updated_time
row.last_updated = updated_time
if db is not None:
db.session.commit()
def get_api_key(client):
response = client.post('api/v1/apikey', json=dict(
email="<EMAIL>",
password="<PASSWORD>"
))
return response.json['data'].get('apikey')
def assert_correct_response(response, code):
assert (response.status_code == code)
assert (isinstance(response.json.get('errors').get(
get_error_code_from_status(response.status_code)), dict))
assert (isinstance(response.json.get('errors').get(
get_error_code_from_status(response.status_code)).get('message'), str))
def assert_correct_validation_error(response, params):
assert (response.status_code == 422)
assert (isinstance(response.json.get('errors')
.get(INVALID_PARAMS), dict))
assert (isinstance(response.json.get('errors')
.get(INVALID_PARAMS).get('message'), str))
for param in params:
assert (param in response.json.get('errors')
.get(INVALID_PARAMS).get("params"))
assert (param in response.json.get('errors')
.get(INVALID_PARAMS).get("message"))
def assert_missing_body(response):
assert (response.status_code == 422)
assert (isinstance(response.json.get('errors')[0]
.get(MISSING_BODY), dict))
assert (isinstance(response.json.get('errors')[0]
.get(MISSING_BODY).get('message'), str))
def assert_invalid_body(response):
assert response.status_code == 422
assert isinstance(response.json.get("errors"), dict)
assert isinstance(response.json["errors"].get(f"{INVALID_TYPE}"), dict)
def assert_invalid_create(response, params, index):
assert (response.status_code == 422)
assert (isinstance(response.json.get('errors')[index]
.get(INVALID_PARAMS), dict))
assert (isinstance(response.json.get('errors')[index]
.get(INVALID_PARAMS).get('message'), str))
for param in params:
assert (response.json.get('errors')[index].get("index") == index)
assert (param in response.json.get('errors')[index]
.get(INVALID_PARAMS).get("params"))
assert (param in response.json.get('errors')[index]
.get(INVALID_PARAMS).get("message"))
def assert_missing_params_create(response, params, index):
assert (response.status_code == 422)
assert (isinstance(response.json.get('errors')[index]
.get(MISSING_PARAMS), dict))
assert (isinstance(response.json.get('errors')[index]
.get(MISSING_PARAMS).get('message'), str))
for param in params:
assert (response.json.get('errors')[index].get("index") == index)
assert (param in response.json.get('errors')[index]
.get(MISSING_PARAMS).get("params"))
assert (param in response.json.get('errors')[index]
.get(MISSING_PARAMS).get("message"))
def assert_wrong_type(response, expected_type):
assert (response.status_code == 422)
assert (expected_type in response.get_json()
.get("errors").get(INVALID_TYPE).get("message"))
| StarcoderdataPython |
6582209 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-05-03 13:42
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.text import slugify
def generate_slugs(apps, schema_editor):
SponsoredEvent = apps.get_model('events', 'SponsoredEvent')
db_alias = schema_editor.connection.alias
slug_len = SponsoredEvent._meta.get_field('slug').max_length
for e in SponsoredEvent.objects.using(db_alias).filter(slug=''):
e.slug = slugify(e.title, allow_unicode=True)[:slug_len]
e.save()
class Migration(migrations.Migration):
dependencies = [
('events', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='sponsoredevent',
name='slug',
field=models.SlugField(
allow_unicode=True, blank=True, verbose_name='slug',
),
),
migrations.RunPython(
code=generate_slugs,
reverse_code=migrations.RunPython.noop,
),
migrations.AlterField(
model_name='sponsoredevent',
name='slug',
field=models.SlugField(allow_unicode=True, verbose_name='slug'),
),
]
| StarcoderdataPython |
314587 |
from django.db import connection
from django.db.models import Q
from django.shortcuts import get_object_or_404
from tenant_schemas.utils import tenant_context
from cajas.tenant.models import Platform
from cajas.users.models.charges import Charge
from cajas.users.models.employee import Employee
def _get_queryset(klass):
if hasattr(klass, '_default_manager'):
return klass._default_manager.all()
return klass
def get_object_or_none(klass, *args, **kwargs):
"""
Use get() to return an object, or return None if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Like with QuerySet.get(), MultipleObjectsReturned is raised if more than
one object is found.
"""
queryset = _get_queryset(klass)
if not hasattr(queryset, 'get'):
klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
raise ValueError(
"First argument to get_object_or_none() must be a Model, Manager, "
"or QuerySet, not '%s'." % klass__name
)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
return None
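# Illustrative usage sketch (the lookup below is an assumed example, not taken from the real views):
#   employee = get_object_or_none(Employee, user=some_user)
#   if employee is None:
#       ...  # no matching Employee; handled without raising DoesNotExist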
def is_secretary(user, office):
secretary = Charge.objects.get(name="Secretaria")
try:
employee = Employee.objects.get(
Q(user=user),
(Q(office_country=office) | Q(office=office.office))
)
return employee.charge == secretary
except Employee.DoesNotExist:
return False
def is_admin_senior(user, office):
admin = Charge.objects.get(name="Administrador Senior")
try:
employee = Employee.objects.get(
Q(user=user),
Q(office_country=office) |
Q(office=office.office)
)
return employee.charge == admin
except Employee.DoesNotExist:
return False
def get_president_user():
charge = get_object_or_404(Charge, name="Presidente")
schema_name = connection.schema_name
tenant = Platform.objects.get(schema_name=schema_name)
with tenant_context(tenant):
try:
employee = Employee.objects.get(charge=charge)
return employee.user
except:
return None
| StarcoderdataPython |
11251973 | <filename>bot.py
import asyncio
from asyncio.tasks import sleep
from datetime import time
import datetime
import discord
import aioschedule as schedule
import functools
import config
from discord.ext import tasks, commands
intents = discord.Intents.default()
intents.voice_states = True
textChannel = None
voiceChannel = None
lock_job = None
unlock_job = None
def isNowInTimePeriod(startTime, endTime, nowTime):
if startTime < endTime:
return nowTime >= startTime and nowTime <= endTime
else:
return nowTime >= startTime or nowTime <= endTime
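# Illustrative behaviour (times below are assumptions, not values from config):
#   isNowInTimePeriod(time(22, 0), time(7, 0), time(23, 30)) -> True   (window wraps past midnight)
#   isNowInTimePeriod(time(22, 0), time(7, 0), time(12, 0))  -> False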
async def send_message(target_channel_id, target_message = "", send_with_empty_channel = False):
if (target_message == ""):
print("requested to send message but skipping because no message was configured")
return
voiceChannel = client.get_channel(config.voicechannel_id)
if (send_with_empty_channel == False and len(voiceChannel.members) == 0):
print("requested to send message but skipping because no members are present to see it")
return
print("running send_message")
target_channel = client.get_channel(target_channel_id)
await target_channel.send(target_message)
async def lock():
print("running lock")
voiceChannel = client.get_channel(config.voicechannel_id)
await voiceChannel.set_permissions(voiceChannel.guild.default_role, connect=False)
for member in voiceChannel.members:
await member.move_to(None)
await send_message(config.textchannel_id, config.lock_message)
async def unlock():
print("running unlock")
voiceChannel = client.get_channel(config.voicechannel_id)
await voiceChannel.set_permissions(voiceChannel.guild.default_role, connect=True)
await send_message(config.textchannel_id, config.unlock_message, True)
async def task_test(string):
print(string)
def checkManually():
    global lock_job, unlock_job  # set the module-level jobs that on_ready reads
    print("checking manually")
lock_job = schedule.every().day.at(config.lock_time_str).do(lock)
unlock_job = schedule.every().day.at(config.unlock_time_str).do(unlock)
class MyClient(discord.Client):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# create the background task and run it in the background
self.bg_task = self.loop.create_task(self.my_background_task())
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
voiceChannel = client.get_channel(config.voicechannel_id)
overwrite = voiceChannel.overwrites_for(voiceChannel.guild.default_role)
if (overwrite.connect == False):
lock_state = True
else:
lock_state = False
expected_lock_state = isNowInTimePeriod(lock_job.at_time, unlock_job.at_time, datetime.datetime.now().time())
if (lock_state == False and expected_lock_state == True):
print("Locking because state is inconsistent")
await lock()
elif (lock_state == True and expected_lock_state == False):
print("Unlocking because state is inconsistent")
await unlock()
async def on_message(self, message):
if (message.author.id != config.owner):
return
if (message.content.startswith('!lock')):
await lock()
await message.reply("✅ I've locked the voice channel!")
if message.content.startswith('!unlock'):
await unlock()
await message.reply("✅ I've unlocked the voice channel!")
async def my_background_task(self):
for key,value in config.warnings.items():
schedule.every().day.at(key).do(send_message, config.textchannel_id, value)
await self.wait_until_ready()
while not self.is_closed():
loop = asyncio.get_event_loop()
await schedule.run_pending()
await asyncio.sleep(3)
checkManually()  # register the daily lock/unlock jobs before the client starts

client = MyClient(intents=intents)
client.run(config.token)
| StarcoderdataPython |
11360176 | import copy
from plenum.test.test_node import ensureElectionsDone
from plenum.test.view_change.helper import add_new_node
from plenum.test.helper import checkViewNoForNodes
from plenum.test.pool_transactions.helper import demote_node
nodeCount = 6
old_commit = None
def test_future_primaries_replicas_increase(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_stewards, tdir, tconf, allPluginsPath):
# Don't delete NodeStates, so we could check them.
global old_commit
old_commit = txnPoolNodeSet[0].future_primaries_handler.commit_batch
for node in txnPoolNodeSet:
node.future_primaries_handler.commit_batch = lambda three_pc_batch, prev_handler_result=None: 0
initial_primaries = copy.copy(txnPoolNodeSet[0].primaries)
last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
starting_view_number = checkViewNoForNodes(txnPoolNodeSet)
# Increase replicas count
add_new_node(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards[0], tdir, tconf, allPluginsPath)
new_view_no = checkViewNoForNodes(txnPoolNodeSet)
assert new_view_no == starting_view_number + 1
# "seq_no + 2" because 1 domain and 1 pool txn.
state = txnPoolNodeSet[0].future_primaries_handler.node_states[-1]
assert len(state.primaries) == len(initial_primaries) + 1
assert len(state.primaries) == len(txnPoolNodeSet[0].primaries)
def test_future_primaries_replicas_decrease(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_stewards, tdir, tconf, allPluginsPath):
assert len(txnPoolNodeSet) == 7
initial_primaries = copy.copy(txnPoolNodeSet[0].primaries)
last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
starting_view_number = checkViewNoForNodes(txnPoolNodeSet)
# Decrease replicas count
demote_node(looper, sdk_wallet_stewards[-1], sdk_pool_handle, txnPoolNodeSet[-2])
txnPoolNodeSet.remove(txnPoolNodeSet[-2])
ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
new_view_no = checkViewNoForNodes(txnPoolNodeSet)
assert new_view_no == starting_view_number + 1
state = txnPoolNodeSet[0].future_primaries_handler.node_states[-1]
assert len(state.primaries) + 1 == len(initial_primaries)
assert len(state.primaries) == len(txnPoolNodeSet[0].primaries)
for node in txnPoolNodeSet:
node.future_primaries_handler.commit_batch = old_commit
| StarcoderdataPython |
11373738 | import tensorflow as tf
import numpy as np
from tqdm import trange
from utils.config_manager import Config
from data.datasets import ASRDataset
from utils.decorators import ignore_exception, time_it
from utils.scheduling import piecewise_linear_schedule
from utils.logging_utils import SummaryManager
from model.transformer_utils import create_mel_padding_mask
from utils.scripts_utils import dynamic_memory_allocation, basic_train_parser
from data.metadata_readers import post_processed_reader
np.random.seed(42)
tf.random.set_seed(42)
dynamic_memory_allocation()
@ignore_exception
@time_it
def validate(model,
val_dataset,
summary_manager):
val_loss = {'loss': 0.}
norm = 0.
for spk, mels, phonemes, mel_len, phon_len, fname in val_dataset.all_batches():
model_out = model.val_step(spk=spk,
mel_inp=mels,
phon_tar=phonemes,
mel_inp_len=mel_len,
phon_tar_len=phon_len)
norm += 1
val_loss['loss'] += model_out['loss']
val_loss['loss'] /= norm
summary_manager.display_loss(model_out, tag='Validation', plot_all=True)
summary_manager.display_attention_heads(model_out, tag='ValidationAttentionHeads')
# predict phonemes
for j, mel in enumerate(mels):
model_out = model.predict(mel[np.newaxis, :mel_len[j], ...])
pred_phon = model_out['encoder_output'][0]
pred_phon, _ = tf.nn.ctc_beam_search_decoder(pred_phon[:,np.newaxis,:], mel_len[j][np.newaxis,...], beam_width=20, top_paths=1)
iphon = model.text_pipeline.tokenizer.decode(pred_phon[0].values).replace('/', '')
iphon_tar = model.text_pipeline.tokenizer.decode(phonemes[j][:phon_len[j]]).replace('/', '')
summary_manager.display_audio(tag=f'Validation /{j} /{iphon_tar}', step=model.step,
mel=mel[:mel_len[j], :], description=iphon)
return val_loss['loss']
parser = basic_train_parser()
args = parser.parse_args()
config = Config(config_path=args.config, asr=True)
config_dict = config.config
config.create_remove_dirs(clear_dir=args.clear_dir,
clear_logs=args.clear_logs,
clear_weights=args.clear_weights)
config.dump_config()
config.print_config()
model = config.get_model()
config.compile_model(model)
train_data_handler = ASRDataset.from_config(config,
tokenizer=model.text_pipeline.tokenizer,
kind='train')
valid_data_handler = ASRDataset.from_config(config,
tokenizer=model.text_pipeline.tokenizer,
kind='valid')
train_dataset = train_data_handler.get_dataset(bucket_batch_sizes=config_dict['bucket_batch_sizes'],
bucket_boundaries=config_dict['bucket_boundaries'],
shuffle=True)
valid_dataset = valid_data_handler.get_dataset(bucket_batch_sizes=config_dict['val_bucket_batch_size'],
bucket_boundaries=config_dict['bucket_boundaries'],
shuffle=False,
drop_remainder=True)
# create logger and checkpointer and restore latest model
summary_manager = SummaryManager(model=model, log_dir=config.log_dir, config=config_dict)
checkpoint = tf.train.Checkpoint(step=tf.Variable(1),
optimizer=model.optimizer,
net=model)
manager = tf.train.CheckpointManager(checkpoint, config.weights_dir,
max_to_keep=config_dict['keep_n_weights'],
keep_checkpoint_every_n_hours=config_dict['keep_checkpoint_every_n_hours'])
manager_training = tf.train.CheckpointManager(checkpoint, str(config.weights_dir / 'latest'),
max_to_keep=1, checkpoint_name='latest')
checkpoint.restore(manager_training.latest_checkpoint)
if manager_training.latest_checkpoint:
print(f'\nresuming training from step {model.step} ({manager_training.latest_checkpoint})')
else:
print(f'\nstarting training from scratch')
if config_dict['debug'] is True:
print('\nWARNING: DEBUG is set to True. Training in eager mode.')
# main event
print('\nTRAINING')
test_spk, test_mel, test_phonemes, test_mel_len, test_phon_len, test_fname = valid_dataset.next_batch()
losses = []
t = trange(model.step, config_dict['max_steps'], leave=True)
for _ in t:
t.set_description(f'step {model.step}')
spk, mel, phonemes, mel_len, phon_len, fname = train_dataset.next_batch()
learning_rate = piecewise_linear_schedule(model.step, config_dict['learning_rate_schedule'])
model.set_constants(learning_rate=learning_rate)
output = model.train_step(spk=spk,
mel_inp=mel,
phon_tar=phonemes,
mel_inp_len=mel_len,
phon_tar_len=phon_len)
losses.append(float(output['loss']))
t.display(f'step loss: {losses[-1]}', pos=1)
for pos, n_steps in enumerate(config_dict['n_steps_avg_losses']):
if len(losses) > n_steps:
t.display(f'{n_steps}-steps average loss: {sum(losses[-n_steps:]) / n_steps}', pos=pos + 2)
summary_manager.display_loss(output, tag='Train')
summary_manager.display_scalar(scalar_value=t.avg_time, tag='Meta/iter_time')
summary_manager.display_scalar(scalar_value=tf.shape(fname)[0], tag='Meta/batch_size')
summary_manager.display_scalar(tag='Meta/learning_rate', scalar_value=model.optimizer.lr)
if model.step % config_dict['train_images_plotting_frequency'] == 0:
summary_manager.display_attention_heads(output, tag='TrainAttentionHeads')
pred_phon = output['encoder_output'][0]
pred_phon, _ = tf.nn.ctc_beam_search_decoder(pred_phon[:,np.newaxis,:], mel_len[0][np.newaxis,...], beam_width=20, top_paths=1)
iphon = model.text_pipeline.tokenizer.decode(pred_phon[0].values).replace('/', '')
iphon_tar = model.text_pipeline.tokenizer.decode(phonemes[0]).replace('/', '')
summary_manager.display_audio(tag=f'Train /{0} /{iphon_tar}', step=model.step,
mel=mel[0][:mel_len[0], :], description=iphon)
if model.step % 1000 == 0:
save_path = manager_training.save()
if model.step % config_dict['weights_save_frequency'] == 0:
save_path = manager.save()
t.display(f'checkpoint at step {model.step}: {save_path}', pos=len(config_dict['n_steps_avg_losses']) + 2)
if model.step % config_dict['validation_frequency'] == 0:
t.display(f'Validating', pos=len(config_dict['n_steps_avg_losses']) + 3)
val_loss, time_taken = validate(model=model,
val_dataset=valid_dataset,
summary_manager=summary_manager)
t.display(f'validation loss at step {model.step}: {val_loss} (took {time_taken}s)',
pos=len(config_dict['n_steps_avg_losses']) + 3)
# if model.step % config_dict['prediction_frequency'] == 0 and (model.step >= config_dict['prediction_start_step']):
# for j in range(len(test_mel)):
# if j < config['n_predictions']:
# model_out = model.predict(test_mel[j])
# indices = tf.math.argmax(model_out['encoder_output'][j, ...], axis=-1)
# iphon = model.text_pipeline.tokenizer.decode(tf.gather_nd(indices, tf.where(indices > 0)))
# iphon_tar = " ".join(model.text_pipeline.tokenizer.decode(test_phonemes[j]))
# summary_manager.display_audio(tag=f'Test /{iphon}',
# mel=mel[j][:test_mel_len[j], :], description=iphon_tar)
print('Done.')
| StarcoderdataPython |
1995749 | import csv
import gzip
import re
import logging
import os
from dipper.sources.Source import Source
from dipper.models.Genotype import Genotype
from dipper.models.assoc.G2PAssoc import G2PAssoc
from dipper.models.Evidence import Evidence
from dipper.models.Provenance import Provenance
from dipper.models.Model import Model
LOG = logging.getLogger(__name__)
# Sometimes latest disappears
IMPCDL = 'ftp://ftp.ebi.ac.uk/pub/databases/impc/latest/csv'
GITHUBRAW = 'https://raw.githubusercontent.com/'
class IMPC(Source):
"""
From the [IMPC](http://mousephenotype.org) website:
The IMPC is generating a knockout mouse strain for every protein coding
gene by using the embryonic stem cell resource generated by the
International Knockout Mouse Consortium (IKMC).
Systematic broad-based phenotyping is performed by each IMPC center using
standardized procedures found within the
International Mouse Phenotyping Resource of Standardised Screens (IMPReSS)
resource. Gene-to-phenotype associations are made by a versioned
statistical analysis with all data freely available by this web portal and
by several data download features.
Here, we pull the data and model the genotypes using GENO and the
genotype-to-phenotype associations using the OBAN schema.
We use all identifiers given by the IMPC with a few exceptions:
* For identifiers that IMPC provides, but does not resolve,
we instantiate them as Blank Nodes.
Examples include things with the pattern of: UROALL, EUROCURATE, NULL-*,
* We mint three identifiers:
1. Intrinsic genotypes not including sex, based on:
* colony_id (ES cell line + phenotyping center)
* strain
* zygosity
2. For the Effective genotypes that are attached to the phenotypes:
* colony_id (ES cell line + phenotyping center)
* strain
* zygosity
* sex
3. Associations based on:
effective_genotype_id + phenotype_id + phenotyping_center +
pipeline_stable_id + procedure_stable_id + parameter_stable_id
We DO NOT yet add the assays as evidence for the G2P associations here.
To be added in the future.
"""
files = {
# 'impc': {
# 'file': 'IMPC_genotype_phenotype.csv.gz',
# 'url': IMPCDL + '/IMPC_genotype_phenotype.csv.gz'},
# 'euro': {
# 'file': 'EuroPhenome_genotype_phenotype.csv.gz',
# 'url': IMPCDL + '/EuroPhenome_genotype_phenotype.csv.gz'},
# 'mgd': {
# 'file': 'MGP_genotype_phenotype.csv.gz',
# 'url': IMPCDL + '/MGP_genotype_phenotype.csv.gz'},
# '3i': {
# 'file': '3I_genotype_phenotype.csv.gz',
# 'url': IMPCDL + '/3I_genotype_phenotype.csv.gz'},
'all': {
'file': 'ALL_genotype_phenotype.csv.gz',
'url': IMPCDL + '/ALL_genotype_phenotype.csv.gz',
'columns': [ # head -1 | tr ',' '\n' | sed "s|\(.*\)|'\1',|g"
'marker_accession_id',
'marker_symbol',
'phenotyping_center',
'colony_id',
'sex',
'zygosity',
'allele_accession_id',
'allele_symbol',
'allele_name',
'strain_accession_id',
'strain_name',
'project_name',
'project_fullname',
'pipeline_name',
'pipeline_stable_id',
'procedure_stable_id',
'procedure_name',
'parameter_stable_id',
'parameter_name',
'top_level_mp_term_id',
'top_level_mp_term_name',
'mp_term_id',
'mp_term_name',
'p_value',
'percentage_change',
'effect_size',
'statistical_method',
'resource_name'
]
},
'checksum': {
'file': 'checksum.md5',
'url': IMPCDL + '/checksum.md5'},
}
def __init__(self,
graph_type,
are_bnodes_skolemized,
data_release_version=None):
super().__init__(
graph_type=graph_type,
are_bnodes_skized=are_bnodes_skolemized,
data_release_version=data_release_version,
name='impc',
ingest_title='International Mouse Phenotyping Consortium',
ingest_url='http://www.mousephenotype.org',
ingest_logo='source-impc.png',
license_url=None,
data_rights=GITHUBRAW + 'mpi2/PhenotypeArchive/master/LICENSE',
file_handle=None
)
# TODO add a citation for impc dataset as a whole
# :impc cito:citesAsAuthority PMID:24194600
# self.dataset.citation()
if 'mouse' in self.all_test_ids:
self.gene_ids = self.all_test_ids['mouse']
else:
LOG.warning("not configured with gene test ids.")
self.gene_ids = []
def fetch(self, is_dl_forced=False):
self.get_files(is_dl_forced)
LOG.info("Verifying checksums...")
if self.compare_checksums():
LOG.debug('Files have same checksum as reference')
else:
raise Exception('Reference checksums do not match disk')
def parse(self, limit=None):
"""
IMPC data is delivered in three separate csv files OR
in one integrated file, each with the same file format.
:param limit:
:return:
"""
if limit is not None:
LOG.info("Only parsing first %s rows fo each file", str(limit))
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
# for f in ['impc', 'euro', 'mgd', '3i']:
for src_key in ['all']:
file = '/'.join((self.rawdir, self.files[src_key]['file']))
self._process_data(file, limit)
LOG.info("Finished parsing")
def _process_data(self, raw, limit=None):
LOG.info("Processing Data from %s", raw)
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
# Add the taxon as a class
taxon_id = self.globaltt['Mus musculus']
model.addClassToGraph(taxon_id, None)
# with open(raw, 'r', encoding="utf8") as csvfile:
col = self.files['all']['columns']
with gzip.open(raw, 'rt') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='\"')
row = next(reader) # presumed header
if not self.check_fileheader(col, row):
pass
for row in reader:
# | head -1 | tr ',' '\n' | sed "s|\(.*\)|# \1 = row[col.index('\1')]|g"
marker_accession_id = row[col.index('marker_accession_id')].strip()
marker_symbol = row[col.index('marker_symbol')].strip()
phenotyping_center = row[col.index('phenotyping_center')].strip()
colony_raw = row[col.index('colony_id')].strip()
sex = row[col.index('sex')].strip()
zygosity = row[col.index('zygosity')].strip()
allele_accession_id = row[col.index('allele_accession_id')].strip()
allele_symbol = row[col.index('allele_symbol')].strip()
# allele_name = row[col.index('allele_name')]
strain_accession_id = row[col.index('strain_accession_id')].strip()
strain_name = row[col.index('strain_name')].strip()
# project_name = row[col.index('project_name')]
project_fullname = row[col.index('project_fullname')].strip()
pipeline_name = row[col.index('pipeline_name')].strip()
pipeline_stable_id = row[col.index('pipeline_stable_id')].strip()
procedure_stable_id = row[col.index('procedure_stable_id')].strip()
procedure_name = row[col.index('procedure_name')].strip()
parameter_stable_id = row[col.index('parameter_stable_id')].strip()
parameter_name = row[col.index('parameter_name')].strip()
# top_level_mp_term_id = row[col.index('top_level_mp_term_id')]
# top_level_mp_term_name = row[col.index('top_level_mp_term_name')]
mp_term_id = row[col.index('mp_term_id')].strip()
mp_term_name = row[col.index('mp_term_name')].strip()
p_value = row[col.index('p_value')].strip()
percentage_change = row[col.index('percentage_change')].strip()
effect_size = row[col.index('effect_size')].strip()
statistical_method = row[col.index('statistical_method')].strip()
resource_name = row[col.index('resource_name')].strip()
if self.test_mode and marker_accession_id not in self.gene_ids:
continue
# ##### cleanup some of the identifiers ######
zygosity = zygosity.strip()
zygosity_id = self.resolve(zygosity)
if zygosity_id == zygosity:
LOG.warning(
"Zygosity '%s' unmapped. detting to indeterminate", zygosity)
zygosity_id = self.globaltt['indeterminate']
# colony ids sometimes have <> in them, spaces,
# or other non-alphanumerics and break our system;
# replace these with underscores
colony_id = '_:' + re.sub(r'\W+', '_', colony_raw)
if not re.match(r'MGI', allele_accession_id):
allele_accession_id = '_:IMPC-'+re.sub(
r':', '', allele_accession_id)
if re.search(r'EUROCURATE', strain_accession_id):
# the eurocurate links don't resolve at IMPC
# TODO blank nodes do not maintain identifiers
strain_accession_id = '_:' + strain_accession_id
elif not re.match(r'MGI', strain_accession_id):
LOG.info(
"Found a strange strain accession...%s", strain_accession_id)
strain_accession_id = 'IMPC:'+strain_accession_id
######################
# first, add the marker and variant to the graph as with MGI,
# the allele is the variant locus. IF the marker is not known,
# we will call it a sequence alteration. otherwise,
# we will create a BNode for the sequence alteration.
sequence_alteration_id = variant_locus_id = None
variant_locus_name = sequence_alteration_name = None
# extract out what's within the <> to get the symbol
if re.match(r'.*<.*>', allele_symbol):
sequence_alteration_name = re.match(
r'.*<(.*)>', allele_symbol)
if sequence_alteration_name is not None:
sequence_alteration_name = sequence_alteration_name.group(1)
else:
sequence_alteration_name = allele_symbol
if marker_accession_id is not None and marker_accession_id == '':
LOG.warning("Marker unspecified on row %d", reader.line_num)
marker_accession_id = None
if marker_accession_id is not None:
variant_locus_id = allele_accession_id
variant_locus_name = allele_symbol
variant_locus_type = self.globaltt['variant_locus']
geno.addGene(
marker_accession_id, marker_symbol, self.globaltt['gene'])
geno.addAllele(
variant_locus_id, variant_locus_name, variant_locus_type, None)
geno.addAlleleOfGene(variant_locus_id, marker_accession_id)
# TAG bnode
sequence_alteration_id = '_:seqalt' + re.sub(
r':', '', allele_accession_id)
geno.addSequenceAlterationToVariantLocus(
sequence_alteration_id, variant_locus_id)
else:
sequence_alteration_id = allele_accession_id
# IMPC contains targeted mutations with either gene traps,
# knockouts, insertion/intragenic deletions.
# but I don't really know what the SeqAlt is here,
# so I don't add it.
geno.addSequenceAlteration(
sequence_alteration_id, sequence_alteration_name)
# ############# BUILD THE COLONY #############
# First, let's describe the colony that the animals come from
# The Colony ID refers to the ES cell clone
# used to generate a mouse strain.
# <NAME>: we use this clone ID to track
# ES cell -> mouse strain -> mouse phenotyping.
# The same ES clone maybe used at multiple centers,
# so we have to concatenate the two to have a unique ID.
# some useful reading about generating mice from ES cells:
# http://ki.mit.edu/sbc/escell/services/details
# here, we'll make a genotype
# that derives from an ES cell with a given allele.
# the strain is not really attached to the colony.
# the colony/clone is reflective of the allele, with unknown zygosity
stem_cell_class = self.globaltt['embryonic stem cell line']
if colony_id is None:
print(colony_raw, stem_cell_class, "\nline:\t", reader.line_num)
model.addIndividualToGraph(colony_id, colony_raw, stem_cell_class)
# vslc of the colony has unknown zygosity
# note that we will define the allele
# (and it's relationship to the marker, etc.) later
# FIXME is it really necessary to create this vslc
# when we always know it's unknown zygosity?
vslc_colony = '_:'+re.sub(
r':', '', allele_accession_id + self.globaltt['indeterminate'])
vslc_colony_label = allele_symbol + '/<?>'
# for ease of reading, we make the colony genotype variables.
# in the future, it might be desired to keep the vslcs
colony_genotype_id = vslc_colony
colony_genotype_label = vslc_colony_label
geno.addGenotype(colony_genotype_id, colony_genotype_label)
geno.addParts(
allele_accession_id, colony_genotype_id,
self.globaltt['has_variant_part'])
geno.addPartsToVSLC(
vslc_colony, allele_accession_id, None,
self.globaltt['indeterminate'], self.globaltt['has_variant_part'])
graph.addTriple(
colony_id, self.globaltt['has_genotype'], colony_genotype_id)
# ########## BUILD THE ANNOTATED GENOTYPE ##########
# now, we'll build the genotype of the individual that derives
# from the colony/clone genotype that is attached to
# phenotype = colony_id + strain + zygosity + sex
# (and is derived from a colony)
# this is a sex-agnostic genotype
genotype_id = self.make_id(
(colony_id + phenotyping_center + zygosity + strain_accession_id))
geno.addSequenceDerivesFrom(genotype_id, colony_id)
# build the VSLC of the sex-agnostic genotype
# based on the zygosity
allele1_id = allele_accession_id
allele2_id = allele2_rel = None
allele1_label = allele_symbol
allele2_label = '<?>'
# Making VSLC labels from the various parts,
# can change later if desired.
if zygosity == 'heterozygote':
allele2_label = re.sub(r'<.*', '<+>', allele1_label)
allele2_id = None
elif zygosity == 'homozygote':
allele2_label = allele1_label
allele2_id = allele1_id
allele2_rel = self.globaltt['has_variant_part']
elif zygosity == 'hemizygote':
allele2_label = re.sub(r'<.*', '<0>', allele1_label)
allele2_id = None
elif zygosity == 'not_applicable':
allele2_label = re.sub(r'<.*', '<?>', allele1_label)
allele2_id = None
else:
LOG.warning("found unknown zygosity %s", zygosity)
break
vslc_name = '/'.join((allele1_label, allele2_label))
# Add the VSLC
vslc_id = '-'.join(
(marker_accession_id, allele_accession_id, zygosity))
vslc_id = re.sub(r':', '', vslc_id)
vslc_id = '_:'+vslc_id
model.addIndividualToGraph(
vslc_id, vslc_name,
self.globaltt['variant single locus complement'])
geno.addPartsToVSLC(
vslc_id, allele1_id, allele2_id, zygosity_id,
self.globaltt['has_variant_part'], allele2_rel)
# add vslc to genotype
geno.addVSLCtoParent(vslc_id, genotype_id)
# note that the vslc is also the gvc
model.addType(vslc_id, self.globaltt['genomic_variation_complement'])
# Add the genomic background
# create the genomic background id and name
if strain_accession_id != '':
genomic_background_id = strain_accession_id
else:
genomic_background_id = None
genotype_name = vslc_name
if genomic_background_id is not None:
geno.addGenotype(
genomic_background_id, strain_name,
self.globaltt['genomic_background'])
# make a phenotyping-center-specific strain
# to use as the background
pheno_center_strain_label = strain_name + '-' + phenotyping_center \
+ '-' + colony_raw
pheno_center_strain_id = '-'.join((
re.sub(r':', '', genomic_background_id),
re.sub(r'\s', '_', phenotyping_center),
re.sub(r'\W+', '', colony_raw)))
if not re.match(r'^_', pheno_center_strain_id):
# Tag bnode
pheno_center_strain_id = '_:' + pheno_center_strain_id
geno.addGenotype(
pheno_center_strain_id, pheno_center_strain_label,
self.globaltt['genomic_background'])
geno.addSequenceDerivesFrom(
pheno_center_strain_id, genomic_background_id)
# Making genotype labels from the various parts,
# can change later if desired.
# since the genotype is reflective of the place
# it got made, should put that in to disambiguate
genotype_name = \
genotype_name + ' [' + pheno_center_strain_label + ']'
geno.addGenomicBackgroundToGenotype(
pheno_center_strain_id, genotype_id)
geno.addTaxon(taxon_id, pheno_center_strain_id)
# this is redundant, but i'll keep in in for now
geno.addSequenceDerivesFrom(genotype_id, colony_id)
geno.addGenotype(genotype_id, genotype_name)
# Make the sex-qualified genotype,
# which is what the phenotype is associated with
sex_qualified_genotype_id = \
self.make_id((
colony_id + phenotyping_center + zygosity +
strain_accession_id + sex))
sex_qualified_genotype_label = genotype_name + ' (' + sex + ')'
sq_type_id = self.resolve(sex, False)
if sq_type_id == sex:
sq_type_id = self.globaltt['intrinsic_genotype']
LOG.warning(
"Unknown sex qualifier %s, adding as intrinsic_genotype",
sex)
geno.addGenotype(
sex_qualified_genotype_id, sex_qualified_genotype_label, sq_type_id)
geno.addParts(
genotype_id, sex_qualified_genotype_id,
self.globaltt['has_variant_part'])
if genomic_background_id is not None and genomic_background_id != '':
# Add the taxon to the genomic_background_id
geno.addTaxon(taxon_id, genomic_background_id)
else:
# add it as the genomic background
geno.addTaxon(taxon_id, genotype_id)
# ############# BUILD THE G2P ASSOC #############
# from an old email dated July 23 2014:
# Phenotypes associations are made to
# imits colony_id+center+zygosity+gender
# sometimes phenotype ids are missing. (about 711 early 2020)
if mp_term_id is None or mp_term_id == '':
LOG.warning(
"No phenotype id specified for row %d", reader.line_num)
continue
# hard coded ECO code
eco_id = self.globaltt['mutant phenotype evidence']
# the association comes as a result of a g2p from
# a procedure in a pipeline at a center and parameter tested
assoc = G2PAssoc(
graph, self.name, sex_qualified_genotype_id, mp_term_id)
assoc.add_evidence(eco_id)
# assoc.set_score(float(p_value))
# TODO add evidence instance using
# pipeline_stable_id +
# procedure_stable_id +
# parameter_stable_id
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
model._addSexSpecificity(assoc_id, self.resolve(sex))
# add a free-text description
try:
description = ' '.join((
mp_term_name, 'phenotype determined by', phenotyping_center,
'in an', procedure_name, 'assay where', parameter_name.strip(),
'was measured with an effect_size of',
str(round(float(effect_size), 5)),
'(p =', "{:.4e}".format(float(p_value)), ').'))
except ValueError:
description = ' '.join((
mp_term_name, 'phenotype determined by', phenotyping_center,
'in an', procedure_name, 'assay where', parameter_name.strip(),
'was measured with an effect_size of', str(effect_size),
'(p =', "{0}".format(p_value), ').'))
study_bnode = self._add_study_provenance(
phenotyping_center, colony_raw, project_fullname, pipeline_name,
pipeline_stable_id, procedure_stable_id, procedure_name,
parameter_stable_id, parameter_name, statistical_method,
resource_name)
evidence_line_bnode = self._add_evidence(
assoc_id, eco_id, p_value, percentage_change, effect_size,
study_bnode)
self._add_assertion_provenance(assoc_id, evidence_line_bnode)
model.addDescription(evidence_line_bnode, description)
# resource_id = resource_name
# assoc.addSource(graph, assoc_id, resource_id)
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def _add_assertion_provenance(
self,
assoc_id,
evidence_line_bnode
):
"""
Add assertion level provenance, currently always IMPC
:param assoc_id:
:param evidence_line_bnode:
:return:
"""
provenance_model = Provenance(self.graph)
model = Model(self.graph)
assertion_bnode = self.make_id(
"assertion{0}{1}".format(assoc_id, self.localtt['IMPC']), '_')
model.addIndividualToGraph(assertion_bnode, None, self.globaltt['assertion'])
provenance_model.add_assertion(
assertion_bnode, self.localtt['IMPC'],
'International Mouse Phenotyping Consortium')
self.graph.addTriple(
assoc_id, self.globaltt['is_asserted_in'], assertion_bnode)
self.graph.addTriple(
assertion_bnode,
self.globaltt['is_assertion_supported_by_evidence'], # "SEPIO:0000111"
evidence_line_bnode)
def _add_study_provenance(
self,
phenotyping_center,
colony,
project_fullname,
pipeline_name,
pipeline_stable_id,
procedure_stable_id,
procedure_name,
parameter_stable_id,
parameter_name,
statistical_method,
resource_name
):
"""
:param phenotyping_center: str, from self.files['all']
:param colony: str, from self.files['all']
:param project_fullname: str, from self.files['all']
:param pipeline_name: str, from self.files['all']
:param pipeline_stable_id: str, from self.files['all']
:param procedure_stable_id: str, from self.files['all']
:param procedure_name: str, from self.files['all']
:param parameter_stable_id: str, from self.files['all']
:param parameter_name: str, from self.files['all']
:param statistical_method: str, from self.files['all']
:param resource_name: str, from self.files['all']
:return: study bnode
"""
provenance_model = Provenance(self.graph)
model = Model(self.graph)
# Add provenance
# A study is a blank node equal to its parts
study_bnode = self.make_id("{0}{1}{2}{3}{4}{5}{6}{7}".format(
phenotyping_center, colony, project_fullname, pipeline_stable_id,
procedure_stable_id, parameter_stable_id, statistical_method,
resource_name), '_')
model.addIndividualToGraph(
study_bnode, None, self.globaltt['study'])
# List of nodes linked to study with has_part property
study_parts = []
# Add study parts
if procedure_stable_id in self.localtt:
procedure_stable_id2 = self.localtt[procedure_stable_id]
else:
procedure_stable_id2 = self.resolve(procedure_stable_id)
model.addIndividualToGraph(procedure_stable_id2, procedure_name)
study_parts.append(procedure_stable_id2)
study_parts.append(self.resolve(statistical_method))
provenance_model.add_study_parts(study_bnode, study_parts)
# Add parameter/measure statement: study measures parameter
parameter_label = "{0} ({1})".format(parameter_name, procedure_name)
logging.info("Adding Provenance for %s", project_fullname)
pram_id = self.resolve(parameter_stable_id)
model.addIndividualToGraph(pram_id, parameter_label)
provenance_model.add_study_measure(study_bnode, pram_id)
# Add Colony
colony_bnode = self.make_id("{0}".format(colony), '_')
model.addIndividualToGraph(colony_bnode, colony)
# Add study agent
phenotyping_center_id = self.localtt[phenotyping_center]
model.addIndividualToGraph(
phenotyping_center_id,
phenotyping_center,
self.globaltt['organization'])
# self.graph
model.addTriple(
study_bnode, self.globaltt['has_agent'], phenotyping_center_id)
if pipeline_stable_id in self.localtt:
pipeline_stable_id2 = self.localtt[pipeline_stable_id]
else:
pipeline_stable_id2 = self.resolve(pipeline_stable_id)
# add pipeline and project
model.addIndividualToGraph(pipeline_stable_id2, pipeline_name)
# self.graph
model.addTriple(study_bnode, self.globaltt['part_of'], pipeline_stable_id2)
if project_fullname in self.localtt:
project_fullname_id = self.localtt[project_fullname]
else:
project_fullname_id = self.resolve(project_fullname)
model.addIndividualToGraph(
project_fullname_id, project_fullname, self.globaltt['project'])
# self.graph
model.addTriple(study_bnode, self.globaltt['part_of'], project_fullname_id)
return study_bnode
def _add_evidence(
self,
assoc_id,
eco_id,
p_value,
percentage_change,
effect_size,
study_bnode
):
"""
:param assoc_id: assoc curie used to reify a
genotype to phenotype association, generated in _process_data()
:param eco_id: eco_id as curie, hardcoded in _process_data()
:param p_value: str, from self.files['all']
:param percentage_change: str, from self.files['all']
:param effect_size: str, from self.files['all']
:param study_bnode: str, from self.files['all']
:param phenotyping_center: str, from self.files['all']
:return: str, evidence_line_bnode as curie
"""
evidence_model = Evidence(self.graph, assoc_id)
provenance_model = Provenance(self.graph)
model = Model(self.graph)
# Add line of evidence
evidence_line_bnode = self.make_id(
"{0}{1}".format(assoc_id, study_bnode), '_')
evidence_model.add_supporting_evidence(evidence_line_bnode)
model.addIndividualToGraph(evidence_line_bnode, None, eco_id)
# Add supporting measurements to line of evidence
measurements = {}
        if p_value is not None and p_value != "":
p_value_bnode = self.make_id(
"{0}{1}{2}".format(evidence_line_bnode, 'p_value', p_value), '_')
model.addIndividualToGraph(p_value_bnode, None, self.globaltt['p-value'])
try:
measurements[p_value_bnode] = float(p_value)
except ValueError:
measurements[p_value_bnode] = p_value
if percentage_change is not None and percentage_change != '':
fold_change_bnode = self.make_id(
"{0}{1}{2}".format(
evidence_line_bnode, 'percentage_change', percentage_change), '_')
model.addIndividualToGraph(
fold_change_bnode, None, self.resolve('percentage_change'))
measurements[fold_change_bnode] = percentage_change
        if effect_size is not None and effect_size != "":
fold_change_bnode = self.make_id(
"{0}{1}{2}".format(
evidence_line_bnode, 'effect_size', effect_size), '_')
model.addIndividualToGraph(
fold_change_bnode, None, self.globaltt['effect size estimate'])
measurements[fold_change_bnode] = effect_size
evidence_model.add_supporting_data(evidence_line_bnode, measurements)
# Link evidence to provenance by connecting to study node
provenance_model.add_study_to_measurements(study_bnode, measurements.keys())
self.graph.addTriple(
evidence_line_bnode, self.globaltt['has_supporting_activity'],
study_bnode)
return evidence_line_bnode
def parse_checksum_file(self, file):
"""
:param file
:return dict
"""
checksums = dict()
file_path = '/'.join((self.rawdir, file))
col = ['checksum', 'whitespace', 'file_name']
with open(file_path, 'rt') as tsvfile:
reader = csv.reader(tsvfile, delimiter=' ')
for row in reader:
checksums[row[col.index('checksum')]] = row[col.index('file_name')]
return checksums
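
    # The checksum file is expected to look like `md5sum` output, e.g. (the hash
    # below is illustrative):
    #
    #     d41d8cd98f00b204e9800998ecf8427e  ALL_genotype_phenotype.csv.gz
    #
    # i.e. hash, whitespace, file name, which is why the column list above is
    # ['checksum', 'whitespace', 'file_name'].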
def compare_checksums(self):
"""
test to see if fetched file matches checksum from ebi
:return: True or False
"""
is_match = True
reference_checksums = self.parse_checksum_file(
self.files['checksum']['file'])
for md5, file in reference_checksums.items():
if os.path.isfile('/'.join((self.rawdir, file))):
if self.get_file_md5(self.rawdir, file) != md5:
is_match = False
LOG.warning('%s was not downloaded completely', file)
return is_match
return is_match
def getTestSuite(self):
import unittest
from tests.test_impc import EvidenceProvenanceTestCase
# TODO test genotypes
test_suite = unittest.TestLoader().loadTestsFromTestCase(
EvidenceProvenanceTestCase)
return test_suite
| StarcoderdataPython |
134158 | from os.path import join
import torchvision.datasets as datasets
__DATASETS_DEFAULT_PATH = '/media/ssd/Datasets/'
def get_dataset(name, train, transform, target_transform=None, download=True, datasets_path=__DATASETS_DEFAULT_PATH):
root = datasets_path # '/mnt/ssd/ImageNet/ILSVRC/Data/CLS-LOC' #os.path.join(datasets_path, name)
if name == 'cifar10':
cifar_ = datasets.CIFAR10(root=root, train=train, transform=transform, target_transform=target_transform, download=download)
return cifar_
elif name == 'cifar100':
cifar_ = datasets.CIFAR100(root=root, train=train, transform=transform, target_transform=target_transform, download=download)
return cifar_
elif name == 'imagenet':
if train:
root = join(root, 'train')
else:
root = join(root, 'val')
return datasets.ImageFolder(root=root, transform=transform, target_transform=target_transform)
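
# Usage sketch (the root path and transform are illustrative; torchvision will
# download CIFAR-10 on first use when download=True):
#
#     from torchvision import transforms
#     train_set = get_dataset('cifar10', train=True,
#                             transform=transforms.ToTensor(),
#                             datasets_path='/tmp/datasets')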
| StarcoderdataPython |
3347236 | <gh_stars>1-10
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains some utility functions for big integers."""
def num_to_bytes(n: int) -> bytes:
"""Converts a non-negative n into an unsigned big integer in big-endian."""
if n < 0:
raise OverflowError("number can't be negative")
if n == 0:
return b'\x00'
return n.to_bytes((n.bit_length() + 7) // 8, byteorder='big')
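
# Illustrative self-check (not part of the original module); demonstrates the
# minimal-width, big-endian behaviour documented above.
if __name__ == '__main__':
  assert num_to_bytes(0) == b'\x00'
  assert num_to_bytes(255) == b'\xff'
  assert num_to_bytes(256) == b'\x01\x00'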
| StarcoderdataPython |
11295839 | from django.urls import path
from rest_framework_simplejwt.views import (
TokenRefreshView,
)
from .views import (
RegisterAPIView,
LoginAPIView
)
urlpatterns = [
path('register', RegisterAPIView.as_view(), name='register'),
path('login', LoginAPIView.as_view(), name='login'),
path('refresh-token', TokenRefreshView.as_view(), name='refresh-token'),
]
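
# Resulting routes (sketch; the final URL prefix depends on where this urlconf
# is included, and the register/login views are assumed to accept POST):
#
#     /register       -> RegisterAPIView
#     /login          -> LoginAPIView
#     /refresh-token  -> TokenRefreshView, which expects {"refresh": "<token>"}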
| StarcoderdataPython |
6563052 | import olexparser.convert as convert
class TurTurSegmentSummary:
"""
A Class representing a Tur Tur Segment Summary.
    Each Tur Tur in the Turdata file lists 1 or more segment files associated with it.
A summary of the segment file is also stored in the Turdata file.
.. note::
These summaries may contain the only remaining data as the actual segment file may be empty.
Further analysis is needed to determine how a segment file becomes empty (0 bytes).
Each Segment Summary contains the following:
1. The segment number.
2. The number of 16 byte entries in the segment file.
3. The smallest latitude value found in the file.
4. The smallest longitude value found in the file.
5. The largest latitude value found in the file.
6. The largest longitude value found in the file.
7. The smallest unix timestamp value found in the file.
8. The largest unix timestamp value found in the file.
    :param seg_num: A number identifying the related Segment filename, i.e. if the segment number is 83 the filename
will be "segment83_A".
:type seg_num: int
:param num_entries: The number of entries in the segment file. Each entry is 16 bytes in length. This number
can be used to determine the expected file size of the segment.
:type num_entries: int
:param smallest_lat: An "Olex float" containing the smallest Latitude position in the segment file.
:type smallest_lat: float
:param smallest_long: An "Olex float" containing the smallest Latitude position in the segment file.
:type smallest_long: float
:param largest_lat: An "Olex float" containing the largest Latitude position in the segment file.
:type largest_lat: float
:param largest_long: An "Olex float" containing the largest Longitude position in the segment file.
:type largest_long: float
:param smallest_time: A Unix Timestamp of the first entry in the segment file.
:type smallest_time: int
:param largest_time: A Unix Timestamp of the last entry in the segment file.
:type largest_time: int
.. todo:: Determine how a segment file becomes 0 bytes
"""
def __init__(self, seg_num, num_entries, smallest_lat, smallest_long, largest_lat, largest_long, smallest_time,
largest_time):
"""A constructor method for the TurTurSegmentSummary.
:param seg_num: A number identifying the related Segment filename. i.e. if the segment number is 83 the
filename will be "segment83_A".
:type seg_num: int
:param num_entries: The number of entries in the segment file. Each entry is 16 bytes in length. This number
can be used to determine the expected file size of the segment.
:type num_entries: int
:param smallest_lat: An "Olex float" containing the smallest Latitude position in the segment file.
:type smallest_lat: float
:param smallest_long: An "Olex float" containing the smallest Latitude position in the segment file.
:type smallest_long: float
:param largest_lat: An "Olex float" containing the largest Latitude position in the segment file.
:type largest_lat: float
:param largest_long: An "Olex float" containing the largest Longitude position in the segment file.
:type largest_long: float
:param smallest_time: A Unix Timestamp of the first entry in the segment file.
:type smallest_time: int
:param largest_time: A Unix Timestamp of the last entry in the segment file.
:type largest_time: int
"""
self.warnings = []
self.seg_num = seg_num
self.num_entries = num_entries
self.smallest_lat = smallest_lat
self.smallest_long = smallest_long
self.largest_lat = largest_lat
self.largest_long = largest_long
self.smallest_time = smallest_time
self.largest_time = largest_time
return
def __str__(self):
"""A description of the contents of the TurTurSegmentSummary.
:return: A description of the contents of the TurTurSegmentSummary
:rtype: str
"""
s = "\nSegment number: {}".format(self.seg_num)
s = s + "\nNumber of entries in the segment file: {}".format(self.num_entries)
s = s + "\nSmallest Latitude float: {} Starting Latitude coordinate: {}".format(
self.smallest_lat, convert.get_lat_dmm(self.smallest_lat))
s = s + "\nSmallest Longitude float: {} Starting Longitude coordinate: {}".format(
self.smallest_long, convert.get_long_dmm(self.smallest_long))
s = s + "\nLargest Latitude float: {} End Latitude coordinate: {}".format(
self.largest_lat, convert.get_lat_dmm(self.largest_lat))
s = s + "\nLargest Longitude float: {} End Longitude coordinate: {}".format(
self.largest_long, convert.get_long_dmm(self.largest_long))
s = s + "\nSmallest Unix Timestamp: {} Starting time UTC: {}".format(
self.smallest_time, convert.get_timestamp_str_from_int(self.smallest_time))
s = s + "\nLargest Unix Timestamp: {} End time UTC: {}".format(
self.largest_time, convert.get_timestamp_str_from_int(self.largest_time))
return s
def get_warnings(self):
"""
:return: a list of warnings generated by the TurTurSegmentSummary
:rtype: list
"""
return self.warnings.copy()
def print_segment_summary(self):
"""Prints a description of the contents of the TurTurSegmentSummary.
"""
print()
print("Segment number: {}".format(self.seg_num))
print("Number of entries in the segment file: {}".format(self.num_entries))
print("Smallest Latitude float: {} Starting Latitude coordinate: {}".format(
self.smallest_lat, convert.get_lat_dmm(self.smallest_lat)))
print("Smallest Longitude float: {} Starting Longitude coordinate: {}".format(
self.smallest_long, convert.get_long_dmm(self.smallest_long)))
print("Largest Latitude float: {} End Latitude coordinate: {}".format(
self.largest_lat, convert.get_lat_dmm(self.largest_lat)))
print("Largest Longitude float: {} End Longitude coordinate: {}".format(
self.largest_long, convert.get_long_dmm(self.largest_long)))
print("Smallest Unix Timestamp: {} Starting time UTC: {}".format(
self.smallest_time, convert.get_timestamp_str_from_int(self.smallest_time)))
print("Largest Unix Timestamp: {} End time UTC: {}".format(
self.largest_time, convert.get_timestamp_str_from_int(self.largest_time)))
print()
return
def get_seg_num(self):
"""
:return: the segment number
:rtype: int
"""
return self.seg_num
def get_entries_num(self):
"""
:return: the number of entries in the segment file
:rtype: int
"""
return self.num_entries
def get_lat_start_float(self):
"""
:return: the "OLEX" float representing smallest latitude value
:rtype: float
"""
return self.smallest_lat
def get_long_start_float(self):
"""
:return: the "OLEX" float representing the smallest longitude value
:rtype: float
"""
return self.smallest_long
def get_lat_end_float(self):
"""
:return: the "OLEX" float representing the largest latitude value
:rtype: float
"""
return self.largest_lat
def get_long_end_float(self):
"""
:return: the "OLEX" float representing the largest longitude value
:rtype: float
"""
return self.largest_long
def get_time_start_int(self):
"""
:return: the Unix Timestamp integer representing the smallest time value.
:rtype: int
"""
return self.smallest_time
def get_time_end_int(self):
"""
:return: the Unix Timestamp integer representing the largest time value.
:rtype: int
"""
return self.largest_time
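
# Illustrative construction of a summary (not from a real Turdata file; every
# value below is made up purely to show the constructor signature):
if __name__ == "__main__":
    _example = TurTurSegmentSummary(
        seg_num=83, num_entries=120,
        smallest_lat=3600.0, smallest_long=-7200.0,
        largest_lat=3605.0, largest_long=-7195.0,
        smallest_time=1546300800, largest_time=1546304400)
    print(_example.get_seg_num(), _example.get_entries_num())
    print(_example.get_time_start_int(), _example.get_time_end_int())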
| StarcoderdataPython |
6628038 | from binding import *
from ..namespace import llvm
from ..ADT.StringRef import StringRef
from ..Module import Module
from ..LLVMContext import LLVMContext
llvm.includes.add('llvm/Bitcode/ReaderWriter.h')
ParseBitCodeFile = llvm.CustomFunction('ParseBitCodeFile',
'llvm_ParseBitCodeFile',
PyObjectPtr, # returns Module*
cast(bytes, StringRef),
ref(LLVMContext),
PyObjectPtr, # file-like object
).require_only(2)
WriteBitcodeToFile = llvm.CustomFunction('WriteBitcodeToFile',
'llvm_WriteBitcodeToFile',
PyObjectPtr, # return None
ptr(Module),
PyObjectPtr, # file-like object
)
getBitcodeTargetTriple = llvm.CustomFunction('getBitcodeTargetTriple',
'llvm_getBitcodeTargetTriple',
PyObjectPtr, # return str
cast(str, StringRef),
ref(LLVMContext),
PyObjectPtr, # file-like object
).require_only(2)
| StarcoderdataPython |
6483 | <gh_stars>0
#!/usr/bin/python
# mp4museum.org by <NAME> 2019
import os
import sys
import glob
from subprocess import Popen, PIPE
import RPi.GPIO as GPIO
FNULL = open(os.devnull, "w")
# setup GPIO pin
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(13, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
# functions to be called by event listener
def buttonPause(channel):
player.stdin.write("p")
def buttonNext(channel):
player.stdin.write("q")
# add event listener
GPIO.add_event_detect(11, GPIO.FALLING, callback = buttonPause, bouncetime = 234)
GPIO.add_event_detect(13, GPIO.FALLING, callback = buttonNext, bouncetime = 1234)
# please do not remove my logo screen
player = Popen(['omxplayer', '--adev', 'both', '/home/pi/mp4museum.mp4'],stdin=PIPE,stdout=FNULL)
player.wait()
# the loop
while(1):
for files in sorted(glob.glob(r'/media/*/*.mp4')):
player = Popen(['omxplayer','--adev', 'both',files],stdin=PIPE,stdout=FNULL)
player.wait()
| StarcoderdataPython |
260105 | <reponame>Mauricio1xtra/python
# 🚨 Don't change the code below 👇
height = input("enter your height in m: ")
weight = input("enter your weight in kg: ")
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
bmi = int(weight) / float(height) ** 2
bmi_as_int = int(bmi)
print(bmi_as_int)
#!Or
weight_as_int = int(weight)
height_as_float = float(height)
#TODO: Using the exponent operator
bmi = weight_as_int / height_as_float ** 2
#TODO: or using multiplication and PEMDAS
bmi = weight_as_int / (height_as_float * height_as_float) | StarcoderdataPython |
6631384 | <filename>alpyro_msgs/actionlib_tutorials/averagingresult.py<gh_stars>1-10
from alpyro_msgs import RosMessage, float32
class AveragingResult(RosMessage):
__msg_typ__ = "actionlib_tutorials/AveragingResult"
__msg_def__ = "ZmxvYXQzMiBtZWFuCmZsb2F0MzIgc3RkX2RldgoK"
__md5_sum__ = "d5c7decf6df75ffb4367a05c1bcc7612"
mean: float32
std_dev: float32
| StarcoderdataPython |
11243294 | # Generated by Django 4.0.2 on 2022-02-18 16:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('codiotix', '0004_alter_episode_video_alter_movie_image_and_more'),
]
operations = [
migrations.AddField(
model_name='webserie',
name='cast',
field=models.ManyToManyField(to='codiotix.Cast'),
),
]
| StarcoderdataPython |
11216232 | <filename>ritter.py
#!/usr/bin/env python3
from ritter.ritter import Ritter
ritter = Ritter()
ritter.start()
| StarcoderdataPython |
9771753 | import numpy as np
from mandlebrot import mandelbrot
def test_mandelbrot_small():
x = np.linspace(-2.25, 0.75, 10)
y = np.linspace(-1.25, 1.25, 10)
output = mandelbrot(x, y, 100, False)
assert output.shape == (10, 10) | StarcoderdataPython |
9669621 | <gh_stars>0
__description__ = \
"""
Class for displaying art on LED panels.
"""
__author__ = "<NAME>"
__date__ = "2017-01-01"
__all__ = ["display","art","sensors"]
from .art import ArtInstallation
from .display import Panel, Display
| StarcoderdataPython |
12801124 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the PyReach Logger interface."""
import logging # type: ignore
import queue # pylint: disable=unused-import
import threading
from typing import Callable, Dict, Optional, Tuple
from pyreach import core
from pyreach import logger
from pyreach.common.python import types_gen
from pyreach.impl import requester
from pyreach.impl import snapshot_impl
from pyreach.impl import thread_util
from pyreach.impl import utils
from pyreach.snapshot import Snapshot
class LoggerDevice(requester.Requester[core.PyReachStatus]):
"""Device for logger."""
_time_lock: threading.Lock
_start_times: Dict[str, int]
_task_time_lock: threading.Lock
_task_start_ts: Optional[int]
_task_end_ts: int
_task_state_closed: bool
_task_state: logger.TaskState
_task_state_queue: "queue.Queue[Optional[logger.TaskState]]"
_task_state_callback_manager: "thread_util.CallbackManager[logger.TaskState]"
def __init__(self) -> None:
"""Construct a logger."""
super().__init__()
self._time_lock = threading.Lock()
self._task_time_lock = threading.Lock()
self._start_times = {}
self._task_start_ts = None
self._task_end_ts = 0
self._task_state_closed = False
self._task_state = logger.TaskState.UNKNOWN
self._task_state_queue = queue.Queue()
self._task_state_callback_manager = thread_util.CallbackManager()
def _task_state_update_thread(self) -> None:
while True:
state = self._task_state_queue.get(block=True, timeout=None)
if state is None:
self._task_state_callback_manager.close()
return
self._task_state_callback_manager.call(state)
@property
def task_state(self) -> logger.TaskState:
"""Get the current task state."""
with self._task_time_lock:
return self._task_state
def add_task_state_update_callback(
self,
callback: Callable[[logger.TaskState], bool],
finished_callback: Optional[Callable[[],
None]] = None) -> Callable[[], None]:
"""Add a callback for the task state.
Args:
callback: Callback called when a new task state arrives. The callback
function should return False for continuous state update. When the
callback function returns True, it will stop receiving future updates.
finished_callback: Optional callback, called when the callback is stopped
or if the host is closed.
Returns:
A function that when called stops the callback.
"""
with self._task_time_lock:
return self._task_state_callback_manager.add_callback(
callback, finished_callback)
def start(self) -> None:
"""Start the logger device."""
super().start()
self.run(self._task_state_update_thread)
def close(self) -> None:
"""Close the logger device."""
with self._task_time_lock:
if not self._task_state_closed:
self._task_state_closed = True
self._task_state_queue.put(None)
super().close()
def get_message_supplement(
self, msg: types_gen.DeviceData) -> Optional[core.PyReachStatus]:
"""Get additional message."""
if (msg.device_type == "client-annotation" and not msg.device_name and
msg.data_type == "cmd-status"):
return utils.pyreach_status_from_message(msg)
if (msg.device_type == "server" and not msg.device_name and
msg.data_type == "metric" and msg.metric_value):
if msg.metric_value.key == "operator/task_start":
with self._task_time_lock:
self._task_state = logger.TaskState.TASK_STARTED
self._task_state_queue.put(self._task_state)
if msg.metric_value.key == "operator/task_end_seconds":
with self._task_time_lock:
self._task_state = logger.TaskState.TASK_ENDED
self._task_state_queue.put(self._task_state)
return None
def get_wrapper(self) -> Tuple["LoggerDevice", "logger.Logger"]:
"""Get wrapper for the device that should be shown to the user."""
return self, LoggerImpl(self)
def start_annotation_interval(
self,
name: str,
log_channel_id: str = ""
) -> ("queue.Queue[Optional[Tuple[types_gen.DeviceData, "
"Optional[core.PyReachStatus]]]]"):
"""Generate an event marking the start of an interval.
Args:
name: name of the interval.
log_channel_id: log channel of the interval.
Returns:
A queue of response.
"""
with self._time_lock:
self._start_times[log_channel_id] = utils.timestamp_now()
return self.send_tagged_request(
types_gen.CommandData(
ts=utils.timestamp_now(),
tag=utils.generate_tag(),
device_type="client-annotation",
data_type="client-annotation",
client_annotation=types_gen.ClientAnnotation(
interval_start=types_gen.IntervalStart(name=name))),
timeout=30)
def end_annotation_interval(
self,
name: str,
log_channel_id: str = "",
start_time: Optional[float] = None,
end_time: Optional[float] = None
) -> ("queue.Queue[Optional[Tuple[types_gen.DeviceData, "
"Optional[core.PyReachStatus]]]]"):
"""Generate an event marking the end of an interval.
Args:
name: name of the interval.
log_channel_id: log channel of the interval.
start_time: the start time of the interval.
end_time: the end time of the interval.
Returns:
A queue of response.
"""
with self._time_lock:
if start_time is None:
start_timestamp = self._start_times.get(log_channel_id,
utils.timestamp_now())
else:
start_timestamp = utils.timestamp_at_time(start_time)
end_timestamp = utils.timestamp_now(
) if end_time is None else utils.timestamp_at_time(end_time)
return self.send_tagged_request(
types_gen.CommandData(
ts=utils.timestamp_now(),
tag=utils.generate_tag(),
device_type="client-annotation",
data_type="client-annotation",
client_annotation=types_gen.ClientAnnotation(
interval_end=types_gen.IntervalEnd(
name=name, start_ts=start_timestamp,
end_ts=end_timestamp))),
timeout=30)
def start_task(self, event_params: Dict[str, str]) -> None:
"""Start a task.
Args:
event_params: custom parameters of the event.
"""
start_ts = 0
with self._task_time_lock:
if self._task_state_closed:
return
if self._task_start_ts is not None:
raise core.PyReachError("start_task when task is already started")
start_ts = self._task_start_ts = utils.timestamp_now()
self._task_state = logger.TaskState.UNKNOWN
self._task_state_queue.put(self._task_state)
self.send_cmd(
types_gen.CommandData(
ts=start_ts,
device_type="operator",
data_type="event-start",
event_params=sorted([
types_gen.KeyValue(key=key, value=value)
for key, value in event_params.items()
],
key=lambda obj: obj.key)))
def end_task(self, event_params: Dict[str, str]) -> None:
"""End a task.
Args:
event_params: custom parameters of the event.
"""
end_ts = 0
event_duration = 0.0
with self._task_time_lock:
if self._task_state_closed:
return
if self._task_start_ts is None:
raise core.PyReachError("end_task when task is not yet started")
end_ts = self._task_end_ts = utils.timestamp_now()
event_duration = float(self._task_end_ts - self._task_start_ts) / 1e3
self._task_state = logger.TaskState.UNKNOWN
self._task_state_queue.put(self._task_state)
self.send_cmd(
types_gen.CommandData(
ts=end_ts,
device_type="operator",
data_type="event",
event_name="pick",
event_duration=event_duration,
event_params=sorted([
types_gen.KeyValue(key=key, value=value)
for key, value in event_params.items()
],
key=lambda obj: obj.key)))
with self._task_time_lock:
self._task_start_ts = None
def send_snapshot(self, snapshot: Snapshot) -> None:
"""Send a snapshot.
Args:
snapshot: The snapshot to send.
"""
self.send_cmd(
types_gen.CommandData(
ts=utils.timestamp_now(),
device_type="client-annotation",
data_type="client-annotation",
client_annotation=types_gen.ClientAnnotation(
snapshot_annotation=types_gen.SnapshotAnnotation()),
snapshot=snapshot_impl.convert_snapshot(snapshot)))
class LoggerImpl(logger.Logger):
"""A class for accessing logs."""
_device: LoggerDevice
def __init__(self, device: LoggerDevice) -> None:
"""Construct a logger implementation.
Args:
device: The device to log to.
"""
self._device = device
@property
def task_state(self) -> logger.TaskState:
"""Get the current task state."""
return self._device.task_state
def add_task_state_update_callback(
self,
callback: Callable[[logger.TaskState], bool],
finished_callback: Optional[Callable[[],
None]] = None) -> Callable[[], None]:
"""Add a callback for the task state.
Args:
      callback: Callback called when a new task state arrives. The callback
        should return False to keep receiving state updates; once it returns
        True, it will no longer receive any future updates.
finished_callback: Optional callback, called when the callback is stopped
or if the host is closed.
Returns:
A function that when called stops the callback.
"""
return self._device.add_task_state_update_callback(callback,
finished_callback)
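  # Minimal usage sketch for the callback API above (illustrative only; `impl`
  # and `desired_state` are placeholders, not defined in this file):
  #
  #   def on_state(state: logger.TaskState) -> bool:
  #     return state == desired_state  # returning True stops further updates
  #
  #   stop = impl.add_task_state_update_callback(on_state)
  #   ...
  #   stop()  # explicitly unregister if the callback never returned True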
def wait_for_task_state(self,
state: logger.TaskState,
timeout: Optional[float] = None) -> bool:
"""Wait for a given task state.
Args:
state: the state to wait for.
timeout: optional timeout (in seconds) to wait for.
Returns:
True if the goal task state has been entered, false otherwise.
"""
q: "queue.Queue[None]" = queue.Queue()
found = False
def state_update(state_update: logger.TaskState) -> bool:
nonlocal found
found = found or (state_update == state)
return found
def finished() -> None:
q.put(None)
stop = self.add_task_state_update_callback(state_update, finished)
try:
q.get(block=True, timeout=timeout)
except queue.Empty:
pass
stop()
return found
def start_task(self, event_params: Dict[str, str]) -> None:
"""Start a task.
Args:
event_params: custom parameters of the event.
"""
self._device.start_task(event_params)
def end_task(self, event_params: Dict[str, str]) -> None:
"""End a task.
Args:
event_params: custom parameters of the event.
"""
self._device.end_task(event_params)
def send_snapshot(self, snapshot: Snapshot) -> None:
"""Send a snapshot.
Args:
snapshot: The snapshot to send.
"""
self._device.send_snapshot(snapshot)
def start_annotation_interval(self,
name: str,
log_channel_id: str = "") -> core.PyReachStatus:
"""Start an annotation interval.
Args:
name: Name of the annotation interval.
log_channel_id: channel ID of the log.
Returns:
Return status of the call.
"""
q = self._device.start_annotation_interval(name, log_channel_id)
msgs = thread_util.extract_all_from_queue(q)
if not msgs:
return core.PyReachStatus(
utils.timestamp_now(), status="rejected", error="timeout")
if len(msgs) != 1:
logging.warning("expected single message, got %d", len(msgs))
result = msgs[0][1]
if result is None:
return core.PyReachStatus(
utils.timestamp_now(), status="rejected", error="timeout")
return result
def async_start_annotation_interval(
self,
name: str,
log_channel_id: str = "",
callback: Optional[Callable[[core.PyReachStatus], None]] = None) -> None:
"""Generate a message that marks the start of an annotation interval.
Non-blocking.
Args:
name: the interval name.
log_channel_id: channel id for this log entry.
callback: callback when status is received.
"""
q = self._device.start_annotation_interval(name, log_channel_id)
self._device.queue_to_error_callback(q, callback, callback)
def end_annotation_interval(
self,
name: str,
log_channel_id: str = "",
start_time: Optional[float] = None,
end_time: Optional[float] = None) -> core.PyReachStatus:
"""Generate a message that marks the end of an annotation interval.
    Blocking version.
Args:
name: Name of the interval.
log_channel_id: logging channel of the interval.
start_time: start time of the interval.
end_time: end time of the interval.
Returns:
Status of the command.
"""
q = self._device.end_annotation_interval(name, log_channel_id, start_time,
end_time)
msgs = thread_util.extract_all_from_queue(q)
if not msgs:
return core.PyReachStatus(
utils.timestamp_now(), status="rejected", error="timeout")
if len(msgs) != 1:
logging.warning("expected single message, got %d", len(msgs))
result = msgs[0][1]
if result is None:
return core.PyReachStatus(
utils.timestamp_now(), status="rejected", error="timeout")
return result
def async_end_annotation_interval(
self,
name: str,
log_channel_id: str = "",
start_time: Optional[float] = None,
end_time: Optional[float] = None,
callback: Optional[Callable[[core.PyReachStatus], None]] = None) -> None:
"""Generate a message that marks the end of an annotation interval.
Non-blocking version.
Args:
name: Name of the interval.
log_channel_id: logging channel of the interval.
start_time: start time of the interval.
end_time: end time of the interval.
callback: callback when the request is complete.
"""
q = self._device.end_annotation_interval(name, log_channel_id, start_time,
end_time)
self._device.queue_to_error_callback(q, callback, callback)
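# Minimal pairing sketch for the annotation-interval API above (illustrative
# only; `impl` stands for an already-constructed LoggerImpl and the interval
# name is made up):
#
#   status = impl.start_annotation_interval("pick-attempt")
#   if status.status != "rejected":
#     ...  # do the work covered by the interval
#     impl.end_annotation_interval("pick-attempt")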
| StarcoderdataPython |
3387549 | <reponame>anuragpapineni/Hearthbreaker-evolved-agent<gh_stars>0
try:
import ctrnn # C++ extension
except ImportError:
print "CTRNN extension library not found!"
raise
def create_phenotype(chromo):
num_inputs = chromo.sensors
num_neurons = len(chromo.node_genes) - num_inputs
#num_outputs = chromo.actuators
network = ctrnn.CTRNN(num_inputs, num_neurons)
#network.set_rk4(0.01) # integration method
network.set_euler(0.01)
if chromo.node_genes[-1].activation_type == 'tanh':
network.set_logistic(False)
# create neurons
neuron_type = None
for ng in chromo.node_genes[num_inputs:]:
if ng.type == 'OUTPUT':
neuron_type = 1
else:
neuron_type = 0
#print 'Creating neuron: ', ng.id-num_inputs-1, ng.bias, ng.response, neuron_type
network.setNeuronParameters(ng.id-num_inputs-1, ng.bias, ng.response, neuron_type)
# create connections
for cg in chromo.conn_genes:
if cg.enabled:
if cg.innodeid-1 < num_inputs:
# set sensory input
network.set_sensory_weight(cg.innodeid-1, cg.outnodeid-num_inputs-1, cg.weight)
#print "Sensory: ", cg.innodeid-1, cg.outnodeid-num_inputs-1, cg.weight
else:
# set interneuron connection
network.SetConnectionWeight(cg.innodeid-num_inputs-1, cg.outnodeid-num_inputs-1, cg.weight)
#print "Inter..: ", cg.innodeid-num_inputs, cg.outnodeid-num_inputs-1, cg.weight
return network
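# Illustrative usage sketch (the `chromosome` object comes from the surrounding
# NEAT code base and is not constructed in this file); a decoded network is
# driven the same way as the hand-built one in the __main__ block below:
#
#   net = create_phenotype(chromosome)
#   outputs = net.pactivate(sensor_values)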
if __name__ == "__main__":
# setting a network manually
network = ctrnn.CTRNN(0,2)
network.set_logistic(True)
network.set_euler(0.05) # integrate using Euler's method
#network.set_rk4(0.05)
network.setNeuronParameters(0, -2.75, 1.0, 1)
network.setNeuronParameters(1, -1.75, 1.0, 1)
network.set_neuron_state(0, -0.084000643)
network.set_neuron_state(1, -0.408035109)
network.SetConnectionWeight(0, 0, 4.5)
network.SetConnectionWeight(0, 1, -1.0)
network.SetConnectionWeight(1, 0, 1.0)
network.SetConnectionWeight(1, 1, 4.5)
print "%2.17f %2.17f" %(network.NeuronOutput(0), network.NeuronOutput(1))
for i in range(100000):
output = network.pactivate([])
print "%2.17f %2.17f" %(output[0], output[1])
| StarcoderdataPython |
9790467 | # Longest Substring Without Repeating Characters: https://leetcode.com/problems/longest-substring-without-repeating-characters/
# Given a string s, find the length of the longest substring without repeating characters.
# In order to solve this problem we can go through the list with a sliding windowand take a count of every char
# then we can simply reduce this down everytime that we have a char twice and then check for the longest answer
# while this is pretty much optimal there is a slight optimization you can make by keeping track
# of what index you saw the char at so you can quickly jump over and not have to manually remove all the chars
# up to that index you can just check if they are at a higher index
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
result = 0
left = 0
seen = set()
for right in range(len(s)):
while s[right] in seen:
seen.remove(s[left])
left += 1
result = max(result, right - left + 1)
seen.add(s[right])
return result
# This solution is straightforward and runs in O(n) time; the extra space is bounded by the alphabet,
# so it is O(1) for a fixed alphabet and O(len(alphabet)) in general.
# It is already near-optimal, but as noted above you can make a small change to jump the left pointer
# directly past the previous occurrence of the repeated char, ignoring occurrences at a lower index.
def lengthOfLongestSubstring(self, s: str) -> int:
if len(s) == 0:
return 0
result = 0
left = 0
seen = {}
for right in range(len(s)):
char = s[right]
# Basically move left if that char is still valid other wise
# keep the other left max that we have seen
if char in seen and seen[char] >= left and seen[char] < right:
left = seen[char] + 1
result = max(result, right - left + 1)
seen[char] = right
return result
# Score Card
# Did I need hints? Nope
# Did you finish within 30 min? 15
# Was the solution optimal? Yup
# Were there any bugs? None
# 5 5 5 5 = 5
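# Quick sanity-check sketch (illustrative; inputs are the classic LeetCode examples).
# Note the second definition above shadows the first, so this exercises the
# index-jumping version; expected outputs are 3, 1, 3 and 0.
if __name__ == "__main__":
    sol = Solution()
    for s in ("abcabcbb", "bbbbb", "pwwkew", ""):
        print(s, "->", sol.lengthOfLongestSubstring(s))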
| StarcoderdataPython |
5186278 | import re
import pytest
from dbpunctuator.utils import DEFAULT_ENGLISH_NER_MAPPING, NORMAL_TOKEN_TAG
from tests.common import cleaned_data, processed_data # noqa: F401
punctuations = list(DEFAULT_ENGLISH_NER_MAPPING.keys())
@pytest.mark.usefixtures("cleaned_data")
def test_data_cleaning(cleaned_data): # noqa: F811
checking_regex = r"\([^()]*\)"
for line in cleaned_data:
bracketed_texts = re.findall(checking_regex, line)
assert len(bracketed_texts) == 0
@pytest.mark.usefixtures("processed_data")
def test_training_data_generation(processed_data): # noqa: F811
for tokens, tags in zip(*processed_data):
last_token_is_punct = False
for token, tag in zip(tokens, tags):
assert not token.isdigit()
if last_token_is_punct:
assert token not in punctuations
if token in punctuations:
assert tag != NORMAL_TOKEN_TAG
last_token_is_punct = True
| StarcoderdataPython |
5105113 | <filename>egs/zeroth/s5/data/local/lm/buildLM/_scripts_/at_unicode.py
#
# Copyright 2017 Atlas Guide (Author : <NAME>)
#
# Apache 2.0
#
import unicodedata
import re
measureUnits = "".join(chr(i) for i in range(0xffff) if i >= 0x3380 and i<=0x33DD)
percents = ''.join(chr(i) for i in range(0xffff) \
if unicodedata.category(chr(i)) == 'Po' and re.search('PERCENT', unicodedata.name(chr(i))))
currencies = "".join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) == 'Sc')
quatation = ''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) in ['Pc', 'Pd', 'Pe', 'Pf', 'Pi',
'Po', 'Ps'] and re.search('QUOTATION', unicodedata.name(chr(i))))
apostrophe = ''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) in ['Pc', 'Pd', 'Pe', 'Pf', 'Pi',
'Po', 'Ps'] and re.search('APOSTROPHE', unicodedata.name(chr(i))))
userDefines = "-~+=%/:;"
puctuations = ".,?!'"
triangles = ''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) == 'So'
and re.search(' TRIANGLE\\b', unicodedata.name(chr(i))))
circles = ''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) == 'So'
and re.search(' CIRCLE\\b', unicodedata.name(chr(i))))
squares = ''.join(chr(i) for i in range(0xffff) if unicodedata.category(chr(i)) == 'So'
and re.search(' SQUARE\\b', unicodedata.name(chr(i))))
separators = triangles + circles + squares
valids = measureUnits + percents + currencies + userDefines + puctuations
invalids_chars = r"[^ \n가-힣0-9a-zA-Z" + re.escape(valids) + r"]+"
valids_chars = r"[ \n가-힣0-9a-zA-Z" + re.escape(valids) + r"]+"
chinese = re.compile(u'[⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]', re.UNICODE)
#3000-303F : punctuation
#3040-309F : hiragana
#30A0-30FF : katakana
#FF00-FFEF : Full-width roman + half-width katakana
#4E00-9FAF : Common and uncommon kanji
japanese = re.compile(u'[\u3040-\u309f\u30a0-\u30ff\uff00-\uffef\u4e00-\u9faf]', re.UNICODE)
userDefines_pronun={
'-': ['마이너스', '에서', '다시'],
'~': ['에서', '부터'],
'+': ['더하기', '플러스'],
#'=': ['는', '은', '이콜'],
'%': ['퍼센트', '프로', '퍼센티지'],
'/': ['나누기', '퍼', '슬래쉬'],
}
measureUnits_pronun = {
'㎀': ['피코 암페어'],
'㎁': ['나노 암페어'],
'㎂': ['마이크로 암페어'],
'㎃': ['밀리 암페어'],
'㎄': ['킬로 암페어'],
'㎅': ['킬로 바이트'],
'㎆': ['메가 바이트'],
'㎇': ['기가 바이트'],
'㎈': ['칼로리'],
'㎉': ['킬로 칼로리'],
'㎊': ['피코 페럿'],
'㎋': ['나노 페럿'],
'㎌': ['마이크로 페럿'],
'㎍': ['마이크로 그램'],
'㎎': ['밀리 그램'],
'㎏': ['킬로 그램'],
'㎐': ['헤르츠'],
'㎑': ['킬로 헤르츠'],
'㎒': ['메가 헤르츠'],
'㎓': ['기가 헤르츠'],
'㎔': ['킬로 헤르츠'],
'㎕': ['마이크로 리터'],
'㎖': ['밀리 리터'],
'㎗': ['데시 리터'],
'㎘': ['킬로 리터'],
'㎙': ['펨토 미터'],
'㎚': ['나노 미터'],
'㎛': ['마이크로 미터'],
'㎜': ['밀리 미터'],
'㎝': ['센티 미터'],
'㎞': ['킬로 미터'],
'㎟': ['제곱 밀리 미터'],
'㎠': ['제곱 센티 미터'],
'㎡': ['제곱 미터'],
'㎢': ['제곱 킬로 미터'],
'㎣': ['세 제곱 밀리 미터'],
'㎤': ['세 제곱 센티 미터'],
'㎥': ['세 제곱 미터'],
'㎦': ['세 제곱 킬로 미터'],
'㎧': ['미터 퍼 쎄크'],
'㎨': ['미터 퍼 제곱 쎄그'],
'㎩': ['파스칼'],
'㎪': ['킬로 파스칼'],
'㎫': ['메가 파스칼'],
'㎬': ['기가 파스칼'],
'㎭': ['라디안'],
'㎮': ['라디안 퍼 쎄크'],
'㎯': ['라디안 퍼 제곱 쎄크'],
'㎰': ['피코 쎄크'],
'㎱': ['나노 쎄크'],
'㎲': ['마이크로 쎄크'],
'㎳': ['밀리 쎄크'],
'㎴': ['피코 볼트'],
'㎵': ['나노 볼트'],
'㎶': ['마이크로 볼트'],
'㎷': ['밀리 볼트'],
'㎸': ['킬로 볼트'],
'㎹': ['메가 볼트'],
'㎺': ['피코 와트'],
'㎻': ['나노 와트'],
'㎼': ['마이크로 와트'],
'㎽': ['밀리 와트'],
'㎾': ['킬로 와트'],
'㎿': ['메가 와트'],
'㏀': ['킬로 옴'],
'㏁': ['메가 옴'],
'㏂': ['오전'],
'㏃': ['베크렐'],
'㏄': ['씨씨'],
'㏅': ['칸델라'],
'㏆': ['쿨롱 퍼 킬로 그램'],
'㏇': ['씨 오'],
'㏈': ['데시 벨'],
'㏉': ['그레이'],
'㏊': ['헥타르'],
'㏋': ['마력'],
'㏌': ['인치'],
'㏍': ['킬로 카이저'],
'㏎': ['킬로 미터'],
'㏏': ['킬로 톤'],
'㏐': ['루멘'],
'㏑': ['로그'],
'㏒': ['로그'],
'㏓': ['럭스'],
'㏔': ['밀리 바'],
'㏕': ['밀'],
'㏖': ['몰'],
'㏗': ['피 에이치'],
'㏘': ['오후'],
'㏙': ['피 피 엠'],
'㏚': ['피 알'],
'㏛': ['스테라디안'],
'㏜': ['시버트'],
'㏝': ['웨버']
}
currencies_pronun = {
'$': ['달러'],
'¢': ['센트'],
'£': ['파운드'],
'¤': ['화폐 표시'],
'¥': ['엔'],
'֏': ['드람'],
'؋': ['아프가니'],
'৲': ['루피 마크'],
'৳': ['루피 싸인'],
'৻': ['간다'],
'૱': ['루피'],
'௹': ['루피'],
'฿': ['바트'],
'៛': ['리엘'],
'₠': ['유로'],
'₡': ['콜론'],
'₢': ['크루제이로'],
'₣': ['프랑'],
'₤': ['리라'],
'₥': ['밀'],
'₦': ['나이라'],
'₧': ['페세타'],
'₨': ['루피'],
'₩': ['원'],
'₪': ['세겔'],
'₫': ['동'],
'€': ['유로'],
'₭': ['킵'],
'₮': ['터그릭'],
'₯': ['드라크마'],
'₰': ['페니'],
'₱': ['페소'],
'₲': ['과라니'],
'₳': ['오스트랄'],
'₴': ['리브니아'],
'₵': ['세디'],
'₶': ['토르노'],
'₷': ['스페스밀로'],
'₸': ['텐지'],
'₹': ['루피'],
'₺': ['리라'],
'₻': ['노르딕'],
'₼': ['마네'],
'₽': ['루블'],
'₾': ['라리'],
'꠸': ['루피'],
'﷼': ['리알'],
'﹩': ['달러'],
'$': ['달러'],
'¢': ['센트'],
'£': ['파운드'],
'¥': ['엔'],
'₩': ['원']
}
# TBD
# extracted from the corpus
validChars={
'℃': ['도', '도 섭씨', '도 씨'],
'㈜': ['주', '주식회사'],
'ρ': ['로'],
'μ': ['뮤', '마이크로'],
'µ': ['마이크로', '뮤'],
'W': ['와트'],
}
if __name__ == '__main__':
print(valids_chars)
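    # Illustrative normalization sketch (the sample sentence is made up): the
    # character classes defined above are typically applied like this, replacing
    # any character outside the allowed set before language-model training.
    sample = "가격은 $100 (약 ₩110,000) 입니다"
    print(re.sub(invalids_chars, ' ', sample))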
| StarcoderdataPython |
1819043 | <filename>workapp/models.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.db.models.fields.files import ImageField
# Create your models here.
class Contact(models.Model):
name = models.CharField(max_length=200, null=True)
email = models.EmailField(null=True)
subject = models.CharField(max_length=200, null=True)
message = models.TextField(null=True)
def __unicode__(self):
return self.name
class Meta:
db_table = "contact"
verbose_name = "Contact"
class UserRegister(models.Model):
id = models.AutoField(primary_key=True)
    user = models.OneToOneField(User, on_delete=models.CASCADE)  # on_delete needs a deletion rule, not a bool; CASCADE assumed
first_name = models.CharField(max_length=100, null=True)
last_name = models.CharField(max_length=100, null=True)
fathername = models.CharField(max_length=100, null=True)
date = models.DateField(max_length=200, null=True)
location = models.CharField(max_length=200, null=True)
zipcode = models.IntegerField(null=True)
gender = models.CharField(max_length=200, null=True)
phone = models.CharField(max_length=200, null=True)
qualification = models.CharField(max_length=200, null=True)
experience = models.CharField(max_length=100, null=True)
skills = models.CharField(max_length=100, null=True)
certification = models.CharField(max_length=100, null=True)
language = models.CharField(max_length=200, null=True)
photo = models.ImageField(upload_to='image/userphoto/', null=True, blank=False)
resume = models.FileField(upload_to='image/userresume/', null=True, blank=False)
def __unicode__(self):
return self.first_name
class Meta:
db_table = "user_registration"
verbose_name = "User Registration"
class CompanyRegister(models.Model):
id = models.AutoField(primary_key=True)
    user = models.OneToOneField(User, on_delete=models.CASCADE)
company_name = models.CharField(max_length=200, null=True)
jobpost = models.CharField(max_length=200, null=True)
phone = models.CharField(max_length=200, null=True)
website = models.CharField(max_length=200, null=True)
address= models.TextField(max_length=200, null=True)
def __unicode__(self):
return self.company_name
class Meta:
db_table = "company_registration"
verbose_name = "Company Registration"
class JobNotifications(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, null=True)
email = models.EmailField(null=True)
job_skills = models.CharField(max_length=200, null=True)
job_location = models.CharField(max_length=200, null=True)
def __unicode__(self):
return self.name
class Meta:
db_table = "Job_notification"
verbose_name = "Job notification"
class CandidateNotifications(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, null=True)
email = models.EmailField(max_length=200, null=True)
candidate_skills = models.CharField(max_length=200, null=True)
candidate_location = models.CharField(max_length=200, null=True)
def __unicode__(self):
return self.name
class Meta:
db_table = "Candidate_notification"
verbose_name = "Candidate notification"
class JobPost(models.Model):
id = models.AutoField(primary_key=True)
company_name = models.CharField(max_length=200, null=True)
post_name = models.CharField(max_length=100, null=True)
experience = models.CharField(max_length=200, null=True)
package = models.CharField(max_length=200, null=True)
location = models.CharField(max_length=200, null=True)
skills = models.CharField(max_length=200, null=True)
registered = models.CharField(max_length=100, null=True, default="False")
def __unicode__(self):
return self.company_name
class Meta:
db_table = "Job_Post"
verbose_name = "Job Post"
class Appliedjobs(models.Model):
    job_id = models.OneToOneField(JobPost, on_delete=models.CASCADE)
    candidate_id = models.OneToOneField(User, on_delete=models.CASCADE)
def __unicode__(self):
return self.job_id
class Meta:
db_table = "Applied_Jobs"
verbose_name = "Applied Jobs"
class SelectCandidate(models.Model):
    user_id = models.OneToOneField(UserRegister, on_delete=models.CASCADE)
    company_id = models.OneToOneField(CompanyRegister, on_delete=models.CASCADE)
def __unicode__(self):
        return self.user_id
class Meta:
db_table = "Selected_Candidate"
verbose_name = "Selected Candidate"
| StarcoderdataPython |
# Count "From:" sender addresses in a mailbox file and report the most frequent sender.
name = input("Enter file:")
if len(name) < 1:
name = "mbox-short.txt"
handle = open(name)
hist=dict()
for line in handle:
if line.startswith('From:'):
words=line.split()
if words[1] not in hist:
hist[words[1]]=1
else:
hist[words[1]]=hist[words[1]]+1
#print(hist)
nome=conta=None
for a,b in hist.items():
if conta==None or b>conta:
nome=a
conta=b
print(nome,conta)
# Alternative version using dict.get()
name = input("Enter file:")
if len(name) < 1:
name = "mbox-short.txt"
handle = open(name)
hist=dict()
for line in handle:
if line.startswith('From:'):
words=line.split()
hist[words[1]]=hist.get(words[1],0)+1
#print(hist)
nome=conta=None
for a,b in hist.items():
if conta==None or b>conta:
nome=a
conta=b
print(nome,conta)
| StarcoderdataPython |
57732 | <gh_stars>1-10
# Copyright 2017 The Australian National University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.jit import JitDriver
d = JitDriver(greens=[], reds='auto')
def arraysum(arr, sz):
sum = rffi.r_longlong(0)
for i in range(sz):
d.jit_merge_point()
sum += arr[i]
return sum
def setup(n):
lst, _ = rand_list_of(n)
arr = lltype.malloc(rffi.CArray(rffi.LONGLONG), n, flavor='raw')
for i, k in enumerate(lst):
arr[i] = k
return arr, rffi.cast(lltype.Unsigned, n)
def teardown(arr, n):
lltype.free(arr, 'raw')
def rand_list_of(n):
    # 32-bit random integers, extended to 64-bit (to avoid overflow in summation)
from random import randrange, setstate
init_state = (3, (
2147483648L, 3430835514L, 2928424416L, 3147699060L, 2823572732L, 2905216632L, 1887281517L, 14272356L,
1356039141L,
2741361235L, 1824725388L, 2228169284L, 2679861265L, 3150239284L, 657657570L, 1407124159L, 517316568L,
653526369L,
139268705L, 3784719953L, 2212355490L, 3452491289L, 1232629882L, 1791207424L, 2898278956L, 1147783320L,
1824413680L,
1993303973L, 2568444883L, 4228847642L, 4163974668L, 385627078L, 3663560714L, 320542554L, 1565882322L,
3416481154L,
4219229298L, 315071254L, 778331393L, 3961037651L, 2951403614L, 3355970261L, 102946340L, 2509883952L, 215897963L,
3361072826L, 689991350L, 3348092598L, 1763608447L, 2140226443L, 3813151178L, 2619956936L, 51244592L,
2130725065L,
3867113849L, 1980820881L, 2600246771L, 3207535572L, 257556968L, 2223367443L, 3706150033L, 1711074250L,
4252385224L,
3197142331L, 4139558716L, 748471849L, 2281163369L, 2596250092L, 2804492653L, 484240110L, 3726117536L,
2483815933L,
2173995598L, 3765136999L, 3178931194L, 1237068319L, 3427263384L, 3958412830L, 2268556676L, 360704423L,
4113430429L,
3758882140L, 3743971788L, 1685454939L, 488386L, 3511218911L, 3020688912L, 2168345327L, 3149651862L, 1472484695L,
2011779229L, 1112533726L, 1873931730L, 2196153055L, 3806225492L, 1515074892L, 251489714L, 1958141723L,
2081062631L,
3703490262L, 3211541213L, 1436109217L, 2664448365L, 2350764370L, 1285829042L, 3496997759L, 2306637687L,
1571644344L,
1020052455L, 3114491401L, 2994766034L, 1518527036L, 994512437L, 1732585804L, 2089330296L, 2592371643L,
2377347339L,
2617648350L, 1478066246L, 389918052L, 1126787130L, 2728695369L, 2921719205L, 3193658789L, 2101782606L,
4284039483L,
2704867468L, 3843423543L, 119359906L, 1882384901L, 832276556L, 1862974878L, 1943541262L, 1823624942L,
2146680272L,
333006125L, 929197835L, 639017219L, 1640196300L, 1424826762L, 2119569013L, 4259272802L, 2089277168L,
2030198981L,
2950559216L, 621654826L, 3452546704L, 4085446289L, 3038316311L, 527272378L, 1679817853L, 450787204L,
3525043861L,
3838351358L, 1558592021L, 3649888848L, 3328370698L, 3247166155L, 3855970537L, 1183088418L, 2778702834L,
2820277014L,
1530905121L, 1434023607L, 3942716950L, 41643359L, 310637634L, 1537174663L, 4265200088L, 3126624846L,
2837665903L,
446994733L, 85970060L, 643115053L, 1751804182L, 1480207958L, 2977093071L, 544778713L, 738954842L, 3370733859L,
3242319053L, 2707786138L, 4041098196L, 1671493839L, 3420415077L, 2473516599L, 3949211965L, 3686186772L,
753757988L,
220738063L, 772481263L, 974568026L, 3190407677L, 480257177L, 3620733162L, 2616878358L, 665763320L, 2808607644L,
3851308236L, 3633157256L, 4240746864L, 1261222691L, 268963935L, 1449514350L, 4229662564L, 1342533852L,
1913674460L,
1761163533L, 1974260074L, 739184472L, 3811507072L, 2880992381L, 3998389163L, 2673626426L, 2212222504L,
231447607L,
2608719702L, 3509764733L, 2403318909L, 635983093L, 4233939991L, 2894463467L, 177171270L, 2962364044L,
1191007101L,
882222586L, 1004217833L, 717897978L, 2125381922L, 626199402L, 3694698943L, 1373935523L, 762314613L, 2291077454L,
2111081024L, 3758576304L, 2812129656L, 4067461097L, 3700761868L, 2281420733L, 197217625L, 460620692L,
506837624L,
1532931238L, 3872395078L, 3629107738L, 2273221134L, 2086345980L, 1240615886L, 958420495L, 4059583254L,
3119201875L,
3742950862L, 891360845L, 2974235885L, 87814219L, 4067521161L, 615939803L, 1881195074L, 2225917026L, 2775128741L,
2996201447L, 1590546624L, 3960431955L, 1417477945L, 913935155L, 1610033170L, 3212701447L, 2545374014L,
2887105562L,
2991635417L, 3194532260L, 1565555757L, 2142474733L, 621483430L, 2268177481L, 919992760L, 2022043644L,
2756890220L,
881105937L, 2621060794L, 4262292201L, 480112895L, 2557060162L, 2367031748L, 2172434102L, 296539623L,
3043643256L,
59166373L, 2947638193L, 1312917612L, 1798724013L, 75864164L, 339661149L, 289536004L, 422147716L, 1134944052L,
1095534216L, 1231984277L, 239787072L, 923053211L, 1015393503L, 2558889580L, 4194512643L, 448088150L, 707905706L,
2649061310L, 3081089715L, 3432955562L, 2217740069L, 1965789353L, 3320360228L, 3625802364L, 2420747908L,
3116949010L,
442654625L, 2157578112L, 3603825090L, 3111995525L, 1124579902L, 101836896L, 3297125816L, 136981134L,
4253748197L,
3809600572L, 1668193778L, 4146759785L, 3712590372L, 2998653463L, 3032597504L, 1046471011L, 2843821193L,
802959497L,
3307715534L, 3226042258L, 1014478160L, 3105844949L, 3209150965L, 610876993L, 2563947590L, 2482526324L,
3913970138L,
2812702315L, 4281779167L, 1026357391L, 2579486306L, 402208L, 3457975059L, 1714004950L, 2543595755L, 2421499458L,
478932497L, 3117588180L, 1565800974L, 1757724858L, 1483685124L, 2262270397L, 3794544469L, 3986696110L,
2914756339L,
1952061826L, 2672480198L, 3793151752L, 309930721L, 1861137379L, 94571340L, 1162935802L, 3681554226L,
4027302061L,
21079572L, 446709644L, 1587253187L, 1845056582L, 3080553052L, 3575272255L, 2526224735L, 3569822959L,
2685900491L,
918305237L, 1399881227L, 1554912161L, 703181091L, 738501299L, 269937670L, 1078548118L, 2313670525L, 3495159622L,
2659487842L, 11394628L, 1222454456L, 3392065094L, 3426833642L, 1153231613L, 1234517654L, 3144547626L,
2148039080L,
3790136587L, 684648337L, 3956093475L, 1384378197L, 2042781475L, 759764431L, 222267088L, 3187778457L,
3795259108L,
2817237549L, 3494781277L, 3762880618L, 892345749L, 2153484401L, 721588894L, 779278769L, 3306398772L,
4221452913L,
1981375723L, 379087895L, 1604791625L, 1426046977L, 4231163093L, 1344994557L, 1341041093L, 1072537134L,
1829925137L,
3791772627L, 3176876700L, 2553745117L, 664821113L, 473469583L, 1076256869L, 2406012795L, 3141453822L,
4123012649L,
3058620143L, 1785080140L, 1181483189L, 3587874749L, 1453504375L, 707249496L, 2022787257L, 2436320047L,
602521701L,
483826957L, 821599664L, 3333871672L, 3024431570L, 3814441382L, 416508285L, 1217138244L, 3975201118L,
3077724941L,
180118569L, 3754556886L, 4121534265L, 3495283397L, 700504668L, 3113972067L, 719371171L, 910731026L, 619936911L,
2937105529L, 2039892965L, 3853404454L, 3783801801L, 783321997L, 1135195902L, 326690505L, 1774036419L,
3476057413L,
1518029608L, 1248626026L, 427510490L, 3443223611L, 4087014505L, 2858955517L, 1918675812L, 3921514056L,
3929126528L,
4048889933L, 1583842117L, 3742539544L, 602292017L, 3393759050L, 3929818519L, 3119818281L, 3472644693L,
1993924627L,
4163228821L, 2943877721L, 3143487730L, 4087113198L, 1149082355L, 1713272081L, 1243627655L, 3511633996L,
3358757220L,
3812981394L, 650044449L, 2143650644L, 3869591312L, 3719322297L, 386030648L, 2633538573L, 672966554L,
3498396042L,
3907556L, 2308686209L, 2878779858L, 1475925955L, 2701537395L, 1448018484L, 2962578755L, 1383479284L,
3731453464L,
3659512663L, 1521189121L, 843749206L, 2243090279L, 572717972L, 3400421356L, 3440777300L, 1393518699L,
1681924551L,
466257295L, 568413244L, 3288530316L, 2951425105L, 2624424893L, 2410788864L, 2243174464L, 1385949609L,
2454100663L,
1113953725L, 2127471443L, 1775715557L, 3874125135L, 1901707926L, 3152599339L, 2277843623L, 1941785089L,
3171888228L,
802596998L, 3397391306L, 1743834429L, 395463904L, 2099329462L, 3761809163L, 262702111L, 1868879810L,
2887406426L,
1160032302L, 4164116477L, 2287740849L, 3312176050L, 747117003L, 4048006270L, 3955419375L, 2724452926L,
3141695820L,
791246424L, 524525849L, 1794277132L, 295485241L, 4125127474L, 825108028L, 1582794137L, 1259992755L, 2938829230L,
912029932L, 1534496985L, 3075283272L, 4052041116L, 1125808104L, 2032938837L, 4008676545L, 1638361535L,
1649316497L,
1302633381L, 4221627277L, 1206130263L, 3114681993L, 3409690900L, 3373263243L, 2922903613L, 349048087L,
4049532385L,
3458779287L, 1737687814L, 287275672L, 645786941L, 1492233180L, 3925845678L, 3344829077L, 1669219217L,
665224162L,
2679234088L, 1986576411L, 50610077L, 1080114376L, 1881648396L, 3818465156L, 1486861008L, 3824208930L,
1782008170L,
4115911912L, 656413265L, 771498619L, 2709443211L, 1919820065L, 451888753L, 1449812173L, 2001941180L,
2997921765L,
753032713L, 3011517640L, 2386888602L, 3181040472L, 1280522185L, 1036471598L, 1243809973L, 2985144032L,
2238294821L,
557934351L, 347132246L, 1797956016L, 624L), None)
setstate(init_state)
return [rffi.r_longlong(randrange(-(1 << 31), (1 << 31) - 1)) for _ in range(n)]
def measure(N):
args = setup(N)
from time import time
t0 = time()
arraysum(*args)
t1 = time()
teardown(*args)
return t0, t1
def rpy_entry(N):
t0, t1 = measure(N)
# from rpython.rlib import rfloat
# print rfloat.double_to_string(t1 - t0, 'e', %(fprec)d, rfloat.DTSF_ADD_DOT_0)
return t1 - t0
if __name__ == '__main__':
import sys
t0, t1 = measure(int(sys.argv[1]))
print '%.15f' % (t1 - t0)
def target(*args):
from rpython.rlib.entrypoint import export_symbol
export_symbol(rpy_entry)
    return rpy_entry, [int]
| StarcoderdataPython |