id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
9775994 | import final_server as fs
import pytest
def test_circlePixelID():
    """Check circlePixelID against known rasterizations of radius-1 and radius-2 circles."""
    cases = [
        ([5, 5, 1],
         [[4, 5],
          [5, 4], [5, 5], [5, 6],
          [6, 5]]),
        ([5, 5, 2],
         [[3, 5],
          [4, 4], [4, 5], [4, 6],
          [5, 3], [5, 4], [5, 5], [5, 6], [5, 7],
          [6, 4], [6, 5], [6, 6],
          [7, 5]]),
    ]
    for circle_data, expected in cases:
        assert fs.circlePixelID(circle_data) == expected
| StarcoderdataPython |
9607564 | <gh_stars>1-10
class TermColors:
    """ANSI escape sequences for coloring terminal output.

    Wrap text between one of the color attributes and ``ENDC`` to colorize it.
    """
    RED = '\033[31m'
    GREEN = '\033[32m'
    YELLOW = '\033[33m'
    ENDC = '\033[0m'  # reset back to the terminal default
| StarcoderdataPython |
6620559 | <reponame>myutman/contracode<filename>scripts/hf_create_train_test.py
from pathlib import Path
import numpy as np
import pandas as pd
import pickle
import gzip
from tqdm.auto import tqdm
# Input: gzip-pickled list where entry i is a list of augmented variants of function i.
DATA_PICKLE_PATH = Path("data/codesearchnet_javascript/javascript_augmented.pickle.gz")
# Flattened dataframe cache so the pickle only has to be parsed once.
CACHE_PATH = Path("data/hf_data/augmented_pretrain_data_df.parquet")
TRAIN_OUT_PATH = Path("/data/paras/augmented_pretrain_df.train.pickle.gz")
TEST_OUT_PATH = Path("/data/paras/augmented_pretrain_df.test.pickle.gz")
TRAIN_OUT_TXT_PATH = Path("/data/paras/augmented_pretrain_df.train.txt")
TEST_OUT_TXT_PATH = Path("/data/paras/augmented_pretrain_df.test.txt")

if __name__ == "__main__":
    # Load the flattened (data_idx, text) dataframe, preferring the parquet cache.
    if CACHE_PATH.exists():
        print("Loading from cache")
        df = pd.read_parquet(CACHE_PATH)
    else:
        print("Loading from pickle")
        with gzip.open(DATA_PICKLE_PATH) as f:
            data = pickle.load(f)
        # Flatten: one row per augmented variant, tagged with its source index
        # so all variants of one function can stay on one side of the split.
        flattened_data = []
        for idx, x in enumerate(tqdm(data)):
            for item in x:
                flattened_data.append(dict(data_idx=idx, text=item))
        df = pd.DataFrame(flattened_data)
        del data, flattened_data  # free memory before writing the cache
        print("Saving cache file of dataframe")
        df.to_parquet(str(CACHE_PATH.resolve()), engine="pyarrow")
    # Split by source index (not by row) so augmented copies of one function
    # never leak across the train/test boundary; 10k source indices go to test.
    # NOTE(review): np.random is not seeded, so the split is not reproducible.
    data_idxs = np.asarray(list(set(df["data_idx"])))
    np.random.shuffle(data_idxs)
    test_idxs, train_idxs = data_idxs[:10000], data_idxs[10000:]
    # sample(frac=1) shuffles the rows within each split.
    train_df = df[df["data_idx"].isin(train_idxs)].sample(frac=1).reset_index(drop=True)
    test_df = df[df["data_idx"].isin(test_idxs)].sample(frac=1).reset_index(drop=True)
    print("Saving train data")
    train_df.to_pickle(TRAIN_OUT_PATH)
    print("Saving test data")
    test_df.to_pickle(TEST_OUT_PATH)
    # Also dump raw text, one example per line (e.g. for tokenizer training).
    train_txt = train_df["text"].tolist()
    test_txt = test_df["text"].tolist()
    print("Saving train text")
    with TRAIN_OUT_TXT_PATH.open("w") as f:
        f.write("\n".join(train_txt))
    print("Saving test text")
    with TEST_OUT_TXT_PATH.open("w") as f:
        f.write("\n".join(test_txt))
| StarcoderdataPython |
5128858 | <gh_stars>0
#!/usr/bin/env python
""" Tool for gathering statistics on progress of download. """
import argparse
import datetime
import os
import pathlib
import re
import time
import utils.solo_models
import utils.team_models
def monitor(module, args):
    """Print progress statistics for an ongoing profile download.

    Scans module.DATA_DIR in file-mtime order, anchors the timing on the file
    belonging to args.first_profile, counts every match file downloaded after
    it, and prints ETA / throughput figures.

    :param module: utils.solo_models or utils.team_models (supplies DATA_DIR
        and the Match model with its data_file() path helper)
    :param args: parsed CLI namespace with first_profile and total
    """
    profile_pattern = re.compile(r'matches_for_([0-9]+)\.csv$')
    start = None
    count = 0
    for filename in sorted(pathlib.Path(module.DATA_DIR).iterdir(), key=os.path.getmtime):
        m = profile_pattern.search(str(filename))
        # Count match files seen after the anchor (count was reset to 1 below).
        if count > 0 and m:
            count += 1
        if m and m.group(1) == args.first_profile:
            start = os.stat(module.Match.data_file(m.group(1))).st_mtime
            count = 1
    if start is None:
        # Bug fix: previously fell through to arithmetic on None and crashed
        # with a TypeError when the anchor profile file was absent.
        print('Profile {} not found in {}'.format(args.first_profile, module.DATA_DIR))
        return
    now = time.time()
    # Guard against a zero interval (anchor written this very instant).
    time_expended = max(now - start, 1e-9)
    time_expected = time_expended * args.total / count
    time_left = datetime.timedelta(seconds=int(time_expected - time_expended))
    expected_finish = (datetime.datetime.now() + time_left).strftime('%I:%M:%S')
    template = '{:18}: {:>8}'
    print(template.format('Expected finish', expected_finish))
    print(template.format('Time left', str(time_left)))
    print(template.format('Records done', count))
    print(template.format('Records left', args.total - count))
    template = '{:18}: {:>8.2f}'
    print(template.format('Users per second', count / time_expended))
if __name__ == '__main__':
    # Command line: klass first_profile total
    parser = argparse.ArgumentParser()
    parser.add_argument('klass', choices=('team', 'solo',), help="team or solo")
    parser.add_argument('first_profile', help="id of first profile downloaded")
    parser.add_argument('total', type=int, help="number of records to process")
    args = parser.parse_args()
    # Pick the model module matching the requested game mode.
    models = utils.team_models if args.klass == 'team' else utils.solo_models
    monitor(models, args)
| StarcoderdataPython |
4996110 | import json
import boto3
import os
# env variables
# env variables
# IAM instance profile attached to every builder EC2 instance we launch.
BUILDER_INSTANCE_PROFILE_ARN = os.environ['BUILDER_INSTANCE_PROFILE_ARN']
print('Loading function')
# Clients are created at import time so Lambda can reuse them across invocations.
ec2 = boto3.client('ec2')
ssm = boto3.client('ssm')
# Commands accepted in the incoming event's 'command' field.
COMMAND_START = 'start'
COMMAND_STOP = 'stop'
COMMAND_STATUS_EC2 = 'status_ec2'
COMMAND_STATUS_SSM = 'status_ssm'
# Coarse status values returned to the caller.
STATUS_STOPPED = 'STOPPED'
STATUS_STARTED = 'STARTED'
STATUS_IN_PROGRESS = 'IN PROGRESS'
def lambda_handler(event, context):
    """AWS Lambda entry point: dispatch a builder-instance command.

    :param event: dict with a 'command' key (start / stop / status_ec2 /
        status_ssm) plus command-specific parameters
    :param context: Lambda context object (unused)
    :return: the dict produced by the dispatched handler
    :raises Exception: when the command is unknown or the handler fails
    """
    # Log the received event
    print("Received event: " + json.dumps(event, indent=2))
    # Get parameters from the event
    command = event['command']
    handlers = {
        COMMAND_START: start_instance,
        COMMAND_STOP: stop_instance,
        COMMAND_STATUS_EC2: check_instance_status_ec2,
        COMMAND_STATUS_SSM: check_instance_status_ssm,
    }
    try:
        handler = handlers.get(command)
        if handler is None:
            raise Exception('Unknown command')
        return handler(event)
    except Exception as e:
        print(e)
        # Bug fix: the original re-raised a misleading, copy-pasted message
        # ('Error submitting Batch Job') and discarded the exception chain.
        message = 'Error handling command: ' + str(command)
        print(message)
        raise Exception(message) from e
def get_instance_status(state_name):
    """Map an EC2 instance state name onto the coarse STARTED/STOPPED/IN PROGRESS status."""
    if state_name == 'running':
        return STATUS_STARTED
    if state_name in ('terminated', 'stopped'):
        return STATUS_STOPPED
    return STATUS_IN_PROGRESS
def get_detailed_instance_status(status):
    """Map an EC2 instance-status-check result onto the coarse status values.

    :raises Exception: for any value outside the known check results
    """
    mapping = {
        'passed': STATUS_STARTED,
        'failed': STATUS_STOPPED,
        'insufficient-data': STATUS_IN_PROGRESS,
        'initializing': STATUS_IN_PROGRESS,
    }
    if status in mapping:
        return mapping[status]
    raise Exception('Unsupported detailed instance status: ' + status)
def start_instance(event):
    """Launch a single builder EC2 instance tagged AmiBuilder=True.

    :param event: dict with optional 'imageId', 'instanceType', 'keyName'
    :return: dict with the new 'instanceId' and status IN PROGRESS
    :raises Exception: when EC2 returns no instances
    """
    # Get parameters from event
    image_id = event.get('imageId', '')
    instance_type = event.get('instanceType', '')
    # NOTE(review): key_name is collected but unused while KeyName is commented out below.
    key_name = event.get('keyName', '')
    # TODO: validate parameters
    # Send command to the builder instance
    response = ec2.run_instances(
        ImageId=image_id,
        MinCount=1,
        MaxCount=1,
        InstanceType=instance_type,
        # KeyName = key_name,
        IamInstanceProfile={
            'Arn': BUILDER_INSTANCE_PROFILE_ARN
        },
        TagSpecifications=[{
            'ResourceType': 'instance',
            'Tags': [{
                'Key': 'AmiBuilder',
                'Value': 'True'
            }]
        }]
    )
    # Log response
    # print("Response: " + json.dumps(instances, indent=2))
    instances = response.get('Instances', [])
    if not instances:
        raise Exception('Instance creation error.')
    instance = instances[0]
    instance_id = instance['InstanceId']
    return {
        'instanceId': instance_id,
        'status': STATUS_IN_PROGRESS
    }
def stop_instance(event):
    """Terminate the builder EC2 instance.

    :param event: dict with 'instanceId'
    :return: dict with 'instanceId' and the coarse status of the termination
    :raises Exception: when EC2 reports no terminating instances
    """
    # Get parameters from event
    instance_id = event['instanceId']
    # Send command to the builder instance
    response = ec2.terminate_instances(
        InstanceIds=[instance_id]
    )
    # Log response
    # print("Response: " + json.dumps(response, indent=2))
    instances = response.get('TerminatingInstances', [])
    if not instances:
        raise Exception('Instance termination error.')
    state_name = instances[0].get('CurrentState', {}).get('Name', '')
    return {
        'instanceId': instance_id,
        'status': get_instance_status(state_name)
    }
def check_instance_status_ssm(event):
    """Report STARTED once the instance has registered with SSM, else IN PROGRESS.

    :param event: dict with 'instanceId'
    :return: dict with 'instanceId' and 'status'
    """
    instance_id = event['instanceId']
    response = ssm.describe_instance_information(
        Filters=[{
            'Key': 'InstanceIds',
            'Values': [instance_id]
        }]
    )
    # A non-empty information list means the SSM agent is up on the instance.
    registered = bool(response.get('InstanceInformationList', []))
    return {
        'instanceId': instance_id,
        'status': STATUS_STARTED if registered else STATUS_IN_PROGRESS
    }
def check_instance_status_ec2(event):
    """Report the EC2-level status of the builder instance.

    While the instance state is 'running', the result is only STARTED once
    the instance status checks have passed as well.

    :param event: dict with 'instanceId'
    :return: dict with 'instanceId' and 'status' (STARTED/STOPPED/IN PROGRESS)
    :raises Exception: when the describe calls return no usable data
    """
    # Get parameters from event
    instance_id = event['instanceId']
    # Send command to the builder instance
    response = ec2.describe_instances(
        InstanceIds=[instance_id]
    )
    # Log response
    # print("Response: " + json.dumps(response, indent=2))
    reservations = response.get('Reservations', [])
    if not reservations:
        raise Exception('Describe instances error.')
    reservation = reservations[0]
    instances = reservation.get('Instances', [])
    if not instances:
        raise Exception('No instance found with provided id')
    state_name = instances[0].get('State', {}).get('Name', '')
    status = get_instance_status(state_name)
    if status == STATUS_STARTED:
        # we have to wait until all status checks are passed
        response = ec2.describe_instance_status(InstanceIds=[instance_id])
        instance_statuses = response.get('InstanceStatuses', [])
        if not instance_statuses:
            raise Exception('Describe instance status error.')
        details = instance_statuses[0].get('InstanceStatus', {}).get('Details', [])
        if not details:
            raise Exception('Describe instance status error (missing details).')
        # NOTE(review): only the first Details entry is inspected — confirm it
        # is the reachability check this code intends to wait for.
        detailed_status = details[0].get('Status', '<empty>')
        status = get_detailed_instance_status(detailed_status)
    return {
        'instanceId': instance_id,
        'status': status
    }
| StarcoderdataPython |
266323 | from abc import ABC, abstractmethod
from typing import List, Tuple, Optional, Type
from functools import partial
import pkgutil
from io import BytesIO
import numpy as np
import torch
import pytorch_lightning as pl
from spacy.language import Language
from torch.utils.data import DataLoader, Dataset
from kogito.core.head import KnowledgeHead
from kogito.core.relation import (
KnowledgeRelation,
HEAD_TO_RELATION_MAP,
PHYSICAL_RELATIONS,
EVENT_RELATIONS,
SOCIAL_RELATIONS,
)
from kogito.core.processors.models.swem import SWEMHeadDataset, SWEMClassifier
from kogito.core.processors.models.distilbert import (
DistilBERTHeadDataset,
DistilBERTClassifier,
)
from kogito.core.processors.models.bert import BERTHeadDataset, BERTClassifier
# Target classes of the relation classifiers; index order must match the
# classifier output columns (see ModelBasedRelationMatcher.match).
RELATION_CLASSES = [PHYSICAL_RELATIONS, EVENT_RELATIONS, SOCIAL_RELATIONS]
class KnowledgeRelationMatcher(ABC):
    """Base class for relation matching.

    A relation matcher decides which knowledge relations should be generated
    for each knowledge head.
    """

    def __init__(self, name: str, lang: Optional[Language] = None) -> None:
        """Initialize relation matcher

        Args:
            name (str): Unique relation matcher name
            lang (Optional[Language], optional): Spacy language pipeline to use. Defaults to None.
        """
        self.name = name
        self.lang = lang

    @abstractmethod
    def match(
        self,
        heads: List[KnowledgeHead],
        relations: Optional[List[KnowledgeRelation]] = None,
        **kwargs
    ) -> List[Tuple[KnowledgeHead, KnowledgeRelation]]:
        """Match relations to given heads

        Args:
            heads (List[KnowledgeHead]): List of heads to match for.
            relations (Optional[List[KnowledgeRelation]], optional): Subset of relations to use for matching.
                Defaults to None.

        Raises:
            NotImplementedError: This method has to be implemented by
                concrete subclasses

        Returns:
            List[Tuple[KnowledgeHead, KnowledgeRelation]]: List of matched head, relation tuples
        """
        raise NotImplementedError
class SimpleRelationMatcher(KnowledgeRelationMatcher):
    """Matches relations using the static head-type -> relations lookup table."""

    def match(
        self,
        heads: List[KnowledgeHead],
        relations: List[KnowledgeRelation] = None,
        **kwargs
    ) -> List[Tuple[KnowledgeHead, KnowledgeRelation]]:
        """Pair every head with the relations registered for its head type.

        Args:
            heads: Heads to match for.
            relations: Optional subset filter; when given, only relations in
                this subset are kept.

        Returns:
            List of (head, relation) tuples.
        """
        head_relations = []
        # Improvement: filter with a set *membership test* instead of a set
        # *intersection* so the deterministic relation order from
        # HEAD_TO_RELATION_MAP is preserved (set iteration order is arbitrary).
        allowed = set(relations) if relations else None
        for head in heads:
            for relation in HEAD_TO_RELATION_MAP[head.type]:
                if allowed is None or relation in allowed:
                    head_relations.append((head, relation))
        return head_relations
class ModelBasedRelationMatcher(KnowledgeRelationMatcher):
    """Matches relations based on relation classifiers"""

    def __init__(
        self,
        name: str,
        dataset_class: Type[Dataset],
        model_class: Type[pl.LightningModule],
        model_path: str,
        batch_size: int = 64,
        lang: Optional[Language] = None,
    ) -> None:
        """Initialize a model based relation matcher

        Args:
            name (str): Unique relation matcher name
            dataset_class (Type[Dataset]): Dataset class to use
            model_class (Type[pl.LightningModule]): Model class to use
            model_path (str): Model path to load model from
            batch_size (int, optional): Batch size for inference. Defaults to 64.
            lang (Optional[Language], optional): Spacy lang pipeline. Defaults to None.
        """
        super().__init__(name, lang)
        self.dataset_class = dataset_class
        self.model_class = model_class
        self.model_path = model_path
        self.batch_size = batch_size
        # Weights are loaded once at construction time, not per match() call.
        self.model = model_class.from_pretrained(model_path)

    def match(
        self,
        heads: List[KnowledgeHead],
        relations: List[KnowledgeRelation] = None,
        **kwargs
    ) -> List[Tuple[KnowledgeHead, KnowledgeRelation]]:
        """Predict relation classes per head and expand them into relations.

        Each head gets a multi-label prediction over RELATION_CLASSES; every
        class with probability >= 0.5 is kept, falling back to the argmax
        class when none passes the threshold.
        """
        data = [str(head) for head in heads]
        dataset = self.dataset_class(data)
        dataloader = DataLoader(dataset, batch_size=self.batch_size)
        trainer = pl.Trainer()
        # predictions: one row per head, one probability per relation class.
        predictions = torch.cat(
            trainer.predict(self.model, dataloaders=dataloader)
        ).numpy()
        head_relations = []
        for head, prob in zip(heads, predictions):
            # Multi-label thresholding at 0.5.
            prediction = np.where(prob >= 0.5, 1, 0).tolist()
            pred_rel_classes = [
                RELATION_CLASSES[idx]
                for idx, pred in enumerate(prediction)
                if pred == 1
            ]
            if not pred_rel_classes:
                # No class passed the threshold: use the most probable one.
                pred_rel_classes = [RELATION_CLASSES[np.argmax(prob)]]
            for rel_class in pred_rel_classes:
                rels_to_match = rel_class
                if relations:
                    # NOTE(review): set intersection makes the output order
                    # arbitrary when a relation subset is supplied.
                    rels_to_match = set(rels_to_match).intersection(set(relations))
                for relation in rels_to_match:
                    head_relations.append((head, relation))
        return head_relations
class SWEMRelationMatcher(ModelBasedRelationMatcher):
    """Relation matcher based on Simple Word Embeddings (GloVes)"""

    def __init__(self, name: str, lang: Optional[Language] = None) -> None:
        # Bundled 100-d GloVe vocabulary shipped inside the package data.
        vocab = np.load(
            BytesIO(pkgutil.get_data(__name__, "data/vocab_glove_100d.npy")),
            allow_pickle=True,
        ).item()
        # partial() pre-binds vocab/lang so the dataset class keeps the
        # single-argument constructor the base class expects.
        dataset_class = partial(SWEMHeadDataset, vocab=vocab, lang=lang)
        model_class = SWEMClassifier
        model_path = "mismayil/kogito-rc-swem"
        super().__init__(
            name,
            dataset_class=dataset_class,
            model_class=model_class,
            model_path=model_path,
            lang=lang,
        )
class DistilBERTRelationMatcher(ModelBasedRelationMatcher):
    """Relation matcher backed by a fine-tuned DistilBERT head classifier."""

    def __init__(self, name: str, lang: Optional[Language] = None) -> None:
        super().__init__(
            name,
            dataset_class=DistilBERTHeadDataset,
            model_class=DistilBERTClassifier,
            model_path="mismayil/kogito-rc-distilbert",
            lang=lang,
        )
class BERTRelationMatcher(ModelBasedRelationMatcher):
    """Relation matcher backed by a fine-tuned BERT head classifier."""

    def __init__(self, name: str, lang: Optional[Language] = None) -> None:
        super().__init__(
            name,
            dataset_class=BERTHeadDataset,
            model_class=BERTClassifier,
            model_path="mismayil/kogito-rc-bert",
            lang=lang,
        )
class GraphBasedRelationMatcher(KnowledgeRelationMatcher):
    """Matches relations by reusing those present in a sample knowledge graph."""

    def match(
        self,
        heads: List[KnowledgeHead],
        relations: List[KnowledgeRelation] = None,
        **kwargs
    ) -> List[Tuple[KnowledgeHead, KnowledgeRelation]]:
        """Pair every head with each relation found in kwargs["sample_graph"],
        optionally restricted to the given relation subset. Without a sample
        graph, an empty list is returned."""
        sample_graph = kwargs.get("sample_graph")
        if not sample_graph:
            return []
        matched_rels = {kg.relation for kg in sample_graph}
        if relations:
            matched_rels &= set(relations)
        return [(head, relation) for head in heads for relation in matched_rels]
| StarcoderdataPython |
3238174 | #!/usr/bin/env python3
"""
Copyright (c) 2019 LINKIT, The Netherlands. All Rights Reserved.
Author(s): <NAME>
This software may be modified and distributed under the terms of the
MIT license. See the LICENSE file for details.
"""
import os
import sys
import argparse
from .main.make_backend import BackendAWS
from .main.tfconfig import terraform_provider
def main():
    """Entry point when run as ``python3 -m ${MODULE}``.

    Parses CLI arguments, determines the backend provider (explicitly or by
    inspecting the project's terraform configuration) and creates the
    remotestate backend.

    :return: 0 on success; exits with status 1 on failure
    """
    module_name = '.'.join(__loader__.name.split('.')[0:-1])
    argument_parser = argparse.ArgumentParser(
        prog=module_name,
        # Typo fix: "configration" -> "configuration" in the help text.
        description='Create a remotestate backend to hold infra state configuration'
    )
    argument_parser.add_argument('--git', action='store', nargs=1, required=True,
                                 help='GIT is the name of a local repository directory')
    argument_parser.add_argument('--provider', action='store', nargs=1, required=False,
                                 default=['auto'],
                                 help='PROVIDER used to create the remotestate. \
                                 Defaults to "auto"')
    args = argument_parser.parse_args(sys.argv[1:])
    git_directory = args.git[0]
    backend_provider = args.provider[0]
    if backend_provider == "auto":
        # Discover the provider from the project configuration.
        backend_provider = None
        # Check terraform first; no other auto configurations to check yet.
        terraform_provider_file = git_directory + '/terraform/provider.tf'
        if os.path.isfile(terraform_provider_file):
            backend_provider = terraform_provider(terraform_provider_file)
    if backend_provider is None:
        print('Cannot retrieve backend provider automatically')
        sys.exit(1)
    if backend_provider == "aws":
        backend = BackendAWS(git_directory=git_directory)
        backend.create()
    else:
        print('Unknown backend provider: ' + str(backend_provider))
        sys.exit(1)
    return 0

if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
1993377 | # import galry.plot as plt
from galry import *
import numpy as np
# fig = figure()
# Plot two random series in red and yellow, add a static label, and echo
# every left-click event to stdout.
X = np.random.randn(2, 1000)
plot(X, color=['r', 'y'])
text("Hello world!", coordinates=(.0, .9), is_static=True)

def callback(figure, parameters):
    """Debug handler: dump the figure and event parameters of a click."""
    # Bug fix: was the Python 2 statement ``print figure, parameters``,
    # which is a SyntaxError under Python 3.
    print(figure, parameters)

action('LeftClick', callback)
show()
| StarcoderdataPython |
5110347 | <filename>hmm2ancestral.py
'''
Created on Oct 20, 2015
@author: bardya
'''
import os
import argparse
import re
import sys
from Bio.SeqUtils import GC
def parse_args():
    """Define and evaluate the command-line interface of this tool.

    :return: argparse namespace with ``infilepath`` (open file handle or None)
    """
    arg_parser = argparse.ArgumentParser(
        description='Get the ancestral consensus sequence from a hmm file')
    arg_parser.add_argument(
        '-i',
        dest='infilepath',
        type=argparse.FileType('rt'),
        metavar='<hmm_file_path>',
        help='path to an hmm file')
    arg_parser.add_argument('--version', action='version', version='0.1')
    return arg_parser.parse_args()
def getConsensus(contig):
    """Extract the consensus residue column from an open HMM profile file.

    Match-state lines of the HMM file carry 26 whitespace-separated fields;
    field 23 (index 22) holds the consensus residue, which is upper-cased.

    Bug fix: the original ignored its ``contig`` parameter and read the
    global ``inputfile`` instead (and carried an unused ``enumerate``).

    :param contig: open text file handle positioned at the start of the HMM
    :return: list of single-character consensus residues
    """
    consensus_seq = []
    for line in contig:
        matchlist = re.split(r"\s+", line.strip())
        if len(matchlist) == 26:
            consensus_seq.append(matchlist[22].upper())
    return consensus_seq
def getSubjectName(inputname):
    """Return the subject name: the file name without its final extension.

    Generalizes the original behavior (which only handled names containing
    exactly one dot and returned None otherwise) to any file name; a name
    with no extension is returned unchanged.

    :param inputname: base name of the input file, e.g. "sample.hmm"
    :return: the name with the last extension stripped
    """
    return os.path.splitext(inputname)[0]
def list2fasta(seqname, seqlist, linelen=60):
    """Format a list of residue characters as a single FASTA record.

    Bug fix: the original mutated *seqlist* in place (inserting newline
    entries via a sentinel-element hack); this version is side-effect free
    while producing byte-identical output.

    :param seqname: sequence identifier placed after '>' in the header
    :param seqlist: list of single-character residues
    :param linelen: residues per sequence line (default 60)
    :return: the FASTA record as a string, without a trailing newline
    """
    seq = ''.join(seqlist)
    lines = [seq[i:i + linelen] for i in range(0, len(seq), linelen)]
    return ('>' + seqname + '\n' + '\n'.join(lines)).strip()
if __name__ == '__main__':
    # Parse CLI args, open the HMM file, and write the FASTA consensus to stdout.
    args = parse_args()
    try:
        inputfile = open(args.infilepath.name, 'r')
    except OSError:
        # Bug fix: was a bare ``except`` that printed and then fell through to
        # a NameError on ``inputfile``; now narrowed and exits explicitly.
        print('IOError occurred')
        sys.exit(1)
    seqname = getSubjectName(os.path.basename(args.infilepath.name))
    seqlist = getConsensus(inputfile)
    sys.stdout.write(list2fasta(seqname, seqlist, 60))
1826463 | from typing import Union
from .base import DependencyBase, dataclass
@dataclass
class Id(DependencyBase):
    """Dependency on the id of a single object.

    Usage:
        Attach one ``Id`` per object contained in a cached value, so that the
        cache entry can be invalidated whenever that object changes.

    Example:
        cache.put(
            'articles-list',
            [
                Article(id=1).asdict(),
                Article(id=2).asdict(),
            ],
            dependencies=[
                Id('article', 1),
                Id('article', 2),
            ]
        )
        cache.invalidate(
            Id('article', 1)
        )
    """
    type: str
    id: Union[int, str]
    __slots__ = 'type', 'id'
    PREFIX = 'id'

    def key(self) -> str:
        """Return the dependency cache key, e.g. ``id:article:1``."""
        return ':'.join((self.PREFIX, self.type, str(self.id)))
| StarcoderdataPython |
9664837 | '''#!/usr/bin/env python'''
# python imports
import requests
import datetime
# native imports
from bs4 import BeautifulSoup
# native module imports
from lib.citation import Citation
from lib.datafinder import Datafinder
"""
Scrapes a news article for its article files based on the stipulations in
Datafinder, formatting based on the stipulations in Citation.
Reuters provides one XML file for each day.
Each URL is scraped according to Datafinder and stored in a Citation object.
This information is then loaded into a .txt file.
This file is not meant to be run separately. It is a template
that provides functions that can be implemented.
"""
def getUpdateLastOpened(test=False, file_name="date.txt"):
    """Read the last-opened date and record today's date.

    The first line of *file_name* stores the date last opened as
    "year,month,day" (e.g. "2020,3,7"). Unless *test* is True, the file is
    rewritten with the current date in the same comma-separated format.

    Bug fix: the rewrite used ``f"{now.year},{now.month}.{now.day}"`` — a dot
    instead of a comma — which corrupted the file for the next comma-split.

    :param test: when True, do not update the stored date
    :param file_name: path of the date file
    :return: tuple of int (year, month, day) previously stored in the file
    """
    # read last opened date
    with open(file_name, "r") as txt:
        last_opened = txt.readline().split(",")
    # get current date
    now = datetime.datetime.now()
    # only update if test is False
    if not test:
        with open(file_name, "w+") as txt:
            txt.write(f"{now.year},{now.month},{now.day}")
    # return tuple of dates
    return (int(last_opened[0]), int(last_opened[1]), int(last_opened[2]))
def soup_to_citation(url, soup):
    """(str, Soup object) => Citation object
    Parses the soup from the url through the Datafinder. Stores the data
    in the Citation object, which is returned.
    """
    df = Datafinder(soup)
    citation = Citation()
    citation.authors = df.get_authors()
    citation.title = df.get_title()
    citation.access_date = datetime.datetime.now()  # timestamp of this scrape
    citation.publication_date = df.get_publication_date()
    citation.url = url
    citation.data = df.get_content()  # full article body text
    return citation
def parseSitemap(sitemap_url, url_matches):
    """(str, [list of str]) => list of str
    Requests the sitemap, turns it into soup and collects every article URL
    containing one of the url_matches substrings.

    Bug fix: on HTTP errors the original returned None, which crashed callers
    that iterate the result; an empty list is returned instead.
    """
    # request xml sitemap
    print(f"Attempting {sitemap_url} extraction...")
    sitemap = requests.get(sitemap_url)
    # check if the connection status is stable
    if sitemap.status_code == 404:
        print(f"XML Sitemap @ {sitemap_url} not real")
        return []
    elif sitemap.status_code == 200:
        print(f"Successful XML Extraction {sitemap_url}")
    else:
        print(f"Unknown error code. {sitemap.status_code}")
        return []
    # convert sitemap text into soup
    sitemap_soup = BeautifulSoup(sitemap.text, "html.parser")
    # find all instances of loc
    loc = sitemap_soup.find_all("loc")
    sitemap_urls = set()
    # check if any of the strings in url_matches is in the article url
    for article in loc:
        for match in url_matches:
            if match in article.string:
                sitemap_urls.add(article.string)
    return list(sitemap_urls)
def urltofile(article_url, last_opened, news_agency, test=False):
    """(str, (tuple of int), str, bool) => None
    Downloads one article, converts it into a Citation and writes it into
    the ../News/<news_agency>/ folder. When test is True, nothing is written.

    NOTE(review): last_opened is currently unused in this function.
    """
    # request article
    article = requests.get(article_url)
    if article.status_code == 404:
        print("Article @ %s not real" % article_url)
        # Bug fix: previously fell through and tried to parse the 404 body.
        return
    # convert to citation object
    citation = soup_to_citation(article_url,
                                BeautifulSoup(article.text,
                                              "html.parser"))
    # rids the title of any slashes (and whitespace) for a safe file name
    format_title = "".join(citation.title.split()).replace("/", "")
    # relative path to news agency folder
    path = (f"../News/{news_agency}/" + format_title + ".txt")
    if not test:
        with open(path, "w+") as file:
            file.write("Title: " + citation.title + "\n\n")
            file.write("URL: " + citation.url + "\n\n")
            file.write("Article: " + "\n\n")
            file.write(citation.data)
            file.write("\nPublication Date: " +
                       str(citation.publication_date) + "\n\n")
            file.write(f"Source: {news_agency}")
        print(f"Successful Extraction {citation.title}")
        return
    print(f"{citation.title}")
def main():
    """Drive one Al Jazeera scrape: ask for test mode, read the year's
    sitemap, and save every matching article to disk (unless testing)."""
    name = "AJZ"  # folder name under ../News/ used by urltofile
    # test determines whether or not it will write to a file
    select = input("Test? Y/N")
    if select == "Y":
        t = True
    elif select == "N":
        t = False
    else:
        # Any other answer aborts silently.
        return
    last_opened = getUpdateLastOpened(test = t)
    # NOTE(review): the sitemap is chosen by the stored year only — confirm
    # the "_1" suffix covers the desired date range.
    xmlurl = f"https://www.aljazeera.com/xml/sslsitemaps/sitemap{last_opened[0]}_1.xml"
    urlmatches = ["news", "indepth", "ajimpact"]
    # get list of article_urls from sitemap
    article_urls = parseSitemap(xmlurl, urlmatches)
    # scrape each article
    for article_url in article_urls:
        urltofile(article_url, last_opened, name, test = t)
    print("Program finished.")
| StarcoderdataPython |
3236904 | import k3d
import numpy as np
import pytest
from .plot_compare import *
from k3d.helpers import download
import vtk
from vtk.util import numpy_support
# Flat 4-vertex rectangle in the XZ plane, split into two triangles.
vertices = [
    -10, 0, -1,
    10, 0, -1,
    10, 0, 1,
    -10, 0, 1,
]
indices = [
    0, 1, 3,
    1, 2, 3
]

def test_mesh():
    """Render a minimal two-triangle mesh and compare against the reference image."""
    global vertices, indices
    prepare()
    mesh = k3d.mesh(vertices, indices)
    pytest.plot += mesh
    compare('mesh')
def test_mesh_attribute():
    """Render the rectangle colored by a per-vertex scalar through CoolWarm."""
    global vertices, indices
    prepare()
    vertex_attribute = [0, 1, 1, 0]
    mesh = k3d.mesh(vertices, indices, attribute=vertex_attribute,
                    color_map=k3d.basic_color_maps.CoolWarm, color_range=[0.0, 1.0])
    pytest.plot += mesh
    compare('mesh_attribute')
def test_mesh_advanced():
    """Render a downloaded STL model with flat (per-face) shading."""
    prepare()
    filename = download(
        'https://github.com/To-Fujita/Babylon.js_3D_Graphics/raw/master/scenes/stl/Cute%20Darth%20Vader.stl')
    reader = vtk.vtkSTLReader()
    reader.SetFileName(filename)
    reader.Update()
    mesh = k3d.vtk_poly_data(reader.GetOutput(), color=0x222222, flat_shading=True,
                             transform=k3d.transform(rotation=[np.pi / 2, 1, 0, 0]))
    pytest.plot += mesh
    compare('mesh_advanced')

def test_mesh_advanced_smoothed():
    """Same STL model with smooth (per-vertex) shading."""
    prepare()
    filename = download(
        'https://github.com/To-Fujita/Babylon.js_3D_Graphics/raw/master/scenes/stl/Cute%20Darth%20Vader.stl')
    reader = vtk.vtkSTLReader()
    reader.SetFileName(filename)
    reader.Update()
    mesh = k3d.vtk_poly_data(reader.GetOutput(), color=0x222222, flat_shading=False,
                             transform=k3d.transform(rotation=[np.pi / 2, 1, 0, 0]))
    pytest.plot += mesh
    compare('mesh_advanced_smoothed')

def test_mesh_advanced_opacity():
    """Same STL model rendered semi-transparent."""
    prepare()
    filename = download(
        'https://github.com/To-Fujita/Babylon.js_3D_Graphics/raw/master/scenes/stl/Cute%20Darth%20Vader.stl')
    reader = vtk.vtkSTLReader()
    reader.SetFileName(filename)
    reader.Update()
    mesh = k3d.vtk_poly_data(reader.GetOutput(), color=0x222222, flat_shading=False, opacity=0.5,
                             transform=k3d.transform(rotation=[np.pi / 2, 1, 0, 0]))
    pytest.plot += mesh
    compare('mesh_advanced_opacity')

def test_mesh_advanced_wireframe():
    """Same STL model rendered as a translucent wireframe."""
    prepare()
    filename = download(
        'https://github.com/To-Fujita/Babylon.js_3D_Graphics/raw/master/scenes/stl/Cute%20Darth%20Vader.stl')
    reader = vtk.vtkSTLReader()
    reader.SetFileName(filename)
    reader.Update()
    mesh = k3d.vtk_poly_data(reader.GetOutput(), color=0x222222, opacity=0.2, wireframe=True,
                             transform=k3d.transform(rotation=[np.pi / 2, 1, 0, 0]))
    pytest.plot += mesh
    compare('mesh_advanced_wireframe')
def test_mesh_attribute_advanced():
    """Render a procedurally generated torus colored by angle, then re-check with a clipping plane."""
    prepare()
    N = 100
    theta = np.linspace(0, 2.0 * np.pi, N)
    phi = np.linspace(0, 2.0 * np.pi, N)
    theta, phi = np.meshgrid(theta, phi)
    # Torus parametrization: c = distance to tube center, a = tube radius.
    c, a = 2, 1
    x = (c + a * np.cos(theta)) * np.cos(phi)
    y = (c + a * np.cos(theta)) * np.sin(phi)
    z = a * np.sin(theta)
    vertices = np.dstack([x, y, z]).astype(np.float32)
    # Two triangles per grid cell; the modulo wraps the last row/column around.
    indices = (np.stack([
        np.arange(N * N) + 0, np.arange(N * N) + N, np.arange(N * N) + N + 1,
        np.arange(N * N) + 0, np.arange(N * N) + N + 1, np.arange(N * N) + 1
    ]).T % (N * N)).astype(np.uint32)
    mesh = k3d.mesh(vertices, indices, flat_shading=False,
                    attribute=phi,
                    transform=k3d.transform(rotation=[np.pi / 2, 1, 0, 0]),
                    color_map=k3d.matplotlib_color_maps.twilight)
    pytest.plot += mesh
    compare('mesh_attribute_advanced')
    # Second snapshot: the same scene cut by a clipping plane.
    pytest.plot.clipping_planes = [
        [1, 1, 0, 0]
    ]
    compare('mesh_attribute_advanced_clipping_planes')
def test_mesh_triangle_attribute():
    """Color the STL model per-triangle by cell area computed via vtkMeshQuality."""
    prepare()
    filename = download(
        'https://github.com/To-Fujita/Babylon.js_3D_Graphics/raw/master/scenes/stl/Cute%20Darth%20Vader.stl')
    reader = vtk.vtkSTLReader()
    reader.SetFileName(filename)
    reader.Update()
    # Compute per-cell area into the 'Quality' cell array.
    qualityFilter = vtk.vtkMeshQuality()
    qualityFilter.SetInputData(reader.GetOutput())
    qualityFilter.SetTriangleQualityMeasureToArea()
    qualityFilter.SetQuadQualityMeasureToArea()
    qualityFilter.Update()
    mesh = k3d.vtk_poly_data(qualityFilter.GetOutput(), cell_color_attribute=('Quality', 0.0, 0.83),
                             transform=k3d.transform(rotation=[np.pi / 2, 1, 0, 0]))
    pytest.plot += mesh
    compare('mesh_triangle_attribute')
def test_mesh_volume_data():
    """Texture the STL model with values sampled from a 3D volume (.vti file)."""
    prepare()
    filename = download(
        'https://github.com/To-Fujita/Babylon.js_3D_Graphics/raw/master/scenes/stl/Cute%20Darth%20Vader.stl')
    reader = vtk.vtkSTLReader()
    reader.SetFileName(filename)
    reader.Update()
    poly = reader.GetOutput()
    # Second reader: the volume the mesh surface samples its colors from.
    reader = vtk.vtkXMLImageDataReader()
    reader.SetFileName('./test/assets/volume.vti')
    reader.Update()
    vti = reader.GetOutput()
    x, y, z = vti.GetDimensions()
    # Flatten VTK point data into a (z, y, x)-ordered float32 array.
    volume_data = numpy_support.vtk_to_numpy(
        vti.GetPointData().GetArray(0)
    ).reshape(-1, y, x).astype(np.float32)
    mesh = k3d.vtk_poly_data(poly, color=0xffffff, volume=volume_data,
                             transform=k3d.transform(rotation=[np.pi / 2, 1, 0, 0]),
                             volume_bounds=[-50, 150, -200, 100, -50, 250])
    pytest.plot += mesh
    compare('mesh_volume_data')
| StarcoderdataPython |
8146006 | <reponame>KelvinHong/landmark-tool
from tkinter.tix import WINDOW
from urllib.parse import _NetlocResultMixinBytes
from utils import *
from PIL import Image
import io
import os
import pandas as pd
import numpy as np
import json
import screeninfo
# Detect double monitor
# monitors = screeninfo.get_monitors()
# if len(monitors) >= 2:
#     w1, h1 = monitors[0].width, monitors[0].height
#     w2, h2 = monitors[1].width, monitors[1].height
#     WINDOW_LOC = (w1 + int(w2 / 8), int(h1 / 8))
# else:
# (None, None) lets the GUI toolkit pick the default window placement.
WINDOW_LOC = (None, None)
# The StateMachine functions is to keep track of the graph
# and to keep function call and future modification easier to implement.
# The StateMachine functions are a bit repeat themselves,
# not very elegant.
# Considering cleaning them in the future.
class WindowStateMachine():
    def __init__(self, window, data: dict = {}):
        """Bind the GUI window and unpack the per-session annotation state.

        NOTE(review): the mutable default ``data={}`` is risky in general,
        though the required-key lookups below fail fast without real data.

        :param window: the GUI window this state machine drives
        :param data: session settings (paths, pointer, graph size, landmark mode)
        """
        self.window = window
        self.real_mouses = []          # graph-space landmark rows: [index, x, y]
        self.ignore_warning1 = False
        # All data
        self.total_num_images = data['total_num_images']
        self.dir = data["dir"]                          # image root directory
        self.annotation_json = data["annotation_json"]  # annotation store path
        self.all_image_rel_paths = data["all_image_rel_paths"]
        self.pointer = data["pointer"]                  # index of current image
        self.image_gap = data["image_gap"]
        self.column_width = data["column_width"]
        self.dynamic_lm = data["dynamic_lm"]   # True: landmark count not fixed
        self.shift_mode = data["shift_mode"]
        self.graph_w = data["graph_w"]         # display graph size in pixels
        self.graph_h = data["graph_h"]
        self.store_mouse = None  # For storing mouse location in shift_mode
        # Load num_lm correctly if user have annotated in last session
        image_name = self.all_image_rel_paths[self.pointer]
        with open(self.annotation_json, "r") as f:
            d = json.load(f)
        if image_name in d:
            self.num_lm = len(d[image_name]["xy"])
        else:
            self.num_lm = 0
        # Take total_num_lm only when dynamic_lm setting is off.
        if not self.dynamic_lm:
            self.total_num_lm = data["total_num_lm"]
        # Take template_file if it is provided
        if "template_file" in data:
            self.template_file = data["template_file"]
    def load_image(self, request=None):
        """Load the current image into the graph and restore its landmarks.

        Updates the "you are labeling" text and the W/H info label, draws the
        resized image on the graph and — unless ``request == "redo"`` — reloads
        saved landmarks from the annotation JSON, refreshing both the landmark
        table and the drawn markers.

        :param request: "redo" to discard saved landmarks for this image
        :return: the relative path of the image that was loaded
        """
        # Renew image name.
        img_name = self.all_image_rel_paths[self.pointer]
        self.window["-IMAGETEXT-"].Update(f"You are labeling {img_name}")
        # Load current image for user to annotate
        im = Image.open(os.path.join(self.dir, img_name))
        w, h = im.size
        im = im.resize((self.graph_w, self.graph_h),
                       resample=Image.BICUBIC)
        with io.BytesIO() as output:
            im.save(output, format="PNG")
            data = output.getvalue()
        # Load image to graph
        graph = self.window["-GRAPH-"]
        graph.DrawImage(data=data, location=(0, 0))
        # If request is redo, do not load from JSON
        if request == "redo":
            self.num_lm = 0
            self.real_mouses = []
            table_values = []
        else:
            # Load points from JSON file if exist
            with open(self.annotation_json, "r") as f:
                data = json.load(f)
            if img_name in data:
                self.num_lm = len(data[img_name]["xy"])
                # real_mouses rows: [landmark_number, x, y] in graph coordinates
                self.real_mouses = [[
                    i + 1, *data[img_name]["mouse_xy"][i]
                ] for i in range(self.num_lm)]
                # table rows use the original image coordinates instead
                table_values = [[i + 1, *data[img_name]["xy"][i]]
                                for i in range(self.num_lm)]
            else:
                self.num_lm = 0
                self.real_mouses = []
                table_values = []
        # Update table
        self.window["-LMTABLE-"].update(values=table_values)
        # Show landmarks on graph
        for i in range(self.num_lm):
            graph.draw_circle(self.real_mouses[i][1:],
                              10,
                              fill_color='lightgreen',
                              line_color='darkgreen',
                              line_width=2)
            graph.draw_text(self.real_mouses[i][0], self.real_mouses[i][1:])
        # Update image width height info (original size, before resizing)
        self.image_original = [w, h]
        self.window["-IMAGE-INFO-"].Update(f"W = {w}px, H = {h}px",
                                           visible=True)
        return img_name
def load_template(self):
if hasattr(self, "template_file"):
try:
image = Image.open(self.template_file)
except:
sg.popup("The file you provided is not an image file. ",
location=WINDOW_LOC)
return False
image.thumbnail(self.window["-TEMPLATE-IMG-"].Size)
bio = io.BytesIO()
image.save(bio, format="PNG")
self.window["-TEMPLATE-IMG-"].update(filename=None,
data=bio.getvalue())
self.window["-TEMPLATE-FILE-"].update(self.template_file)
return True
    def window_init(self):
        """One-time setup when the annotation window opens."""
        # Fill mouse_xy in json file so saved landmarks can be drawn in graph space
        self.fill_json()
        # Draw the first image, table and progress bars
        self.renew_annotate()
        # Load template file if exists
        self.load_template()
def fill_json(self):
# Based on "xy" coordinates in json file, create "mouse_xy".
with open(self.annotation_json, "r") as f:
data = json.load(f)
for img_name, coor_info in data.items():
if coor_info["xy"] == []:
data[img_name]["mouse_xy"] = []
continue
im = Image.open(os.path.join(self.dir, img_name))
w, h = im.size
np_coorinfo = np.array(coor_info["xy"]) # Size (n, 2)
mouses = np.zeros(np_coorinfo.shape)
mouses[:, 0] = (np_coorinfo[:, 0] * self.graph_w / w).round()
mouses[:, 1] = (np_coorinfo[:, 1] * self.graph_h / h).round()
data[img_name]["mouse_xy"] = list(mouses)
pretty_dump(data, self.annotation_json)
    def renew_annotate(self, request=None):
        """
        Refresh the whole annotation view for the current image.

        [
        Update Image text
        Update Table
        Update image in Graph
        Plot landmarks if exist
        ] these are done in load_image.
        Additionally updates the annotated-image progress bar and the
        landmark progress bar (count only, when dynamic_lm is on).

        request is forwarded to load_image ("redo" discards saved points).
        """
        img_path = self.load_image(request=request)  # return value unused
        total_image_num = self.total_num_images
        # Disable arrow button if first or last
        # Next button style
        if self.pointer == total_image_num - 1:
            self.window['-NEXT-'].update(disabled=True)
        else:
            self.window['-NEXT-'].update(disabled=False)
        # Prev button style
        if self.pointer == 0:
            self.window['-PREV-'].update(disabled=True)
        else:
            self.window['-PREV-'].update(disabled=False)
        if not self.dynamic_lm:
            self.window["-LMPROGRESS-"].update(
                f"Landmark Progress: {self.num_lm}/{self.total_num_lm}")
            self.window["-LBAR-"].update(current_count=self.num_lm,
                                         max=self.total_num_lm)
        else:
            # No fixed target in dynamic mode, so only a count is shown
            self.window["-LMPROGRESS-"].update(
                f"Landmark Progress: {self.num_lm}")
        self.window["-ANNOPROGRESS-"].update(
            f"Annotation Progress: {self.pointer}/{total_image_num}")
        self.window["-PBAR-"].update(current_count=self.pointer,
                                     max=total_image_num)
def mouse_to_xy(self, mouse):
x, y = mouse[0], mouse[1]
ori_w, ori_h = self.image_original
x = int(x * ori_w / self.graph_w)
y = int(y * ori_h / self.graph_h)
return x, y
def xy_to_mouse(self, xy):
ori_w, ori_h = self.image_original
mouse_x = int(xy[0] * self.graph_w / ori_w)
mouse_y = int(xy[1] * self.graph_h / ori_h)
return mouse_x, mouse_y
    def plot_point(self, mouse):
        """Add a landmark at graph-space *mouse*: append it to the caches,
        the table, the progress display, and draw it on the graph."""
        if not self.dynamic_lm and self.num_lm == self.total_num_lm:
            # Fixed landmark count already reached - refuse further points
            sg.popup(
                f"You've annotated the last landmark.\nPlease proceed to next image.",
                location=WINDOW_LOC)
            return
        x, y = self.mouse_to_xy(mouse)
        # num_lm increment
        self.num_lm += 1
        self.real_mouses.append([self.num_lm, *mouse])
        # Update Table
        table = self.window["-LMTABLE-"]
        values = table.Values
        values.append([self.num_lm, x, y])
        table.update(values=values)
        # Update landmark progress
        if self.dynamic_lm:
            self.window["-LMPROGRESS-"].Update(
                f"Landmark Progress: {self.num_lm}")
        else:
            self.window["-LMPROGRESS-"].Update(
                f"Landmark Progress: {self.num_lm}/{self.total_num_lm}")
            self.window["-LBAR-"].update(current_count=self.num_lm,
                                         max=self.total_num_lm)
        # Plot point on image
        graph = self.window["-GRAPH-"]
        graph.draw_circle(mouse,
                          10,
                          fill_color='lightgreen',
                          line_color='darkgreen',
                          line_width=2)
        graph.draw_text(self.num_lm, mouse)
    def next_image(self):
        """
        Save the current image's landmarks and advance to the next image.

        Three branches, differing only in how incomplete annotation is
        handled:
        - dynamic_lm: always save and advance (or report the last image);
        - fixed count not yet reached: warn once (ignorable) before saving;
        - fixed count reached: save and advance.
        May return "Home" (via unfinish_images_prompt) if the user chooses
        to exit after the last image.
        """
        # If Dynamic landmark, shouldn't prevent user to next image.
        if self.dynamic_lm:
            # Save current landmark progress into json file
            self.record()
            # Update window
            if self.pointer + 1 < len(self.all_image_rel_paths):
                # Load next image
                self.pointer += 1
                self.renew_annotate()
            else:
                self.window["-ANNOPROGRESS-"].update(
                    f"Annotation Progress: {self.pointer+1}/{self.total_num_images}"
                )
                self.window["-PBAR-"].update(current_count=self.pointer + 1,
                                             max=self.total_num_images)
                sg.popup(
                    "This is the last image.\nTo safely exit this program, please click the Save button.",
                    location=WINDOW_LOC)
            return
        # Landmark insufficient
        elif self.num_lm != self.total_num_lm:
            flag = self.popup_with_confirm_and_ignore("You have not done annotate this image yet.\n" \
                + f"This image need {self.total_num_lm} landmarks but only {self.num_lm} received.\n"\
                + "However, you can still continue where all data will be saved." \
                + "Do you wish to continue?")
            if flag == "No" or flag is None: return
            # When flag == Yes, save data into json
            self.record()
            # Not the last image
            if self.pointer + 1 < len(self.all_image_rel_paths):
                # Load next image
                self.pointer += 1
                self.renew_annotate()
            # Last image
            else:
                self.window["-ANNOPROGRESS-"].update(
                    f"Annotation Progress: {self.pointer+1}/{self.total_num_images}"
                )
                self.window["-PBAR-"].update(current_count=self.pointer + 1,
                                             max=self.total_num_images)
                return self.unfinish_images_prompt(
                )  # Return "Home" if user agree to exit
        # Number of landmark detected correct
        else:
            # Record landmark into json file
            self.record()
            # Not the last image
            if self.pointer + 1 < len(self.all_image_rel_paths):
                # Load next image and renew window.
                self.pointer += 1
                self.renew_annotate()
            # Last image
            else:
                self.window["-ANNOPROGRESS-"].update(
                    f"Annotation Progress: {self.pointer+1}/{self.total_num_images}"
                )
                self.window["-PBAR-"].update(current_count=self.pointer + 1,
                                             max=self.total_num_images)
                return self.unfinish_images_prompt()
        # Only the non-dynamic, did-advance paths reach this point.
        # If Last image, disable next button
        if self.pointer == self.total_num_images - 1:
            self.window['-NEXT-'].update(disabled=True)
        # Coming from first to second image, enable prev button
        if self.pointer == 1:
            self.window['-PREV-'].update(disabled=False)
def unfinish_images_prompt(self):
response = sg.popup_yes_no(
"All images have been annotated. Please check for unfinished images. Do you wish to quit now? ",
location=WINDOW_LOC)
if response == "Yes":
response2 = self.save_session()
if response2 == "Yes":
return "Home"
def record(self):
img_name = self.all_image_rel_paths[self.pointer]
table_values = self.window["-LMTABLE-"].Values
mouse_values = self.real_mouses
# Load json
with open(self.annotation_json, "r") as f:
data = json.load(f)
values_dict = {
"xy": [value[1:] for value in table_values],
"mouse_xy": [value[1:] for value in mouse_values]
}
data.update({img_name: values_dict})
# Save to json
pretty_dump(data, self.annotation_json)
    def renew_graph(self):
        """Redraw the current image on the graph and overlay all landmarks."""
        img_name = self.all_image_rel_paths[self.pointer]
        # Load current image for user to annotate
        im = Image.open(os.path.join(self.dir, img_name))
        w, h = im.size
        im = im.resize((self.graph_w, self.graph_h),
                       resample=Image.BICUBIC)
        with io.BytesIO() as output:
            im.save(output, format="PNG")
            data = output.getvalue()
        # Load image to graph
        graph = self.window["-GRAPH-"]
        graph.DrawImage(data=data, location=(0, 0))
        # Overlay each stored landmark with its index label
        for landmark in self.real_mouses:
            graph.draw_circle(tuple(landmark[1:]),
                              10,
                              fill_color='lightgreen',
                              line_color='darkgreen',
                              line_width=2)
            graph.draw_text(landmark[0], tuple(landmark[1:]))
    def table_prompt(self, message: str):
        """Show *message* in the status line beneath the landmark table."""
        self.window["-TABLEPROMPT-"].Update(message, visible=True)
def undo_landmark(self):
if self.num_lm == 0:
# Cannot undo when no landmarks
return
# Decrease number of landmark
self.num_lm -= 1
# Pop the mouse location in view
self.real_mouses.pop()
# Pop the real landmark coordinate, then update
values = self.window["-LMTABLE-"].Values
last_landmark = values.pop()
self.window["-LMTABLE-"].Update(values=values)
# Prompt removal below table
self.table_prompt(f"Landmark Number {last_landmark[0]} removed.")
# Load current image for user to annotate
img_name = self.all_image_rel_paths[self.pointer]
im = Image.open(os.path.join(self.dir, img_name))
w, h = im.size
im = im.resize((self.graph_w, self.graph_h),
resample=Image.BICUBIC)
with io.BytesIO() as output:
im.save(output, format="PNG")
data = output.getvalue()
# Load image to graph
graph = self.window["-GRAPH-"]
graph.DrawImage(data=data, location=(0, 0))
# Plot previous points
self.renew_graph()
# Update landmark progress text and bar.
if self.dynamic_lm:
self.window["-LMPROGRESS-"].Update(
f"Landmark Progress: {self.num_lm}")
else:
self.window["-LMPROGRESS-"].Update(
f"Landmark Progress: {self.num_lm}/{self.total_num_lm}")
self.window["-LBAR-"].Update(current_count=self.num_lm,
max=self.total_num_lm)
    def prev_image(self):
        """Save current landmarks and step back to the previous image."""
        # Cannot prev if first image
        if self.pointer == 0:
            return
        # Record landmark to wait for user return.
        self.record()
        # Renew windows on previous image
        self.pointer -= 1
        self.renew_annotate()
        # If first image, disable prev button
        if self.pointer == 0:
            self.window['-PREV-'].update(disabled=True)
        # Coming from last to second last, enable next
        if self.pointer == self.total_num_images - 2:
            self.window['-NEXT-'].update(disabled=False)
    def generate_csv(self):
        """Convert the annotation JSON into a CSV (one row per image).

        Columns are image_name, x1, y1, ... up to the maximum landmark
        count; rows with fewer landmarks are padded with NaN. Images with
        no entry in the JSON are omitted. Returns the CSV filename.
        """
        with open(self.annotation_json, "r") as f:
            Data = json.load(f)
        # In dynamic lm case, get max number of landmarks
        if self.dynamic_lm:
            max_lm = 0
            for key, value in Data.items():
                max_lm = max(max_lm, len(value["xy"]))
        else:
            max_lm = self.total_num_lm
        # Rows follow the order of self.all_image_rel_paths
        compact_Data = []
        for imgrel in self.all_image_rel_paths:
            if imgrel in Data:
                # Flatten [[x1,y1],[x2,y2],...] -> [x1,y1,x2,y2,...]
                xy = [int(j) for sub in Data[imgrel]["xy"] for j in sub]
                num_nan = 2 * max_lm - len(xy)
                xy += [np.nan] * num_nan
                one_row = [imgrel] + xy
                compact_Data.append(one_row)
        # Save to csv
        df = pd.DataFrame(data=compact_Data)
        df.columns = ["image_name"] + [
            letter + str(i) for i in range(1, max_lm + 1)
            for letter in ["x", "y"]
        ]
        csv_filename = os.path.splitext(self.annotation_json)[0] + ".csv"
        df.to_csv(csv_filename, index=False)
        return csv_filename
def cancel_shift(self):
graph = self.window["-GRAPH-"]
graph.draw_circle(tuple(self.store_mouse[1]),
10,
fill_color='lightgreen',
line_color='green',
line_width=2)
graph.draw_text(self.store_mouse[0], tuple(self.store_mouse[1]))
number = self.store_mouse[0]
table = self.window['-LMTABLE-']
table.update(row_colors=[[number - 1, table.BackgroundColor]])
self.store_mouse = None
self.window['-SHIFT-PROMPT-'].Update("Choose a point to move",
visible=True)
    def move_point(self, mouse):
        """Two-click landmark move.

        First call selects the landmark nearest to *mouse* (stored in
        self.store_mouse, highlighted purple); second call relocates the
        selected landmark to *mouse* and clears the selection.
        """
        # Assuming mouse is not (None, None)
        # No landmark on the graph, return immediately
        if self.num_lm == 0:
            # Clear mouse location
            self.store_mouse = None
            return
        # When store_mouse is None find nearest point based on mouse location
        if self.store_mouse is None:
            mouses = np.array([
                real_mouse[1:] for real_mouse in self.real_mouses
            ])  # Shape (N, 2)
            diffs = mouses - np.array(mouse)
            # Find nearest point (squared Euclidean distance)
            indmin = np.argmin(diffs[:, 0]**2 + diffs[:, 1]**2)
            # Change the landmark color and store into store_mouse
            self.store_mouse = (indmin + 1, mouses[indmin])
            # Change the row color of table
            self.window['-LMTABLE-'].Update(row_colors=[[indmin, '#6A0DAD']])
            # Plot point on image
            graph = self.window["-GRAPH-"]
            graph.draw_circle(tuple(self.store_mouse[1]),
                              10,
                              fill_color='purple',
                              line_color='#6A0DAD',
                              line_width=2)
            graph.draw_text(self.store_mouse[0],
                            tuple(self.store_mouse[1]),
                            color="white")
            # Update prompt
            self.window['-SHIFT-PROMPT-'].Update(
                "Choose a destination\nEsc to cancel", visible=True)
        # When store_mouse is not None, use its index to place new point.
        else:
            number = self.store_mouse[0]
            # Update self.real_mouses, then renew graph and table
            self.real_mouses[number - 1] = [number, *mouse]
            self.renew_graph()
            x, y = self.mouse_to_xy(mouse)
            table = self.window["-LMTABLE-"]
            values = table.Values
            values[number - 1] = [number, x, y]
            table.update(values=values,
                         row_colors=[[number - 1, table.BackgroundColor]])
            # Update prompt
            self.window['-SHIFT-PROMPT-'].Update("Choose a point to move",
                                                 visible=True)
            # Clear store_mouse
            self.store_mouse = None
    def save_session(self):
        """Save all progress and ask whether to quit.

        Stores the current image pointer in the cache file, records the
        current landmarks in the JSON, regenerates the CSV, then asks the
        user. Returns the popup answer ("Yes" means the caller may exit).
        """
        # Record where user at
        with open(CACHE, "r") as f:
            D = json.load(f)
        D["pointer"] = self.pointer
        pretty_dump(D, CACHE)
        # Record landmark to json file
        self.record()
        # Convert json file into csv file
        csv_filename = self.generate_csv()
        response = sg.popup_yes_no(
            f"Annotation file saved! View it in {csv_filename}.\nDo you wish to quit?",
            location=WINDOW_LOC)
        return response
def popup_with_confirm_and_ignore(self, message: str):
if self.ignore_warning1:
return "Yes"
layout = [[sg.Text(message)],
[
sg.Checkbox(
"I understand, do not show this warning again.",
key="-CHECK-")
], [sg.Button("OK"), sg.Button("Cancel")]]
window = sg.Window("Warning", layout, location=WINDOW_LOC)
while True:
event, values = window.read()
if event == sg.WIN_CLOSED: # if user closes window or clicks cancel
break
elif event == "OK":
if values["-CHECK-"]:
self.ignore_warning1 = True
window.close()
return "Yes"
elif event == "Cancel":
if values["-CHECK-"]:
self.ignore_warning1 = True
window.close()
return "No"
| StarcoderdataPython |
8055612 | from kubernetes import client, config
from flask import Flask, request, abort
from logging.config import dictConfig
# Route all records at DEBUG and above through a single stream handler
# (stderr by default for logging.StreamHandler).
dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
    }},
    'handlers': {'wsgi': {
        'class': 'logging.StreamHandler',
        'formatter': 'default'
    }},
    'root': {
        'level': 'DEBUG',
        'handlers': ['wsgi']
    }
})
app = Flask(__name__)  # pylint: disable=invalid-name
def deploymentApproved(namespace, deployment_name):
    """Return True if the named Deployment in *namespace* has an
    'Approval' event recorded against it."""
    config.load_incluster_config()
    events = client.CoreV1Api().list_namespaced_event(namespace)
    return any(
        e.involved_object.kind == 'Deployment'
        and e.involved_object.name == deployment_name
        and e.reason == 'Approval'
        for e in events.items
    )
@app.route('/', methods=['POST'])
def webhook():
    """Validating-admission endpoint.

    Allows the admitted object unless it is owned by a Deployment that
    has no 'Approval' event. Returns an AdmissionReview response dict
    (Flask serialises it as JSON); 400 if the body is not JSON.
    """
    admission = request.get_json()
    if admission is None:
        abort(400)
    admission_request = admission.get('request', {})
    uid = admission_request.get('uid')
    namespace = admission_request.get('namespace')
    owner_references = admission_request.get('object', {})\
        .get('metadata', {}).get('ownerReferences', [])
    # Objects with no owning Deployment are always allowed
    deploy_approved = True
    deployment_name = next(
        filter(lambda r: r['kind'] == 'Deployment', owner_references),
        {}).get('name', None)
    app.logger.info(f"Checking deployment: {deployment_name}")
    if deployment_name is not None:
        deploy_approved = deploymentApproved(namespace, deployment_name)
    resp = {
        'apiVersion': 'admission.k8s.io/v1',
        'kind': 'AdmissionReview',
        'response': {
            'uid': uid,
            'allowed': deploy_approved
        },
    }
    if deploy_approved is False:
        app.logger.info("Denying deployment")
        # Status message is surfaced to the user by the API server
        resp['response']['status'] = {'code': 403, 'message':
                                      'Your deployment must be approved'}
    else:
        app.logger.info("Approving deployment")
    return resp
if __name__ == "__main__":
    # Development entry point; in a cluster this would run behind a WSGI server.
    app.run(host='127.0.0.1', debug=True, port=5000)
| StarcoderdataPython |
3403378 | import seaborn as sns
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
# Country -> continent/flag lookup; multi-name countries are split and exploded
countries = pd.read_csv("datasets/countries.csv")[["country", "continent", "flag"]].split_columns('country', "|").explode('country').set_index('country')
df = pd.read_csv("datasets/nobels.csv")
# Literature laureates only, keeping their (possibly multiple) countries
df = df[df['category'] == "Literature"][["name", "countries"]].split_columns("countries", "|")
df = df.assign_rows(continents=lambda r: tuple(sorted(set(countries.continent[c] for c in r.countries))))
NORDIC = ["Iceland", "Finland", "Sweden", "Norway", "Denmark"]
# Laureate counts keyed by Nordic-country combination and by continent combination
nordic_counts = df.update_columns(countries=lambda cs: tuple(sorted(set(c for c in cs if c in NORDIC)))).filter_rows(lambda r: any(r.countries)).groupby("countries").count().name
continent_counts = df.groupby("continents").count().name
counts = pd.concat((nordic_counts, continent_counts))
# chart
categories = { "Old World": ["Asia", "Africa"], "New World": ["South America", "North America", "Oceania"], "Europe": ["Europe"], "Nordic": NORDIC }
catlabels = { "Europe": "Europe (740m)", "New World": "Americas & Oceania (1,040m)", "Old World": "Asia & Africa (5,650m)", "Nordic": "Nordic countries (27m)" }
# One row per category; a count column contributes to a row only when the
# combination involves one of that category's continents/countries
table = pd.DataFrame([{subcat: counts[subcat] if any(c in subcat for c in categories[cat]) else 0 for subcat in counts.index } for cat in categories], index=categories)
table = table.assign_rows(sum=lambda r: r.sum()).sort_values("sum", ascending=False).drop("sum", axis=1)
table = table[[ # hack #1 to get nice ordering
    ("Sweden",), ("Denmark",), ("Norway",), ("Finland",), ("Iceland",),
    ("Europe",),
    ("North America",), ("Europe", "North America"),
    ("Asia",), ("Asia", "Europe"),
    ("South America",), ("Europe", "South America"),
    ("Europe", "Oceania"),
    ("Africa",), ("Africa", "Europe"), ("Africa", "Oceania")
]]
# Bar width, stripe half-height (px) and the fixed continent colour order
WIDTH = 80
BAR = 3
PALETTE = tmap(RGBA, sns.xkcd_palette(["windows blue", "faded green", "amber", "dusty purple", "red", "brown"]))
CONTINENTS = [ "Europe", "North America", "South America", "Oceania", "Asia","Africa" ]
def continent_colour(c):
    """Return the palette colour assigned to continent *c*."""
    idx = CONTINENTS.index(c)
    return PALETTE[idx]
def stripe(c1, c2=None):
    """Build a 100 x (2*BAR) two-band stripe tile; c2 defaults to c1."""
    top = Image.new("RGBA", (100, BAR), c1)
    bottom = Image.new("RGBA", (100, BAR), c1 if c2 is None else c2)
    return Image.from_column([top, bottom])
def stripe_pattern(height, c1, c2=None):
    """Tile the two-colour stripe to a WIDTH x height image."""
    tile = stripe(c1, c2)
    return Image.from_pattern(tile, (WIDTH, height))
def colorfn(c,r):
    # Bar-segment colour factory: continent combinations get (striped)
    # palette colours; single Nordic countries get a simplified flag made
    # from the flag image's two most frequent colours.
    cs = table.columns[c] if isinstance(c, int) else (c,)
    # For two-continent combos, swap so cs[0] belongs to this row's category
    # (which ends up as the bottom stripe via reversed(cs) below)
    if len(cs) == 2 and cs[0] not in categories[table.index[r]]: cs = cs[1], cs[0]
    if any(c in CONTINENTS for c in cs):
        return lambda size: stripe_pattern(size[1], *[continent_colour(c) for c in reversed(cs)])
    # NOTE(review): getcolors() can return None for images with many distinct
    # colours - assumes the flag images are simple enough; verify if new
    # flags are added.
    flagcolors = sorted(Image.from_url_with_cache(countries.flag[cs[0]]).convert("RGBA").getcolors(), reverse=True)
    return lambda size: Image.from_row([Image.new("RGBA", (16, size[1]), flagcolors[0][1]),
                                        Image.new("RGBA", (8, size[1]), flagcolors[1][1]),
                                        Image.new("RGBA", (WIDTH-16-8, size[1]), flagcolors[0][1])])
def rlabelfn(r):
    """Render row *r*'s category display label, wrapped to the bar width."""
    label = catlabels[table.index[r]]
    return Image.from_text(label, arial(14, bold=False), "black", "white", align="center", padding=2, max_width=WIDTH)
ymax = 100
# Stacked bar chart: one bar per category row, one segment per count column
chart = bar_chart(table, WIDTH, BAR*2*ymax, type=BarChartType.STACKED, spacing=10, colors=colorfn, grid_interval=10, tick_interval=5, label_font=arial(14), rlabels=rlabelfn, bg="white", fg="black", ylabel=Image.from_text("# Nobel Literature laureates", arial(14), padding=(0,2,0,10), bg="white").transpose(Image.ROTATE_90), ymax=ymax, clabels=None)
def lbox(c):
    # Legend box: colour swatch (flag or stripe) overlaid with the total
    # number of laureates whose combination involves c.
    count = counts.select(lambda cs: c in cs).sum()
    box = colorfn(c, 0)((WIDTH,WIDTH)).resize_fixed_aspect(width=BOXSIZE)
    # White text except on Finland's light flag, where black is readable
    return box.place(Image.from_text(str(count), arial(14), "black" if c == "Finland" else "white"))
BOXSIZE = 30
# Legend: continent swatches, a dual-nationality note, then Nordic flags
cboxes = [[lbox(c), Image.from_text(c, arial(14), padding=(5,0), fg="black", bg="white")] for c in CONTINENTS]
clegs = Image.from_array(cboxes, bg="white", xalign=0)
nboxes = [[lbox(c), Image.from_text(c, arial(14), padding=(5,0), fg="black", bg="white")] for c in sorted(NORDIC)]
nlegs = Image.from_array(nboxes, bg="white", xalign=0)
legend = Image.from_column([
    Image.from_text("Continents", arial(14, bold=True)),
    clegs,
    Image.from_text("(stripes indicate winners with dual nationalities)", arial(14), max_width=150, padding=(0,0,0,10)),
    Image.from_text("Countries", arial(14, bold=True)),
    nlegs
    ], bg="white", padding=(0,3), xalign=0).pad(5, "white").pad(1, "black")
# Assemble chart + legend + title, add attribution and save
chart = Image.from_row([chart, legend], bg="white", yalign=0, padding=5)
title = Image.from_text("Geographic distribution of Literature Nobel laureates", arial(24, bold=True)).pad((0,4,0,8), "white")
img = Image.from_column([title, chart], bg="white")
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/nobels_lit.png")
| StarcoderdataPython |
1958307 | <filename>IOPool/Output/test/PoolOutputTest_cfg.py
import FWCore.ParameterSet.Config as cms
import argparse
import sys
# Command-line options; unknown arguments (e.g. those consumed by cmsRun)
# are tolerated via parse_known_args, and a bare "--" separator is dropped.
parser = argparse.ArgumentParser(prog=sys.argv[0], description="Test PoolOutputModule")
parser.add_argument("--firstLumi", type=int, default=None, help="Set first lumi to process ")
argv = sys.argv[:]
if '--' in argv:
    argv.remove("--")
args, unknown = parser.parse_known_args(argv)
process = cms.Process("TESTOUTPUT")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.maxEvents.input = 20
# Produce test products and write them with PoolOutputModule
process.Thing = cms.EDProducer("ThingProducer")
process.OtherThing = cms.EDProducer("OtherThingProducer")
process.output = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('file:PoolOutputTest.root')
)
process.source = cms.Source("EmptySource")
if args.firstLumi is not None:
    # Start from the requested lumi and name the output file accordingly
    process.source.firstLuminosityBlock = cms.untracked.uint32(args.firstLumi)
    process.output.fileName = "file:PoolOutputTestLumi{}.root".format(args.firstLumi)
process.p = cms.Path(process.Thing*process.OtherThing)
process.ep = cms.EndPath(process.output)
| StarcoderdataPython |
8139064 | <reponame>European-XFEL/euxfel-python
"""AGIPD & LPD geometry handling."""
from cfelpyutils.crystfel_utils import load_crystfel_geometry
from copy import copy
import h5py
from itertools import product
import numpy as np
from scipy.ndimage import affine_transform
import warnings
from .crystfel_fmt import write_crystfel_geom
__all__ = ['AGIPD_1MGeometry', 'LPD_1MGeometry']
class GeometryFragment:
    """The 3D position & orientation of one detector tile.

    corner_pos is the corner of the tile holding the first stored pixel.
    The tile is a rectangle of ss_pixels (slow scan) by fs_pixels
    (fast scan) pixels; ss_vec and fs_vec are the steps, in metres, for
    one pixel along each of those axes. All coordinates are (x, y, z).
    """

    def __init__(self, corner_pos, ss_vec, fs_vec, ss_pixels, fs_pixels):
        self.corner_pos = corner_pos
        self.ss_vec = ss_vec
        self.fs_vec = fs_vec
        self.ss_pixels = ss_pixels
        self.fs_pixels = fs_pixels

    @classmethod
    def from_panel_dict(cls, d):
        """Build a fragment from one CrystFEL panel dict (pixel units / res)."""
        res = d['res']
        corner = np.array([d['cnx'], d['cny'], d['coffset']]) / res
        ss_step = np.array([d['ssx'], d['ssy'], d['ssz']]) / res
        fs_step = np.array([d['fsx'], d['fsy'], d['fsz']]) / res
        n_ss = d['max_ss'] - d['min_ss'] + 1
        n_fs = d['max_fs'] - d['min_fs'] + 1
        return cls(corner, ss_step, fs_step, n_ss, n_fs)

    def corners(self):
        """Return the tile's four corner coordinates as a (4, 3) array."""
        ss_span = self.ss_vec * self.ss_pixels
        fs_span = self.fs_vec * self.fs_pixels
        origin = self.corner_pos
        return np.stack([
            origin,
            origin + fs_span,
            origin + ss_span + fs_span,
            origin + ss_span,
        ])

    def centre(self):
        """Return the coordinates of the tile's centre point."""
        half_ss = 0.5 * self.ss_vec * self.ss_pixels
        half_fs = 0.5 * self.fs_vec * self.fs_pixels
        return self.corner_pos + half_ss + half_fs

    def snap(self, px_shape):
        """Quantise to a 2D integer pixel grid as a GridGeometryFragment."""
        # Round position and step vectors to whole pixels, dropping z
        corner = np.around(self.corner_pos[:2] / px_shape).astype(np.int32)
        ss_step = np.around(self.ss_vec[:2] / px_shape).astype(np.int32)
        fs_step = np.around(self.fs_vec[:2] / px_shape).astype(np.int32)
        # One unit vector must lie along x and the other along y,
        # though we don't know which is which.
        assert {tuple(np.abs(ss_step)), tuple(np.abs(fs_step))} == {(0, 1), (1, 0)}
        # Convert xy coordinates to yx indexes
        return GridGeometryFragment(
            corner[::-1], ss_step[::-1], fs_step[::-1], self.ss_pixels, self.fs_pixels
        )
class GridGeometryFragment:
    """Holds the 2D axis-aligned position and orientation of one detector tile.

    This is used in 'snapped' geometry which efficiently assembles a detector
    image into a 2D array.

    These coordinates are all (y, x), suitable for indexing a numpy array.

    ss_vec and fs_vec must be length 1 vectors in either positive or negative
    x or y direction. In the output array, the fast scan dimension is always x.
    So if the input data is oriented with fast-scan vertical, we need to
    transpose it first.

    Regardless of transposition, we may also need to flip the data on one or
    both axes; the fs_order and ss_order variables handle this.
    """
    def __init__(self, corner_pos, ss_vec, fs_vec, ss_pixels, fs_pixels):
        self.ss_vec = ss_vec
        self.fs_vec = fs_vec
        self.ss_pixels = ss_pixels
        self.fs_pixels = fs_pixels

        if fs_vec[0] == 0:
            # Fast scan is x dimension: Flip without transposing
            fs_order = fs_vec[1]   # +1 or -1: direction of fast scan along x
            ss_order = ss_vec[0]   # +1 or -1: direction of slow scan along y
            self.transform = lambda arr: arr[..., ::ss_order, ::fs_order]
            # A negative direction means corner_pos is the tile's far edge on
            # that axis, so shift back by the tile extent to get the min corner
            corner_shift = np.array([
                min(ss_order, 0) * self.ss_pixels,
                min(fs_order, 0) * self.fs_pixels
            ])
            self.pixel_dims = np.array([self.ss_pixels, self.fs_pixels])
        else:
            # Fast scan is y : Transpose so fast scan -> x and then flip
            fs_order = fs_vec[0]
            ss_order = ss_vec[1]
            self.transform = lambda arr: arr.swapaxes(-1, -2)[..., ::fs_order, ::ss_order]
            corner_shift = np.array([
                min(fs_order, 0) * self.fs_pixels,
                min(ss_order, 0) * self.ss_pixels
            ])
            self.pixel_dims = np.array([self.fs_pixels, self.ss_pixels])
        # (y, x) index of the tile's minimum corner in the assembled array,
        # and of the opposite (maximum) corner
        self.corner_idx = corner_pos + corner_shift
        self.opp_corner_idx = self.corner_idx + self.pixel_dims
class DetectorGeometryBase:
    """Base class for detector geometry. Subclassed for specific detectors."""
    # Define in subclasses:
    pixel_size = 0.0           # pixel edge length, in metres
    frag_ss_pixels = 0         # slow-scan pixels per tile
    frag_fs_pixels = 0         # fast-scan pixels per tile
    n_modules = 0
    n_tiles_per_module = 0
    expected_data_shape = (0, 0, 0)  # (modules, ss pixels, fs pixels)
    _pixel_corners = np.array([  # pixel units; overridden for DSSC
        [0, 1, 1, 0],  # slow-scan
        [0, 0, 1, 1]  # fast-scan
    ])
    _draw_first_px_on_tile = 1  # Tile num of 1st pixel - overridden for LPD
@property
def _pixel_shape(self):
"""Pixel (x, y) shape. Overridden for DSSC."""
return np.array([1., 1.], dtype=np.float64) * self.pixel_size
    def __init__(self, modules, filename='No file'):
        """modules: list of lists (1 per module) of GeometryFragment
        (1 per tile). filename is display metadata only."""
        # List of lists (1 per module) of fragments (1 per tile)
        self.modules = modules
        # self.filename is metadata for plots, we don't read/write the file.
        # There are separate methods for reading and writing.
        self.filename = filename
        # Lazily-built cache for _snapped()
        self._snapped_cache = None
def _get_plot_scale_factor(self, axis_units):
if axis_units == 'm':
return 1
elif axis_units == 'px':
return 1 / self.pixel_size
else:
raise ValueError("axis_units must be 'px' or 'm', not {!r}"
.format(axis_units))
    def inspect(self, axis_units='px', frontview=True):
        """Plot the 2D layout of this detector geometry.

        Returns the matplotlib Axes object holding the plot (note: the
        Axes, not the Figure).
        """
        import matplotlib.pyplot as plt
        from matplotlib.collections import PatchCollection, LineCollection
        from matplotlib.patches import Polygon
        scale = self._get_plot_scale_factor(axis_units)
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(1, 1, 1)
        rects = []
        first_rows = []
        for module in self.modules:
            for t, fragment in enumerate(module, start=1):
                corners = fragment.corners()[:, :2]  # Drop the Z dimension
                rects.append(Polygon(corners * scale))
                if t == self._draw_first_px_on_tile:
                    # Find the ends of the first row in reading order
                    c1 = fragment.corner_pos * scale
                    c2 = c1 + (fragment.fs_vec * fragment.fs_pixels * scale)
                    first_rows.append((c1[:2], c2[:2]))
        # Add tile shapes
        pc = PatchCollection(rects, facecolor=(0.75, 1.0, 0.75), edgecolor=None)
        ax.add_collection(pc)
        # Add markers for first pixels & lines for first row
        first_rows = np.array(first_rows)
        first_px_x, first_px_y = first_rows[:, 0, 0], first_rows[:, 0, 1]
        ax.scatter(first_px_x, first_px_y, marker='x', label='First pixel')
        ax.add_collection(LineCollection(
            first_rows, linestyles=':', color='k', label='First row'
        ))
        ax.legend()
        cross_size = 0.02 * scale
        # Draw cross in the centre.
        ax.hlines(0, -cross_size, +cross_size, colors='0.75', linewidths=2)
        ax.vlines(0, -cross_size, +cross_size, colors='0.75', linewidths=2)
        if frontview:
            # Looking along the beam: x increases to the left
            ax.invert_xaxis()
        ax.set_xlabel('metres' if axis_units == 'm' else 'pixels')
        ax.set_ylabel('metres' if axis_units == 'm' else 'pixels')
        return ax
    @classmethod
    def from_crystfel_geom(cls, filename):
        """Read a CrystFEL format (.geom) geometry file.

        Panels are expected to be named 'p<module>a<tile>'.
        Returns a new geometry object.
        """
        geom_dict = load_crystfel_geometry(filename)
        modules = []
        for p in range(cls.n_modules):
            tiles = []
            modules.append(tiles)
            for a in range(cls.n_tiles_per_module):
                d = geom_dict['panels']['p{}a{}'.format(p, a)]
                tiles.append(GeometryFragment.from_panel_dict(d))
        return cls(modules, filename=filename)
    def write_crystfel_geom(self, filename, *,
                            data_path='/entry_1/instrument_1/detector_1/data',
                            mask_path=None, dims=('frame', 'modno', 'ss', 'fs'),
                            adu_per_ev=None, clen=None, photon_energy=None):
        """Write this geometry to a CrystFEL format (.geom) geometry file.

        Parameters
        ----------

        filename : str
            Filename of the geometry file to write.
        data_path : str
            Path to the group that contains the data array in the hdf5 file.
            Default: ``'/entry_1/instrument_1/detector_1/data'``.
        mask_path : str
            Path to the group that contains the mask array in the hdf5 file.
        dims : tuple
            Dimensions of the data. Extra dimensions, except for the defaults,
            should be added by their index, e.g.
            ('frame', 'modno', 0, 'ss', 'fs') for raw data.
            Default: ``('frame', 'modno', 'ss', 'fs')``.
            Note: the dimensions must contain frame, ss, fs.
        adu_per_ev : float
            ADU (analog digital units) per electron volt for the considered
            detector.
        clen : float
            Distance between sample and detector in meters
        photon_energy : float
            Beam wave length in eV
        """
        write_crystfel_geom(
            self, filename, data_path=data_path, mask_path=mask_path, dims=dims,
            adu_per_ev=adu_per_ev, clen=clen, photon_energy=photon_energy,
        )
        # Remember the filename (used as plot metadata) if we had none
        if self.filename == 'No file':
            self.filename = filename
    def _snapped(self):
        """Snap geometry to a 2D pixel grid

        This returns a new geometry object. The 'snapped' geometry is
        less accurate, but can assemble data into a 2D array more efficiently,
        because it doesn't do any interpolation.
        The result is computed once and then cached.
        """
        if self._snapped_cache is None:
            new_modules = []
            for module in self.modules:
                new_tiles = [t.snap(px_shape=self._pixel_shape) for t in module]
                new_modules.append(new_tiles)
            self._snapped_cache = SnappedGeometry(new_modules, self)
        return self._snapped_cache
    @staticmethod
    def split_tiles(module_data):
        """Split data from a detector module into tiles.

        Must be implemented in subclasses; the base class raises
        NotImplementedError.
        """
        raise NotImplementedError
    def output_array_for_position_fast(self, extra_shape=(), dtype=np.float32):
        """Make an empty output array to use with position_modules_fast

        You can speed up assembling images by reusing the same output array:
        call this once, and then pass the array as the ``out=`` parameter to
        :meth:`position_modules_fast()`. By default, it allocates a new array on
        each call, which can be slow.

        Parameters
        ----------

        extra_shape : tuple, optional
          By default, a 2D output array is generated, to assemble a single
          detector image. If you are assembling multiple pulses at once, pass
          ``extra_shape=(nframes,)`` to get a 3D output array.
        dtype : optional (Default: np.float32)
        """
        return self._snapped().make_output_array(extra_shape=extra_shape,
                                                 dtype=dtype)
    def position_modules_fast(self, data, out=None):
        """Assemble data from this detector according to where the pixels are.

        This approximates the geometry to align all pixels to a 2D grid.

        Parameters
        ----------

        data : ndarray
          The last three dimensions should match the modules, then the
          slow scan and fast scan pixel dimensions.
        out : ndarray, optional
          An output array to assemble the image into. By default, a new
          array is allocated. Use :meth:`output_array_for_position_fast` to
          create a suitable array.
          If an array is passed in, it must match the dtype of the data and the
          shape of the array that would have been allocated.
          Parts of the array not covered by detector tiles are not overwritten.
          In general, you can reuse an output array if you are assembling
          similar pulses or pulse trains with the same geometry.

        Returns
        -------
        out : ndarray
          Array with one dimension fewer than the input.
          The last two dimensions represent pixel y and x in the detector space.
        centre : ndarray
          (y, x) pixel location of the detector centre in this geometry.
        """
        return self._snapped().position_modules(data, out=out)
def position_all_modules(self, data, out=None):
"""Deprecated alias for :meth:`position_modules_fast`"""
return self.position_modules_fast(data, out=out)
    def plot_data_fast(self,
                       data, *,
                       axis_units='px',
                       frontview=True,
                       ax=None,
                       figsize=None,
                       colorbar=True,
                       **kwargs):
        """Plot data from the detector using this geometry.

        This approximates the geometry to align all pixels to a 2D grid.

        Returns a matplotlib axes object.

        Parameters
        ----------

        data : ndarray
          Should have exactly 3 dimensions, for the modules, then the
          slow scan and fast scan pixel dimensions.
        axis_units : str
          Show the detector scale in pixels ('px') or metres ('m').
        frontview : bool
          If True (the default), x increases to the left, as if you were looking
          along the beam. False gives a 'looking into the beam' view.
        ax : `~matplotlib.axes.Axes` object, optional
          Axes that will be used to draw the image. If None is given (default)
          a new axes object will be created.
        figsize : tuple
          Size of the figure (width, height) in inches to be drawn
          (default: (10, 10))
        colorbar : bool, dict
          Draw colobar with default values (if boolean is given). Colorbar
          appearance can be controlled by passing a dictionary of properties.
        kwargs :
          Additional keyword arguments passed to `~matplotlib.imshow`
        """
        return self._snapped().plot_data(
            data, axis_units=axis_units, frontview=frontview, figsize=figsize,
            ax=ax, colorbar=colorbar, **kwargs
        )
@classmethod
def _distortion_array_slice(cls, m, t):
"""Which part of distortion array each tile is.
"""
# _tile_slice gives the slice for the tile within its module.
# The distortion array joins the modules along the slow-scan axis, so
# we need to offset the slow-scan slice to land in the correct module.
ss_slice_inmod, fs_slice = cls._tile_slice(t)
mod_px_ss = cls.expected_data_shape[1]
mod_offset = m * mod_px_ss
ss_slice = slice(
ss_slice_inmod.start + mod_offset, ss_slice_inmod.stop + mod_offset
)
return ss_slice, fs_slice
def to_distortion_array(self, allow_negative_xy=False):
    """Generate a distortion array for pyFAI from this geometry.

    The result is a float32 array of shape
    (n_modules * module_ss_pixels, module_fs_pixels, n_corners, 3),
    holding (z, y, x) coordinates in metres for each corner of each pixel,
    with modules concatenated along the slow-scan axis.

    Parameters
    ----------
    allow_negative_xy : bool
      If False (default), shift the origin so no x or y coordinates are
      negative.
    """
    nmods, mod_px_ss, mod_px_fs = self.expected_data_shape
    # Number of corners per pixel comes from the class's corner table.
    ncorners = self._pixel_corners.shape[1]
    distortion = np.zeros((nmods * mod_px_ss, mod_px_fs, ncorners, 3),
                          dtype=np.float32)
    # (x, y, z) of the first corner of every pixel, reshaped so the
    # modules are stacked along the slow-scan axis.
    pixpos = self.get_pixel_positions(centre=False).reshape(
        (nmods * mod_px_ss, mod_px_fs, 3)
    )
    px, py, pz = np.moveaxis(pixpos, -1, 0)
    # Per-corner offsets within a pixel, in units of the ss/fs step vectors.
    corner_ss_offsets = self._pixel_corners[0]
    corner_fs_offsets = self._pixel_corners[1]
    for m, mod in enumerate(self.modules, start=0):
        for t, tile in enumerate(mod, start=0):
            ss_unit_x, ss_unit_y, ss_unit_z = tile.ss_vec
            fs_unit_x, fs_unit_y, fs_unit_z = tile.fs_vec
            # Which part of the array is this tile?
            tile_ss_slice, tile_fs_slice = self._distortion_array_slice(m, t)
            # Get coordinates of each pixel's first corner
            # 2D arrays, shape: (64, 128)
            pixel_corner1_x = px[tile_ss_slice, tile_fs_slice]
            pixel_corner1_y = py[tile_ss_slice, tile_fs_slice]
            pixel_corner1_z = pz[tile_ss_slice, tile_fs_slice]
            # Calculate corner coordinates for each pixel
            # 3D arrays, shape: (64, 128, 4)
            corners_x = (
                pixel_corner1_x[:, :, np.newaxis]
                + corner_ss_offsets * ss_unit_x
                + corner_fs_offsets * fs_unit_x
            )
            corners_y = (
                pixel_corner1_y[:, :, np.newaxis]
                + corner_ss_offsets * ss_unit_y
                + corner_fs_offsets * fs_unit_y
            )
            corners_z = (
                pixel_corner1_z[:, :, np.newaxis]
                + corner_ss_offsets * ss_unit_z
                + corner_fs_offsets * fs_unit_z
            )
            # Insert the data into the array - note the (z, y, x) order
            # of the last axis.
            distortion[tile_ss_slice, tile_fs_slice, :, 0] = corners_z
            distortion[tile_ss_slice, tile_fs_slice, :, 1] = corners_y
            distortion[tile_ss_slice, tile_fs_slice, :, 2] = corners_x
    if not allow_negative_xy:
        # Shift the x & y origin from the centre to the corner
        min_yx = distortion[..., 1:].min(axis=(0, 1, 2))
        distortion[..., 1:] -= min_yx
    return distortion
@classmethod
def _tile_slice(cls, tileno):
    """Implement in subclass: which part of module array each tile is.

    Subclasses return an (ss_slice, fs_slice) pair of slices selecting
    tile *tileno* within a single module's data array.
    """
    raise NotImplementedError
def _module_coords_to_tile(self, slow_scan, fast_scan):
    """Implement in subclass: positions in module to tile numbers & pos in tile

    Subclasses return (tileno, tile_ss, tile_fs) arrays, as consumed by
    :meth:`data_coords_to_positions`.
    """
    raise NotImplementedError
@classmethod
def _adjust_pixel_coords(cls, ss_coords, fs_coords, centre):
"""Called by get_pixel_positions; overridden by DSSC"""
if centre:
# A pixel is from n to n+1 in each axis, so centres are at n+0.5.
ss_coords += 0.5
fs_coords += 0.5
def get_pixel_positions(self, centre=True):
    """Get the physical coordinates of each pixel in the detector

    The output is an array with shape like the data
    (``expected_data_shape``), with an extra dimension of length 3 to hold
    (x, y, z) coordinates. Coordinates are in metres.

    If centre=True, the coordinates are calculated for the centre of each
    pixel. If not, the coordinates are for the first corner of the pixel
    (the one nearest the [0, 0] corner of the tile in data space).
    """
    out = np.zeros(self.expected_data_shape + (3,), dtype=np.float64)
    # Prepare some arrays to use inside the loop: the in-tile (ss, fs)
    # index of every pixel in one tile.
    pixel_ss_coord, pixel_fs_coord = np.meshgrid(
        np.arange(0, self.frag_ss_pixels, dtype=np.float64),
        np.arange(0, self.frag_fs_pixels, dtype=np.float64),
        indexing='ij'
    )
    # Shift coordinates from corner to centre if requested.
    # This is also where the DSSC subclass shifts odd rows by half a pixel
    self._adjust_pixel_coords(pixel_ss_coord, pixel_fs_coord, centre)
    for m, mod in enumerate(self.modules, start=0):
        for t, tile in enumerate(mod, start=0):
            corner_x, corner_y, corner_z = tile.corner_pos
            ss_unit_x, ss_unit_y, ss_unit_z = tile.ss_vec
            fs_unit_x, fs_unit_y, fs_unit_z = tile.fs_vec
            # Calculate coordinates of each pixel's first corner
            # 2D arrays, shape: (64, 128)
            pixels_x = (
                corner_x
                + pixel_ss_coord * ss_unit_x
                + pixel_fs_coord * fs_unit_x
            )
            pixels_y = (
                corner_y
                + pixel_ss_coord * ss_unit_y
                + pixel_fs_coord * fs_unit_y
            )
            pixels_z = (
                corner_z
                + pixel_ss_coord * ss_unit_z
                + pixel_fs_coord * fs_unit_z
            )
            # Which part of the array is this tile?
            tile_ss_slice, tile_fs_slice = self._tile_slice(t)
            # Insert the data into the array
            out[m, tile_ss_slice, tile_fs_slice, 0] = pixels_x
            out[m, tile_ss_slice, tile_fs_slice, 1] = pixels_y
            out[m, tile_ss_slice, tile_fs_slice, 2] = pixels_z
    return out
def data_coords_to_positions(self, module_no, slow_scan, fast_scan):
    """Convert data array coordinates to physical positions

    Data array coordinates are how you might refer to a pixel in an array
    of detector data: module number, and indices in the slow-scan and
    fast-scan directions. But coordinates in the two pixel dimensions aren't
    necessarily integers, e.g. if they refer to the centre of a peak.

    module_no, fast_scan and slow_scan should all be numpy arrays of the
    same shape. module_no should hold integers, starting from 0,
    so 0: Q1M1, 1: Q1M2, etc.

    slow_scan and fast_scan describe positions within that module.
    They may hold floats for sub-pixel positions. In both, 0.5 is the centre
    of the first pixel.

    Returns an array of similar shape with an extra dimension of length 3,
    for (x, y, z) coordinates in metres.

    .. seealso::

       :doc:`agipd_geometry` demonstrates using this method.
    """
    assert module_no.shape == slow_scan.shape == fast_scan.shape
    # We want to avoid iterating over the positions in Python.
    # So we assemble arrays of the corner position and step vectors for all
    # tiles, and then use numpy indexing to select the relevant ones for
    # each set of coordinates.
    tiles_corner_pos = np.stack([
        t.corner_pos for m in self.modules for t in m
    ])
    tiles_ss_vec = np.stack([
        t.ss_vec for m in self.modules for t in m
    ])
    tiles_fs_vec = np.stack([
        t.fs_vec for m in self.modules for t in m
    ])
    # Convert coordinates within each module to coordinates in a tile
    tilenos, tile_ss, tile_fs = self._module_coords_to_tile(slow_scan, fast_scan)
    # The indexes of the relevant tiles in the arrays assembled above
    all_tiles_ix = (module_no * self.n_tiles_per_module) + tilenos
    # Select the relevant tile geometry for each set of coordinates
    coords_tile_corner = tiles_corner_pos[all_tiles_ix]
    coords_ss_vec = tiles_ss_vec[all_tiles_ix]
    coords_fs_vec = tiles_fs_vec[all_tiles_ix]
    # Calculate the physical coordinate for each data coordinate.
    # expand_dims gives tile_ss/tile_fs a trailing axis so they broadcast
    # against the (x, y, z) vectors.
    return coords_tile_corner \
           + (np.expand_dims(tile_ss, -1) * coords_ss_vec) \
           + (np.expand_dims(tile_fs, -1) * coords_fs_vec)
class AGIPD_1MGeometry(DetectorGeometryBase):
    """Detector layout for AGIPD-1M

    The coordinates used in this class are 3D (x, y, z), and represent metres.

    You won't normally instantiate this class directly:
    use one of the constructor class methods to create or load a geometry.
    """
    pixel_size = 2e-4  # 2e-4 metres == 0.2 mm
    frag_ss_pixels = 64
    frag_fs_pixels = 128
    expected_data_shape = (16, 512, 128)
    n_modules = 16
    n_tiles_per_module = 8

    @classmethod
    def from_quad_positions(cls, quad_pos, asic_gap=2, panel_gap=29,
                            unit=pixel_size):
        """Generate an AGIPD-1M geometry from quadrant positions.

        This produces an idealised geometry, assuming all modules are perfectly
        flat, aligned and equally spaced within their quadrant.

        The quadrant positions are given in pixel units, referring to the first
        pixel of the first module in each quadrant, corresponding to data
        channels 0, 4, 8 and 12.

        The origin of the coordinates is in the centre of the detector.
        Coordinates increase upwards and to the left (looking along the beam).

        To give positions in units other than pixels, pass the *unit* parameter
        as the length of the unit in metres.
        E.g. ``unit=1e-3`` means the coordinates are in millimetres.
        """
        asic_gap_px = asic_gap * unit / cls.pixel_size
        panel_gap_px = panel_gap * unit / cls.pixel_size
        # How much space one tile takes up, including the gaps
        # separating it from its neighbour.
        # In the y dimension, 128 px + gap between modules
        module_height = (cls.frag_fs_pixels + panel_gap_px) * cls.pixel_size
        # In x, 64 px + gap between tiles (asics)
        tile_width = (cls.frag_ss_pixels + asic_gap_px) * cls.pixel_size
        # The ss/fs step directions differ between quadrants:
        quads_x_orientation = [1, 1, -1, -1]
        quads_y_orientation = [-1, -1, 1, 1]
        modules = []
        for p in range(16):
            quad = p // 4
            quad_corner = quad_pos[quad]
            x_orient = quads_x_orientation[quad]
            y_orient = quads_y_orientation[quad]
            p_in_quad = p % 4
            # Modules within a quadrant are stacked below its corner.
            corner_y = (quad_corner[1] * unit)\
                       - (p_in_quad * module_height)
            tiles = []
            modules.append(tiles)
            for a in range(8):
                corner_x = (quad_corner[0] * unit)\
                           + x_orient * tile_width * a
                # BUGFIX: the per-pixel step vectors must always be one
                # physical pixel (cls.pixel_size), regardless of the unit
                # used for the quadrant positions. They were previously
                # scaled by `unit`, which gave the wrong pixel grid size
                # whenever unit != pixel_size (e.g. unit=1e-3). This matches
                # LPD_1MGeometry.from_quad_positions, which uses pixel_size.
                tiles.append(GeometryFragment(
                    corner_pos=np.array([corner_x, corner_y, 0.]),
                    ss_vec=np.array([x_orient, 0, 0]) * cls.pixel_size,
                    fs_vec=np.array([0, y_orient, 0]) * cls.pixel_size,
                    ss_pixels=cls.frag_ss_pixels,
                    fs_pixels=cls.frag_fs_pixels,
                ))
        return cls(modules)

    def inspect(self, axis_units='px', frontview=True):
        """Plot the 2D layout of this detector geometry.

        Returns a matplotlib Axes object.

        Parameters
        ----------
        axis_units : str
          Show the detector scale in pixels ('px') or metres ('m').
        frontview : bool
          If True (the default), x increases to the left, as if you were looking
          along the beam. False gives a 'looking into the beam' view.
        """
        ax = super().inspect(axis_units=axis_units, frontview=frontview)
        scale = self._get_plot_scale_factor(axis_units)
        # Label modules and tiles
        for ch, module in enumerate(self.modules):
            s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)
            cx, cy, _ = module[4].centre() * scale
            ax.text(cx, cy, s, fontweight='bold',
                    verticalalignment='center',
                    horizontalalignment='center')
            # Label the first and last tile of each module
            for t in [0, 7]:
                cx, cy, _ = module[t].centre() * scale
                ax.text(cx, cy, 'T{}'.format(t + 1),
                        verticalalignment='center',
                        horizontalalignment='center')
        ax.set_title('AGIPD-1M detector geometry ({})'.format(self.filename))
        return ax

    def compare(self, other, scale=1.0):
        """Show a comparison of this geometry with another in a 2D plot.

        This shows the current geometry like :meth:`inspect`, with the addition
        of arrows showing how each panel is shifted in the other geometry.

        Parameters
        ----------
        other : AGIPD_1MGeometry
          A second geometry object to compare with this one.
        scale : float
          Scale the arrows showing the difference in positions.
          This is useful to show small differences clearly.
        """
        import matplotlib.pyplot as plt
        from matplotlib.collections import PatchCollection
        from matplotlib.patches import Polygon, FancyArrow
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(1, 1, 1)
        rects = []
        arrows = []
        for p, module in enumerate(self.modules):
            for a, fragment in enumerate(module):
                corners = fragment.corners()[:, :2]  # Drop the Z dimension
                corner1, corner1_opp = corners[0], corners[2]
                rects.append(Polygon(corners))
                if a in {0, 7}:
                    cx, cy, _ = fragment.centre()
                    ax.text(cx, cy, str(a),
                            verticalalignment='center',
                            horizontalalignment='center')
                elif a == 4:
                    cx, cy, _ = fragment.centre()
                    ax.text(cx, cy, 'p{}'.format(p),
                            verticalalignment='center',
                            horizontalalignment='center')
                panel2 = other.modules[p][a]
                corners2 = panel2.corners()[:, :2]
                corner2, corner2_opp = corners2[0], corners2[2]
                # Draw arrows from two opposite corners of this tile to the
                # matching corners in the other geometry.
                dx, dy = corner2 - corner1
                if not (dx == dy == 0):
                    sx, sy = corner1
                    arrows.append(FancyArrow(
                        sx, sy, scale * dx, scale * dy, width=5, head_length=4
                    ))
                dx, dy = corner2_opp - corner1_opp
                if not (dx == dy == 0):
                    sx, sy = corner1_opp
                    arrows.append(FancyArrow(
                        sx, sy, scale * dx, scale * dy, width=5, head_length=5
                    ))
        pc = PatchCollection(rects, facecolor=(0.75, 1.0, 0.75), edgecolor=None)
        ax.add_collection(pc)
        ac = PatchCollection(arrows)
        ax.add_collection(ac)
        # Set axis limits to fit all shapes, with some margin
        all_x = np.concatenate([s.xy[:, 0] for s in arrows + rects])
        all_y = np.concatenate([s.xy[:, 1] for s in arrows + rects])
        ax.set_xlim(all_x.min() - 20, all_x.max() + 20)
        ax.set_ylim(all_y.min() - 40, all_y.max() + 20)
        ax.set_title('Geometry comparison: {} → {}'
                     .format(self.filename, other.filename))
        ax.text(1, 0, 'Arrows scaled: {}×'.format(scale),
                horizontalalignment="right", verticalalignment="bottom",
                transform=ax.transAxes)
        return ax

    def position_modules_interpolate(self, data):
        """Assemble data from this detector according to where the pixels are.

        This performs interpolation, which is very slow.
        Use :meth:`position_modules_fast` to get a pixel-aligned approximation
        of the geometry.

        Parameters
        ----------
        data : ndarray
          The three dimensions should be channelno, pixel_ss, pixel_fs
          (lengths 16, 512, 128). ss/fs are slow-scan and fast-scan.

        Returns
        -------
        out : ndarray
          Array with the one dimension fewer than the input.
          The last two dimensions represent pixel y and x in the detector space.
        centre : ndarray
          (y, x) pixel location of the detector centre in this geometry.
        """
        assert data.shape == (16, 512, 128)
        size_yx, centre = self._get_dimensions()
        # One output-sized layer per tile; combined with nanmax below.
        tmp = np.empty((16 * 8,) + size_yx, dtype=data.dtype)
        for i, (module, mod_data) in enumerate(zip(self.modules, data)):
            tiles_data = np.split(mod_data, 8)
            for j, (tile, tile_data) in enumerate(zip(module, tiles_data)):
                # We store (x, y, z), but numpy indexing, and hence affine_transform,
                # work like [y, x]. Rearrange the numbers:
                fs_vec_yx = tile.fs_vec[:2][::-1]
                ss_vec_yx = tile.ss_vec[:2][::-1]
                # Offset by centre to make all coordinates positive
                corner_pos_yx = tile.corner_pos[:2][::-1] + centre
                # Make the rotation matrix
                rotn = np.stack((ss_vec_yx, fs_vec_yx), axis=-1)
                # affine_transform takes a mapping from *output* to *input*.
                # So we reverse the forward transformation.
                transform = np.linalg.inv(rotn)
                offset = np.dot(rotn, corner_pos_yx)  # this seems to work, but is it right?
                affine_transform(
                    tile_data,
                    transform,
                    offset=offset,
                    cval=np.nan,
                    output_shape=size_yx,
                    output=tmp[i * 8 + j],
                )
        # Silence warnings about nans - we expect gaps in the result
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            out = np.nanmax(tmp, axis=0)
        return out, centre

    def _get_dimensions(self):
        """Calculate appropriate array dimensions for assembling data.

        Returns (size_y, size_x), (centre_y, centre_x)
        """
        corners = []
        for module in self.modules:
            for tile in module:
                corners.append(tile.corners())
        corners = np.concatenate(corners)[:, :2] / self._pixel_shape
        # Find extremes, add 1 px margin to allow for rounding errors
        min_xy = corners.min(axis=0).astype(int) - 1
        max_xy = corners.max(axis=0).astype(int) + 1
        size = max_xy - min_xy
        centre = -min_xy
        # Switch xy -> yx
        return tuple(size[::-1]), centre[::-1]

    @staticmethod
    def split_tiles(module_data):
        # Split into 8 tiles along the slow-scan axis
        return np.split(module_data, 8, axis=-2)

    @classmethod
    def _tile_slice(cls, tileno):
        # Which part of the array is this tile?
        # tileno = 0 to 7
        tile_offset = tileno * cls.frag_ss_pixels
        ss_slice = slice(tile_offset, tile_offset + cls.frag_ss_pixels)
        fs_slice = slice(0, cls.frag_fs_pixels)  # Every tile covers the full 128 pixels
        return ss_slice, fs_slice

    @classmethod
    def _module_coords_to_tile(cls, slow_scan, fast_scan):
        # Tiles are stacked along the slow-scan axis; fast-scan is unchanged.
        tileno, tile_ss = np.divmod(slow_scan, cls.frag_ss_pixels)
        return tileno.astype(np.int16), tile_ss, fast_scan

    def to_distortion_array(self, allow_negative_xy=False):
        """Return distortion matrix for AGIPD detector, suitable for pyFAI.

        Parameters
        ----------
        allow_negative_xy: bool
          If False (default), shift the origin so no x or y coordinates are
          negative. If True, the origin is the detector centre.

        Returns
        -------
        out: ndarray
            Array of float 32 with shape (8192, 128, 4, 3).
            The dimensions mean:

            - 8192 = 16 modules * 512 pixels (slow scan axis)
            - 128 pixels (fast scan axis)
            - 4 corners of each pixel
            - 3 numbers for z, y, x
        """
        # Overridden only for docstring
        return super().to_distortion_array(allow_negative_xy)
class SnappedGeometry:
    """Detector geometry approximated to align modules to a 2D grid

    The coordinates used in this class are (y, x) suitable for indexing a
    Numpy array; this does not match the (x, y, z) coordinates in the more
    precise geometry above.
    """
    def __init__(self, modules, geom: DetectorGeometryBase):
        # modules: nested list of snapped tile fragments, per module.
        self.modules = modules
        # Keep a reference to the precise geometry this was derived from,
        # for pixel size, tile splitting, etc.
        self.geom = geom
        self.size_yx, self.centre = self._get_dimensions()

    def make_output_array(self, extra_shape=(), dtype=np.float32):
        """Make an output array for self.position_modules()

        The array is filled with NaN, so gaps between tiles remain
        distinguishable from assembled data.
        """
        shape = extra_shape + self.size_yx
        return np.full(shape, np.nan, dtype=dtype)

    def position_modules(self, data, out=None):
        """Implementation for position_modules_fast

        Copies each tile of *data* into its snapped position in *out*
        (allocated here if not supplied). Returns (out, centre).
        """
        assert data.shape[-3:] == self.geom.expected_data_shape
        if out is None:
            out = self.make_output_array(data.shape[:-3], data.dtype)
        else:
            # A caller-supplied array must match the assembled shape, and
            # the data must fit its dtype without losing information.
            assert out.shape == data.shape[:-3] + self.size_yx
            if not np.can_cast(data.dtype, out.dtype, casting='safe'):
                raise TypeError("{} cannot be safely cast to {}".
                                format(data.dtype, out.dtype))
        for i, module in enumerate(self.modules):
            mod_data = data[..., i, :, :]
            tiles_data = self.geom.split_tiles(mod_data)
            for j, tile in enumerate(module):
                tile_data = tiles_data[j]
                # Offset by centre to make all coordinates positive
                y, x = tile.corner_idx + self.centre
                h, w = tile.pixel_dims
                out[..., y : y + h, x : x + w] = tile.transform(tile_data)
        return out, self.centre

    def _get_dimensions(self):
        """Calculate appropriate array dimensions for assembling data.

        Returns (size_y, size_x), (centre_y, centre_x)
        """
        corners = []
        for module in self.modules:
            for tile in module:
                corners.append(tile.corner_idx)
                corners.append(tile.opp_corner_idx)
        corners = np.stack(corners)
        # Find extremes
        min_yx = corners.min(axis=0)
        max_yx = corners.max(axis=0)
        size = max_yx - min_yx
        centre = -min_yx
        return tuple(size), centre

    def plot_data(self,
                  modules_data, *,
                  axis_units='px',
                  frontview=True,
                  ax=None,
                  figsize=None,
                  colorbar=False,
                  **kwargs):
        """Implementation for plot_data_fast

        Assembles the module data and draws it with matplotlib's imshow.
        """
        from matplotlib.cm import viridis
        import matplotlib.pyplot as plt
        if axis_units not in {'px', 'm'}:
            raise ValueError("axis_units must be 'px' or 'm', not {!r}"
                             .format(axis_units))
        res, centre = self.position_modules(modules_data)
        min_y, min_x = -centre
        max_y, max_x = np.array(res.shape) - centre
        # Extend the limits by half a pixel so pixel edges line up.
        _extent = np.array((min_x - 0.5, max_x + 0.5, min_y - 0.5, max_y + 0.5))
        cross_size = 20
        if axis_units == 'm':
            _extent *= self.geom.pixel_size
            cross_size *= self.geom.pixel_size
        # Use a dark grey for missing data
        _cmap = copy(viridis)
        _cmap.set_bad('0.25', 1.0)
        kwargs.setdefault('cmap', _cmap)
        kwargs.setdefault('extent', _extent)
        kwargs.setdefault('origin', 'lower')
        if ax is None:
            fig = plt.figure(figsize=figsize or (10, 10))
            ax = fig.add_subplot(1, 1, 1)
        im = ax.imshow(res, **kwargs)
        if isinstance(colorbar, dict) or colorbar is True:
            if isinstance(colorbar, bool):
                colorbar = {}
            colorbar = plt.colorbar(im, ax=ax, **colorbar)
        ax.set_xlabel('metres' if axis_units == 'm' else 'pixels')
        ax.set_ylabel('metres' if axis_units == 'm' else 'pixels')
        if frontview:
            ax.invert_xaxis()
        # Draw a cross at the centre
        ax.hlines(0, -cross_size, +cross_size, colors='w', linewidths=1)
        ax.vlines(0, -cross_size, +cross_size, colors='w', linewidths=1)
        return ax
class LPD_1MGeometry(DetectorGeometryBase):
    """Detector layout for LPD-1M

    The coordinates used in this class are 3D (x, y, z), and represent metres.

    You won't normally instantiate this class directly:
    use one of the constructor class methods to create or load a geometry.
    """
    pixel_size = 5e-4  # 5e-4 metres == 0.5 mm
    frag_ss_pixels = 32
    frag_fs_pixels = 128
    n_modules = 16
    n_tiles_per_module = 16
    expected_data_shape = (16, 256, 256)
    _draw_first_px_on_tile = 8  # The first pixel in stored data is on tile 8

    @classmethod
    def from_quad_positions(cls, quad_pos, *, unit=1e-3, asic_gap=None,
                            panel_gap=None):
        """Generate an LPD-1M geometry from quadrant positions.

        This produces an idealised geometry, assuming all modules are perfectly
        flat, aligned and equally spaced within their quadrant.

        The quadrant positions refer to the corner of each quadrant
        where module 4, tile 16 is positioned.
        This is the corner of the last pixel as the data is stored.
        In the initial detector layout, the corner positions are for the top
        left corner of the quadrant, looking along the beam.

        The origin of the coordinates is in the centre of the detector.
        Coordinates increase upwards and to the left (looking along the beam).

        Parameters
        ----------
        quad_pos: list of 2-tuples
          (x, y) coordinates of the last corner (the one by module 4) of each
          quadrant.
        unit: float, optional
          The conversion factor to put the coordinates into metres.
          The default 1e-3 means the numbers are in millimetres.
        asic_gap: float, optional
          The gap between adjacent tiles/ASICs. The default is 4 pixels.
        panel_gap: float, optional
          The gap between adjacent modules/panels. The default is 4 pixels.
        """
        px_conversion = unit / cls.pixel_size
        asic_gap_px = 4 if (asic_gap is None) else asic_gap * px_conversion
        panel_gap_px = 4 if (panel_gap is None) else panel_gap * px_conversion
        # How much space one panel/module takes up, including the 'panel gap'
        # separating it from its neighbour.
        # In the x dimension, we have only one asic gap (down the centre)
        panel_width = (256 + asic_gap_px + panel_gap_px) * cls.pixel_size
        # In y, we have 7 gaps between the 8 ASICs in each column.
        panel_height = (256 + (7 * asic_gap_px) + panel_gap_px) * cls.pixel_size
        # How much space does one tile take up, including gaps to its neighbours?
        tile_width = (cls.frag_fs_pixels + asic_gap_px) * cls.pixel_size
        tile_height = (cls.frag_ss_pixels + asic_gap_px) * cls.pixel_size
        # Size of a tile from corner to corner, excluding gaps
        tile_size = np.array([cls.frag_fs_pixels, cls.frag_ss_pixels, 0]) * cls.pixel_size
        panels_across = [-1, -1, 0, 0]
        panels_up = [0, -1, -1, 0]
        modules = []
        for p in range(cls.n_modules):
            quad = p // 4
            quad_corner_x = quad_pos[quad][0] * unit
            quad_corner_y = quad_pos[quad][1] * unit
            p_in_quad = p % 4
            # Top beam-left corner of panel
            panel_corner_x = (quad_corner_x +
                              (panels_across[p_in_quad] * panel_width))
            panel_corner_y = (quad_corner_y +
                              (panels_up[p_in_quad] * panel_height))
            tiles = []
            modules.append(tiles)
            for a in range(cls.n_tiles_per_module):
                # Tiles 1-8 go down the right-hand (beam-left) column,
                # tiles 9-16 back up the other column.
                if a < 8:
                    up = -a
                    across = -1
                else:
                    up = -(15 - a)
                    across = 0
                tile_last_corner = (
                    np.array([panel_corner_x, panel_corner_y, 0.0])
                    + np.array([across, 0, 0]) * tile_width
                    + np.array([0, up, 0]) * tile_height
                )
                tile_first_corner = tile_last_corner - tile_size
                tiles.append(GeometryFragment(
                    corner_pos=tile_first_corner,
                    ss_vec=np.array([0, 1, 0]) * cls.pixel_size,
                    fs_vec=np.array([1, 0, 0]) * cls.pixel_size,
                    ss_pixels=cls.frag_ss_pixels,
                    fs_pixels=cls.frag_fs_pixels,
                ))
        return cls(modules)

    @classmethod
    def from_h5_file_and_quad_positions(cls, path, positions, unit=1e-3):
        """Load an LPD-1M geometry from an XFEL HDF5 format geometry file

        The quadrant positions are not stored in the file, and must be provided
        separately. By default, both the quadrant positions and the positions
        in the file are measured in millimetres; the unit parameter controls
        this.

        The origin of the coordinates is in the centre of the detector.
        Coordinates increase upwards and to the left (looking along the beam).

        This version of the code only handles x and y translation,
        as this is all that is recorded in the initial LPD geometry file.

        Parameters
        ----------
        path : str
          Path of an EuXFEL format (HDF5) geometry file for LPD.
        positions : list of 2-tuples
          (x, y) coordinates of the last corner (the one by module 4) of each
          quadrant.
        unit : float, optional
          The conversion factor to put the coordinates into metres.
          The default 1e-3 means the numbers are in millimetres.
        """
        assert len(positions) == 4
        modules = []
        with h5py.File(path, 'r') as f:
            for Q, M in product(range(1, 5), range(1, 5)):
                quad_pos = np.array(positions[Q - 1])
                mod_grp = f['Q{}/M{}'.format(Q, M)]
                mod_offset = mod_grp['Position'][:2]
                tiles = []
                # FIX: iterate over this module's tiles, not the module
                # count. Both constants are 16 for LPD-1M, so behaviour is
                # unchanged, but the loop now matches its meaning.
                for T in range(1, cls.n_tiles_per_module + 1):
                    corner_pos = np.zeros(3)
                    tile_offset = mod_grp['T{:02}/Position'.format(T)][:2]
                    corner_pos[:2] = quad_pos + mod_offset + tile_offset
                    # Convert units (mm) to metres
                    corner_pos *= unit
                    # LPD geometry is measured to the last pixel of each tile.
                    # Subtract tile dimensions for the position of 1st pixel.
                    ss_vec = np.array([0, 1, 0]) * cls.pixel_size
                    fs_vec = np.array([1, 0, 0]) * cls.pixel_size
                    first_px_pos = (corner_pos
                                    - (ss_vec * cls.frag_ss_pixels)
                                    - (fs_vec * cls.frag_fs_pixels))
                    tiles.append(GeometryFragment(
                        corner_pos=first_px_pos,
                        ss_vec=ss_vec,
                        fs_vec=fs_vec,
                        ss_pixels=cls.frag_ss_pixels,
                        fs_pixels=cls.frag_fs_pixels,
                    ))
                modules.append(tiles)
        return cls(modules, filename=path)

    def inspect(self, axis_units='px', frontview=True):
        """Plot the 2D layout of this detector geometry.

        Returns a matplotlib Axes object.

        Parameters
        ----------
        axis_units : str
          Show the detector scale in pixels ('px') or metres ('m').
        frontview : bool
          If True (the default), x increases to the left, as if you were looking
          along the beam. False gives a 'looking into the beam' view.
        """
        ax = super().inspect(axis_units=axis_units, frontview=frontview)
        scale = self._get_plot_scale_factor(axis_units)
        # Label modules and tiles
        for ch, module in enumerate(self.modules):
            s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)
            cx, cy, _ = module[0].centre() * scale
            ax.text(cx, cy, s, fontweight='bold',
                    verticalalignment='center',
                    horizontalalignment='center')
            for t in [7, 8, 15]:
                cx, cy, _ = module[t].centre() * scale
                ax.text(cx, cy, 'T{}'.format(t + 1),
                        verticalalignment='center',
                        horizontalalignment='center')
        ax.set_title('LPD-1M detector geometry ({})'.format(self.filename))
        return ax

    @staticmethod
    def split_tiles(module_data):
        half1, half2 = np.split(module_data, 2, axis=-1)
        # Tiles 1-8 (half1) are numbered top to bottom, whereas the array
        # starts at the bottom. So we reverse their order after splitting.
        return np.split(half1, 8, axis=-2)[::-1] + np.split(half2, 8, axis=-2)

    @classmethod
    def _tile_slice(cls, tileno):
        # Which part of the array is this tile?
        if tileno < 8:  # First half of module (0 <= t <= 7)
            fs_slice = slice(0, 128)
            tiles_up = 7 - tileno
        else:  # Second half of module (8 <= t <= 15)
            fs_slice = slice(128, 256)
            tiles_up = tileno - 8
        tile_offset = tiles_up * 32
        ss_slice = slice(tile_offset, tile_offset + cls.frag_ss_pixels)
        return ss_slice, fs_slice

    @classmethod
    def _module_coords_to_tile(cls, slow_scan, fast_scan):
        tiles_across, tile_fs = np.divmod(fast_scan, cls.frag_fs_pixels)
        tiles_up, tile_ss = np.divmod(slow_scan, cls.frag_ss_pixels)
        # Each tiles_across is 0 or 1. To avoid iterating over the array with a
        # conditional, multiply the number we want by 1 and the other by 0.
        tileno = (
            (1 - tiles_across) * (7 - tiles_up)  # tileno 0-7
            + tiles_across * (tiles_up + 8)      # tileno 8-15
        )
        return tileno.astype(np.int16), tile_ss, tile_fs

    def to_distortion_array(self, allow_negative_xy=False):
        """Return distortion matrix for LPD detector, suitable for pyFAI.

        Parameters
        ----------
        allow_negative_xy: bool
          If False (default), shift the origin so no x or y coordinates are
          negative. If True, the origin is the detector centre.

        Returns
        -------
        out: ndarray
            Array of float 32 with shape (4096, 256, 4, 3).
            The dimensions mean:

            - 4096 = 16 modules * 256 pixels (slow scan axis)
            - 256 pixels (fast scan axis)
            - 4 corners of each pixel
            - 3 numbers for z, y, x
        """
        # Overridden only for docstring
        return super().to_distortion_array(allow_negative_xy)
def invert_xfel_lpd_geom(path_in, path_out):
    """Invert the coordinates in an XFEL geometry file (HDF5)

    The initial geometry file for LPD was recorded with the coordinates
    increasing down and to the right (looking in the beam direction), but the
    standard XFEL coordinate scheme is the opposite, increasing upwards and to
    the left (looking in beam direction).

    This utility function reads one file, and writes a second with the
    coordinates inverted.
    """
    with h5py.File(path_in, 'r') as fin, h5py.File(path_out, 'x') as fout:
        # Copy the description dataset (the misspelt source name is the key
        # actually used in the original files) along with its attributes.
        src_ds = fin['DetectorDescribtion']
        dst_ds = fout.create_dataset('DetectorDescription', data=src_ds)
        for k, v in src_ds.attrs.items():
            dst_ds.attrs[k] = v
        # Negate every module and tile position vector.
        for Q, M in product(range(1, 5), range(1, 5)):
            mod_path = 'Q{}/M{}/Position'.format(Q, M)
            fout[mod_path] = -fin[mod_path][:]
            for T in range(1, 17):
                tile_path = 'Q{}/M{}/T{:02}/Position'.format(Q, M, T)
                fout[tile_path] = -fin[tile_path][:]
class DSSC_1MGeometry(DetectorGeometryBase):
    """Detector layout for DSSC-1M

    The coordinates used in this class are 3D (x, y, z), and represent metres.

    You won't normally instantiate this class directly:
    use one of the constructor class methods to create or load a geometry.
    """
    # Hexagonal pixels, 236 μm step in fast-scan axis, 204 μm in slow-scan
    pixel_size = 236e-6
    frag_ss_pixels = 128
    frag_fs_pixels = 256
    n_modules = 16
    n_tiles_per_module = 2  # 2 tiles side by side along the fast-scan axis
    expected_data_shape = (16, 128, 512)
    # This stretches the dimensions for the 'snapped' geometry so that its pixel
    # grid matches the aspect ratio of the detector pixels.
    _pixel_shape = np.array([1., 1.5/np.sqrt(3)], dtype=np.float64) * pixel_size
    # Pixel corners described clockwise from the top, assuming the reference
    # point for a pixel is outside it, aligned with the top point & left edge.
    # The unit is the width of a pixel, 236 μm.
    # The 4/3 extends the hexagons into the next row to correctly tessellate.
    # Row 0 is the slow-scan offsets of the 6 corners, row 1 the fast-scan.
    _pixel_corners = np.stack([
        (np.array([0, 0.25, 0.75, 1, 0.75, 0.25]) * 4 / 3),
        [0.5, 1, 1, 0.5, 0, 0]
    ])
@classmethod
def from_h5_file_and_quad_positions(cls, path, positions, unit=1e-3):
    """Load a DSSC geometry from an XFEL HDF5 format geometry file

    The quadrant positions are not stored in the file, and must be provided
    separately. The position given should refer to the bottom right (looking
    along the beam) corner of the quadrant.

    By default, both the quadrant positions and the positions
    in the file are measured in millimetres; the unit parameter controls
    this.

    The origin of the coordinates is in the centre of the detector.
    Coordinates increase upwards and to the left (looking along the beam).

    This version of the code only handles x and y translation,
    as this is all that is recorded in the geometry file.

    Parameters
    ----------
    path : str
      Path of an EuXFEL format (HDF5) geometry file for DSSC.
    positions : list of 2-tuples
      (x, y) coordinates of the last corner (the one by module 4) of each
      quadrant.
    unit : float, optional
      The conversion factor to put the coordinates into metres.
      The default 1e-3 means the numbers are in millimetres.
    """
    assert len(positions) == 4
    modules = []
    # The sign of the ss/fs step vectors differs between quadrants:
    quads_x_orientation = [-1, -1, 1, 1]
    quads_y_orientation = [1, 1, -1, -1]
    with h5py.File(path, 'r') as f:
        for Q, M in product(range(1, 5), range(1, 5)):
            quad_pos = np.array(positions[Q - 1])
            mod_grp = f['Q{}/M{}'.format(Q, M)]
            mod_offset = mod_grp['Position'][:2]
            # Which way round is this quadrant
            x_orient = quads_x_orientation[Q - 1]
            y_orient = quads_y_orientation[Q - 1]
            tiles = []
            for T in range(1, 3):
                corner_pos = np.zeros(3)
                tile_offset = mod_grp['T{:02}/Position'.format(T)][:2]
                corner_pos[:2] = quad_pos + mod_offset + tile_offset
                # Convert units (mm) to metres
                corner_pos *= unit
                # Measuring in terms of the step within a row, the
                # step to the next row of hexagons is 1.5/sqrt(3).
                ss_vec = np.array([0, y_orient, 0]) * cls.pixel_size * 1.5/np.sqrt(3)
                fs_vec = np.array([x_orient, 0, 0]) * cls.pixel_size
                # Corner position is measured at low-x, low-y corner (bottom
                # right as plotted). We want the position of the corner
                # with the first pixel, which is either high-x low-y or
                # low-x high-y.
                if x_orient == -1:
                    first_px_pos = corner_pos - (fs_vec * cls.frag_fs_pixels)
                else:
                    first_px_pos = corner_pos - (ss_vec * cls.frag_ss_pixels)
                tiles.append(GeometryFragment(
                    corner_pos=first_px_pos,
                    ss_vec=ss_vec,
                    fs_vec=fs_vec,
                    ss_pixels=cls.frag_ss_pixels,
                    fs_pixels=cls.frag_fs_pixels,
                ))
            modules.append(tiles)
    return cls(modules, filename=path)
def inspect(self, axis_units='px', frontview=True):
"""Plot the 2D layout of this detector geometry.
Returns a matplotlib Axes object.
Parameters
----------
axis_units : str
Show the detector scale in pixels ('px') or metres ('m').
frontview : bool
If True (the default), x increases to the left, as if you were looking
along the beam. False gives a 'looking into the beam' view.
"""
ax = super().inspect(axis_units=axis_units, frontview=frontview)
scale = self._get_plot_scale_factor(axis_units)
# Label modules and tiles
for ch, module in enumerate(self.modules):
s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)
cx, cy, _ = module[0].centre() * scale
ax.text(cx, cy, s, fontweight='bold',
verticalalignment='center',
horizontalalignment='center')
for t in [1]:
cx, cy, _ = module[t].centre() * scale
ax.text(cx, cy, 'T{}'.format(t + 1),
verticalalignment='center',
horizontalalignment='center')
ax.set_title('DSSC detector geometry ({})'.format(self.filename))
return ax
@staticmethod
def split_tiles(module_data):
# Split into 2 tiles along the fast-scan axis
return np.split(module_data, 2, axis=-1)
def plot_data_fast(self,
data, *,
axis_units='px',
frontview=True,
ax=None,
figsize=None,
colorbar=False,
**kwargs):
ax = super().plot_data_fast(data,
axis_units=axis_units,
frontview=frontview,
ax=ax,
figsize=figsize,
colorbar=colorbar,
**kwargs)
# Squash image to physically equal aspect ratio, so a circle projected
# on the detector looks like a circle on screen.
ax.set_aspect(204/236.)
return ax
@classmethod
def _tile_slice(cls, tileno):
tile_offset = tileno * cls.frag_fs_pixels
fs_slice = slice(tile_offset, tile_offset + cls.frag_fs_pixels)
ss_slice = slice(0, cls.frag_ss_pixels) # Every tile covers the full pixel range
return ss_slice, fs_slice
    def to_distortion_array(self, allow_negative_xy=False):
        """Return distortion matrix for DSSC detector, suitable for pyFAI.

        Parameters
        ----------
        allow_negative_xy: bool
          If False (default), shift the origin so no x or y coordinates are
          negative. If True, the origin is the detector centre.

        Returns
        -------
        out: ndarray
            Array of float 32 with shape (2048, 512, 6, 3).
            The dimensions mean:

            - 2048 = 16 modules * 128 pixels (slow scan axis)
            - 512 pixels (fast scan axis)
            - 6 corners of each pixel (DSSC pixels are hexagonal)
            - 3 numbers for z, y, x
        """
        # Overridden only for docstring
        return super().to_distortion_array(allow_negative_xy=allow_negative_xy)
@classmethod
def _adjust_pixel_coords(cls, ss_coords, fs_coords, centre):
# Shift odd-numbered rows by half a pixel.
fs_coords[1::2] -= 0.5
if centre:
# Vertical (slow scan) centre is 2/3 of the way to the start of the
# next row of hexagons, because the tessellating pixels extend
# beyond the start of the next row.
ss_coords += 2/3
fs_coords += 0.5
class DSSC_Geometry(DSSC_1MGeometry):
    """DEPRECATED: Use DSSC_1MGeometry instead"""
    def __init__(self, modules, filename='No file'):
        # Behave exactly like the renamed class, but warn at construction.
        # stacklevel=2 points the warning at the caller, not at this line.
        super().__init__(modules, filename)
        warnings.warn(
            "DSSC_Geometry has been renamed to DSSC_1MGeometry.", stacklevel=2
        )
| StarcoderdataPython |
9753884 | # pylint: disable=arguments-differ,unused-argument
from typing import Generic, List, TypeVar
from datafiles import Missing, converters, datafile
from datafiles.utils import dedent
from . import xfail_on_latest
@xfail_on_latest
def test_generic_converters(expect):
    """Exercise a custom generic Converter round-tripping through a datafile.

    Defines a two-slot Pair converter, serialises one through the ``datafile``
    decorator's YAML file, then edits the file text and checks deserialisation.
    The inner class definitions and the decorator call are order-dependent
    (the decorator registers/synchronises with the backing file), so the
    statement order here matters.
    """
    S = TypeVar("S")
    T = TypeVar("T")

    class Pair(Generic[S, T], converters.Converter):
        first: S
        second: T

        def __init__(self, first: S, second: T) -> None:
            self.first = first
            self.second = second

        @classmethod
        def to_python_value(cls, deserialized_data, *, target_object=None):
            # CONVERTERS is injected by the datafiles generic machinery,
            # hence the type: ignore.
            paired = zip(cls.CONVERTERS, deserialized_data)  # type: ignore
            values = [convert.to_python_value(val) for convert, val in paired]
            return cls(*values)

        @classmethod
        def to_preserialization_data(cls, python_value, *, default_to_skip=None):
            values = [python_value.first, python_value.second]
            paired = zip(cls.CONVERTERS, values)  # type: ignore
            return [convert.to_preserialization_data(val) for convert, val in paired]

    @datafile("../tmp/sample.yml")
    class Dictish:
        contents: List[Pair[str, converters.Number]]

    # Serialise: the Pair becomes a two-element YAML list.
    d = Dictish([Pair[str, converters.Number]("pi", 3.14)])  # type: ignore
    expect(d.datafile.text) == dedent(
        """
        contents:
          - - pi
            - 3.14
        """
    )

    # Deserialise: Missing forces a reload from the file on attribute access.
    d = Dictish(Missing)  # type: ignore
    expect(d.contents[0].first) == "pi"
    expect(d.contents[0].second) == 3.14

    # Editing the file text round-trips back through to_python_value.
    d.datafile.text = dedent(
        """
        contents:
          - - degrees
            - 360
        """
    )
    expect(d.contents[0].first) == "degrees"
    expect(d.contents[0].second) == 360
| StarcoderdataPython |
8193908 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import os
import random
import sys
from collections import defaultdict
from multiprocessing import Pool
import time
from threading import Timer
# import matplotlib.pyplot as plt
# import networkx as nx
import numpy as np
__author__ = "<NAME>"
__version__ = "3.3.0"
# G = nx.DiGraph()
# functions
def read_file(network_file, seeds):
    """Parse an influence network and a seed set from open file objects.

    *network_file*'s first line is "<node count> <edge count>"; each further
    line is "<head> <tail> <probability>". *seeds* holds one seed node per
    line (first token only).

    Returns (nodes, edge_num, network, seed_list) where ``nodes`` is a range
    of 1-based node ids and ``network`` maps head -> {tail: probability}.
    """
    header = network_file.readline().split()
    node_count = int(header[0])
    edge_num = int(header[1])
    nodes = range(1, node_count + 1)

    network = defaultdict(dict)
    for line in network_file.readlines():
        tokens = line.split()
        network[int(tokens[0])][int(tokens[1])] = float(tokens[2])

    seed_list = [int(line.split()[0]) for line in seeds.readlines()]
    return nodes, edge_num, network, seed_list
def IC(network, seeds):
    """Run one Independent Cascade simulation from *seeds*.

    Each newly activated node gets one chance to activate each inactive
    neighbour, succeeding with the edge's probability. Returns the number
    of nodes active when the cascade dies out.
    """
    activated = seeds[:]
    frontier = activated[:]
    while frontier:
        node = frontier.pop(0)  # BFS order over newly-activated nodes
        for neighbour, weight in network[node].items():
            if neighbour in activated:
                continue
            if random.random() <= weight:
                activated.append(neighbour)
                frontier.append(neighbour)
    return len(activated)
def LT(network, seeds):
    """Run one Linear Threshold simulation from *seeds*.

    Each inactive node draws a random threshold on first contact; it
    activates once the summed weights of its activated in-neighbours reach
    that threshold. Returns the final number of active nodes.
    """
    activated = seeds[:]
    frontier = activated[:]
    influence = defaultdict(float)
    thresholds = defaultdict(float)
    while frontier:
        node = frontier.pop(0)
        for neighbour, weight in network[node].items():
            if neighbour in activated:
                continue
            # Lazily draw the threshold the first time the node is touched.
            if not thresholds[neighbour]:
                thresholds[neighbour] = random.random()
            influence[neighbour] += weight
            if influence[neighbour] >= thresholds[neighbour]:
                activated.append(neighbour)
                frontier.append(neighbour)
    return len(activated)
def command_line():
    """Build and parse the CLI for the diffusion-model evaluator.

    NOTE(review): ``type=open`` hands ownership of the open file handles to
    argparse; they are never explicitly closed downstream — confirm whether
    that matters for the intended usage.
    """
    parser = argparse.ArgumentParser(description='This program is used to evaluate the \
        performance of IMP algorithms by processing either one of the two basic diffusion \
        models on the given network with the given seeds.')
    parser.add_argument("-i", "--network", metavar="<social network>", required=True,
                        type=open, help="the absolute path of the social network file")
    parser.add_argument("-s", "--seeds", metavar="<seed set>", required=True,
                        type=open, help="the absolute path of the seed set file")
    parser.add_argument("-m", "--model", metavar="<diffusion model>",
                        required=True, help="only IC or LT")
    # '1' -> True (use the time budget); anything else -> False (default mode).
    parser.add_argument("-b", "--termination", metavar="<termination type>", required=True,
                        type=(lambda x: x == '1'), help="only 0 or 1, 0 to use default conditions while 1 to use time budget")
    parser.add_argument("-t", "--time_budget", metavar="<time budget>", required=True,
                        type=int, help="positive number which indicates the running time in seconds allowed")
    parser.add_argument("-r", "--random_seed", metavar="<random seed>", required=True,
                        type=int, help="seed for random")
    return parser.parse_args()
def main():
    """Entry point: run as many simulations as the time budget allows, or a
    fixed 10000 in default mode, and print the model name and mean spread.

    BUG FIX: the original used ``time.clock()``, which was deprecated in
    Python 3.3 and removed in 3.8; ``time.perf_counter()`` is the documented
    replacement for measuring elapsed wall time.
    """
    args = command_line()
    nodes, edge_num, network, seeds = read_file(args.network, args.seeds)
    random.seed(args.random_seed)
    if args.termination:
        count = 0.
        iterations = 0
        if args.model == "IC":
            start = time.perf_counter()
            while time.perf_counter() - start < args.time_budget:
                count += IC(network, seeds)
                iterations += 1
        elif args.model == "LT":
            start = time.perf_counter()
            while time.perf_counter() - start < args.time_budget:
                count += LT(network, seeds)
                iterations += 1
        print("model: {}".format(args.model))
        print("result: {}".format(count / iterations))
    else:
        result = default_evaluation(network, seeds, args.model)
        print("model: {}".format(args.model))
        print("result: {}".format(result))
def default_evaluation(network, seeds, model=None):
    """Average the diffusion spread over 10000 Monte-Carlo runs.

    Returns the mean number of activated nodes, or None for an
    unrecognised *model* name.
    """
    if model not in ("IC", "LT"):
        return None
    simulate = IC if model == "IC" else LT
    total = 0.
    for _ in range(10000):
        total += simulate(network, seeds)
    return total / 10000
# Run the CLI only when executed directly (not on import).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
9633968 | <filename>sandbox/pages/forms.py
from django import forms
from clientaddress.models import *
from bibliothek.Widgets import *
class Widerrufform(forms.ModelForm):
    """ModelForm for a revocation/cancellation ('Widerruf') request, backed by
    the Clientaddress model, with three extra non-model fields appended at
    runtime (product, order date, email).
    """
    class Meta:
        model = Clientaddress
        # Hide bookkeeping/audit and relation fields from the public form.
        # NOTE(review): 'group_rights-link' mixes '-' into an identifier-style
        # name, and both 'searchvriteria_address_link' and
        # 'searchcriteria_address_link' appear — these look like typos of the
        # actual model field names; confirm against the Clientaddress model.
        exclude = ["id","trash","is_deleteable","is_editable","create_date","modified_date","create_user","modified_user","delete_user",'user_rights_link','addressindividualfields','searchvriteria_address_link','Telefon_address_link','Email_address_link','Telefax_address_link','group_rights-link','change_date','owner','occurrence','searchcriteria_address_link','task_assigned_to']
        widgets = {'salutation': selectfield, 'firstname': textinputfeld, 'lastname': textinputfeld, 'companyname': textinputfeld, 'street': textinputfeld, 'zip': integerfeld, 'city': textinputfeld}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Extra fields that do not live on the model; only email is required.
        self.fields['product'] = forms.CharField(
            widget=textinputfeld,
            required=False)
        self.fields['order_date'] = forms.CharField(
            widget=textinputfeld,
            required=False)
        self.fields['email'] = forms.EmailField(max_length = 200,widget=textinputfeld,
            required=True)
| StarcoderdataPython |
385268 | <gh_stars>1000+
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dump routes from a TMS570 AIO node."""
import socket
import sys
import textwrap
from makani.avionics.common import aio
from makani.avionics.common import pack_avionics_messages
from makani.avionics.network import aio_node
from makani.avionics.network import message_type
from makani.lib.python import c_helpers
# Enum helpers translating between C enum names (e.g. 'kAioNodeOperator')
# and their numeric values for the AIO node and message-type enums.
aio_node_helper = c_helpers.EnumHelper('AioNode', aio_node)
message_type_helper = c_helpers.EnumHelper('MessageType', message_type)
def _MacToString(mac):
return '%02X:%02X:%02X:%02X:%02X:%02X' % (
mac.a, mac.b, mac.c, mac.d, mac.e, mac.f)
def _FormatResponse(source, msg):
  """Pretty-print one switch routing-table entry from a DumpRoutesResponse."""
  entry = msg.entry
  node_name = aio_node_helper.ShortName(source)
  mac_str = _MacToString(entry.ethernet_address)
  # The low bit of the first MAC octet distinguishes multicast from unicast.
  is_multicast = (entry.ethernet_address.a & 1) == 1
  mcast_str = 'Multicast' if is_multicast else 'Unicast'
  port_str = ('mask 0x%02X' if is_multicast else 'port %d') % entry.port_map
  print ('%s: %s %s on VLAN %d to %s'
         ' (valid=%d age=%d static=%d arl_con=%d priority=%d)'
         % (node_name, mcast_str, mac_str, entry.vlan_id, port_str,
            entry.valid, entry.age, entry.static_entry,
            entry.arl_con, entry.priority))
def main():
  # NOTE: this file is Python 2 (print statement below); do not run under py3.
  if len(sys.argv) != 2:
    print textwrap.dedent("""
        Inspect the switch routing table of an AIO node. Currently only access
        switches are supported.
        Usage: route_dump <node_name>"""[1:])
    sys.exit(-1)
  # Send one DumpRoutesRequest to the target node, then collect responses
  # until the 1-second receive timeout fires.
  aio_client = aio.AioClient(['kMessageTypeDumpRoutesRequest',
                              'kMessageTypeDumpRoutesResponse'],
                             timeout=1)
  request_msg = pack_avionics_messages.DumpRoutesRequestMessage()
  request_msg.target = aio_node_helper.Value(sys.argv[1])
  aio_client.Send(request_msg, 'kMessageTypeDumpRoutesRequest',
                  'kAioNodeOperator')
  responses = []
  while True:
    try:
      (_, header, msg) = aio_client.Recv()
      mtype = message_type_helper.ShortName(header.type)
      # Keep only responses of the right type coming from the node we asked.
      if mtype == 'DumpRoutesResponse' and header.source == request_msg.target:
        responses.append(msg)
    except socket.timeout:
      break

  def _SortKey(msg):
    # Sort by VLAN (hex, width 3) then MAC string for stable, readable output.
    return '%3X%s'%(msg.entry.vlan_id, _MacToString(msg.entry.ethernet_address))

  for msg in sorted(responses, key=_SortKey):
    _FormatResponse(request_msg.target, msg)


if __name__ == '__main__':
  main()
| StarcoderdataPython |
# Extract the floating-point confidence value from a header-style line.
text = "X-DSPAM-Confidence: 0.8475"
# BUG FIX: the original hard-coded text[23:], which yields "475" (475.0 once
# converted), and assigned the return value of print() (always None) to a
# variable. Locate the colon and parse everything after it instead.
num = text.find(':')
get = text[num + 1:]
confidence = float(get)
print(confidence)
| StarcoderdataPython |
1617187 | from django.db import models
class Award(models.Model):
    """Django model describing an award given to a project/app.

    NOTE(review): 'averangeRating' looks like a typo for 'averageRating',
    but renaming it would require a schema migration and touch any code
    referencing the field — flagging rather than changing.
    """
    name=models.CharField(max_length=300)
    description=models.TextField(max_length=5000)
    developer=models.CharField(max_length=300)
    created_date=models.DateField()
    averangeRating=models.FloatField(default=0)
    # Both URLs are optional; default=None only matters together with null=True.
    image=models.URLField(default=None, null=True)
    linktosite=models.URLField(default=None, null=True)
    def __str__(self):
        # Human-readable representation used by the Django admin.
        return self.name
# Create your models here.
| StarcoderdataPython |
12838651 | <gh_stars>1-10
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.tokenize import PunktSentenceTokenizer
from nltk.corpus import state_union
from nltk import RegexpParser
# State of the Union addresses from the NLTK corpus.
train_text = state_union.raw("2005-GWBush.txt")
sample_text = state_union.raw("2006-GWBush.txt")
# NOTE(review): train_text is never used below — presumably the tokenizer was
# meant to be trained on the 2005 speech rather than on sample_text itself;
# confirm the intent before changing.
custom_sent_tokenizer = PunktSentenceTokenizer(sample_text)
tokenized = custom_sent_tokenizer.tokenize(sample_text)
def process_content():
    """POS-tag the first five tokenized sentences and chunk them by chinking.

    The grammar first chunks everything, then excises (chinks) verbs,
    prepositions, determiners and 'to' from inside chunks. Each parse is
    printed, its Chunk subtrees listed, and the tree drawn in a blocking
    GUI window.
    """
    try:
        for i in tokenized[:5]:
            tagged = pos_tag(word_tokenize(i)) # tagset='universal'
            chunkGram = r"""Chunk : {<.*>+}
                            }<VB.?|IN|DT|TO>{"""
            chunkParser = RegexpParser(chunkGram)
            chunked = chunkParser.parse(tagged)
            print(chunked)
            for subtree in chunked.subtrees(filter=lambda t:t.label() == "Chunk"):
                print(subtree)
            # Opens a Tk window per sentence; execution blocks until closed.
            chunked.draw()
    except Exception as e:
        # NOTE(review): broad catch-and-print hides failures — acceptable for
        # a tutorial script, but consider narrowing if this code is reused.
        print(str(e))

process_content()
338837 | <gh_stars>0
import csv
from _common import create_mobile_library_file
# Path to the raw scraped CSV, relative to this script's working directory.
DATA_SOURCE = '../raw/birmingham.csv'
def run():
    """Convert Birmingham's raw mobile-library CSV into the shared format
    and write it out via create_mobile_library_file."""
    timetable = 'https://www.birmingham.gov.uk/info/50163/library_services/1479/mobile_library_service/3'
    mobiles = []
    with open(DATA_SOURCE, 'r') as raw:
        for row in csv.reader(raw, delimiter=',', quotechar='"'):
            # Raw column order: day, arrival, departure, community, stop name,
            # latitude, longitude, frequency, start date, route.
            (day, arrival, departure, community, stop_name,
             latitude, longitude, frequency, start, route) = row[:10]
            address = stop_name + ', ' + community
            mobiles.append(
                ['Mobile', route, community, stop_name, address, '', longitude, latitude,
                 day, 'Public', arrival, departure, frequency, start, '', '', timetable]
            )
    create_mobile_library_file('Birmingham', 'birmingham.csv', mobiles)


run()
| StarcoderdataPython |
1627827 | <reponame>AndyVirginia/-<filename>generator.py
from fractions import Fraction
import random
class Ari_Expression():
'''算术表达式的生成'''
def __init__(self, max_num):
self.init_operators()
self.init_nums(max_num)
self.init_expression()
def init_num(self, max_num):
'''随机生成数'''
denominator = random.randint(1, max_num)
numerator = random.randint(0, max_num)
return Fraction(numerator, denominator)
def insert_bracket(self):
'''插入括号'''
bracket = ['(', 'null', ')']
if len(self.operators) > 1:
x = random.randint(0, len(self.operators))
while x < len(self.operators):
y = random.randint(x, len(self.operators))
low = False
for a in self.operators[x:y+1]:
if a in ['+', '-']:
low = True
break
try:
if self.operators[y+1] in ['×', '÷'] and low:
self.operators.insert(x, '(')
self.operators.insert(y+2,')')
except IndexError:
pass
x = y+2
def init_operators(self):
'''随机生成一个运算符并随机插入括号'''
self.operators = []
operator = ['+', '-', '×', '÷', 'null']
for x in range(3):
if x == 1:
self.operators.append(random.choice(operator[:-2]))
else:
y = random.choice(operator)
if y != 'null':
self.operators.append(y)
self.insert_bracket()
def init_nums(self, max_num):
self.nums = []
self.nums.append(self.init_num(max_num))
for x in range(len(self.operators)):
y = self.init_num(max_num)
if self.operators[x] == '÷':
while y.numerator == 0:
y = self.init_num(max_num)
self.nums.append(y)
def str_num(self, num):
'''字符串化一个分数'''
inter = int(num.numerator / num.denominator)
numerator = int(num.numerator % num.denominator)
str_num = ''
if numerator != 0:
str_num += str(numerator) + '/' + str(num.denominator)
if not str_num:
'''如果为空'''
str_num += str(inter)
else:
if inter == 0:
return str_num
else:
str_num = str(inter) + '`' + str_num
return str_num
def init_expression(self):
'''生成一个算术表达式的字符串形式'''
self.str = ''
i = 0
self.exp = []
again = False
for x in self.operators:
if again:
self.str += x + ' '
elif x == '(':
self.str += x + ' '
elif x == ')':
self.str += self.str_num(self.nums[i]) + ' '
i += 1
self.str += x + ' '
again = True
else:
self.str += self.str_num(self.nums[i]) + ' '
self.str += x + ' '
i += 1
self.str += self.str_num(self.nums[-1]) + ' ='
| StarcoderdataPython |
from itertools import product

import numpy as np
import pylab as plt
from scipy.integrate import simps
from scipy.linalg import cho_solve, solve_triangular
from scipy.special import erf

#from ChoSolver import choSolve, choBackSubstitution
def styblinsky(x):
    """Styblinski-Tang test function of the first two components of x."""
    total = 0.0
    for v in (x[0], x[1]):
        total += v ** 4 - 16 * v ** 2 + 5 * v
    return total / 2.
def rosenbrock(x):
    """Rosenbrock banana function (a=1, b=100); global minimum 0 at (1, 1)."""
    a, b = 1, 100
    first = (a - x[0]) ** 2
    second = b * (x[1] - x[0] ** 2) ** 2
    return first + second
def complexInjunction(x):
    """Test objective: -|Re(min(y)/max(y))| for y = exp(i*A.x).

    A mixes cosine weights against complex sine offsets of the index range.
    FIX: removed the unused local ``a`` from the original.

    NOTE(review): np.min/np.max on a complex array compare lexicographically
    (real part first) — presumably intentional here; confirm before reuse.
    """
    Nm = len(x)
    A = np.outer(np.cos(np.arange(Nm)), np.sin(1j * np.arange(Nm)) - Nm)
    y = np.exp(1j * A.dot(x))
    return -np.abs((np.min(y) / np.max(y)).real)
def mean(x):
    """Objective ('ground truth') function being minimised by the optimiser.

    Alternative objectives are kept as commented / unreachable lines so they
    can be toggled by hand; only the log-Rosenbrock line is live.
    """
    #return styblinsky(x)
    return np.log10(1+rosenbrock(x))# + rosenbrock((x-1))
    # Unreachable: leftover distance-to-point objective from an earlier experiment.
    return np.sqrt((x[0]-0.5)**2 + (x[1])**2)
def M52(XX, theta):
    """Matern-5/2 covariance matrix (no noise term).

    XX holds pairwise coordinate differences with shape (N, d, N, d);
    theta = [amplitude, noise (unused here), lengthscale_1, ..., lengthscale_d].
    """
    amplitude = theta[0]
    length_scales = theta[2:]
    N = XX.shape[0]
    r2 = np.zeros((N, N), dtype=np.double)
    for dim, ls in enumerate(length_scales):
        r2 += (XX[:, dim, :, dim] / ls) ** 2
    r = np.sqrt(5 * r2)
    # k(r) = amp * (1 + sqrt(5) r + 5 r^2 / 3) exp(-sqrt(5) r)
    return amplitude * (1 + r + (5. / 3.) * r2) * np.exp(-r)
def expK(XX, theta):
    """Squared-exponential (RBF) covariance matrix (noise term not added).

    XX holds pairwise differences with shape (N, d, N, d);
    theta = [amplitude, noise (unused here), lengthscale_1, ..., lengthscale_d].
    """
    amplitude = theta[0]
    length_scales = theta[2:]
    N = XX.shape[0]
    log_k = np.zeros((N, N), dtype=np.double)
    for dim, ls in enumerate(length_scales):
        log_k -= (XX[:, dim, :, dim] / ls) ** 2
    log_k /= 2.
    return amplitude * np.exp(log_k)
def expK_derivative(XX,theta):
    """Intended derivative of the RBF kernel w.r.t. its hyperparameters.

    NOTE(review): this function looks unfinished and appears unused by the
    rest of the file: ``Kdiff`` is filled but never combined into the return
    value, the scaling step is commented out, and the returned ``K`` is just
    theta0 * ones + nu^2 * I because K is never updated from Kdiff. Treat as
    dead/experimental code; verify before relying on it.
    """
    theta0 = theta[0]
    nu = theta[1]
    lengthScales = theta[2:]
    N = XX.shape[0]
    Kdiff = np.zeros([N,N,len(theta)],dtype=np.double)
    K = np.zeros([N,N],dtype=np.double)
    # Intended layout of Kdiff's last axis:
    #0 -> exp(-r^2)
    #1 -> 2*eye(N)*nu
    #2: ->-2r*eye(-r^2)*-2*(x1[i]-x2[i])^2/(lengthScale[i])^3
    i = 0
    while i < len(lengthScales):
        Kdiff[:,:,0] -= (XX[:,i,:,i]/lengthScales[i])**2
        Kdiff[:,:,2+i] += 4*XX[:,i,:,i]**2/lengthScales[i]**3
        i += 1
    #*r
    #np.rollaxis(K[:,:,2:],2,0) *= np.sqrt(-Kdiff[:,:,0])
    K /= 2.
    np.exp(K,out=K)
    K *= theta0
    K += nu**2*np.eye(N)
    return K
class Prior(object):
    """Base class for prior distributions.

    Stores every constructor keyword as an instance attribute; subclasses
    override sample() and pdf().
    """
    def __init__(self, **kwargs):
        # Promote each keyword argument to an attribute of the instance.
        for name, value in kwargs.items():
            setattr(self, name, value)

    def domain(self):
        """Return the support of the prior (None in the base class)."""
        return None

    def sample(self, N=1):
        """Draw N samples from the distribution (None in the base class)."""
        return None

    def pdf(self, x):
        """Evaluate the density at x (None in the base class)."""
        return None
class UniformPrior(Prior):
    """Uniform distribution on [xmin, xmax] (arguments may be given in
    either order)."""
    def __init__(self, xmin, xmax):
        lo = float(min(xmin, xmax))
        hi = float(max(xmin, xmax))
        super(UniformPrior, self).__init__(xmin=lo, xmax=hi, width=hi - lo)

    def sample(self, N=1):
        return np.random.uniform(low=self.xmin, high=self.xmax, size=N)

    def pdf(self, x):
        # Constant density inside the support, zeroed outside it.
        density = np.ones_like(x)
        density /= self.width
        density[x > self.xmax] *= 0.
        density[x < self.xmin] *= 0.
        return density
class NormalPrior(Prior):
    """Gaussian prior with the given mean and standard deviation."""
    def __init__(self, mean, std):
        super(NormalPrior, self).__init__(mean=float(mean), std=float(std))

    def sample(self, N=1):
        # Scale-and-shift of standard normals.
        return self.mean + self.std * np.random.normal(size=N)

    def pdf(self, x):
        return np.exp(-(x - self.mean)**2/self.std**2/2.)/np.sqrt(2*np.pi)/self.std
class LogNormalPrior(Prior):
    """Log-normal prior; mean and std are those of the underlying normal."""
    def __init__(self, mean, std):
        super(LogNormalPrior, self).__init__(mean=float(mean), std=float(std))

    def sample(self, N=1):
        return np.random.lognormal(mean=self.mean, sigma=self.std, size=N)

    def pdf(self, x):
        return np.exp(-(np.log(x) - self.mean)**2/self.std**2/2.)/np.sqrt(2*np.pi)/self.std/x
class ClassPrior(Prior):
    """Prior over integer class labels 0..numClasses-1, sampled by rejection
    against per-class acceptance weights."""
    def __init__(self, numClasses, weights=None):
        if weights is None:
            weights = np.ones(numClasses, dtype=np.double) / numClasses
        # BUG FIX: the original stored float(weights), which raises TypeError
        # whenever weights is an array with more than one element, and stored
        # numClasses as a float that was then fed to np.random.randint.
        # Keep the class count integral and the weight vector as an array.
        d = {"numClasses": int(numClasses),
             "weights": np.asarray(weights, dtype=np.double)}
        super(ClassPrior, self).__init__(**d)

    def sample(self, N=1):
        samples = np.zeros(N, dtype=np.int64)
        for i in range(N):
            c = -1
            while c == -1:
                # Propose a class uniformly, accept with its weight.
                candidate = np.random.randint(self.numClasses)
                if np.random.uniform() < self.weights[candidate]:
                    c = candidate
            samples[i] = c
        return samples

    def pdf(self, x):
        # Weight of the (integer) class x.
        return self.weights[np.int64(x)]
class DiscretePrior(Prior):
    """Prior restricted to a discrete grid of values, sampled by rejection
    against a continuous prior evaluated at each grid point."""
    def __init__(self, values, prior=None):
        if prior is None:
            prior = UniformPrior(np.min(values), np.max(values))
        d = {"values": values, "prior": prior}
        super(DiscretePrior, self).__init__(**d)

    def sample(self, N=1):
        # BUG FIX: the original accumulated samples in an int64 array, which
        # truncated non-integer grids (e.g. the log-spaced float values this
        # class is used with below) to integers — usually 0. Use the grid's
        # own dtype instead.
        samples = np.empty(N, dtype=np.asarray(self.values).dtype)
        i = 0
        while i < N:
            c = -1
            while c == -1:
                c_ = np.random.randint(len(self.values))
                if np.random.uniform() < self.prior.pdf(self.values[c_]):
                    c = c_
            samples[i] = self.values[c]
            i += 1
        return samples

    def pdf(self, x):
        # Density of the underlying continuous prior at x.
        return self.prior.pdf(x)
if __name__ == '__main__':
    def sampleX(xPriors, N):
        """Draw N joint samples; column i of the result comes from xPriors[i]."""
        samples = np.zeros((N, len(xPriors)), dtype=np.double)
        for dim, prior in enumerate(xPriors):
            samples[:, dim] = prior.sample(N)
        return samples
def computeAquisition(Xstar,X,y,thetaPriors,iteration=1):
Xstar = np.atleast_2d(Xstar)
shape = []
indices = []
for thetaPrior in thetaPriors:
ar = thetaPrior.values
shape.append(len(ar))
indices.append(np.arange(len(ar)))
n = len(thetaPriors)
postTheta = np.zeros(shape,dtype=np.double)
COMP = np.zeros(shape,dtype=np.double)
DF = np.zeros(shape,dtype=np.double)
LML = np.zeros(shape,dtype=np.double)
Xboth = np.vstack([X,Xstar])
XXboth = np.subtract.outer(Xboth,Xboth)
arg = np.argsort(y)
xbest = X[arg[0],:]
fbest = y[arg[0]]
aq_full = np.zeros([Xstar.shape[0]]+shape,dtype=np.double)
for idx in product(*indices):
theta = np.zeros(len(indices),dtype=np.double)
for i in range(len(idx)):
theta[i] = thetaPriors[i].values[idx[i]]
nu = theta[1]
#Kboth = expK(XXboth,theta)
Kboth = M52(XXboth,theta)
K00 = Kboth[0:X.shape[0],0:X.shape[0]]
K00 += nu**2*np.eye(X.shape[0])
K01 = Kboth[0:X.shape[0],X.shape[0]:]
K10 = K01.T
K11 = Kboth[X.shape[0]:,X.shape[0]:]
L = np.linalg.cholesky(K00)
alpha = cho_solve((L,False),y)#choSolve(L,y,False)
#mu[j] = sum_i alpha[i]K01[i,j]
mu = K10.dot(alpha)
#cov = K11 - K10.(K00+sigma)(^-1).K01
V = choBackSubstitution(L,K01,True,False)
std = np.sqrt(np.diag(K11 - V.T.dot(V)))
gamma = (fbest - mu)/std
#POI
cum = (1 + erf(gamma/np.sqrt(2)))/2.
#return
#EI
aq = std*(gamma*cum + np.exp(-gamma**2/2)/np.sqrt(2*np.pi))
#aq = (1./(iteration+1))*std - mu
datafit = -y.dot(alpha)/2.
complexity = np.sum(np.log(np.diag(L)))
marLik = np.exp(datafit - complexity - np.log(2*np.pi)*n/2.)
COMP[idx] = complexity
DF[idx] = datafit
LML[idx] = np.log(marLik)
prior = 1.
for t,tp in zip(theta,thetaPriors):
prior *= tp.pdf(t)
postTheta[idx] = marLik * prior
aq_full[ [slice(0,Xstar.shape[0])]+list(idx)] = aq*postTheta[idx]
prob = np.copy(postTheta)
for axis in range(len(thetaPriors)):
aq_full = simps(aq_full,thetaPriors[len(thetaPriors)-axis-1].values,axis=len(thetaPriors)-axis)
prob = simps(prob,thetaPriors[len(thetaPriors)-axis-1].values,axis=len(thetaPriors)-axis-1)
aq_full /= prob
postTheta /= prob
return aq_full,postTheta
    def maximizeAquisition(xPriors, X, y, thetaPriors=None, iteration=0):
        '''Using gradient (or steepest if desired) maximize the Expected Improvment aquisition
        while integration over aquisition hyper parameters.

        Despite the docstring, the implementation actually runs a short
        Nelder-Mead simplex search (at most 5 iterations) seeded from random
        samples, then returns the best point seen over ALL evaluated
        candidates (which may differ from the simplex's own best, xbest_nm).
        Also produces diagnostic scatter/marginal plots (blocking plt.show()).
        '''
        if thetaPriors is None:
            #Set up thetaPriors
            res = 10
            #theta0 ~ max(y) - min(y), uniform, log spacing 4 mag
            m2 = np.max(y) - np.min(y)
            m1 = m2/1e4
            theta0Prior = DiscretePrior(10**np.linspace(np.log10(m1),np.log10(m2),res),
                                        prior=UniformPrior(m1,m2))
            # nu ~ obs noise. similarly but scaled down by 10%
            m2 = (np.max(y) - np.min(y))/10.
            m1 = (m2/1e4)/10.
            nuPrior = DiscretePrior(10**np.linspace(np.log10(m1),np.log10(m2),res),
                                    prior=UniformPrior(m1,m2))
            thetaPriors = [theta0Prior,nuPrior]
            # One lengthscale prior per input dimension.
            for i in range(len(xPriors)):
                #handles uniform x priors right now
                m2 = (xPriors[i].xmax - xPriors[i].xmin)*10.
                m1 = (xPriors[i].xmax - xPriors[i].xmin)/10.
                lsPrior = DiscretePrior(10**np.linspace(np.log10(m1),np.log10(m2),res),
                                        prior=UniformPrior(m1,m2))
                thetaPriors.append(lsPrior)
        for thetaPrior in thetaPriors:
            assert isinstance(thetaPrior,DiscretePrior), "one theta prior is not discrete"
        from itertools import product
        #First sample points to initialize maximization
        #create aquisition at x
        # NOTE(review): the first Xstar assignment is immediately overwritten
        # by the second — presumably an experiment left in place.
        Xstar = sampleX(xPriors,max(2,len(thetaPriors))**max(2,len(xPriors)))
        Xstar = sampleX(xPriors,10**max(2,len(xPriors)))
        arg = np.argsort(y)
        xbest = X[arg[0],:]
        fbest = y[arg[0]]
        aq_all = []
        Xstar_all = []
        N = len(y)
        aq_init,postTheta = computeAquisition(Xstar,X,y,thetaPriors,iteration)
        aq_all.append(aq_init)
        Xstar_all.append(Xstar)
        arg = np.argsort(aq_init)
        # Initial simplex: the len(xPriors)+1 best sampled points.
        Xsimp = Xstar[arg[-len(xPriors)-1:],:]
        aq_simp = aq_init[arg[-len(xPriors)-1:]]
        #min to max
        # Standard Nelder-Mead coefficients: reflection, expansion,
        # contraction, shrink.
        alpha,gamma,rho,sigma = 1.,2.,0.5,0.5
        iter = 0
        NonCovergent = True
        while NonCovergent:
            # Fixed iteration cap — there is no other convergence test.
            if iter >= 5:
                break
            iter += 1
            #order for min (flip aq sign)
            arg = np.argsort(-aq_simp)
            aq_simp = aq_simp[arg]
            Xsimp = Xsimp[arg,:]
            #print(Xsimp,aq_simp)
            #centorid except last
            x0 = np.mean(Xsimp[:-1,:],axis=0)
            #reflection
            xr = x0 + alpha*(x0 - Xsimp[-1,:])
            aq_r,postTheta = computeAquisition(xr,X,y,thetaPriors,iteration)
            #print(xr,aq_r)
            aq_all.append(aq_r)
            Xstar_all.append(xr)
            if -aq_simp[0] <= -aq_r and -aq_r < -aq_simp[-2]:
                Xsimp[-1,:] = xr
                aq_simp[-1] = aq_r
                continue
            #expansion
            if -aq_r < -aq_simp[0]:
                xe = x0 + gamma*(xr - x0)
                aq_e,postTheta = computeAquisition(xe,X,y,thetaPriors,iteration)
                aq_all.append(aq_e)
                Xstar_all.append(xe)
                if -aq_e < -aq_r:
                    Xsimp[-1,:] = xe
                    aq_simp[-1] = aq_e
                    continue
                else:
                    Xsimp[-1,:] = xr
                    aq_simp[-1] = aq_r
                    continue
            #contractions
            xc = x0 + rho*(Xsimp[-1,:] - x0)
            aq_c,postTheta = computeAquisition(xc,X,y,thetaPriors,iteration)
            aq_all.append(aq_c)
            Xstar_all.append(xc)
            if -aq_c < -aq_simp[-1]:
                Xsimp[-1,:] = xc
                aq_simp[-1] = aq_c
                continue
            #shrink
            for i in range(Xsimp.shape[0]):
                Xsimp[i,:] = Xsimp[0,:] + sigma*(Xsimp[i,:] - Xsimp[0,:])
        xbest_nm = Xsimp[0,:]
        #print(xbest_nm)
        # Best point over every acquisition evaluation made above.
        aq_all = np.hstack(aq_all)
        Xstar = np.vstack(Xstar_all)
        arg = np.argsort(aq_all)
        xbest = Xstar[arg[-1],:]
        # Diagnostic plots: evaluated points coloured by acquisition value,
        # plus 1-D marginals of the hyperparameter posterior.
        if True:
            vmin = np.min(aq_all)
            vmax = np.max(aq_all)
            plt.figure()
            sc=plt.scatter(Xstar[:,0],Xstar[:,1],c=aq_all,
                           vmin=vmin,vmax=vmax,alpha=0.6)
            plt.scatter(xbest[0],xbest[1],c='red',alpha=0.6)
            plt.scatter(xbest_nm[0],xbest_nm[1],c='red',marker='*',alpha=0.6)
            plt.colorbar(sc)
            plt.show()
            fig,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)
            ax1.plot(thetaPriors[0].values,
                     simps(simps(simps(postTheta,thetaPriors[3].values,axis=3),
                                 thetaPriors[2].values,axis=2),
                           thetaPriors[1].values,axis=1))
            ax1.set_xlabel("theta0")
            ax2.plot(thetaPriors[1].values,
                     simps(simps(simps(postTheta,thetaPriors[3].values,axis=3),
                                 thetaPriors[2].values,axis=2),
                           thetaPriors[0].values,axis=0))
            ax2.set_xlabel("nu")
            ax3.plot(thetaPriors[2].values,
                     simps(simps(simps(postTheta,thetaPriors[3].values,axis=3),
                                 thetaPriors[1].values,axis=1),
                           thetaPriors[0].values,axis=0))
            ax3.set_xlabel("ls0")
            ax4.plot(thetaPriors[3].values,
                     simps(simps(simps(postTheta,thetaPriors[2].values,axis=2),
                                 thetaPriors[1].values,axis=1),
                           thetaPriors[0].values,axis=0))
            ax4.set_xlabel("ls1")
            plt.show()
        return xbest
    #Set up data
    # Experiment driver: 4 repeated runs of Bayesian optimisation on mean(),
    # each starting from the domain corners + centre, then Niter acquisition
    # steps. All plt.show() calls block until the window is closed.
    np.random.seed(12344)
    nu = 0.01  # observation noise added to every objective evaluation
    xPriors = [UniformPrior(-1,1.5),
               UniformPrior(-1,1.5)]
    # NOTE(review): this thetaPriors list is never used — maximizeAquisition
    # is called with thetaPriors=None below and rebuilds its own grid.
    thetaPriors = [DiscretePrior(10**np.linspace(np.log10(0.1),np.log10(5),10),prior=UniformPrior(0,5)),
                   DiscretePrior(10**np.linspace(np.log10(0.001),np.log10(0.5),10),prior=LogNormalPrior(np.log(0.1),np.log(0.5/0.01))),
                   DiscretePrior(np.linspace(0.5,6,10),prior=LogNormalPrior(np.log(1),np.log(6/0.5))),
                   DiscretePrior(np.linspace(0.5,6,10),prior=LogNormalPrior(np.log(1),np.log(6/0.5)))]
    # Dense grid of the true objective for contour plots.
    X,Y = np.meshgrid(np.linspace(xPriors[0].xmin,xPriors[0].xmax,100),
                      np.linspace(xPriors[1].xmin,xPriors[1].xmax,100),
                      indexing='ij')
    A = []
    for x,y in zip(X.flatten(),Y.flatten()):
        A.append(mean(np.array([x,y])))
    Niter = 10
    minidx = np.zeros([4,Niter],dtype=np.double)  # per-run score traces
    for r in range(4):
        score = []
        #plt.figure()
        c1 = plt.contour(X,Y,np.array(A).reshape(X.shape),20)
        plt.clabel(c1,inline=1,fontsize=10)
        plt.title("True")
        plt.xlabel("x")
        plt.ylabel("y")
        arg = np.argsort(A)
        plt.scatter(X.flatten()[arg[0]],Y.flatten()[arg[0]],zorder=20,c='red',marker='*',alpha=1)
        #sample corners and center
        xCorners = []
        for xPrior in xPriors:
            xCorners.append([xPrior.xmin,xPrior.xmax])
        from itertools import product
        Xdata = []
        y = []
        for x in product(*xCorners):
            Xdata.append(np.array(x))
            y.append(mean(Xdata[-1]) + nu*np.random.normal())
        Xdata.append(np.mean(np.array(xCorners),axis=1))
        y.append(mean(Xdata[-1]) + nu*np.random.normal())
        Xdata = np.array(Xdata)
        y = np.array(y)
        sc=plt.scatter(Xdata[:,0],Xdata[:,1],c=y,vmin=np.min(y),vmax=np.max(y),alpha=0.6)
        arg = np.argsort(y)
        plt.scatter(Xdata[arg[0],0],Xdata[arg[0],1],c='red',vmin=np.min(y),vmax=np.max(y),alpha=1)
        plt.colorbar(sc)
        plt.show()
        #do iterations to find min
        arg = np.argsort(y)
        fbest = y[arg[0]]
        xprev = Xdata[arg[0]]
        i = 0
        while i < Niter:
            #do gradient decent to find max of full aquisition
            xnext = maximizeAquisition(xPriors,Xdata,y,thetaPriors=None,iteration=i)
            xprev = xnext
            #print(y)
            # Evaluate the (noisy) objective at the proposed point and append
            # it to the data set for the next iteration.
            f = mean(xnext) + nu*np.random.normal()
            Xdata = np.vstack([Xdata,xnext])
            y = np.hstack([y,f])
            fbest = np.min(y)
            score.append(f)
            print(xnext,f,fbest)
            i += 1
        c1 = plt.contour(X,Y,np.array(A).reshape(X.shape),20)
        plt.clabel(c1,inline=1,fontsize=10)
        plt.title("True")
        plt.xlabel("x")
        plt.ylabel("y")
        arg = np.argsort(A)
        plt.scatter(X.flatten()[arg[0]],Y.flatten()[arg[0]],zorder=20,c='red',marker='*',alpha=1)
        sc=plt.scatter(Xdata[:,0],Xdata[:,1],c=y,vmin=np.min(y),vmax=np.max(y),alpha=0.6)
        arg = np.argsort(y)
        plt.scatter(Xdata[arg[0],0],Xdata[arg[0],1],c='red',vmin=np.min(y),vmax=np.max(y),alpha=1)
        plt.colorbar(sc)
        plt.show()
        plt.plot(score)
        plt.ylabel('score (lower better)')
        plt.xlabel("iteration")
        plt.show()
        minidx[r,:] = score
    # Mean +/- std of the score traces over the 4 runs.
    plt.plot(np.mean(minidx,axis=0))
    plt.plot(np.mean(minidx,axis=0)+np.std(minidx,axis=0),ls='--')
    plt.plot(np.mean(minidx,axis=0)-np.std(minidx,axis=0),ls='--')
    plt.show()
| StarcoderdataPython |
1787455 | <reponame>JohnZhang000/adaptive-jpeg-compression
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 3 14:51:04 2021
@author: ubuntu204
"""
import os
import numpy as np
from six.moves import cPickle as pickle
# from scipy.misc import imread
import platform
from PIL import Image
def load_pickle(f):
    """Unpickle from an open binary file, handling the py2/py3 str split.

    Under Python 3, latin1 decoding keeps py2-pickled byte strings loadable.
    """
    major = platform.python_version_tuple()[0]
    if major == '2':
        return pickle.load(f)
    if major == '3':
        return pickle.load(f, encoding='latin1')
    raise ValueError("invalid python version: {}".format(major))
def load_CIFAR_batch(filename):
    """ load single batch of cifar

    Returns (X, Y): X float32 images in [0, 1] with shape (n, 32, 32, 3),
    Y an integer label array of length n.

    GENERALISED: the batch size is now inferred from the data via
    ``reshape(-1, ...)`` instead of hard-coding 10000, so partial or
    differently-sized batches load too (full CIFAR batches are unaffected).
    """
    with open(filename, 'rb') as f:
        datadict = load_pickle(f)  # dict with raw pixel rows and labels
        X = datadict['data']
        Y = datadict['labels']
        X = X.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype(np.float32) / 255.0
        Y = np.array(Y)
    return X, Y
def load_CIFAR_train(filename):
    """Load and concatenate the five CIFAR-10 training batches found under
    the directory *filename* (files data_batch_1 .. data_batch_5)."""
    batches = [load_CIFAR_batch(os.path.join(filename, 'data_batch_{0}'.format(i)))
               for i in range(1, 6)]
    X = np.concatenate([images for images, _ in batches])
    Y = np.concatenate([labels for _, labels in batches])
    return X, Y
def load_imagenet_filenames(dataset_dir, features):
    """Read '<dataset_dir>.txt' (one 'sysnet/filename' per line) and map each
    line's sysnet to its label via the *features* dict.

    Returns (image_list, label_list) in file order.
    """
    image_list = []
    label_list = []
    with open(dataset_dir + '.txt', 'r') as f:
        for line in f.readlines():
            sysnet, _name = line.split('/')
            label_list.append(features[sysnet])
            image_list.append(line.replace('\n', ''))
    return image_list, label_list
def load_imagenet_batch(batch_idx, batch_size, data_dir, data_list, label_list):
    """Load one mini-batch of ImageNet images as float32 NCHW in [0, 1].

    Slots not filled (when the final slice is shorter than batch_size)
    remain zero, matching the pre-allocated buffer.
    """
    start = batch_idx * batch_size
    stop = start + batch_size
    filenames = data_list[start:stop]
    labels = np.array(label_list[start:stop])
    images = np.zeros([batch_size, 224, 224, 3])
    for slot, fname in enumerate(filenames):
        img = Image.open(os.path.join(data_dir, fname)).convert('RGB').resize([224, 224])
        images[slot, ...] = np.asarray(img) / 255.0
    return images.transpose(0, 3, 1, 2).astype(np.float32), labels
3446645 | <reponame>matslindh/pytest-image-diff
from typing import BinaryIO, Union, Tuple, Optional
from PIL.Image import Image
from typing_extensions import Literal, Protocol
# A path (str/bytes) or an open binary stream that yields image data.
PathOrFileType = Union[str, bytes, BinaryIO]
# Either an already-loaded PIL Image or anything PathOrFileType accepts.
ImageFileType = Union[Image, PathOrFileType]
# An image size pair -- presumably (width, height), PIL convention; confirm at call sites.
ImageSize = Tuple[int, int]
class ImageRegressionCallableType(Protocol):
    """Callable protocol for the image-regression fixture: checks *image*
    (presumably against a stored snapshot, within *threshold* -- confirm
    against the fixture implementation) and returns the boolean outcome.
    *suffix* distinguishes multiple snapshots within one test."""
    def __call__(
        self,
        image: ImageFileType,
        threshold: Optional[float] = None,
        suffix: Optional[str] = None,
    ) -> bool:
        pass
class ImageDiffCallableType(Protocol):
    """Callable protocol for the image-diff fixture: compares *image* with
    *image_2* and returns the boolean outcome (presumably True when their
    difference is within *threshold* -- confirm against the fixture
    implementation)."""
    def __call__(
        self,
        image: ImageFileType,
        image_2: ImageFileType,
        threshold: Optional[float] = None,
        suffix: Optional[str] = None,
    ) -> bool:
        pass
# Diff-image layout orientation: ``opts.orientation`` can be 'lr' for
# left-and-right, 'tb' for top-and-bottom, or 'auto' for automatic.
OrientationType = Literal["auto", "lr", "tb"]
| StarcoderdataPython |
1795394 | <reponame>enthought/etsproxy
# proxy module
from __future__ import absolute_import
from apptools.naming.pyfs_state_factory import *
| StarcoderdataPython |
3539138 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
filament_watch.py
Cancel the print on OctoPrint if the filament is not feeding (e.g. due to
jam, out of filament, etc.)
"""
##############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
##############################################################################
import time
import logging
import argparse
import os
import socket
import yaml
from .octoprint_ctl import OctoPrintAccess
from .microcontroller_if import ArduinoInterface
from .web_server import WebServer
def get_config():
    '''Combine command line arguments and configuration file and return the
    configuration to use.

    Precedence: command-line flags override the YAML config file, which
    overrides the built-in defaults. Any newly-supplied or missing settings
    are written back to the config file.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--dev', help='Arduino serial device')
    parser.add_argument('--baudrate', type=int, help='Arduino baud rate')
    parser.add_argument('--apikey', help='OctoPrint API key')
    parser.add_argument('--octoprinthost', help='Hostname of OctoPrint server')
    parser.add_argument('--csvlog', help='CSV log of filament status')
    parser.add_argument('--alarmchangethreshold', type=float, help='Cancel print if filament movement falls below this threshold')
    parser.add_argument('--alarmminprinttime', type=int, help='Only cancel print after print has been running this many seconds')
    parser.add_argument('--alarmaction', help='Action to take on filament not feeding')
    parser.add_argument('--encoderscalingfactor', type=float, help='Conversion factor from encoder to mm')
    parser.add_argument('--windowduration', type=int, help='Average measurements over this number of seconds')
    parser.add_argument('--httpport', type=int, help='Port for status HTTP server')
    parser.add_argument('--debug', action='store_true', help='Enable debug logs')
    parser.add_argument('--config', default=os.path.expanduser("~/.filament_watch"), help='Configuration file')
    args = parser.parse_args()
    default_config = {
        'dev': '/dev/serial/by-id/usb-Adafruit_Adafruit_Mini_Metro_328_ADAOFIOls-if00-port0',
        'baudrate': 115200,
        'apikey': None,
        'octoprinthost': '127.0.0.1',
        'csvlog': None,
        'alarmchangethreshold': 0.1,
        'alarmminprinttime': 120,
        'alarmaction': 'cancel',
        'encoderscalingfactor': 0.040,
        'windowduration': 120,
        'httpport': None,
    }
    # Load config from file, or use defaults
    config_modified = False
    if os.path.isfile(args.config):
        with open(args.config) as cfg_file:
            # Fix: yaml.load() without an explicit Loader is deprecated and can
            # construct arbitrary Python objects from the file; safe_load()
            # parses the same plain-dict configs safely.
            config = yaml.safe_load(cfg_file)
    else:
        config = default_config
    # Update config with command line settings (explicit flags win; fill in
    # any keys missing from an older config file)
    for arg_name in default_config:
        if vars(args)[arg_name] is not None:
            config[arg_name] = vars(args)[arg_name]
            config_modified = True
        elif arg_name not in config:
            config[arg_name] = default_config[arg_name]
            config_modified = True
    # Store new config only when something actually changed
    if config_modified:
        with open(args.config, 'w') as cfg_file:
            yaml.dump(config, cfg_file, default_flow_style=False)
    config['debug'] = args.debug
    return config
def log_msg(logger, web_server, msg):
    '''Send *msg* at info level to every available logging facility: the
    standard logger always, plus the web UI log when one is running.'''
    sinks = [logger.info]
    if web_server:
        sinks.append(web_server.log)
    for sink in sinks:
        sink(msg)
def get_this_host_ip():
    '''Returns the IP address of this computer used to connect to the internet
    (i.e. not the loopback interface's IP)'''
    # Adapted from Alexander at http://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib/1267524#1267524
    candidates = [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
                  if not ip.startswith("127.")]
    if candidates:
        return candidates[0]
    # DNS only gave loopback addresses; discover the IP actually used to
    # reach the internet (UDP connect performs no traffic, just route lookup)
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    probe.connect(('8.8.8.8', 80))
    chosen = probe.getsockname()[0]
    probe.close()
    return chosen
def main(): # pylint: disable=too-many-locals
    """Main processing loop: poll the Arduino filament encoder and OctoPrint
    once per iteration, publish status to the optional web UI and CSV log,
    and cancel (or pause, per config) the print when the filament stops
    feeding while a print is active."""
    config = get_config()
    recent_length = config['windowduration']
    web_history_length = 120
    idle_logging_interval = 60
    # Configure logging; quiet the noisy 'requests' library
    log_level = logging.INFO
    if config['debug']:
        log_level = logging.DEBUG
    logging.basicConfig(format='%(asctime)-15s %(name)-30s %(levelname)-8s %(message)s', level=log_level)
    logger = logging.getLogger(__name__)
    logging.getLogger("requests").setLevel(logging.WARNING)
    if not config['apikey']:
        logger.error('OctoPrint API key not specified!')
        return
    # Hardware and service interfaces
    filament_watch = ArduinoInterface(config['dev'], config['baudrate'], recent_length)
    octoprint = OctoPrintAccess(config['octoprinthost'], config['apikey'], recent_length)
    if config['httpport']:
        web_server = WebServer(config['httpport'], config['debug'])
        logger.info('Status URL: http://%s:%d/', get_this_host_ip(), config['httpport'])
        web_server.start()
    else:
        web_server = None
    try:
        field_names = ['Time', 'Alarm', 'Printing', 'Valid',
                       'Filament Position', 'Measured Change', 'GCode Change',
                       'Summary', 'State', 'Filename', 'File Position',
                       'File Size', 'G-code Filament Position', 'G-code Filament Total',
                       'Bed Target', 'Bed Actual', 'Hot End Target', 'Hot End Actual']
        log_msg(logger, web_server, 'Monitoring %s' % (config['dev']))
        # NOTE: 'csv' here is a plain file handle for the CSV log, not the
        # stdlib csv module (which this file does not import).
        if config['csvlog']:
            csv = open(config['csvlog'], 'w')
            csv.write(','.join(field_names))
            csv.write('\n')
        else:
            csv = None
        printing_count = 0
        skipped_log_count = idle_logging_interval
        web_gcode_history = []
        web_actual_history = []
        while True:
            pos, meas_change_raw = filament_watch.get_pos_change()
            # NOTE(review): prefer 'pos is not None'; also meas_change_norm is
            # only assigned inside this branch but read unconditionally below,
            # so a None position would raise UnboundLocalError on the first
            # iteration -- confirm get_pos_change() never returns None.
            if pos != None:
                meas_change_norm = meas_change_raw * config['encoderscalingfactor']
                logger.debug('New position is %d (%+.1f)', pos, meas_change_norm)
            stat = octoprint.status()
            logger.debug('OctoPrint status: printing=%d "%s" "%s" %.1f/%.1f %.1f/%.1f',
                         stat['printing'],
                         stat['summary'],
                         stat['state'],
                         stat['bed_target'],
                         stat['bed_actual'],
                         stat['tool0_target'],
                         stat['tool0_actual'])
            # Track transitions between printing and idle
            if stat['printing']:
                if printing_count == 0:
                    log_msg(logger, web_server, 'Printing has started (%s)' % (stat['state']))
                printing_count += 1
            else:
                if printing_count != 0:
                    log_msg(logger, web_server, 'Printing has stopped (%s)' % (stat['state']))
                printing_count = 0
            # Only trust the alarm once the print has run long enough for the
            # rolling window to contain real data
            valid = False
            if printing_count >= config['alarmminprinttime']:
                valid = True
            alarm = False
            # NOTE(review): possible ZeroDivisionError when gcode_change is 0
            # -- confirm octoprint.status() never reports zero commanded
            # movement while 'valid' is True.
            if valid and (meas_change_norm / stat['gcode_change']) < config['alarmchangethreshold']:
                alarm = True
            logger.debug('State: printing_count=%d alarm=%d', printing_count, alarm)
            chart_time = time.time() * 1000  # JS-style milliseconds for the web chart
            if web_server:
                web_server.update({
                    'gcode': [chart_time, stat['gcode_change']],
                    'actual': [chart_time, meas_change_norm],
                    'gcode_history': web_gcode_history,
                    'actual_history': web_actual_history,
                    'history_length': web_history_length,
                    'alarm': alarm,
                    'printing': stat['printing'],
                    'valid': valid,
                    'time_to_valid': config['alarmminprinttime'] - printing_count,
                    'filament_pos': pos,
                    'summary': stat['summary'],
                    'file_pos': stat['file_pos'],
                    'bed_target': stat['bed_target'],
                    'bed_actual': stat['bed_actual'],
                    'tool0_target': stat['tool0_target'],
                    'tool0_actual': stat['tool0_actual'],
                })
                # Make the history mirror the javascript state before it does addPoint
                web_gcode_history.append([chart_time, stat['gcode_change']])
                web_actual_history.append([chart_time, meas_change_norm])
                if len(web_gcode_history) > web_history_length:
                    web_gcode_history.pop(0)
                    web_actual_history.pop(0)
            # Write a CSV row when something interesting happened, plus a
            # heartbeat row at most once per idle_logging_interval iterations
            if stat['printing'] or alarm or meas_change_raw != 0 or skipped_log_count >= (idle_logging_interval - 1):
                fields = [
                    time.strftime('%H:%M:%S'), alarm, stat['printing'], valid,
                    pos, meas_change_norm, stat['gcode_change'],
                    stat['summary'], stat['state'], stat['file_name'], stat['file_pos'],
                    stat['file_size'], stat['gcode_filament_pos'], stat['gcode_filament_total'],
                    stat['bed_target'], stat['bed_actual'], stat['tool0_target'], stat['tool0_actual']]
                fields = [str(x) for x in fields]
                if csv:
                    csv.write(','.join(fields))
                    csv.write('\n')
                    csv.flush()
                skipped_log_count = 0
            else:
                skipped_log_count += 1
            if alarm:
                logger.error('Alarm triggered - canceling job')
                octoprint.issue_job_cmd(config['alarmaction'])
    finally:
        # Best-effort cleanup of the CSV log and web server on any exit path
        if csv:
            csv.flush()
            csv.close()
            csv = None
        if web_server:
            web_server.stop()
            web_server = None
| StarcoderdataPython |
6625132 | '''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Algorithms/CloudMasking/modis_surface_reflectance_qa_band.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/CloudMasking/modis_surface_reflectance_qa_band.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Algorithms/CloudMasking/modis_surface_reflectance_qa_band.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/CloudMasking/modis_surface_reflectance_qa_band.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell.
'''
# %%
# %%capture
# !pip install earthengine-api
# !pip install geehydro
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
'''
# %%
# ee.Authenticate()
# Initialize the Earth Engine client library (requires prior authentication)
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
# Interactive folium map centered over the continental US; 'HYBRID' shows
# satellite imagery with labels
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
# Modis Cloud Masking example.
# Calculate how frequently a location is labeled as clear (i.e. non-cloudy)
# according to the "internal cloud algorithm flag" of the MODIS "state 1km"
# QA band.
def maskEmptyPixels(image):
    """Mask out pixels that had no MODIS observations during this period."""
    observed = image.select('num_observations_1km').gt(0)
    return image.updateMask(observed)
def maskClouds(image):
    """Mask out cloudy pixels using bit 10 (the internal_cloud_algorithm_flag)
    of the MODIS 'state_1km' QA band; keeps pixels where the bit is clear."""
    qa = image.select('state_1km')
    cloud_bit = 1 << 10
    return image.updateMask(qa.bitwiseAnd(cloud_bit).eq(0))
# Start with an image collection for a 1 month period.
# and mask out areas that were not observed.
collection = ee.ImageCollection('MODIS/006/MOD09GA') \
    .filterDate('2010-04-01', '2010-05-01') \
    .map(maskEmptyPixels)
# Get the total number of potential observations for the time interval.
totalObsCount = collection \
    .select('num_observations_1km') \
    .count()
# Map the cloud masking function over the collection.
collectionCloudMasked = collection.map(maskClouds)
# Get the total number of observations for non-cloudy pixels for the time
# interval. The result is unmasked to set to unity so that all locations
# have counts, and the ratios later computed have values everywhere.
clearObsCount = collectionCloudMasked \
    .select('num_observations_1km') \
    .count() \
    .unmask(0)
# Visualize the cloud-free median composite plus the observation-count
# diagnostic layers (total, clear, and their ratio).
Map.addLayer(
    collectionCloudMasked.median(),
    {'bands': ['sur_refl_b01', 'sur_refl_b04', 'sur_refl_b03'],
     'gain': 0.07,
     'gamma': 1.4
    },
    'median of masked collection'
  )
Map.addLayer(
    totalObsCount,
    {'min': 84, 'max': 92},
    'count of total observations',
    False
  )
Map.addLayer(
    clearObsCount,
    {'min': 0, 'max': 90},
    'count of clear observations',
    False
  )
Map.addLayer(
    clearObsCount.toFloat().divide(totalObsCount),
    {'min': 0, 'max': 1},
    'ratio of clear to total observations'
  )
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map | StarcoderdataPython |
395667 | <reponame>rabernat/scikit-downscale
import numpy as np
from scipy.spatial import cKDTree
from sklearn.base import RegressorMixin
from sklearn.linear_model import LinearRegression
from sklearn.linear_model.base import LinearModel
from sklearn.utils.validation import check_is_fitted
from .utils import ensure_samples_features
class AnalogBase(LinearModel, RegressorMixin):
    # Fitted attributes that must exist after fit() (used via check_is_fitted)
    _fit_attributes = ["kdtree_", "y_"]

    def fit(self, X, y):
        """ Fit Analog model using a KDTree
        Parameters
        ----------
        X : pd.Series or pd.DataFrame, shape (n_samples, 1)
            Training data
        y : pd.Series or pd.DataFrame, shape (n_samples, 1)
            Target values.
        Returns
        -------
        self : returns an instance of self.
        """
        # NOTE: kdtree_kwargs is defined by the concrete subclasses
        # (AnalogRegression / PureAnalog), not by this base class.
        self.kdtree_ = cKDTree(X, **self.kdtree_kwargs)
        self.y_ = y
        return self
class AnalogRegression(AnalogBase):
    """ AnalogRegression
    Parameters
    ----------
    n_analogs: int
        Number of analogs to use when building linear regression
    kdtree_kwargs : dict
        Keyword arguments to pass to the scipy.spatial.cKDTree constructor
    query_kwargs : dict
        Keyword arguments to pass to the scipy.spatial.cKDTree.query method
    lr_kwargs : dict
        Keyword arguments to pass to the sklearn.linear_model.LinearRegression
        constructor
    Attributes
    ----------
    kdtree_ : scipy.spatial.cKDTree
        KDTree object
    """

    def __init__(self, n_analogs=200, kdtree_kwargs={}, query_kwargs={}, lr_kwargs={}):
        # The dict defaults follow the sklearn "store params as given"
        # convention; they are only unpacked, never mutated.
        self.n_analogs = n_analogs
        self.kdtree_kwargs = kdtree_kwargs
        self.query_kwargs = query_kwargs
        self.lr_kwargs = lr_kwargs

    def predict(self, X):
        """ Predict using the AnalogRegression model
        Parameters
        ----------
        X : DataFrame, shape (n_samples, 1)
            Samples.
        Returns
        -------
        C : pd.DataFrame, shape (n_samples, 1)
            Returns predicted values.
        """
        check_is_fitted(self, self._fit_attributes)
        predicted = np.empty(len(X))
        # TODO - extract from lr_model's below.
        self.stats = {}
        for i, (_, row) in enumerate(X.iterrows()):
            # predict for this time step
            predicted[i] = self._predict_one_step(row.values)
        return predicted

    def _predict_one_step(self, X):
        # Never request more neighbors than the tree holds. Bug fix: this
        # used max(), which asked for every training point -- and, for small
        # training sets, for more points than exist, making cKDTree.query
        # return the invalid fill index n with infinite distance.
        k = min(len(self.kdtree_.data), self.n_analogs)
        _, inds = self.kdtree_.query(X, k=k, **self.query_kwargs)
        # cKDTree.query returns 0-based indices; use them directly. Bug fix:
        # the old 'inds - 1' shifted every neighbor by one and wrapped index 0
        # to -1, i.e. the *last* training sample.
        x = ensure_samples_features(self.kdtree_.data[inds])
        y = ensure_samples_features(self.y_.values[inds])
        # Train a local linear regression on the selected analogs
        lr_model = LinearRegression(**self.lr_kwargs).fit(x, y)
        # predict for this time step
        predicted = lr_model.predict(ensure_samples_features(X))
        return predicted
class PureAnalog(AnalogBase):
    """ PureAnalog
    Attributes
    ----------
    kdtree_ : scipy.spatial.cKDTree
        KDTree object
    n_analogs : int
        Number of analogs to use
    thresh : float
        Subset analogs based on threshold
    stats : bool
        Calculate fit statistics during predict step
    kdtree_kwargs : dict
        Dictionary of keyword arguments to pass to cKDTree constructor
    query_kwargs : dict
        Dictionary of keyword arguments to pass to `cKDTree.query`
    """
    def __init__(
        self,
        n_analogs=200,
        kind="best_analog",
        thresh=None,
        stats=True,
        kdtree_kwargs={},
        query_kwargs={},
    ):
        self.thresh = thresh
        self.stats = stats
        self.kdtree_kwargs = kdtree_kwargs
        self.query_kwargs = query_kwargs
        # 'best_analog' (or a single analog) collapses to k=1
        if kind == "best_analog" or n_analogs == 1:
            self.n_analogs = 1
            self.kind = "best_analog"
        else:
            self.n_analogs = n_analogs
            self.kind = kind
    def predict(self, X):
        """Predict using the PureAnalog model
        Parameters
        ----------
        X : pd.Series or pd.DataFrame, shape (n_samples, 1)
            Samples.
        Returns
        -------
        C : pd.DataFrame, shape (n_samples, 1)
            Returns predicted values.
        """
        check_is_fitted(self, self._fit_attributes)
        self.stats_ = {}
        # Nearest-analog distances and indices, then the candidate target
        # values for each sample (shape follows inds)
        dist, inds = self.kdtree_.query(X, k=self.n_analogs, **self.query_kwargs)
        analogs = np.take(self.y_.values, inds, axis=0)
        if self.thresh is not None:
            # TODO: rethink how the analog threshold is applied.
            # There are certainly edge cases not dealt with properly here
            # particularly in the weight analogs case
            analog_mask = analogs > self.thresh
            # NOTE(review): boolean indexing flattens masked_analogs to 1-D,
            # so the axis=1 reductions used below look wrong for 2-D input --
            # confirm with a multi-sample case.
            masked_analogs = analogs[analog_mask]
        if self.kind == "best_analog":
            predicted = analogs
        elif self.kind == "sample_analogs":
            # get 1 random index to sample from the analogs
            rand_inds = np.random.randint(low=0, high=self.n_analogs, size=len(X))
            # select the analog now
            predicted = select_analogs(analogs, rand_inds)
        elif self.kind == "weight_analogs":
            # take weighted average
            # work around for zero distances (perfect matches)
            tiny = 1e-20
            weights = 1.0 / np.where(dist == 0, tiny, dist)
            if self.thresh:
                predicted = np.average(masked_analogs, weights=weights, axis=1)
            else:
                predicted = np.average(analogs.squeeze(), weights=weights, axis=1)
        elif self.kind == "mean_analogs":
            if self.thresh is not None:
                predicted = masked_analogs.mean(axis=1)
            else:
                predicted = analogs.mean(axis=1)
        else:
            raise ValueError("got unexpected kind %s" % self.kind)
        if self.thresh is not None:
            # for mean/weight cases, this fills nans when all analogs
            # were below thresh
            predicted = np.nan_to_num(predicted, nan=0.0)
        if self.stats:
            # calculate the standard deviation of the analogs
            if self.thresh is None:
                self.stats_["error"] = analogs.std(axis=1)
            else:
                # NOTE(review): 'analogs' is a numpy array here (np.take) and
                # ndarrays have no .where method -- this line appears to
                # assume a pandas/xarray object; confirm.
                self.stats_["error"] = analogs.where(analog_mask).std(axis=1)
            # calculate the probability of precip
            # NOTE(review): analog_mask is only defined when thresh is not
            # None -- this raises NameError otherwise; confirm stats=True is
            # only used together with a threshold.
            self.stats_["pop"] = np.where(analog_mask, 1, 0).mean(axis=1)
        return predicted
def select_analogs(analogs, inds):
    """Select, for each row i of *analogs*, the column given by inds[i].

    Vectorized with fancy indexing (resolving the old TODO); the result is
    cast to float64 to match the previous np.empty-based implementation.
    """
    rows = np.arange(len(analogs))
    return analogs[rows, np.asarray(inds)].astype(np.float64)
| StarcoderdataPython |
8088785 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch_geometric.utils import remove_self_loops, add_self_loops, softmax
from tqdm import tqdm, trange
from scipy import sparse
class ClassBoundaryLoss(_Loss):
    """Margin ranking loss that pushes attention on same-class (positive)
    edges above attention on different-class (negative) edges.

    For every masked node, attention over its positive neighbors and over
    its negative neighbors is summed separately; each (positive, negative)
    element pair then contributes relu(neg - pos + margin).

    (Leftover debug scaffolding -- an unreachable pdb breakpoint guarded by a
    never-decremented counter, plus commented-out code -- has been removed.)
    """

    __constants__ = ['reduction', 'flow', 'edge_index', 'margin']

    def __init__(self, margin, size_average=None, reduce=None, reduction='mean', flow='source_to_target'): # by default is fine
        """
        Args:
            margin: separation enforced between negative and positive
                attention sums
            size_average, reduce, reduction: standard _Loss reduction options
            flow: message-passing direction convention, matching
                torch_geometric ('source_to_target' or 'target_to_source')
        """
        super(ClassBoundaryLoss, self).__init__(
            size_average, reduce, reduction)
        assert flow in ['source_to_target', 'target_to_source']
        # (i, j) select which edge_index row holds the center node vs. the neighbor
        self.i, self.j = (0, 1) if flow == 'target_to_source' else (1, 0)
        self.margin = margin

    def forward(self, attention, class_target, edge_index, idx_mask, nodes):
        """Compute the class-boundary loss.

        Args:
            attention: per-edge attention values, aligned with the columns of
                edge_index after the self-loop normalization below
            class_target: per-node class labels
            edge_index: (2, E) COO edge index tensor
            idx_mask: boolean mask selecting the nodes to evaluate
            nodes: total node count (for add_self_loops)
        """
        edge_index, _ = remove_self_loops(edge_index)
        edge_index, _ = add_self_loops(edge_index, num_nodes=nodes)
        class_boundary_loss = torch.tensor(0.).to(attention.device)
        self.idx_mask = torch.nonzero(idx_mask).flatten()
        for idx in range(len(self.idx_mask)):
            node_idx = self.idx_mask[idx]
            # edges incident to this node on the 'center' side
            neibs_coo = torch.where(edge_index[self.i] == node_idx)[0]
            pos_neibs, neg_neibs = [], []
            for neib in neibs_coo.flatten():
                if class_target[edge_index[self.j][neib]] == class_target[node_idx]:
                    pos_neibs.append(attention[neib])
                else:
                    neg_neibs.append(attention[neib])
            if pos_neibs and neg_neibs:
                # Sum attention over positive and negative neighbors, then
                # apply the margin to every (pos, neg) element pair
                pos_neibs, neg_neibs = torch.stack(pos_neibs).sum(
                    0), torch.stack(neg_neibs).sum(0)
                for pos_neib_att in pos_neibs:
                    for neg_neib_att in neg_neibs:
                        class_boundary_loss += torch.relu(
                            neg_neib_att - pos_neib_att + self.margin).mean()
        if self.reduction == 'mean':
            class_boundary_loss /= len(self.idx_mask)
        return class_boundary_loss
# class GraphStructureLoss(_Loss):
# __constants__ = ['reduction', 'flow', 'edge_index', 'margin']
# def __init__(self, edge_index, idx_mask, margin, size_average=None, reduce=None, reduction='mean', flow='source_to_target'): # by default is fine
# super(GraphStructureLoss, self).__init__(
# size_average, reduce, reduction)
# assert flow in ['source_to_target', 'target_to_source']
# self.i, self.j = (0, 1) if flow == 'target_to_source' else (1, 0)
# self.idx_mask = torch.nonzero(idx_mask).flatten()
# self.margin = margin
# self.edge_index = edge_index
# def forward(self, attention, class_target):
# class_boundary_loss = 0.
# for idx in range(len(self.idx_mask)):
# node_idx = self.idx_mask[idx]
# neibs_coo = torch.where(self.edge_index[self.i] == node_idx)[0]
# pos_neibs, neg_neibs = [], []
# for neib in neibs_coo.flatten():
# if class_target[self.edge_index[self.j][neib]] == class_target[node_idx]:
# pos_neibs.append(attention[neib])
# else:
# neg_neibs.append(attention[neib])
# if pos_neibs and neg_neibs:
# # pos_neibs, neg_neibs = torch.LongTensor(pos_neibs).to(attention.device), torch.LongTensor(neg_neibs).to(attention.device)
# for pos_neib_att in pos_neibs:
# for neg_neib_att in neg_neibs:
# class_boundary_loss += torch.relu(
# neg_neib_att - pos_neib_att + self.margin).mean()
# if self.reduction == 'mean':
# class_boundary_loss /= len(self.idx_mask)
# import pdb; pdb.set_trace()
# return class_boundary_loss
| StarcoderdataPython |
11233809 | <filename>observations/r/intqrt.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def intqrt(path):
  """intqrt

  Quarterly interest-rate data (Wooldridge). Loads lazily: the CSV is
  downloaded on first use and cached under *path*.

  A data.frame with 124 rows and 23 variables:

  -  r3. bond equiv. yield, 3 mo T-bill
  -  r6. bond equiv. yield, 6 mo T-bill
  -  r12. yield on 1 yr. bond
  -  p3. price of 3 mo. T-bill
  -  p6. price of 6 mo. T-bill
  -  hy6. 100\*(p3 - p6[\_n-1])/p6[\_n-1])
  -  hy3. r3\*(91/365)
  -  spr63. r6 - r3
  -  hy3\_1. hy3[\_n-1]
  -  hy6\_1. hy6[\_n-1]
  -  spr63\_1. spr63[\_n-1]
  -  hy6hy3\_1. hy6 - hy3\_1
  -  cr3. r3 - r3\_1
  -  r3\_1. r3[\_n-1]
  -  chy6. hy6 - hy6\_1
  -  chy3. hy3 - hy3\_1
  -  chy6\_1. chy6[\_n-1]
  -  chy3\_1. chy3[\_n-1]
  -  cr6. r6 - r6\_1
  -  cr6\_1. cr6[\_n-1]
  -  cr3\_1. cr3[\_n-1]
  -  r6\_1. r6[\_n-1]
  -  cspr63. spr63 - spr63\_1

  Args:

    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `intqrt.csv`.

  Returns:

    Tuple of np.ndarray `x_train` with 124 rows and 23 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  csv_path = os.path.join(path, 'intqrt.csv')
  if not os.path.exists(csv_path):
    url = 'http://dustintran.com/data/r/wooldridge/intqrt.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='intqrt.csv',
                               resume=False)
  frame = pd.read_csv(csv_path, index_col=0, parse_dates=True)
  return frame.values, {'columns': frame.columns}
| StarcoderdataPython |
223107 | <reponame>gluesolutions/glue-vispy-viewers
import numpy as np
from vispy.visuals.transforms import (ChainTransform, NullTransform,
MatrixTransform, STTransform)
from vispy.visuals.transforms.base_transform import InverseTransform
from vispy.visuals.transforms._util import arg_to_vec4
def as_matrix_transform(transform):
    """
    Collapse *transform* into a single MatrixTransform, which makes repeated
    coordinate transformations much faster to compute.

    Raises a TypeError when the transform type cannot be simplified.
    """
    if isinstance(transform, ChainTransform):
        combined = np.identity(4)
        for sub in transform.transforms:
            # The matrix product is written by hand because VisPy sometimes
            # declines to multiply matrices that carry a perspective
            # component. The order looks reversed, but VisPy matrices are
            # stored transposed.
            combined = np.matmul(as_matrix_transform(sub).matrix, combined)
        return MatrixTransform(combined)
    if isinstance(transform, InverseTransform):
        return MatrixTransform(as_matrix_transform(transform._inverse).inv_matrix)
    if isinstance(transform, NullTransform):
        return MatrixTransform()
    if isinstance(transform, STTransform):
        return transform.as_matrix()
    if isinstance(transform, MatrixTransform):
        return transform
    raise TypeError("Could not simplify transform of type {0}".format(type(transform)))
# Use the upstream helper when the installed glue version provides it;
# otherwise fall back to the local reimplementation below.
try:
    from glue.utils.qt import fix_tab_widget_fontsize  # noqa
except ImportError:
    import platform
    from glue.utils.qt import get_qapp

    def fix_tab_widget_fontsize(tab_widget):
        """
        Because of a bug in Qt, tab titles on MacOS X don't have the right font size
        """
        if platform.system() == 'Darwin':
            app = get_qapp()
            app_font = app.font()
            tab_widget.setStyleSheet('font-size: {0}px'.format(app_font.pointSize()))
class NestedSTTransform(STTransform):
    """An STTransform composed with an inner STTransform: points are mapped
    through the inner scale/translate first, then the outer one, in a single
    GLSL shader stage (avoiding an extra transform node on the GPU)."""

    # Forward GLSL map: inner ST first, then outer ST (matches map() below).
    glsl_map = """
        vec4 st_transform_map(vec4 pos) {
            return vec4((pos.xyz * $innerscale.xyz + $innertranslate.xyz * pos.w).xyz
                        * $scale.xyz + $translate.xyz * pos.w, pos.w);
        }
    """

    # NOTE(review): this inverse applies the *inner* inverse before the
    # *outer* inverse, while imap() below does the opposite order -- confirm
    # which order is intended before relying on GPU-side inverse mapping.
    glsl_imap = """
        vec4 st_transform_imap(vec4 pos) {
            return vec4((((pos.xyz - $innertranslate.xyz * pos.w) / $innerscale.xyz)
                        - $translate.xyz * pos.w) / $scale.xyz, pos.w);
        }
    """

    def __init__(self):
        # The nested transform; callers mutate self.inner directly.
        self.inner = STTransform()
        super(NestedSTTransform, self).__init__()

    @arg_to_vec4
    def map(self, coords):
        """Map *coords* through the inner transform, then the outer one."""
        coords = self.inner.map(coords)
        coords = super(NestedSTTransform, self).map(coords)
        return coords

    @arg_to_vec4
    def imap(self, coords):
        """Inverse-map *coords*: outer inverse first, then inner inverse."""
        coords = super(NestedSTTransform, self).imap(coords)
        coords = self.inner.imap(coords)
        return coords

    def _update_shaders(self):
        # Push both the outer and inner scale/translate uniforms to the
        # forward and inverse shader snippets.
        self._shader_map['scale'] = self.scale
        self._shader_map['translate'] = self.translate
        self._shader_imap['scale'] = self.scale
        self._shader_imap['translate'] = self.translate
        self._shader_map['innerscale'] = self.inner.scale
        self._shader_map['innertranslate'] = self.inner.translate
        self._shader_imap['innerscale'] = self.inner.scale
        self._shader_imap['innertranslate'] = self.inner.translate
| StarcoderdataPython |
12801431 | import os
import cv2
import torch
from torch.utils.data import Dataset
class ChestHeartDataset(Dataset):
    """Segmentation dataset pairing chest JPEG images with PNG label masks.

    (Leftover debug attribute and the dead commented-out visualization code
    after the return statement have been removed.)
    """

    def __init__(self, img_ids, data_dir, transform=None):
        """
        Args:
            img_ids: list of image identifiers (file names without extension)
            data_dir: root directory containing 'images/' and 'masks/' subdirs
            transform: optional transform called with image=/mask= keyword
                arguments returning a dict (albumentations-style API)
        """
        self.img_ids = img_ids
        self.transform = transform
        self.img_dir = os.path.join(data_dir, 'images')
        self.mask_dir = os.path.join(data_dir, 'masks')
        self.img_ext = ".jpg"
        self.mask_ext = ".png"
        self.num_classes = 3

    def __len__(self):
        return len(self.img_ids)

    def __getitem__(self, idx: int):
        img_id = self.img_ids[idx]
        # 原图 (original image) and 标签 (label mask)
        img = cv2.imread(os.path.join(self.img_dir, img_id + self.img_ext))
        mask = cv2.imread(os.path.join(self.mask_dir, img_id + self.mask_ext), cv2.IMREAD_GRAYSCALE)
        # Optional data augmentation
        if self.transform is not None:
            augmented = self.transform(image=img, mask=mask)
            img = augmented['image']
            mask = augmented['mask']
        # HWC -> CHW float tensor for the model; mask stays as integer class ids
        img = img.transpose(2, 0, 1)
        img = torch.tensor(img, dtype=torch.float)
        mask = torch.tensor(mask, dtype=torch.long)
        return img, mask, {'img_id': img_id}
| StarcoderdataPython |
6549791 | <reponame>moslog/exam-app
from datetime import datetime
def resolve_date(date: datetime) -> str:
    """Format *date* as an Indonesian 'day month year' string,
    e.g. '17 Agustus 2021'."""
    month_names = (
        'Januari', 'Februari', 'Maret', 'April', 'Mei', 'Juni',
        'Juli', 'Agustus', 'September', 'Oktober', 'November', 'Desember',
    )
    return f'{date.day} {month_names[date.month - 1]} {date.year}'
1740151 | <filename>Labs/fixbold.py
#! /usr/bin/env python3
# fix the **bold** tags in html pre sections
import sys
from tempfile import mkstemp
import os
filename = sys.argv[1]
inf = open(filename, "r")
fd, tfile = mkstemp('','tmp','.',True)
outf = open(tfile, "w")
INPRE = False
for line in inf:
if line[:5] == "<pre>":
INPRE = True
if line[len(line)-7:] == "</pre>":
INPRE = False
# look for **some stuff** that's INSIDE a pre section
if INPRE:
if line.count("**") == 2 and line.count("****") == 0:
# convert them from ** to <strong>xxx</strong>
new = line.replace("**","<strong>", 1)
line = new.replace("**","</strong>", 1)
outf.write(line)
inf.close()
outf.close()
os.close(fd)
os.rename(tfile, filename)
| StarcoderdataPython |
363369 | from agrirouter.constants.media_types import ContentTypes
class SoftwareOnboardingHeader:
    """HTTP headers for an agrirouter software onboarding request."""

    def __init__(self,
                 reg_code,
                 application_id,
                 signature=None,
                 content_type=ContentTypes.APPLICATION_JSON.value
                 ):
        self._set_params(reg_code, application_id, signature, content_type)

    def get_header(self) -> dict:
        """Return the assembled header dictionary."""
        return self.params

    def sign(self, signature: str):
        """Attach *signature* to the already-built headers."""
        self.params["X-Agrirouter-Signature"] = signature

    def _set_params(self, reg_code: str, application_id: str, signature: str, content_type: str):
        # Build the headers in one literal; the signature defaults to the
        # empty string until sign() is called.
        self.params = {
            "Authorization": f"Bearer {reg_code}",
            "Content-Type": content_type,
            "X-Agrirouter-ApplicationId": application_id,
            "X-Agrirouter-Signature": signature if signature else "",
        }
| StarcoderdataPython |
8073496 | # SPDX-FileCopyrightText: 2021 <NAME>
# Modified by <NAME> 2021 all the keys bar the modifier key
# can now be used as layer select and input keys
# prints debug messages via debug serial port (USB)
# sudo cat /dev/ttyACM0
# SPDX-License-Identifier: MIT
# An advanced example of how to set up a HID keyboard.
# There are four layers defined out of fifteen possible,
# selected by pressing and holding key 0 (bottom left),
# then tapping one of the coloured layer selector keys to switch layer.
# The defined layer colours are as follows:
# * layer 1: pink: numpad-style keys, 0-9, delete, and enter.
# * layer 2: blue: sends strings on each key press
# * layer 3: yellow: media controls, rev, play/pause, fwd on row one, vol. down, mute,
# vol. up on row two
# * layer 4: white: sends mixxx controls
import board
import time
from keybow2040 import Keybow2040
import usb_hid
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
from adafruit_hid.keycode import Keycode
from adafruit_hid.consumer_control import ConsumerControl
from adafruit_hid.consumer_control_code import ConsumerControlCode
# Set up Keybow
i2c = board.I2C()
keybow = Keybow2040(i2c)
keys = keybow.keys

# Set up the keyboard and layout
keyboard = Keyboard(usb_hid.devices)
layout = KeyboardLayoutUS(keyboard)

# Set up consumer control (used to send media key presses)
consumer_control = ConsumerControl(usb_hid.devices)

# Our layers. The key of each item in a layer dictionary is the key number on
# Keybow to map to, and the value is the key press to send.
# Note that key 0 is reserved as the modifier.

# purple - numeric keypad
layer_1 = {4: Keycode.ZERO,
           5: Keycode.ONE,
           6: Keycode.FOUR,
           7: Keycode.SEVEN,
           8: Keycode.DELETE,
           9: Keycode.TWO,
           10: Keycode.FIVE,
           11: Keycode.EIGHT,
           12: Keycode.ENTER,
           13: Keycode.THREE,
           14: Keycode.SIX,
           15: Keycode.NINE}

# blue - words
layer_2 = {7: "pack ",
           11: "my ",
           15: "box ",
           6: "with ",
           10: "five ",
           14: "dozen ",
           5: "liquor ",
           9: "jugs "}

# yellow - media controls
layer_3 = {6: ConsumerControlCode.VOLUME_DECREMENT,
           7: ConsumerControlCode.SCAN_PREVIOUS_TRACK,
           10: ConsumerControlCode.MUTE,
           11: ConsumerControlCode.PLAY_PAUSE,
           14: ConsumerControlCode.VOLUME_INCREMENT,
           15: ConsumerControlCode.SCAN_NEXT_TRACK}

# white - mixxx
layer_4 = {2: Keycode.X,
           5: Keycode.D,
           7: Keycode.T,
           8: Keycode.SPACE,
           13: Keycode.L,
           15: Keycode.Y}

layers = {1: layer_1,
          2: layer_2,
          3: layer_3,
          4: layer_4}

# Keys that pick a layer while the modifier is held.
selectors = {1: keys[1],
             2: keys[2],
             3: keys[3],
             4: keys[4]}

# Define the modifier key (bottom left).
modifier = keys[0]

# Start on layer 1
current_layer = 1

# The colours for each layer
colours = {1: (255, 0, 255),
           2: (0, 255, 255),
           3: (255, 255, 0),
           4: (128, 128, 128)}

layer_keys = range(0, 16)

# Set the LEDs for each key in the current layer
for k in layers[current_layer].keys():
    keys[k].set_led(*colours[current_layer])

print("Starting!")

# mode state machine:
#   0 - repaint the current layer, then start handling key presses
#   1 - handling key presses on the current layer
#   2 - modifier held, waiting for a layer-selector press
mode = 0

# To prevent the strings (as opposed to single key presses) that are sent from
# refiring on a single key press, the debounce time for the strings has to be
# longer.
short_debounce = 0.03
long_debounce = 0.15
debounce = 0.03
fired = False

while True:
    # Always remember to call keybow.update()
    keybow.update()

    # If nothing is pressed, make sure we don't stay stuck in selection mode.
    # (Was `(mode == 2) & keybow.none_pressed()`: use boolean `and`, not
    # bitwise `&`, on boolean expressions.)
    if mode == 2 and keybow.none_pressed():
        mode = 0

    if modifier.held:
        # First pass with the modifier held: light only the selector keys.
        if mode == 1:
            print("Looking for layer select")
            for k in layer_keys:
                keys[k].set_led(0, 0, 0)
            for k in selectors.keys():
                keys[k].set_led(*colours[k])
            keys[0].set_led(0, 255, 0)
            mode = 2

        # Change current layer if a selector key is pressed.
        for layer in layers.keys():
            if selectors[layer].pressed:
                if mode >= 1:
                    mode = 0
                current_layer = layer
                print("Layer Changed:", current_layer)

                # Repaint the LEDs for the newly selected layer.
                for k in layer_keys:
                    keys[k].set_led(0, 0, 0)
                for k in layers[current_layer].keys():
                    keys[k].set_led(*colours[current_layer])
    else:
        # Modifier released: repaint the active layer once, then handle keys.
        if mode == 0:
            print("Looking for key press on layer:", current_layer)
            mode = 1
            for k in layer_keys:
                keys[k].set_led(0, 0, 0)
            for k in layers[current_layer].keys():
                keys[k].set_led(*colours[current_layer])

        # Loop through all of the keys in the layer and if they're pressed,
        # get the key code from the layer's key map.
        for k in layers[current_layer].keys():
            if keys[k].pressed:
                key_press = layers[current_layer][k]

                # If the key hasn't just fired (prevents refiring)
                if not fired:
                    fired = True

                    # Send the right sort of key press and set debounce for
                    # each layer accordingly (layer 2 needs a long debounce).
                    # (Was a bitwise `|` of comparisons; `in` is clearer.)
                    if current_layer in (1, 4):
                        debounce = short_debounce
                        keyboard.send(key_press)
                    elif current_layer == 2:
                        debounce = long_debounce
                        layout.write(key_press)
                    elif current_layer == 3:
                        debounce = short_debounce
                        consumer_control.send(key_press)

    # If enough time has passed, reset the fired variable
    if fired and time.monotonic() - keybow.time_of_last_press > debounce:
        fired = False
| StarcoderdataPython |
12846218 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Header
from geometry_msgs.msg import PoseStamped
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import sys
import numpy as np
import message_filters
import tf
class DepthImageProcessor:
    """ROS node estimating the base-station pose from depth + segmentation.

    Caches the latest stereo depth frame; on every segmentation frame it
    masks the depth image to the home-base label, averages the remaining
    depths, and publishes the result as a PoseStamped transformed from the
    camera frame into the odometry frame.
    """

    def __init__(self):
        # Label value of the home base in the segmentation image.
        self.HOME_BASE_KEY = 1
        rospy.init_node('depth_image_processor')
        self.bridge = CvBridge()
        self.listener = tf.TransformListener()
        # subscribe to depth image and segmentation result
        self.base_pose_pub = rospy.Publisher("/base_station/pose", PoseStamped, queue_size=3)
        rospy.Subscriber("/stereo/depth_image", Image, callback=self.depth_image_callback)
        rospy.Subscriber("/segmented_image", Image, callback=self.segmentat_image_callback)
        self.odom_frame = "odom"
        self.camera_frame = "scout_1_tf/camera_link"
        self.depth_image = None
        print("started node")
        rospy.spin()  # blocks until the node is shut down

    def depth_image_callback(self, depth_image):
        """Cache the latest depth frame as an OpenCV matrix."""
        try:
            cv_mat_depth = self.bridge.imgmsg_to_cv2(depth_image, desired_encoding="passthrough")
        except CvBridgeError as e:  # `as` replaces the Py2-only `except X, e`
            raise e
        self.depth_image = cv_mat_depth

    def segmentat_image_callback(self, segmentation_image):
        """Mask the cached depth frame to base-station pixels and publish pose."""
        try:
            cv_mat_seg = self.bridge.imgmsg_to_cv2(segmentation_image, desired_encoding="mono8")
        except CvBridgeError as e:
            raise e

        if self.depth_image is None:
            # No depth frame received yet.
            return

        # Build a binary mask of the home-base pixels.
        seg_mask = np.array(cv_mat_seg)
        seg_mask[seg_mask != self.HOME_BASE_KEY] = 0
        seg_mask[seg_mask > 0] = 1
        masked_depth = cv2.bitwise_and(self.depth_image, self.depth_image, mask=seg_mask)

        # Convert depth mask to numpy and clean NaNs.
        depths = np.array(masked_depth).flatten()
        where_are_NaNs = np.isnan(depths)
        depths[where_are_NaNs] = 0

        count = np.count_nonzero(depths)
        if count == 0:
            # Base station not visible: averaging would divide by zero.
            return
        total = depths.sum()  # renamed from `sum`, which shadowed the builtin
        dist = total / count

        # Publish the mean depth as a point straight ahead of the camera,
        # transformed into the odometry frame.
        obj_pose = PoseStamped()
        obj_pose.header = Header()
        obj_pose.header.frame_id = self.camera_frame
        obj_pose.pose.position.x = dist
        obj_pose.pose.position.y = 0
        obj_pose.pose.position.z = 0
        final_pose = self.listener.transformPose(self.odom_frame, obj_pose)
        self.base_pose_pub.publish(final_pose)
# cv2.waitKey(0)
# Instantiating the node registers subscribers and blocks in rospy.spin().
if __name__ == "__main__":
    DepthImageProcessor()
| StarcoderdataPython |
1690904 | # $Id: __init__.py 7646 2013-04-17 14:17:37Z milde $
# Author: <NAME> <<EMAIL>>
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils parser modules.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import Component
if sys.version_info < (2,5):
from docutils._compat import __import__
class Parser(Component):

    component_type = 'parser'
    config_section = 'parsers'

    def parse(self, inputstring, document):
        """Parse `inputstring` into the tree `document`; subclasses override."""
        raise NotImplementedError('subclass must override this method')

    def setup_parse(self, inputstring, document):
        """Prepare for parsing; call this at the start of `self.parse()`."""
        document.reporter.attach_observer(document.note_parse_message)
        self.inputstring = inputstring
        self.document = document

    def finish_parse(self):
        """Tear down after parsing; call this at the end of `self.parse()`."""
        reporter = self.document.reporter
        reporter.detach_observer(self.document.note_parse_message)
# Map common alternative spellings to the canonical parser module name.
_parser_aliases = {
      'restructuredtext': 'rst',
      'rest': 'rst',
      'restx': 'rst',
      'rtxt': 'rst',}
def get_parser_class(parser_name):
    """Locate and return the ``Parser`` class for `parser_name`."""
    name = parser_name.lower()
    name = _parser_aliases.get(name, name)
    # Prefer a package-relative import; fall back to an absolute one.
    try:
        module = __import__(name, globals(), locals(), level=1)
    except ImportError:
        module = __import__(name, globals(), locals(), level=0)
    return module.Parser
| StarcoderdataPython |
4820622 | """Tests for application configuration."""
import pytest
from almanac import (
ConflictingPromoterTypesError,
current_app,
InvalidCallbackTypeError
)
from .utils import get_test_app
@pytest.mark.asyncio
async def test_prompt_str_customization():
    """The prompt-text callback runs on every access and may carry state."""
    app = get_test_app()
    app.bag.counter = 0

    @app.prompt_text()
    def bump_prompt():
        active = current_app()
        active.bag.counter += 1
        return f'{active.bag.counter}> '

    for expected in ('1> ', '2> ', '3> '):
        assert app.current_prompt_str == expected
@pytest.mark.asyncio
async def test_invalid_prompt_str_callback():
    """Registering a coroutine as the prompt-text callback is rejected."""
    app = get_test_app()

    with pytest.raises(InvalidCallbackTypeError):
        @app.prompt_text()
        async def bad_prompt_callback():
            return 'prompt> '
@pytest.mark.asyncio
async def test_exit_callback():
    """Every registered exit callback fires exactly once per run."""
    app = get_test_app()
    app.bag.exit_count = 0

    register = app.on_exit()

    async def bump_exit_count():
        current_app().bag.exit_count += 1

    for _ in range(5):
        register(bump_exit_count)

    await app.run_on_exit_callbacks()
    assert app.bag.exit_count == 5
@pytest.mark.asyncio
async def test_invalid_exit_callback():
    """Exit callbacks must be coroutines; plain functions are rejected."""
    app = get_test_app()

    with pytest.raises(InvalidCallbackTypeError):
        @app.on_exit()
        def not_a_coroutine():
            pass
@pytest.mark.asyncio
async def test_init_callback():
    """Every registered init callback fires exactly once per run."""
    app = get_test_app()
    app.bag.init_count = 0

    register = app.on_init()

    async def bump_init_count():
        current_app().bag.init_count += 1

    for _ in range(5):
        register(bump_init_count)

    await app.run_on_init_callbacks()
    assert app.bag.init_count == 5
@pytest.mark.asyncio
async def test_invalid_init_callback():
    """Init callbacks must be coroutines; plain functions are rejected."""
    app = get_test_app()

    with pytest.raises(InvalidCallbackTypeError):
        @app.on_init()
        def not_a_coroutine():
            pass
@pytest.mark.asyncio
async def test_conflicting_type_promoters():
    """A second promoter for an already-promoted type raises, via either API."""
    app = get_test_app()
    app.add_promoter_for_type(bool, bool)
    with pytest.raises(ConflictingPromoterTypesError):
        app.add_promoter_for_type(bool, str)

    app = get_test_app()
    app.add_promoter_for_type(int, str)
    with pytest.raises(ConflictingPromoterTypesError):
        @app.promoter_for(int)
        def duplicate_int_promoter(x: int) -> str:
            return str(x)
| StarcoderdataPython |
9682569 | <gh_stars>0
# coding=utf-8
# flake8: noqa
from itertools import islice
from typing import List
from hypothesis import given
from hypothesis.strategies import integers
from oeis import recaman
@given(integers(min_value=2, max_value=100))
def test_recaman_greater_than_zero(n: int) -> None:
    """Every term of the Recamán sequence is non-negative."""
    seq: List[int] = list(islice(recaman(), n))
    assert seq[-1] >= 0
@given(integers(min_value=2, max_value=100))
def test_recaman_property(n: int) -> None:
    """Consecutive Recamán terms differ by exactly the step index."""
    seq: List[int] = list(islice(recaman(), n))
    assert abs(seq[-1] - seq[-2]) == n - 1
| StarcoderdataPython |
6589088 | import RPi.GPIO as GPIO
import time
import os
import commands
# Blink the LED on board pin 11 five times, then power the Pi off.
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(11, GPIO.OUT)

# The original repeated the on/off pair five times verbatim; a loop keeps
# the same timing (0.5 s on, 0.5 s off, five cycles) without duplication.
for _ in range(5):
    GPIO.output(11, 1)
    time.sleep(0.5)
    GPIO.output(11, 0)
    time.sleep(0.5)

os.system("sudo /sbin/shutdown -h now")
| StarcoderdataPython |
9779886 | <gh_stars>0
import datetime
import re
from json import loads, JSONDecodeError
from typing import Optional

from .net.http import ApiRequester
from .models.response import Response
from .exceptions.error import ParameterError, EmptyApiKeyError, \
    UnparsableApiResponseError
class Client:
    """Client for the WhoisXML Domains & Subdomains Discovery API."""

    __default_url = "https://domains-subdomains-discovery.whoisxmlapi.com" \
                    "/api/v1"
    # `X or None` in an annotation evaluates to X; use Optional[...] instead.
    _api_requester: Optional[ApiRequester]
    _api_key: str
    _re_api_key = re.compile(r'^at_[a-z0-9]{29}$', re.IGNORECASE)
    _PARSABLE_FORMAT = 'json'

    JSON_FORMAT = 'json'
    XML_FORMAT = 'xml'

    __DATETIME_OR_NONE_MSG = 'Value should be None or an instance of ' \
                             'datetime.date'

    def __init__(self, api_key: str, **kwargs):
        """
        :param api_key: str: Your API key.
        :key base_url: str: (optional) API endpoint URL.
        :key timeout: float: (optional) API call timeout in seconds
        """
        self._api_key = ''
        self.api_key = api_key

        if 'base_url' not in kwargs:
            kwargs['base_url'] = Client.__default_url
        self.api_requester = ApiRequester(**kwargs)

    @property
    def api_key(self) -> str:
        return self._api_key

    @api_key.setter
    def api_key(self, value: str):
        # Setter validates the format, so _api_key is never an invalid string.
        self._api_key = Client._validate_api_key(value)

    @property
    def api_requester(self) -> Optional[ApiRequester]:
        return self._api_requester

    @api_requester.setter
    def api_requester(self, value: ApiRequester):
        self._api_requester = value

    @property
    def base_url(self) -> str:
        return self._api_requester.base_url

    @base_url.setter
    def base_url(self, value: Optional[str]):
        # None restores the default endpoint.
        if value is None:
            self._api_requester.base_url = Client.__default_url
        else:
            self._api_requester.base_url = value

    @property
    def timeout(self) -> float:
        return self._api_requester.timeout

    @timeout.setter
    def timeout(self, value: float):
        self._api_requester.timeout = value

    def get(self, **kwargs) -> Response:
        """
        Get parsed API response as a `Response` instance.

        :key domains: Required if subdomains aren't specified.
            Dictionary. Take a look at API documentation for the format
        :key subdomains: Required if domains aren't specified
            Dictionary. Take a look at API documentation for the format
        :key since_date: Optional. datetime.date. Min date by default.
        :return: `Response` instance
        :raises ConnectionError:
        :raises DomainDiscoveryApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        # Force the parsable format regardless of what the caller asked for.
        kwargs['output_format'] = Client._PARSABLE_FORMAT

        response = self.get_raw(**kwargs)
        try:
            parsed = loads(str(response))
            if 'domainsCount' in parsed:
                return Response(parsed)
            raise UnparsableApiResponseError(
                "Could not find the correct root element.", None)
        except JSONDecodeError as error:
            raise UnparsableApiResponseError(
                "Could not parse API response",
                error)

    def get_raw(self, **kwargs) -> str:
        """
        Get raw API response.

        :key domains: Required if subdomains aren't specified.
            Dictionary. Take a look at API documentation for the format
        :key subdomains: Required if domains aren't specified
            Dictionary. Take a look at API documentation for the format
        :key since_date: Optional. datetime.date. Min date by default.
        :key output_format: Optional. Use constants JSON_FORMAT and XML_FORMAT
        :return: str
        :raises ConnectionError:
        :raises DomainDiscoveryApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        if self.api_key == '':
            raise EmptyApiKeyError('')

        domains = Client._validate_domains(kwargs['domains']) \
            if 'domains' in kwargs else None
        subdomains = Client._validate_subdomains(kwargs['subdomains']) \
            if 'subdomains' in kwargs else None
        if not domains and not subdomains:
            raise ParameterError(
                "Required either domains or subdomains")

        # `response_format` is a documented alias for `output_format`.
        if 'response_format' in kwargs:
            kwargs['output_format'] = kwargs['response_format']
        if 'output_format' in kwargs:
            output_format = Client._validate_output_format(
                kwargs['output_format'])
        else:
            output_format = Client._PARSABLE_FORMAT

        if 'since_date' in kwargs:
            since_date = Client._validate_date(kwargs['since_date'])
        else:
            since_date = Client._validate_date(datetime.date.min)

        return self._api_requester.post(self._build_payload(
            self.api_key,
            domains,
            subdomains,
            since_date,
            output_format
        ))

    @staticmethod
    def _build_payload(
            api_key,
            domains,
            subdomains,
            since_date,
            output_format,
    ) -> dict:
        """Assemble the POST body, dropping fields that were not provided."""
        tmp = {
            'apiKey': api_key,
            'domains': domains,
            'subdomains': subdomains,
            'sinceDate': since_date,
            'outputFormat': output_format,
        }
        return {k: v for k, v in tmp.items() if v is not None}

    @staticmethod
    def _validate_api_key(api_key) -> str:
        """Return the key if it matches ``at_`` + 29 alphanumerics, else raise."""
        if Client._re_api_key.search(str(api_key)) is not None:
            return str(api_key)
        raise ParameterError("Invalid API key format.")

    @staticmethod
    def _validate_date(value: Optional[datetime.date]):
        # Bug fix: the original returned str(value) for None too, sending the
        # literal string "None" as sinceDate. None now means "omit the field"
        # (it is filtered out by _build_payload).
        if value is None:
            return None
        if isinstance(value, datetime.date):
            return str(value)
        raise ParameterError(Client.__DATETIME_OR_NONE_MSG)

    @staticmethod
    def _validate_domains(value) -> dict:
        if value is None:
            raise ParameterError("Domain list cannot be None.")
        return Client._validate_terms(value)

    @staticmethod
    def _validate_output_format(value: str):
        if value.lower() in [Client.JSON_FORMAT, Client.XML_FORMAT]:
            return value.lower()
        raise ParameterError(
            f"Response format must be {Client.JSON_FORMAT} "
            f"or {Client.XML_FORMAT}")

    @staticmethod
    def _validate_subdomains(value) -> dict:
        if value is None:
            raise ParameterError("Subdomain list cannot be None.")
        return Client._validate_terms(value)

    @staticmethod
    def _validate_terms(value) -> dict:
        """Validate an include/exclude term dict and normalise it."""
        include, exclude = [], []
        if type(value) is dict:
            if 'include' in value:
                include = [str(s) for s in value['include']]
                include = [s for s in include if s]
                # Bug fix: the original `if 4 <= len(include) <= 1:` chained
                # comparison can never be true, so the 1-4 limit was never
                # enforced.
                if not 1 <= len(include) <= 4:
                    raise ParameterError("Include terms list must have "
                                         "from 1 to 4 terms.")
            if 'exclude' in value:
                exclude = [str(s) for s in value['exclude']]
                exclude = [s for s in exclude if s]
                # Bug fix: ditto for `if 4 <= len(exclude) <= 0:`.
                if len(exclude) > 4:
                    raise ParameterError("Exclude terms list must have "
                                         "from 0 to 4 terms.")
        if include:
            return {'include': include, 'exclude': exclude}
        raise ParameterError("Expected a dict with 2 lists of strings.")
| StarcoderdataPython |
361068 | import configargparse
import generation
import utils
__VERSION__ = '0.4.0'
def main():
    """Parse CLI/config options and generate a random planetary-system SVG.

    Defaults come from ``config.ini`` via configargparse; any flag given on
    the command line overrides the file value.
    """
    parser = configargparse.ArgParser(default_config_files=['config.ini'])
    parser.add('--width', required=False, type=int, help='SVG width')
    parser.add('--height', required=False, type=int, help='SVG height')
    parser.add('--color_palette', required=False, action='append', help='list of colors for stars')
    parser.add('--font_size', required=False, type=int, help='font size for planet name')
    parser.add('--nb_stars', required=False, type=int, help='number of stars')
    parser.add('--min_size_stars', required=False, type=int, help='minimal star size')
    parser.add('--max_size_stars', required=False, type=int, help='maximal star size')
    parser.add('--color_proba', required=False, type=float, help='probability of coloured star')
    parser.add('--nb_planets', required=False, type=int, help='number of planets')
    parser.add('--distance_planet', required=False, type=int, help='distance in pixel between each planet')
    parser.add('--min_size_planet', required=False, type=int, help='minimal planet size')
    parser.add('--max_size_planet', required=False, type=int, help='maximal planet size')
    parser.add('--ring_proba', required=False, type=float, help='probability of a ringed planet')
    parser.add('--min_ring', required=False, type=int, help='minimal number of rings')
    parser.add('--max_ring', required=False, type=int, help='maximal number of rings')
    parser.add('--moon_proba', required=False, type=float, help='probability for a planet to have moons')
    parser.add('--distance_moon', required=False, type=int, help='distance between each moon')
    parser.add('--min_nb_moons', required=False, type=int, help='minimal number of moon')
    parser.add('--max_nb_moons', required=False, type=int, help='maximal number of moon')
    parser.add('--min_size_moon', required=False, type=int, help='minimal size of moon')
    parser.add('--max_size_moon', required=False, type=int, help='maximal size of moon')
    parser.add('--id', required=False, help='random seed for generation')
    parser.add('-f', '--filename', default='solar_system.svg', required=False, help='file name to save')
    parser.add('-v', '--version', action='version', version=__VERSION__)
    options = parser.parse_args()

    # The id doubles as the random seed; derive one when not supplied so the
    # same id can reproduce the same system later.
    if options.id is None:
        options.id = utils.generate_id(5, options.nb_planets)
    print('Generating planetary system "{}"'.format(options.id))
    generation.generate(options)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
5000756 | <filename>backend/exam/admin.py
from django.contrib import admin
from exam.models import (
Question,
QuestionGroup,
Exam,
Choice,
Topic,
QuestionChoice,
SelectedChoices
)
# Register every exam model with the default admin site (same order as before).
for _model in (
    Question,
    Exam,
    Choice,
    Topic,
    QuestionGroup,
    QuestionChoice,
    SelectedChoices,
):
    admin.site.register(_model)
| StarcoderdataPython |
1630541 | #!/user/bin/python
import time
import os
import logging
# http://www.pythonforbeginners.com/files/reading-and-writing-files-in-python
# https://www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
def timeit(method):
    """Decorator that logs the wall-clock duration of each call to *method*.

    The wrapped function behaves exactly like the original and returns its
    result; an INFO record of the form ``'name' 1.23 sec`` is emitted per call.
    """
    from functools import wraps  # local import: file-level imports are fixed

    @wraps(method)  # preserve __name__/__doc__ of the wrapped function
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logging.info('%r %2.2f sec', method.__name__, te - ts)
        return result
    return timed
3443763 | #! /usr/bin/env python
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
# Used to trim down the "hpslit"-merged USNO-B files before
# building indices out of them.
from __future__ import print_function
import sys
from optparse import OptionParser
try:
import pyfits
except ImportError:
try:
from astropy.io import fits as pyfits
except ImportError:
raise ImportError("Cannot import either pyfits or astropy.io.fits")
from numpy import *
from astrometry.util.fits import *
from astrometry.util.healpix import *
from astrometry.util.starutil_numpy import *
from astrometry.util.usnob_cuts import *
def trim(infn, outfn):
    """Read a merged USNO-B FITS table, apply the standard USNO-B cuts,
    average the per-epoch magnitudes, and write a slimmed table to `outfn`.

    Only the columns needed for the cuts and the averages are loaded; the
    bookkeeping columns are dropped again before writing.
    """
    print('Reading', infn)
    X = fits_table(infn, columns=[
        'num_detections', 'flags', 'an_diffraction_spike',
        'field_1', 'field_3', 'magnitude_1', 'magnitude_3',
        'field_0', 'field_2', 'magnitude_0', 'magnitude_2',
        'ra', 'dec',
        ])
    print('Read', len(X), 'sources')
    print('Applying cuts')
    I = usnob_apply_cuts(X)
    # drop now-unwanted columns
    for c in ['flags', 'an_diffraction_spike',
              'num_detections' ]:
        X.delete_column(c)
    X.cut(I)
    print('Kept', len(X), 'sources')
    del I
    print('Computing average mags')
    # Narrow the dtypes before averaging to halve the memory footprint.
    X.field_0 = X.field_0.astype(np.int16)
    X.field_1 = X.field_1.astype(np.int16)
    X.field_2 = X.field_2.astype(np.int16)
    X.field_3 = X.field_3.astype(np.int16)
    X.magnitude_0 = X.magnitude_0.astype(np.float32)
    X.magnitude_1 = X.magnitude_1.astype(np.float32)
    X.magnitude_2 = X.magnitude_2.astype(np.float32)
    X.magnitude_3 = X.magnitude_3.astype(np.float32)
    # Fills in X.r_mag / X.b_mag from the per-epoch magnitudes.
    usnob_compute_average_mags(X)
    # The per-epoch columns are no longer needed once averaged.
    for c in [
        'field_1', 'field_3', 'magnitude_1', 'magnitude_3',
        'field_0', 'field_2', 'magnitude_0', 'magnitude_2']:
        X.delete_column(c)
    X.r_mag = X.r_mag.astype(np.float32)
    X.b_mag = X.b_mag.astype(np.float32)
    print('Writing output to', outfn)
    X.writeto(outfn)
    del X
if __name__ == '__main__':
    # Ad-hoc driver: healpixes that fit in memory are trimmed whole; the
    # oversized ones (07, 10) were pre-split with fitscopy and are trimmed
    # in halves, then re-merged with tabmerge (see the shell commands below).
    # The `if False:` blocks record already-completed runs.
    #for hp in range(12):
    if False:
        # fitscopy usnob-07.fits"[#row<100000000]" usnob-07-a.fits
        # fitscopy usnob-07.fits"[#row>=100000000]" usnob-07-b.fits
        infn = 'usnob-07-a.fits'
        outfn = 'usnob-trimmed-07-a.fits'
        trim(infn, outfn)

    if False:
        infn = 'usnob-07-b.fits'
        outfn = 'usnob-trimmed-07-b.fits'
        trim(infn, outfn)
        # cp usnob-trimmed-07-a.fits 07a.fits
        # tabmerge usnob-trimmed-07-b.fits+1 07a.fits+1
        # mv 07a.fits usnob-trimmed-07.fits

    if False:
        infn = 'usnob-10-a.fits'
        outfn = 'usnob-trimmed-10-a.fits'
        trim(infn, outfn)

    if True:
        infn = 'usnob-10-b.fits'
        outfn = 'usnob-trimmed-10-b.fits'
        trim(infn, outfn)

    #for hp in range(7,12):
    #for hp in range(8,12):
    for hp in range(11,12):
        infn = 'usnob-%02i.fits' % hp
        outfn = 'usnob-trimmed-%02i.fits' % hp
        trim(infn, outfn)
| StarcoderdataPython |
5168690 | from .Affine2DMat import Affine2DMat, Affine2DUniMat
from .math_ import (intersect_two_line, polygon_area, segment_length,
segment_to_vector)
from .nms import nms
| StarcoderdataPython |
261714 | <gh_stars>0
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MusicVAE generation script."""
# TODO(adarob): Add support for models with conditioning.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow as tf2
from magenta import music as mm
from magenta.models.music_vae import TrainedModel
from magenta.models.music_vae import configs
# Command-line flags for the generation script (TF1-style absl flags).
flags = tf.app.flags
logging = tf.logging
FLAGS = flags.FLAGS

flags.DEFINE_string(
    'run_dir', None,
    'Path to the directory where the latest checkpoint will be loaded from.')
flags.DEFINE_string(
    'checkpoint_file', None,
    'Path to the checkpoint file. run_dir will take priority over this flag.')
flags.DEFINE_string(
    'output_dir', '/tmp/music_vae/generated',
    'The directory where MIDI files will be saved to.')
flags.DEFINE_string(
    'config', None,
    'The name of the config to use.')
flags.DEFINE_string(
    'mode', 'sample',
    'Generate mode (either `sample` or `interpolate`).')
flags.DEFINE_string(
    'input_midi_1', None,
    'Path of start MIDI file for interpolation.')
flags.DEFINE_string(
    'input_midi_2', None,
    'Path of end MIDI file for interpolation.')
flags.DEFINE_integer(
    'num_outputs', 5,
    'In `sample` mode, the number of samples to produce. In `interpolate` '
    'mode, the number of steps (including the endpoints).')
flags.DEFINE_integer(
    'max_batch_size', 8,
    'The maximum batch size to use. Decrease if you are seeing an OOM.')
flags.DEFINE_float(
    'temperature', 0.5,
    'The randomness of the decoding process.')
flags.DEFINE_string(
    'log', 'INFO',
    'The threshold for what messages will be logged: '
    'DEBUG, INFO, WARN, ERROR, or FATAL.')
def _slerp(p0, p1, t):
"""Spherical linear interpolation."""
omega = np.arccos(
np.dot(np.squeeze(p0/np.linalg.norm(p0)),
np.squeeze(p1/np.linalg.norm(p1))))
so = np.sin(omega)
return np.sin((1.0-t)*omega) / so * p0 + np.sin(t*omega)/so * p1
def run(config_map):
  """Load model params, save config file and start trainer.

  Args:
    config_map: Dictionary mapping configuration name to Config object.

  Raises:
    ValueError: if required flags are missing or invalid.
  """
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')

  # NOTE(review): because of Python comparison chaining this condition only
  # rejects the case where BOTH flags are None; both-specified slips through
  # (run_dir then takes priority) — confirm whether that is intended.
  if FLAGS.run_dir is None == FLAGS.checkpoint_file is None:
    raise ValueError(
        'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.')
  if FLAGS.output_dir is None:
    raise ValueError('`--output_dir` is required.')
  tf.gfile.MakeDirs(FLAGS.output_dir)
  if FLAGS.mode != 'sample' and FLAGS.mode != 'interpolate':
    raise ValueError('Invalid value for `--mode`: %s' % FLAGS.mode)

  if FLAGS.config not in config_map:
    raise ValueError('Invalid config name: %s' % FLAGS.config)
  config = config_map[FLAGS.config]
  config.data_converter.max_tensors_per_item = None

  if FLAGS.mode == 'interpolate':
    if FLAGS.input_midi_1 is None or FLAGS.input_midi_2 is None:
      raise ValueError(
          '`--input_midi_1` and `--input_midi_2` must be specified in '
          '`interpolate` mode.')
    input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)
    input_midi_2 = os.path.expanduser(FLAGS.input_midi_2)
    if not os.path.exists(input_midi_1):
      raise ValueError('Input MIDI 1 not found: %s' % FLAGS.input_midi_1)
    if not os.path.exists(input_midi_2):
      raise ValueError('Input MIDI 2 not found: %s' % FLAGS.input_midi_2)
    input_1 = mm.midi_file_to_note_sequence(input_midi_1)
    input_2 = mm.midi_file_to_note_sequence(input_midi_2)

    def _check_extract_examples(input_ns, path, input_number):
      """Make sure each input returns exactly one example from the converter."""
      tensors = config.data_converter.to_tensors(input_ns).outputs
      if not tensors:
        print(
            'MusicVAE configs have very specific input requirements. Could not '
            'extract any valid inputs from `%s`. Try another MIDI file.' % path)
        sys.exit()
      elif len(tensors) > 1:
        # Ambiguous input: dump each extraction so the user can pick one.
        basename = os.path.join(
            FLAGS.output_dir,
            '%s_input%d-extractions_%s-*-of-%03d.mid' %
            (FLAGS.config, input_number, date_and_time, len(tensors)))
        for i, ns in enumerate(config.data_converter.from_tensors(tensors)):
          mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))
        print(
            '%d valid inputs extracted from `%s`. Outputting these potential '
            'inputs as `%s`. Call script again with one of these instead.' %
            (len(tensors), path, basename))
        sys.exit()
    logging.info(
        'Attempting to extract examples from input MIDIs using config `%s`...',
        FLAGS.config)
    _check_extract_examples(input_1, FLAGS.input_midi_1, 1)
    _check_extract_examples(input_2, FLAGS.input_midi_2, 2)

  logging.info('Loading model...')
  if FLAGS.run_dir:
    checkpoint_dir_or_path = os.path.expanduser(
        os.path.join(FLAGS.run_dir, 'train'))
  else:
    checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)
  model = TrainedModel(
      config, batch_size=min(FLAGS.max_batch_size, FLAGS.num_outputs),
      checkpoint_dir_or_path=checkpoint_dir_or_path)

  if FLAGS.mode == 'interpolate':
    logging.info('Interpolating...')
    # Encode both inputs, then decode along the slerp path between their means.
    _, mu, _ = model.encode([input_1, input_2])
    z = np.array([
        _slerp(mu[0], mu[1], t) for t in np.linspace(0, 1, FLAGS.num_outputs)])
    results = model.decode(
        length=config.hparams.max_seq_len,
        z=z,
        temperature=FLAGS.temperature)
  elif FLAGS.mode == 'sample':
    logging.info('Sampling...')
    # NOTE(review): `results` from the interpolate branch is unused here; this
    # branch writes its own files directly for every (direction, k, layers)
    # combination of the pre-trained latent-transition models.
    mean_mu = np.load('two_med/data/mean_mu.npy')
    mean_sigma = np.load('two_med/data/mean_sigma.npy')
    session = tf2.keras.backend.get_session()
    init = tf2.global_variables_initializer()
    session.run(init)
    forwards = (True, False)
    ks = (16, 32, 64, 128)
    hidden_layer_ns = (1, 2)
    for forward in forwards:
      for k in ks:
        for hidden_layer_n in hidden_layer_ns:
          model_path = 'two_med/data/model_k_{:d}_hln_{:d}_dir_{:s}.h5'.format(k, hidden_layer_n,
                                                                               'forward' if forward else 'backward')
          # z1 is the dataset-mean latent; z2 is predicted from it.
          z1, z2 = melody_helper(model_path, mean_mu, mean_sigma, session)
          results1 = model.sample(
              n=1,
              length=config.hparams.max_seq_len,
              temperature=FLAGS.temperature,
              force_z=z1)
          results2 = model.sample(
              n=1,
              length=config.hparams.max_seq_len,
              temperature=FLAGS.temperature,
              force_z=z2)
          basename = os.path.join(
              FLAGS.output_dir,
              'k_{:d}_hln_{:d}_dir_{:s}_*.mid'.format
              (k, hidden_layer_n, 'forward' if forward else 'backward'))
          logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)
          for i, ns in enumerate([results1[0], results2[0]]):
            mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))

  logging.info('Done.')
def melody_helper(model_path, mean_mu, mean_sigma, session):
    # Load a pre-trained Keras regression model and use it to predict an
    # intermediate latent vector from the corpus mean mu/sigma.
    # NOTE(review): the model is loaded via `tf2` but compiled with `tf`
    # losses/optimizers -- confirm both aliases point at compatible TF APIs.
    model2 = tf2.keras.models.load_model(model_path)
    model2.compile(loss=tf.keras.losses.MeanSquaredError(), optimizer=tf.keras.optimizers.Adam())
    # Input of shape (batch=1, rows=3, latent_dim=512): row 0 holds the mean
    # mu, row 2 the mean sigma; row 1 stays zero and is presumably the slot
    # the model predicts -- TODO confirm against the training code.
    zmid = np.zeros((1, 3, 512))
    zmid[0, :1, :] = mean_mu
    zmid[0, 2, :] = mean_sigma
    # Run the model and keep only the predicted middle row.
    z2 = model2(zmid).eval(session=session)[0, 1, :]
    # Return the (start latent, predicted latent) pair.
    return mean_mu, z2
def main(unused_argv):
    """Absl entry point: set log verbosity from flags and run with the default configs."""
    logging.set_verbosity(FLAGS.log)
    run(configs.CONFIG_MAP)
def console_entry_point():
    # Pin all ops to the CPU device before handing control to tf.app.run.
    with tf.device('/device:cpu:0'):
        tf.app.run(main)
# Standard script entry point.
if __name__ == '__main__':
    console_entry_point()
| StarcoderdataPython |
8042676 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Tuple, List, Union
from .utils import _get_as_snowflake
from .asset import Asset
from .object import Object
if TYPE_CHECKING:
from .types.guild import (
GuildPreview as GuildPreviewPayload,
GuildFeature
)
from .state import ConnectionState
from .emoji import Emoji
from .guild import Guild
__all__: Tuple[str, ...] = (
'GuildPreview',
)
class GuildPreview:
    """Represents a Guild's Preview.

    Attributes
    ----------
    guild: Optional[:class:`Guild`]
        The guild that the preview belongs to, resolved from the cached
        state. ``None`` when the client is not in the guild.
    guild_id: :class:`int`
        The ID of the guild that the preview belongs to.
    name: :class:`str`
        The name of the guild.
    emojis: Tuple[:class:`Emoji`, ...]
        The emojis that the guild has.
    features: List[:class:`str`]
        A list of features that the guild has. The features that a guild can have are
        subject to arbitrary change by Discord.

        They are currently as follows:

        - ``ANIMATED_ICON``: Guild can upload an animated icon.
        - ``BANNER``: Guild can upload and use a banner. (i.e. :attr:`.banner`)
        - ``COMMERCE``: Guild can sell things using store channels.
        - ``COMMUNITY``: Guild is a community server.
        - ``DISCOVERABLE``: Guild shows up in Server Discovery.
        - ``FEATURABLE``: Guild is able to be featured in Server Discovery.
        - ``INVITE_SPLASH``: Guild's invite page can have a special splash.
        - ``MEMBER_VERIFICATION_GATE_ENABLED``: Guild has Membership Screening enabled.
        - ``MONETIZATION_ENABLED``: Guild has enabled monetization.
        - ``MORE_EMOJI``: Guild has increased custom emoji slots.
        - ``MORE_STICKERS``: Guild has increased custom sticker slots.
        - ``NEWS``: Guild can create news channels.
        - ``PARTNERED``: Guild is a partnered server.
        - ``PREVIEW_ENABLED``: Guild can be viewed before being accepted via Membership Screening.
        - ``PRIVATE_THREADS``: Guild has access to create private threads.
        - ``SEVEN_DAY_THREAD_ARCHIVE``: Guild has access to the seven day archive time for threads.
        - ``THREE_DAY_THREAD_ARCHIVE``: Guild has access to the three day archive time for threads.
        - ``TICKETED_EVENTS_ENABLED``: Guild has enabled ticketed events.
        - ``VANITY_URL``: Guild can have a vanity invite URL (e.g. discord.gg/discord-api).
        - ``VERIFIED``: Guild is a verified server.
        - ``VIP_REGIONS``: Guild has VIP voice regions.
        - ``WELCOME_SCREEN_ENABLED``: Guild has enabled the welcome screen.
    approximate_member_count: :class:`int`
        An approximate member count.
    approximate_presence_count: :class:`int`
        An approximate number of online members in this guild.
    description: Optional[:class:`str`]
        The description for the guild, if the guild is discoverable.
    """

    __slots__: Tuple[str, ...] = (
        '_state',
        'guild_id',
        'name',
        '_icon',
        '_splash',
        '_discovery_splash',
        'emojis',
        'features',
        'approximate_member_count',
        'approximate_presence_count',
        'description'
    )

    def __init__(self, *, data: GuildPreviewPayload, guild: Union[Guild, Object], state: ConnectionState):
        self._state: ConnectionState = state
        self.guild_id: int = _get_as_snowflake(data, 'id')  # type: ignore
        self.name: str = data['name']
        # Raw asset hashes; resolved lazily by the properties below.
        self._icon: Optional[str] = data.get('icon')
        self._splash: Optional[str] = data.get('splash')
        self._discovery_splash: Optional[str] = data.get('discovery_splash')
        # `guild` may be an Object when the bot is not in the guild; it is only
        # used to register the preview's emojis into the state cache.
        self.emojis: Tuple[Emoji, ...] = tuple(map(lambda d: state.store_emoji(guild, d), data.get('emojis', [])))  # type: ignore
        self.features: List[GuildFeature] = data.get('features', [])
        self.approximate_member_count: int = data['approximate_member_count']
        self.approximate_presence_count: int = data['approximate_presence_count']
        self.description: Optional[str] = data.get('description')

    @property
    def guild(self) -> Optional[Guild]:
        """Optional[:class:`Guild`]: Returns the :class:`Guild` that the preview is for, if the client is in it."""
        return self._state._get_guild(self.guild_id)

    @property
    def icon(self) -> Optional[Asset]:
        """Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
        if self._icon is None:
            return None
        return Asset._from_guild_icon(self._state, self.guild_id, self._icon)

    @property
    def splash(self) -> Optional[Asset]:
        """Optional[:class:`Asset`]: Returns the guild's invite splash asset, if available."""
        if self._splash is None:
            return None
        return Asset._from_guild_image(self._state, self.guild_id, self._splash, path='splashes')

    @property
    def discovery_splash(self) -> Optional[Asset]:
        """Optional[:class:`Asset`]: Returns the guild's discovery splash asset, if available."""
        if self._discovery_splash is None:
            return None
        return Asset._from_guild_image(self._state, self.guild_id, self._discovery_splash, path='discovery-splashes')
8026249 |
import re
from enum import Enum
from multiprocessing import Lock
from systemtools.duration import *
from systemtools.location import *
from systemtools.printer import *
from systemtools.logger import *
from systemtools.file import *
from systemtools.basics import * # stripAccents, reduceBlank
from datatools.htmltools import *
from unidecode import unidecode
from nlptools.preprocessing import *
from nlptools.stopword import *
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
import scipy
import numpy as np
import textstat
# textstatInstalledSingleton = None
def stylo(
    document,
    asNpArray=False,
    allowedFunctions=(
        'automated_readability_index',
        'avg_character_per_word',
        'avg_letter_per_word',
        'avg_sentence_length',
        'avg_sentence_per_word',
        'char_count',
        'coleman_liau_index',
        'letter_count',
        'lexicon_count',
        'lix',
        'reading_time',
        'rix',
        'sentence_count',
        'smog_index',
        'spache_readability',
        'dale_chall_readability_score',
        'dale_chall_readability_score_v2',
        'difficult_words',
        'gunning_fog',
    ),
    logger=None, verbose=True,
):
    """Compute a stylometric / readability feature vector with ``textstat``.

    Each name in ``allowedFunctions`` is looked up on the ``textstat`` module
    and called on ``document``; the numeric result is stored as a float. A
    metric that is missing, raises, or returns a non-number is recorded as
    0.0 so the output always has ``len(allowedFunctions)`` entries in a
    stable order.

    Args:
        document: the text to analyse.
        asNpArray: if True, return a ``numpy.ndarray`` instead of a list.
        allowedFunctions: ordered names of ``textstat`` metrics to evaluate
            (default is now an immutable tuple instead of a mutable list).
        logger, verbose: kept for API compatibility (currently unused).

    Returns:
        A list (or numpy array when ``asNpArray``) of floats.
    """
    features = []
    for name in allowedFunctions:
        try:
            metric = textstat.__dict__[name]
            value = metric(document)
            assert isinstance(value, int) or isinstance(value, float)
            features.append(float(value))
        except Exception:
            # Best effort: keep the vector aligned by padding failures with 0.0.
            features.append(0.0)
    return np.array(features) if asNpArray else features
def toNgrams(
    docs,
    mode="document",
    ngrams=1,
    doLower=False,
    flattenSentences=False,
    logger=None,
    verbose=True,
):
    """Convert every document of a corpus into lists of space-joined n-grams.

    ``docs`` is a list of documents; a document is either a list of words
    (str) or a list of sentences (each a list of words). The output mirrors
    the input structure, except that sentences are merged per document when
    ``flattenSentences`` is True.
    """
    assert docs is not None and len(docs) > 0 and len(docs[0]) > 0
    sentencesCorpus = isinstance(docs[0][0], list)
    if not sentencesCorpus and flattenSentences:
        raise Exception("You give documents that are not composed of sentences but you set flattenSentences as True")
    if sentencesCorpus:
        if ngrams > 1 and not flattenSentences:
            logWarning("You gave a corpus of sentences but you set ngrams > 1 and flattenSentences as False, you won't get ngrams riding on 2 sentences...", logger, verbose=verbose)
        if flattenSentences:
            docs = [flattenLists(doc) for doc in docs]
    if not doLower:
        logWarning("You set doLower as False", logger=logger, verbose=verbose)
    # When each document is a single word list, we temporarily wrap it so the
    # inner loop can treat everything as "a list of sentences", then unwrap.
    wrapEachDoc = not sentencesCorpus or flattenSentences
    showBar = verbose and len(docs) > 20
    ngramss = []
    for doc in pb(docs, logger=logger, verbose=showBar, message="Extracting " + str(ngrams) + "-grams"):
        sentenceList = [doc] if wrapEachDoc else doc
        perSentence = []
        for sentence in sentenceList:
            if sentence is None:
                raise Exception("Found None in docs")
            if len(sentence) == 0:
                # Empty sentences are warned about and dropped from the output.
                logWarning("Found an empty doc", logger, verbose=verbose)
                continue
            if not isinstance(sentence[0], str):
                raise Exception("Found a word that is not a str")
            if len(sentence) < ngrams:
                perSentence.append([])
                continue
            grams = [" ".join(sentence[start:start + ngrams])
                     for start in range(len(sentence) - ngrams + 1)]
            if doLower:
                grams = [g.lower() for g in grams]
            perSentence.append(grams)
        ngramss.append(perSentence[0] if wrapEachDoc else perSentence)
    return ngramss
def extractNgrams(
    docs,
    ngrams=1,
    minDF=1,
    doLower=False,
    returnDF=False,
    useTuple=False,
    flattenSentences=False,
    logger=None,
    verbose=True
):
    """Collect the n-gram vocabulary of a corpus with document frequencies.

    You must give a list of documents; a document is either a list of words
    (str) or a list of sentences (list). Returns the set of n-grams whose
    document frequency is at least ``minDF`` (or a dict n-gram -> DF when
    ``returnDF`` is True).
    """
    assert docs is not None and len(docs) > 0 and len(docs[0]) > 0
    sentencesCorpus = isinstance(docs[0][0], list)
    if not sentencesCorpus and flattenSentences:
        raise Exception("You give documents that are not composed of sentences but you set flattenSentences as True")
    if sentencesCorpus:
        if ngrams > 1 and not flattenSentences:
            logWarning("You gave a corpus of sentences but you set ngrams > 1 and flattenSentences as False, you won't get ngrams riding on 2 sentences...", logger, verbose=verbose)
        if flattenSentences:
            # Merge each document's sentences into one flat word list.
            docs = [flattenLists(doc) for doc in docs]
        else:
            # Treat every sentence as its own "document" for DF counting.
            docs = flattenLists(docs)
    if not doLower:
        logWarning("You set doLower as False", logger=logger, verbose=verbose)
    vocDF = dict()
    for doc in pb(docs, logger=logger, verbose=verbose and len(docs) > 20, message="Extracting " + str(ngrams) + "-grams"):
        if doc is None:
            logWarning("Found None in docs", logger, verbose=verbose)
        elif len(doc) == 0:
            logWarning("Found an empty doc", logger, verbose=verbose)
        elif not isinstance(doc[0], str):
            raise Exception("Found a word that is not a str")
        elif len(doc) >= ngrams:
            seenHere = set()
            for start in range(len(doc) - ngrams + 1):
                window = doc[start:start + ngrams]
                if doLower:
                    window = [w.lower() for w in window]
                gram = tuple(window) if useTuple else " ".join(window)
                if gram not in vocDF:
                    vocDF[gram] = 1
                elif gram not in seenHere:
                    # Count each n-gram at most once per document.
                    vocDF[gram] += 1
                seenHere.add(gram)
    # Drop n-grams below the minimum document frequency:
    for gram in [g for g, df in vocDF.items() if df < minDF]:
        del vocDF[gram]
    return vocDF if returnDF else set(vocDF.keys())
def generateTFIDF(docs, minDF=1, sublinearTF=True, ngramRange=(1, 1), doLower=False, logger=None, verbose=True):
    """Fit a sklearn TfidfVectorizer on pre-tokenized documents.

    Args:
        docs: list of documents, each a list of tokens (str).
        minDF: minimum document frequency forwarded to sklearn.
        sublinearTF: use 1 + log(tf) instead of raw term frequency.
        ngramRange: (min_n, max_n) n-gram range forwarded to sklearn.
        doLower: lowercase all tokens first (on a deep copy of ``docs``).
        logger, verbose: logging controls.

    Returns:
        (fitted TfidfVectorizer, sparse doc x vocabulary TFIDF matrix).
    """
    tt = TicToc(logger=logger, verbose=verbose)
    tt.tic()
    if doLower:
        # Deep copy so the caller's token lists are not mutated.
        # NOTE(review): relies on `copy` being re-exported by the wildcard
        # imports at the top of the file -- confirm.
        docs = copy.deepcopy(docs)
        tt.tic("Docs copied")
        warnFreeRAM(logger=logger, verbose=verbose)
        for doc in docs:
            for i in range(len(doc)):
                doc[i] = doc[i].lower()
        tt.tic("Words lowered")
    tfidf = TfidfVectorizer\
    (
        analyzer='word',
        tokenizer=lambda x: x,       # docs are already tokenized...
        preprocessor=lambda x: x,    # ...so both hooks are the identity
        token_pattern=None,
        # lowercase=True, # Doesn't work because we erased preprocessor
        ngram_range=ngramRange,
        sublinear_tf=sublinearTF,
        min_df=minDF,
    )
    # We generate tfidf vectors:
    log("Computing TFIDF...", logger, verbose=verbose)
    tfidf.fit(docs)
    tt.tic("TFIDF computed")
    warnFreeRAM(logger=logger, verbose=verbose)
    tfidfMatrix = tfidf.transform(docs)
    tt.tic("TFIDF matrix generated")
    warnFreeRAM(logger=logger, verbose=verbose)
    # We log informations:
    log("TFIDF data shape: " + str(tfidfMatrix.shape), logger, verbose=verbose)
    log("TFIDF voc len: " + str(len(tfidf.vocabulary_)), logger, verbose=verbose)
    tt.toc("TFIDF computation done")
    return (tfidf, tfidfMatrix)
def flattenedIndexes(sentences, ngrams=1, doLower=False, returnFlattenedDoc=False, logger=None, verbose=True):
    """Map each n-gram of a flattened document back to its sentence indexes.

    For example:

        >>> (ngramIndexes, grams) = flattenedIndexes([['a', 'b'], ['c'], ['d']], ngrams=2, returnFlattenedDoc=True)
        >>> ngramIndexes
        [{0}, {0, 1}, {1, 2}]
        >>> grams
        ['a b', 'b c', 'c d']

    The 2-gram 'a b' lies entirely in sentence 0, while 'b c' straddles
    sentences 0 and 1.
    """
    if not doLower:
        logWarning("You set doLower as False", logger=logger, verbose=verbose)
    if sentences is None or len(sentences) == 0:
        return ([], []) if returnFlattenedDoc else []
    assert isinstance(sentences[0], list) and isinstance(sentences[0][0], str)
    # Sentence index of every word position in the flattened document:
    wordToSentence = [sentenceId
                      for sentenceId, sentence in enumerate(sentences)
                      for _ in sentence]
    # N-grams of the flattened document:
    grams = toNgrams(
        [sentences],
        ngrams=ngrams,
        flattenSentences=True,
        doLower=doLower,
        logger=logger,
        verbose=verbose,
    )[0]
    # Each n-gram covers `ngrams` consecutive word positions; collect the set
    # of sentences those positions belong to.
    ngramIndexes = []
    for gramId in range(len(grams)):
        hits = set()
        for offset in range(ngrams):
            try:
                hits.add(wordToSentence[gramId + offset])
            except Exception as e:
                logException(e, logger=logger, verbose=verbose)
        ngramIndexes.append(hits)
    return (ngramIndexes, grams) if returnFlattenedDoc else ngramIndexes
class TFIDF:
    """Corpus-level TFIDF model with sentence-filtering utilities (see ``__init__``)."""
    def __init__\
    (
        self,
        docs,
        doLower=True,
        sublinearTF=True,
        ngramRange=(1, 1),
        minDF=1,
        cumhistoIntervalsSize=1000,
        logger=None,
        verbose=True,
    ):
        """
        This class is a wrapper of `sklearn.feature_extraction.text.TfidfVectorizer`. It takes documents and generates TFIDF vectors of a given ngrams range. It handle either already tokenized docs for words or already tokenized docs for sentences and words. You can automatically access useful data such as specific TFIDF values using `TFIDFValue(docId, ngram)`, filter sentences that have a high max TFIDF value given a deletion ratio using `removeSentences(deletionRatio)` and so on.
        """
        # All vars:
        self.logger = logger
        self.verbose = verbose
        self.minDF = minDF
        self.ngramRange = ngramRange
        self.sublinearTF = sublinearTF
        self.doLower = doLower
        self.cumhistoIntervalsSize = cumhistoIntervalsSize
        # Computed vars (filled lazily by the get* methods below):
        self.tops = None
        self.voc = None
        self.vocIndexes = None
        self.tfidf = None
        self.tfidfMatrix = None
        self.tfidfVectors = None
        self.tfidfMap = None
        self.maxTFIDFs = None
        self.cumhisto = None
        self.cumhistoIntervals = None
        # We keep sentences in memory (only for sentence-structured corpora):
        if isinstance(docs[0][0], list):
            self.sentencesCorpus = True
        else:
            self.sentencesCorpus = False
        if self.sentencesCorpus:
            self.docsSentences = docs
        else:
            self.docsSentences = None
        # We handle docs (flatten sentences so TFIDF sees one word list per doc):
        assert docs is not None
        assert len(docs) > 1
        if isinstance(docs[0][0], list):
            logWarning("You provided a list of sentences, we flatten all docs.", self)
            # docs = flattenLists(docs)
            docs = [flattenLists(doc) for doc in docs]
        assert isinstance(docs[0][0], str)
        for doc in docs:
            assert len(doc) > 0
            assert isinstance(doc[0], str)
        self.docs = docs
        # We generate TFIDF:
        self.__generate()
    def __generate(self):
        """Fit the underlying vectorizer and cache its vocabulary."""
        # We get tfidf:
        (self.tfidf, self.tfidfMatrix) = generateTFIDF\
        (
            self.docs,
            minDF=self.minDF,
            sublinearTF=self.sublinearTF,
            ngramRange=self.ngramRange,
            doLower=self.doLower,
            logger=self.logger,
            verbose=self.verbose
        )
        self.vocIndexes = self.tfidf.vocabulary_
        self.getVoc()
    def getTFIDFMatrix(self):
        """
        Return the matrix docs vocabulary with all TFIDF values, a scipy.sparse.csr.csr_matrix of shape (docs count, vocabulary size)
        """
        return self.tfidfMatrix
    def getTFIDFVectors(self, ngrams=1):
        """
        Return docs with TFIDF values instead of tokens
        """
        if ngrams != 1:
            raise Exception("ngrams > 1 not yet implemented")
        if self.tfidfVectors is None:
            tfidfScores = []
            pbar = ProgressBar(len(self.docs), verbose=self.verbose and (len(self.docs) > 1000), logger=self.logger, message="Building TFIDF tokens")
            for docId in range(len(self.docs)):
                # https://stackoverflow.com/questions/34449127/sklearn-tfidf-transformer-how-to-get-tf-idf-values-of-given-words-in-documen
                feature_index = self.tfidfMatrix[docId,:].nonzero()[1]
                currentScores = np.array([self.tfidfMatrix[docId, x] for x in feature_index])
                # aaa maps vocabulary (feature) index -> TFIDF score for this doc:
                aaa = dict()
                for i in range(len(feature_index)):
                    aaa[feature_index[i]] = currentScores[i]
                tokensTFIDF = []
                for word in self.docs[docId]:
                    if self.doLower:
                        word = word.lower()
                    tokensTFIDF.append(aaa[self.vocIndexes[word]])
                tfidfScores.append(tokensTFIDF)
                pbar.tic()
            self.tfidfVectors = tfidfScores
        return self.tfidfVectors
    def getVoc(self):
        """
        Return the list of ngrams
        """
        if self.voc is None:
            # Invert vocIndexes (ngram -> column) into a column -> ngram list.
            self.voc = [None] * len(self.vocIndexes)
            for word, index in self.vocIndexes.items():
                self.voc[index] = word
        return self.voc
    def getVocIndexes(self):
        """
        Return a mapping voc -> index
        """
        return self.vocIndexes
    def getTFIDFMap(self):
        """
        Return a list docId -> (dict of ngram -> tfidf value)
        """
        if self.tfidfMap is None:
            self.tfidfMap = []
            for i in range(self.tfidfMatrix.shape[0]):
                self.tfidfMap.append(dict())
            cx = scipy.sparse.coo_matrix(self.tfidfMatrix)
            pbar = ProgressBar(self.tfidfMatrix.shape[0], logger=self.logger, verbose=self.verbose, message="Collecting TFIDF values")
            alreadySeenDocs = set()
            for docId, vocId, tfidfValue in zip(cx.row, cx.col, cx.data):
                ngram = self.voc[vocId]
                # NOTE(review): `ngrams` is computed but never used here.
                ngrams = ngram.count(" ") + 1
                self.tfidfMap[docId][ngram] = tfidfValue
                if docId not in alreadySeenDocs:
                    # Tick the progress bar the first time each doc is seen.
                    pbar.tic()
                    alreadySeenDocs.add(docId)
        return self.tfidfMap
    def getTFIDFValue(self, docId, ngram):
        """
        Return the TFIDF value of a ngram in a specific doc
        (0.0, with an error log, when the ngram is absent from the doc)
        """
        valuesDict = self.getTFIDFMap()[docId]
        if ngram not in valuesDict:
            logError('"' + ngram + '"' + " not in doc " + str(docId), self)
            return 0.0
        else:
            return valuesDict[ngram]
    def getTops(self):
        """
        This method takes tfidfMatrix a sparse matric (from `generateTFIDF`).
        It takes voc a list of ngrams corresponding to tfidfMatrix columns.
        It return top ngrams (according to there tfidf values) for each doc looking:

            [
                {
                    1: [ sunye, bosu, ..., jan., ryan ],
                    2: [ sarah bean, master jay, ..., and former, added . ],
                    <ngrams>: [ <word>, <word>, ..., <word>, <word> ]
                },
                <doc>,
                ...,
                {
                    1: [ hu, candid, ..., of, is ],
                    2: [ private talks, with hu, ..., to a, in a ],
                    3: [ worshipped at a, with some olympic, ..., , he said, as well as ]
                }
            ]
        """
        if self.tops is None:
            self.getVoc()
            self.tops = []
            for i in range(self.tfidfMatrix.shape[0]):
                # NOTE(review): n-gram sizes are hard-coded to 1..3 here; a
                # ngramRange above (1, 3) would KeyError below -- confirm intended.
                grams = {1: [], 2: [], 3: []}
                self.tops.append(grams)
            cx = scipy.sparse.coo_matrix(self.tfidfMatrix)
            pbar = ProgressBar(self.tfidfMatrix.shape[0], logger=self.logger, verbose=self.verbose, message="Collecting TFIDF values")
            alreadySeenDocs = set()
            for docId, vocId, tfidfValue in zip(cx.row, cx.col, cx.data):
                ngram = self.voc[vocId]
                ngrams = ngram.count(" ") + 1
                self.tops[docId][ngrams].append((ngram, tfidfValue))
                if docId not in alreadySeenDocs:
                    pbar.tic()
                    alreadySeenDocs.add(docId)
            for i in pb(list(range(len(self.tops))), logger=self.logger, verbose=self.verbose, message="Sorting ngrams by TFIDF values"):
                for u in self.tops[i].keys():
                    self.tops[i][u] = [e[0] for e in sorted(self.tops[i][u], key=lambda x: x[1], reverse=True)]
        return self.tops
    def getMaxTFIDFsPerSentence(self):
        """
        To use this function, you must give a corpus of docs composed of sentences at the init step.
        This function return a structure looking:

            [
                <doc 0>,
                {
                    <ngrams>: [<max tfidf value of sentence 0>, <max tfidf value of sentence 1>, <...>],
                    2: [0.2, 0.1],
                    <...>,
                },
                <...>,
            ]
        """
        assert self.sentencesCorpus
        if self.maxTFIDFs is None:
            self.getTFIDFMap()
            self.maxTFIDFs = []
            maxNgrams = self.ngramRange[1]
            docId = 0
            for doc in pb(self.docsSentences, logger=self.logger, verbose=self.verbose, message="Collecting max TFIDF value per sentence"):
                perNgrams = dict()
                for ngrams in range(1, maxNgrams + 1):
                    (sentenceIndexes, flattenedSentences) = flattenedIndexes(doc, doLower=self.doLower, ngrams=ngrams, returnFlattenedDoc=True)
                    # allMax[sentenceId] = max TFIDF among the n-grams touching
                    # that sentence (-1 when a sentence has no n-gram at all).
                    allMax = [-1] * len(doc)
                    for i in range(len(flattenedSentences)):
                        sentenceHit = sentenceIndexes[i]
                        ngram = flattenedSentences[i]
                        for hit in sentenceHit:
                            value = self.getTFIDFValue(docId, ngram)
                            if value > allMax[hit]:
                                allMax[hit] = value
                    perNgrams[ngrams] = allMax
                self.maxTFIDFs.append(perNgrams)
                docId += 1
        return self.maxTFIDFs
    def getCumhistoIntervals(self):
        """Return the histogram bin lower bounds computed by ``getCumhisto``."""
        if self.cumhistoIntervals is None:
            self.getCumhisto()
        return self.cumhistoIntervals
    def getCumhisto(self):
        """
        This method return the cumulative histogram of tfidf values.
        Example of structure:

            {
                <ngrams>: [<count of sentences so that the max TFIDF is higher than this value in self.cumhistoIntervals>, <...>]
                '2': [ 39600, 39600, 35000, ..., 84, 2, 2, 0, 0, 0, 0, 0, 0, 0 ],
            }
        """
        if self.cumhisto is None:
            tt = TicToc(logger=self.logger, verbose=self.verbose)
            tt.tic()
            maxTFIDFs = self.getMaxTFIDFsPerSentence()
            maxNgrams = len(maxTFIDFs[0])
            intervalsSize = self.cumhistoIntervalsSize
            # We calculate intervals:
            minis, maxis = dict(), dict()
            for ngrams in range(1, maxNgrams + 1):
                if ngrams not in minis:
                    minis[ngrams] = None
                if ngrams not in maxis:
                    maxis[ngrams] = None
                for doc in maxTFIDFs:
                    currentMin = min(doc[ngrams])
                    if minis[ngrams] is None or currentMin < minis[ngrams]:
                        minis[ngrams] = currentMin
                    currentMax = max(doc[ngrams])
                    if maxis[ngrams] is None or currentMax > maxis[ngrams]:
                        maxis[ngrams] = currentMax
            tt.tic("We got min and max TFIDF values")
            intervals = dict()
            for ngrams in range(1, maxNgrams + 1):
                mini = minis[ngrams]
                maxi = maxis[ngrams]
                # Pad the range by 1% on each side so extreme values fall
                # strictly inside the first/last bins.
                epsilon = 0.01 * (maxi - mini)
                mini = mini - epsilon
                maxi = maxi + epsilon
                jump = (maxi - mini) / intervalsSize
                intervals[ngrams] = list(np.arange(mini, maxi, jump))
            # We make cumulative histograms:
            cumhisto = dict()
            for ngrams in range(1, maxNgrams + 1):
                currentIntervals = intervals[ngrams]
                if ngrams not in cumhisto:
                    cumhisto[ngrams] = [0] * len(currentIntervals)
                for currentMaxTFIDFs in maxTFIDFs:
                    currentMaxTFIDFs = currentMaxTFIDFs[ngrams]
                    for value in currentMaxTFIDFs:
                        for i in range(len(currentIntervals)):
                            if value > currentIntervals[i]:
                                cumhisto[ngrams][i] += 1
            tt.tic("We calculated the cumulative histogram of tfidf values")
            self.cumhisto = cumhisto
            self.cumhistoIntervals = intervals
        return self.cumhisto
    def getBlackNgrams(self, deletionRatio, *args, **kwargs):
        """
        Return a black list of ngrams for each document
        The black list is calculate according a ratio of deletion of all sentences in the cirpus
        Each ngram in the black list is an indicator when chossing to delete or not a sentence in the corpus
        The structure looks like:

            [
                <list of ngrams for doc 1>,
                [<ngram 1>, <ngram 2>, ...],
                ...
            ]
        """
        maxTFIDFs = self.getMaxTFIDFsPerSentence()
        cumhisto = self.getCumhisto()
        tfidfThresholds = getOptimalTFIDFThresholds\
        (
            maxTFIDFs, cumhisto, deletionRatio, self.getCumhistoIntervals(),
            *args, logger=self.logger, verbose=self.verbose, **kwargs
        )
        blackNgrams = []
        # NOTE(review): `maxNgrams` is unused below.
        maxNgrams = len(maxTFIDFs[0])
        for docId in pb(list(range(len(maxTFIDFs))),
                logger=self.logger, verbose=self.verbose,
                message="Collecting ngrams TFIDF black list for threshold " + str(tfidfThresholds)):
            blackNgrams.append(set())
            voc = self.getTFIDFMap()[docId]
            for ngram in voc:
                ngrams = ngram.count(" ") + 1
                theshold = tfidfThresholds[ngrams]
                currentTFIDF = self.getTFIDFValue(docId, ngram)
                if currentTFIDF >= theshold:
                    blackNgrams[docId].add(ngram)
        return blackNgrams
    def removeSentences(self, deletionRatio, *args, **kwargs):
        """Return the corpus with high-max-TFIDF sentences dropped (target deletion ratio)."""
        assert self.sentencesCorpus
        maxTFIDFs = self.getMaxTFIDFsPerSentence()
        cumhisto = self.getCumhisto()
        intervals = self.getCumhistoIntervals()
        tfidfThresholds = getOptimalTFIDFThresholds(maxTFIDFs, cumhisto, deletionRatio, intervals,
            *args, logger=self.logger, verbose=self.verbose, **kwargs)
        newDocs = []
        maxNgrams = len(maxTFIDFs[0])
        for docId in range(len(maxTFIDFs)):
            newsSentences = []
            for sentenceId in range(len(maxTFIDFs[docId][list(maxTFIDFs[docId].keys())[0]])):
                foundHigher = False
                for ngrams in range(1, maxNgrams + 1):
                    # NOTE(review): strict `>` here, while getBlackNgrams and
                    # estimateDeletion use `>=` -- confirm the asymmetry is intended.
                    if maxTFIDFs[docId][ngrams][sentenceId] > tfidfThresholds[ngrams]:
                        foundHigher = True
                        break
                if not foundHigher:
                    newsSentences.append(self.docsSentences[docId][sentenceId])
            newDocs.append(newsSentences)
        return newDocs
def estimateDeletion(maxTFIDFs, cumhisto, deletionRatio, intervals, logger=None, verbose=True):
    """Estimate the actual fraction of sentences removed for a given ratio.

    ``deletionRatio`` is applied independently per n-gram size to derive a
    TFIDF threshold from the cumulative histogram; a sentence counts as
    deleted as soon as ONE n-gram size reaches its threshold, so the final
    ratio is usually higher than ``deletionRatio`` when several n-gram sizes
    are handled.

    Args:
        maxTFIDFs: per doc, a dict {ngrams: [max TFIDF per sentence]}
            (see ``TFIDF.getMaxTFIDFsPerSentence``); all lists of one doc
            are assumed to have the same length (one entry per sentence).
        cumhisto: per n-gram size, the cumulative histogram of those maxima
            (see ``TFIDF.getCumhisto``).
        deletionRatio: target fraction (0..1) applied per n-gram size.
        intervals: per n-gram size, the histogram bin lower bounds.
        logger, verbose: kept for API compatibility (unused).

    Returns:
        float: deleted sentences / total sentences over the whole corpus.
    """
    maxNgrams = len(maxTFIDFs[0])
    # Per n-gram size, the TFIDF value at or above which sentences get removed:
    tfidfThresholds = dict()
    for ngrams in range(1, maxNgrams + 1):
        countThreshold = deletionRatio * cumhisto[ngrams][0]
        # First histogram bin whose count drops below the target count;
        # falls back to the last bin when none does (previous behavior).
        index = len(cumhisto[ngrams]) - 1
        for i, count in enumerate(cumhisto[ngrams]):
            if count < countThreshold:
                index = i
                break
        tfidfThresholds[ngrams] = intervals[ngrams][index]
    deletedCount = 0
    totalCount = 0
    for docMax in maxTFIDFs:
        # A sentence is deleted as soon as any n-gram size hits its threshold.
        toRemove = set()
        for ngrams in range(1, maxNgrams + 1):
            for sentenceId, value in enumerate(docMax[ngrams]):
                if value >= tfidfThresholds[ngrams]:
                    toRemove.add(sentenceId)
        # Every per-ngram list has one value per sentence, so any key gives
        # the sentence count of this doc.
        totalCount += len(docMax[list(docMax.keys())[0]])
        deletedCount += len(toRemove)
    return deletedCount / totalCount
def estimateOptimalDeletionRatio(maxTFIDFs, cumhisto, targetDeletionRatio, intervals, *args,
                                 minimumDichotomicMove=0.000001,
                                 logger=None, verbose=True, **kwargs):
    """Binary-search the per-ngram ratio whose effective deletion matches the target.

    Because ``estimateDeletion`` removes a sentence as soon as any n-gram
    size crosses its threshold, the effective deletion exceeds the raw
    ratio; this dichotomic search compensates for that.
    """
    candidate = targetDeletionRatio
    step = targetDeletionRatio / 2
    while step > minimumDichotomicMove:
        effective = estimateDeletion(maxTFIDFs, cumhisto, candidate, intervals,
                                     *args, logger=logger, verbose=verbose, **kwargs)
        # Move up when we delete too little, down when we delete too much.
        candidate = candidate + step if effective < targetDeletionRatio else candidate - step
        step = step / 2
    return candidate
def getOptimalTFIDFThresholds(maxTFIDFs, cumhisto, targetDeletionRatio, intervals,
                              *args, logger=None, verbose=True, **kwargs):
    """Compute, per n-gram size, the TFIDF threshold achieving the target deletion ratio."""
    ratio = estimateOptimalDeletionRatio(maxTFIDFs, cumhisto, targetDeletionRatio, intervals,
                                         *args, logger=logger, verbose=verbose, **kwargs)
    thresholds = dict()
    for ngrams in range(1, len(maxTFIDFs[0]) + 1):
        histogram = cumhisto[ngrams]
        countThreshold = ratio * histogram[0]
        # First bin whose count drops below the target count
        # (last bin when none does, matching the loop-variable fallback).
        index = len(histogram) - 1
        for i, count in enumerate(histogram):
            if count < countThreshold:
                index = i
                break
        thresholds[ngrams] = intervals[ngrams][index]
    return thresholds
def filterDocuments(*args, **kwargs):
    """Alias of :func:`filterCorpus`."""
    return filterCorpus(*args, **kwargs)
def filterDocs(*args, **kwargs):
    """Alias of :func:`filterCorpus`."""
    return filterCorpus(*args, **kwargs)
def filterCorpus\
(
    docs, minDF=None, maxDF=None,
    removeEmptySentences=True, removeEmptyDocs=False,
    allowEmptyDocs=True,
    emptyDocMessage="A document doesn't have words anymore after filtering.",
    logger=None, verbose=True, debug=False,
):
    """
    Remove words from a corpus based on their document frequency.

    Args:
        docs: The corpus to filter, can be a list of list of words or a list of list of sentences (which is a list of words).
        minDF (int or float): Can be a ratio on documents count or a minimum document frequency.
        maxDF (int or float): Use this arg instead of stop words. Can be a ratio on documents count or a top n most frequent words in terms of document frequency to remove.
        removeEmptySentences: drop sentences that become empty after filtering.
        removeEmptyDocs: drop documents that become empty after filtering.
        allowEmptyDocs: when False, raise ``emptyDocMessage`` instead of
            keeping/logging an emptied document.
        logger, verbose, debug: logging controls.

    Returns:
        The corpus with all blacklisted words removed, same nesting as the input.

    Raises:
        Exception: if minDF / maxDF are out of range, or if a document becomes
            empty while ``allowEmptyDocs`` is False.
    """
    # We check the content:
    assert docs is not None
    assert docs[0] is not None
    assert len(docs[0]) > 0
    # We find if docs are a list of list of words or a list of list of sentences (which is a list of words):
    isSentences = isinstance(docs[0][0], list)
    # We calculate doc frequency for all terms (tdf: term -> nb of docs containing it):
    tdf = dict()
    for doc in docs:
        if isSentences:
            doc = set(flattenLists(doc))
        else:
            doc = set(doc)
        for word in doc:
            if word not in tdf:
                tdf[word] = 0
            tdf[word] += 1
    if debug:
        bp(sortBy(tdf, index=1, desc=True), logger, 5)
    # We find blackMinDF (words too rare to keep):
    docsCount = len(docs)
    blackMinDF = set()
    if minDF is not None:
        if isinstance(minDF, int) and minDF >= 1:
            # Absolute document-frequency threshold.
            for word, df in tdf.items():
                if df < minDF:
                    blackMinDF.add(word)
        elif isinstance(minDF, float) and minDF >= 0.0 and minDF <= 1.0:
            # Ratio over the number of documents.
            for word, df in tdf.items():
                r = df / docsCount
                if r < minDF:
                    blackMinDF.add(word)
        else:
            raise Exception('minDF must be an integer >= 1 or a ratio >= 0.0 and <= 1.0')
        if debug:
            log("blackMinDF: " + b(blackMinDF, 5), logger, verbose=verbose)
        if len(blackMinDF) > 0:
            log("Voc removed because of minDF (" + str(len(blackMinDF)) + " elements):\n" + b(blackMinDF, 4), logger, verbose=verbose)
    # We find blackMaxDF (words too frequent to keep):
    blackMaxDF = set()
    if maxDF is not None:
        if isinstance(maxDF, int) and maxDF >= 1:
            # Top-n most frequent words (by document frequency).
            blackMaxDF = set([e[0] for e in sortBy(tdf, index=1, desc=True)[:maxDF]])
        elif isinstance(maxDF, float) and maxDF > 0.0 and maxDF <= 1.0:
            # Ratio over the number of documents.
            for word, df in tdf.items():
                r = df / docsCount
                if r > maxDF:
                    blackMaxDF.add(word)
        else:
            raise Exception('maxDF must be an integer >= 1 or a ratio > 0.0 and <= 1.0')
        if debug:
            log("blackMaxDF: " + b(blackMaxDF, 5), logger, verbose=verbose)
        if len(blackMaxDF) > 0:
            log("Voc removed because of maxDF (" + str(len(blackMaxDF)) + " elements):\n" + b(blackMaxDF, 4), logger, verbose=verbose)
    # We remove words:
    blackWords = blackMinDF.union(blackMaxDF)
    log(str(truncateFloat(len(blackWords) / len(tdf) * 100, 2)) + "% of voc will be removed.", logger, verbose=verbose)
    if isSentences:
        # Sentence-structured corpus: filter word by word, optionally dropping
        # emptied sentences and emptied documents.
        newDocs = []
        for doc in docs:
            newDoc = []
            for sentence in doc:
                newSentence = []
                for word in sentence:
                    if word not in blackWords:
                        newSentence.append(word)
                if not (removeEmptySentences and len(newSentence) == 0):
                    newDoc.append(newSentence)
            if len(newDoc) == 0:
                if not allowEmptyDocs:
                    raise Exception(emptyDocMessage)
                else:
                    logWarning(emptyDocMessage, logger, verbose=verbose)
            if not (removeEmptyDocs and len(newDoc) == 0):
                newDocs.append(newDoc)
        docs = newDocs
    else:
        # Word-structured corpus: same filtering, one level less of nesting.
        newDocs = []
        for doc in docs:
            newDoc = []
            for word in doc:
                if word not in blackWords:
                    newDoc.append(word)
            if len(newDoc) == 0:
                if not allowEmptyDocs:
                    raise Exception(emptyDocMessage)
                else:
                    logWarning(emptyDocMessage, logger, verbose=verbose)
            if not (removeEmptyDocs and len(newDoc) == 0):
                newDocs.append(newDoc)
        docs = newDocs
    # Finally we return the filtered corpus:
    return docs
def filterCorpusTest1():
    """Manual check: word-level corpus filtered with a ratio minDF."""
    corpusPath = getExecDir(__file__) + "/test/testdata/filter-corpus/corpus1.txt"
    docs = [line.split() for line in fileToStrList(corpusPath)]
    bp(docs, 5)
    docs = filterCorpus(docs, minDF=0.35, maxDF=None, debug=True)
    bp(docs, 5)
def filterCorpusTest2():
    """Manual check: sentence-level corpus filtered with absolute minDF/maxDF."""
    corpusPath = getExecDir(__file__) + "/test/testdata/filter-corpus/corpus2.txt"
    docs = [[sentence.split() for sentence in line.split(".")]
            for line in fileToStrList(corpusPath)]
    bp(docs, 5)
    docs = filterCorpus(docs, minDF=3, maxDF=10, debug=True, allowEmptyDocs=False)
    bp(docs, 5)
# Run the first manual check when executed as a script.
if __name__ == '__main__':
    filterCorpusTest1()
8095824 | <filename>train.py
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from glob import glob
import cv2
import numpy as np
import tensorflow as tf
import cfgs
from cfgs import logger
from model.model import generator, discriminator
from model.ops import composite_image, gen_random_mask_1ch
from tqdm import tqdm
def main():
    '''
    Main function: builds the CP-GAN (copy-paste GAN) graph and runs the
    WGAN-style training loop, feeding single foreground/background image
    pairs per step (effective batch size 1).

    Relies on the module-level lists ``foreground_images`` and
    ``background_images`` being populated before the call (done in the
    ``__main__`` guard).
    '''
    with tf.variable_scope(name_or_scope='cp_gan') as scope:
        # ___________________________Preparation Work______________________________________
        foreground_plh = tf.placeholder(dtype=tf.float32,
                                        shape=[cfgs.IMG_HEIGHT, cfgs.IMG_WIDTH, cfgs.CHANNEL])
        background_plh = tf.placeholder(dtype=tf.float32,
                                        shape=[cfgs.IMG_HEIGHT, cfgs.IMG_WIDTH, cfgs.CHANNEL])
        shuffled_foreground_plh = tf.placeholder(dtype=tf.float32,
                                                 shape=[cfgs.IMG_HEIGHT, cfgs.IMG_WIDTH, cfgs.CHANNEL])
        # ___________________________Image Preprocessing____________________________________
        # Random crop + flip augmentation, then add a leading batch dimension.
        foreground = random_process_image(foreground_plh)
        background = random_process_image(background_plh)
        shuffled_foreground = random_process_image(shuffled_foreground_plh)
        foreground = tf.expand_dims(foreground, axis=0)
        background = tf.expand_dims(background, axis=0)
        shuffled_foreground = tf.expand_dims(shuffled_foreground, axis=0)
        # ___________________________Create the graph, etc__________________________________
        g_mask_foreground = generator(inputs=foreground, name='generator')
        score_foreground, d_mask_foreground = discriminator(inputs=foreground, name='discriminator')
        composited_image = composite_image(foreground, background, g_mask_foreground)
        # "Anti-shortcut" composite: paste a *different* foreground through the
        # same mask, so the generator cannot cheat with a trivial mask.
        anti_shortcut_image = composite_image(foreground=shuffled_foreground,
                                              background=background,
                                              mask=g_mask_foreground)
        # Grounded fake: composite through a random (non-generated) mask.
        random_mask = tf.py_function(func=gen_random_mask_1ch, inp=[foreground.shape, 0.2], Tout=[tf.float32])
        random_mask = tf.stack(values=[random_mask, random_mask, random_mask], axis=-1)
        random_mask.set_shape(foreground.shape)
        grounded_fake_image = composite_image(foreground=foreground,
                                              background=background,
                                              mask=random_mask)
        # Re-score every composite with the same (shared-weight) discriminator.
        scope.reuse_variables()
        score_composite, d_mask_composite = discriminator(inputs=composited_image, name='discriminator', reuse=True)
        scope.reuse_variables()
        score_anti_shortcut, d_mask_anti_shortcut = discriminator(inputs=anti_shortcut_image, name='discriminator',
                                                                  reuse=True)
        scope.reuse_variables()
        score_grounded_fake, d_mask_grounded_fake = discriminator(inputs=grounded_fake_image, name='discriminator',
                                                                  reuse=True)
        # _____________________________Define Losses_________________________________
        # Score losses
        # d_real = cross_entropy_loss(logits=score_foreground, labels=1.)
        # d_fake = cross_entropy_loss(logits=score_composite, labels=0.)
        # g_fake = cross_entropy_loss(logits=score_composite, labels=1.)
        # g_anti_shortcut = cross_entropy_loss(logits=score_anti_shortcut, labels=0.)
        # d_grounded_fake = cross_entropy_loss(logits=score_grounded_fake, labels=0.)
        # Mask losses
        # d_mask_real = mask_loss(model_mask=d_mask_foreground, mask=0.)
        # d_mask_fake = mask_loss(model_mask=d_mask_composite, mask=g_mask_foreground)
        # d_mask_anti_shortcut_loss = mask_loss(model_mask=d_mask_anti_shortcut, mask=g_mask_foreground)
        # d_mask_grounded_fake_loss = mask_loss(model_mask=d_mask_grounded_fake, mask=random_mask)
        # =====================================================
        # WGAN Loss
        d_real = -tf.reduce_mean(score_foreground)
        d_fake = tf.reduce_mean(score_composite)
        g_fake = -tf.reduce_mean(score_composite)
        g_anti_shortcut = tf.reduce_mean(score_anti_shortcut)
        d_grounded_fake = tf.reduce_mean(score_grounded_fake)
        # Mask auxiliary losses: each compares the discriminator's predicted
        # mask against the target mask *or* its complement and keeps the
        # smaller error (the mask polarity is ambiguous).
        d_mask_real = tf.minimum(tf.reduce_mean(tf.squared_difference(d_mask_foreground, 0)),
                                 tf.reduce_mean(tf.squared_difference(d_mask_foreground, 1)))
        d_mask_fake = tf.minimum(tf.reduce_mean(tf.squared_difference(d_mask_composite, g_mask_foreground)),
                                 tf.reduce_mean(tf.squared_difference(d_mask_composite, (1 - g_mask_foreground))))
        d_mask_anti_shortcut_loss = tf.minimum(
            tf.reduce_mean(tf.squared_difference(d_mask_anti_shortcut, g_mask_foreground)),
            tf.reduce_mean(tf.squared_difference(d_mask_anti_shortcut, (1 - g_mask_foreground))))
        d_mask_grounded_fake_loss = \
            tf.minimum(tf.reduce_mean(tf.squared_difference(d_mask_grounded_fake, random_mask)),
                       tf.reduce_mean(tf.squared_difference(d_mask_grounded_fake, (1 - random_mask))))
        # Aux loss
        L_aux = d_mask_real + d_mask_fake + d_mask_anti_shortcut_loss + d_mask_grounded_fake_loss
        L_g = g_fake + g_anti_shortcut
        L_d = d_real + d_fake + d_grounded_fake + 0.1 * L_aux
        # L_AUX_LOSS_1 = -tf.reduce_mean(tf.squared_difference(g_fake + g_anti_shortcut, 0))
        # L_AUX_LOSS_2 = -tf.reduce_mean(tf.squared_difference(g_fake - d_real, 0))
        g_optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-4)
        d_optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-4)
        # aux_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
        # NOTE(review): `vars` shadows the builtin of the same name.
        vars = tf.all_variables()
        for var in vars:
            print(var)
        g_vars = [v for v in vars if 'generator' in v.name]
        d_vars = [v for v in vars if 'discriminator' in v.name]
        # ===========================WGAN clip D params===============================
        clip_ops = []
        for var in d_vars:
            clip_bounds = [-.01, .01]
            clip_ops.append(
                tf.assign(
                    var,
                    tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
                )
            )
        clip_disc_weights = tf.group(*clip_ops)
        # ============================================================================
        with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
            g_train_op = g_optimizer.minimize(L_g, var_list=g_vars)
            d_train_op = d_optimizer.minimize(L_d, var_list=d_vars)
            # aux_train_op = aux_optimizer.minimize(L_AUX_LOSS_1 + L_AUX_LOSS_2, var_list=g_vars)
        # ________________________________Other Configurations___________________________________________
        init_op = tf.initialize_all_variables()
        saver = tf.train.Saver()
        # _____________________Create a session for running operations in the Graph._____________________
        with tf.Session() as sess:
            # Initialize the variables (like the epoch counter).
            sess.run(init_op)
            # Start input enqueue threads.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            for step in range(cfgs.MAX_ITERATION):
                d_epoch_loss = 0
                g_epoch_loss = 0
                image_dict = dict()
                for i in range(max(len(background_images), len(foreground_images))):
                    # Pick random foreground/background, plus a *different*
                    # foreground for the anti-shortcut composite.
                    p_idx = np.random.randint(low=0, high=len(foreground_images))
                    s_idx = np.random.randint(low=0, high=len(background_images))
                    sh_idx = np.random.randint(low=0, high=len(foreground_images))
                    while sh_idx == p_idx:
                        sh_idx = np.random.randint(low=0, high=len(foreground_images))
                    # Normalise pixels from [0, 255] to [-1, 1].
                    fore = cv2.imread(foreground_images[p_idx]) / 127.5 - 1
                    back = cv2.imread(background_images[s_idx]) / 127.5 - 1
                    sh_fore = cv2.imread(foreground_images[sh_idx]) / 127.5 - 1
                    _, _, d_loss, g_loss, \
                    image_dict['foreground_img'], image_dict['background_img'], \
                    image_dict['shuffled_foreground_img'], image_dict['g_mask_foreground_img'], \
                    image_dict['d_mask_foreground_img'], image_dict['composited_img'], \
                    image_dict['anti_shortcut_img'], image_dict['random_mask_img'], \
                    image_dict['grounded_fake_img'], image_dict['d_mask_composite_img'], \
                    image_dict['d_mask_anti_shortcut_img'], image_dict['d_mask_grounded_fake_img'] = \
                        sess.run([d_train_op,
                                  g_train_op,
                                  L_d,
                                  L_g,
                                  foreground,
                                  background,
                                  shuffled_foreground,
                                  g_mask_foreground,
                                  d_mask_foreground,
                                  composited_image,
                                  anti_shortcut_image,
                                  random_mask,
                                  grounded_fake_image,
                                  d_mask_composite,
                                  d_mask_anti_shortcut,
                                  d_mask_grounded_fake,
                                  ],
                                 feed_dict={
                                     foreground_plh: fore,
                                     background_plh: back,
                                     shuffled_foreground_plh: sh_fore
                                 })
                    d_epoch_loss += d_loss
                    g_epoch_loss += g_loss
                    # WGAN weight clipping on the discriminator after each update.
                    sess.run(clip_disc_weights)
                logger.info('Epoch {} Generator Loss: {}'.format(step, g_epoch_loss))
                logger.info('Epoch {} Discriminator Loss: {}'.format(step, d_epoch_loss))
                logger.info('\n')
                # Dump the last batch of intermediate images for inspection.
                save_images(image_dict, path=cfgs.RESULT_PATH, epoch=step)
                if step % (cfgs.MAX_ITERATION // 20) == 0:
                    saver.save(sess=sess, save_path=os.path.join(cfgs.CKPT_PATH, 'CPGAN.ckpt'), global_step=step)
            coord.request_stop()
            # Wait for threads to finish.
            coord.join(threads)
def save_images(image_dict, path, epoch):
    '''
    Save every image in *image_dict* under *path*, tagging filenames with the epoch.
    :param image_dict: mapping from image name to pixel array
    :param path: destination directory
    :param epoch: training epoch number, appended to each filename
    :return: None
    '''
    logger.info('Saving {} images...'.format(len(image_dict)))
    for name, pixels in tqdm(image_dict.items()):
        filename = name + '_' + str(epoch) + '.jpg'
        _save_image(pixels, path=os.path.join(path, filename))
def _save_image(image, path):
    '''
    Save one image to disk, de-normalising batched (4-D) tensors first.
    Mask images (path contains 'mask') are assumed to lie in [0, 1], all
    other images in [-1, 1]; both are rescaled to [0, 255] uint8.
    :param image: image value (H x W x C, or 1 x H x W x C batch)
    :param path: save path
    :return: None
    '''
    if image.ndim == 4:
        # Pick the de-normalisation for this image family.
        if 'mask' in path:
            offset, scale = 0, 255.
        else:
            offset, scale = 1, 127.5
        h, w, c = image.shape[-3], image.shape[-2], image.shape[-1]
        image = np.reshape((image + offset) * scale,
                           newshape=[h, w, c]).astype(np.uint8)
    cv2.imwrite(filename=path, img=image)
def random_process_image(image):
    '''
    Data augmentation: random 224x224 crop followed by a random horizontal flip.
    :param image: input image tensor, at least 224x224 spatially
    :return: augmented image tensor of shape [224, 224, cfgs.CHANNEL]
    '''
    cropped = tf.image.random_crop(image, size=[224, 224, cfgs.CHANNEL])
    # Colour-jitter policies (brightness/contrast/hue/saturation) were tried
    # upstream and disabled because "some image processing policies may not work".
    return tf.image.random_flip_left_right(cropped)
if __name__ == '__main__':
    # Collect the foreground (plane) and background (sky) training images,
    # then run the CP-GAN training loop.
    foreground_images = glob(os.path.join(cfgs.IMAGE_FOLDER, 'plane', '*.jpg'))
    background_images = glob(os.path.join(cfgs.IMAGE_FOLDER, 'sky', '*.jpg'))
    main()
| StarcoderdataPython |
308617 | <filename>stores/aliexpress.py
import json
import re
from json.decoder import JSONDecodeError
from common import get_session, message, find_str
# Cookie forcing the Spanish storefront with USD prices and the CL (Chile) region.
# Bug fix: the original value contained the mojibake '®ion=CL' -- '&reg' had
# been HTML-entity-decoded into the (R) symbol -- which corrupts the cookie;
# restored to '&region=CL'.
conf = {'aep_usuc_f': 'isfm=y&site=esp&c_tp=USD&isb=y&region=CL&b_locale=es_ES'}
# Canonical Aliexpress item-page URL (captured without any query string).
pat = re.compile(r'^(https://[a-z]{2,3}\.aliexpress\.com/item/[0-9]+\.html)')
name = 'Aliexpress'
def parse(url):
    """Scrape an Aliexpress product page and return its main attributes.

    :param url: product page URL; must match the Aliexpress item pattern
    :return: dict with url/name/price/price_sale/price_card/image/raw on
        success, or ``message(code='product_not_found')`` when the URL does
        not look like an item page or the page data cannot be parsed
    """
    # Bug fix: the original did `pat.findall(url)[0]`, which raised
    # IndexError for any URL that does not match the item pattern.
    matches = pat.findall(url)
    if not matches:
        return message(code='product_not_found')
    clean_url = matches[0]
    with get_session() as s:
        data = s.get(clean_url, cookies=conf).text
    try:
        # The page embeds its product data as a JS object literal: `data: {...},`
        page_data = json.loads(find_str(data, 'data: ', ',\n'))
    except JSONDecodeError:
        return message(code='product_not_found')
    if not page_data or 'priceModule' not in page_data:
        return message(code='product_not_found')
    prices = page_data['priceModule']
    # Default both sale and card price to the list price; override with the
    # activity (promotion) price when one is present.
    price_offer = price = prices['formatedPrice']
    if 'formatedActivityPrice' in prices:
        price_offer = prices['formatedActivityPrice']
    return dict(
        url=clean_url,
        name=page_data['pageModule']['title'],
        price=price,
        price_sale=price_offer,
        price_card=price_offer,
        image=page_data['pageModule']['imagePath'],
        raw=page_data
    )
| StarcoderdataPython |
6519769 |
import unittest
from parameterized import parameterized as p
from solns.bubbleSort.bubbleSort import *
class UnitTest_BubbleSort(unittest.TestCase):
    """Unit tests for the naive bubble sort implementation."""

    @p.expand([
        [[4, 5, 1, 8, 10, 11, 0, 7, 9], [0, 1, 4, 5, 7, 8, 9, 10, 11]]
    ])
    def test_naive(self, nums, expected):
        """The naive sort must return the elements in ascending order."""
        result = Solution.naive(nums)
        self.assertEqual(result, expected)
| StarcoderdataPython |
11349471 | from picosystem import *
import math
# A simple raycaster demo, heavily inspired by https://lodev.org/cgtutor/raycasting.html
# Copy to your Pico along with "gadgetoid-raycaster.16bpp" for full textured output
# Attempt to load the wall texture atlas; fall back to flat colours if absent.
textured = False
try:
    # Read the 16-bits per pixel ARGB4444 texture into a buffer
    # The texture map contains 32x32 pixel textures
    WALLS = Buffer(160, 160)
    open("gadgetoid-raycaster.16bpp", "rb").readinto(WALLS)
    textured = True
except OSError:
    # If texture lookup fails, fall back to untextured, solid colour walls
    del WALLS
# Screen size in pixels.
W = 120
H = 120
# Map size in tiles.
MAP_W = 16
MAP_H = 16
# Texture lookup
# These map a texture index (0-5) to a solid colour
# (used only in the untextured fallback path).
TEXTURES = [
    rgb(0, 0, 0),
    rgb(15, 0, 0),
    rgb(0, 15, 0),
    rgb(0, 0, 15),
    rgb(15, 15, 0),
    rgb(15, 0, 15)
]
# Level data
# Each byte corresponds to a tile on the map,
# non-zero tiles are considered solid (walls); the byte value selects
# the wall texture (1-based; 0 means empty floor).
WORLD = bytearray((
    0x01, 0x02, 0x03, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
    0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
    0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
    0x01, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
    0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01,
    0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01,
    0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
    0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01,
    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x01, 0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01,
    0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x01, 0x00, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01,
    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x01,
    0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
    0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01,
    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01
))
# Player starting point
# Given as a float- the whole portion corresponds to a specific tile in the map
# The fractional portion is the player position within that tile.
player_x = 3.5
player_y = 3.5
# Player direction
# Given as a 2D vector: X and Y
# This is more efficient than storing an angle when it come to ray-casting.
player_dir_x = -1
player_dir_y = 0
# Player camera plane
# Encodes the player's field-of-view into a vector
plane_x = 0
plane_y = 0.40 # FOV == 2 * atan(plane_y/1.0)
# Player movement and rotation speed
move_speed = 1.0 / 40 * 10
rotate_speed = 1.0 / 40 * 3
# Pre-calculate the sine and cosine of the rotation speed
# this is used when we rotate the player's direction vector.
sinl_rotate_speed = math.sin(rotate_speed)
cosl_rotate_speed = math.cos(rotate_speed)
sinr_rotate_speed = math.sin(-rotate_speed)
cosr_rotate_speed = math.cos(-rotate_speed)
# Horizontal size of a ray's screen column, in pixels
# A ray size of two would make a single ray responsible for two adjacent columns of pixels
# The higher this number, the lower the horizontal resolution and the faster the algorithm.
raysize = 2
# Number of rays is equal to the screen width (in pixels) divided by the ray size (in pixels)
rays = int(W / raysize)
# Lookup table for the raycasting X-offset
# (maps a ray index to its camera-plane x in [-1, 1)).
ray_camera_x = [2 * x / rays - 1 for x in range(rays)]
# Tile lookup with upper-bound checking (anything past the edge reads as wall).
def get_tile_at(x, y):
    """Return the map byte at (x, y), or 1 (solid) when past the map edge.

    NOTE(review): negative coordinates are not clamped, so they index from
    the end of the bytearray (Python negative indexing) -- presumably never
    reached because the outer ring of WORLD is solid.
    """
    col, row = int(x), int(y)
    if col >= MAP_W or row >= MAP_H:
        return 1  # Solid
    return WORLD[row * MAP_W + col]
# @micropython.native # Uncomment to emit native instructions and speed up code slightly
def update(tick):
    """Per-frame input handling: move/rotate the player, with wall collision."""
    global player_x, player_y, player_dir_x, player_dir_y, plane_x, plane_y
    if button(UP): # Move forwards
        new_x = player_x + player_dir_x * move_speed
        new_y = player_y + player_dir_y * move_speed
        # Only update the player's position if the tile being
        # moved into is *empty* (axes are checked independently so the
        # player can slide along walls).
        if not get_tile_at(new_x, player_y):
            player_x = new_x
        if not get_tile_at(player_x, new_y):
            player_y = new_y
    if button(DOWN): # Move backwards
        new_x = player_x - player_dir_x * move_speed
        new_y = player_y - player_dir_y * move_speed
        # Only update the player's position if the tile being
        # moved into is *empty*
        if not get_tile_at(new_x, player_y):
            player_x = new_x
        if not get_tile_at(player_x, new_y):
            player_y = new_y
    # Snapshot the direction first: both rotation components must use the
    # *pre-rotation* vector.
    dir_x = player_dir_x
    dir_y = player_dir_y
    if button(LEFT): # Turn left
        # Rotate the player's direction vector
        player_dir_x = dir_x * cosl_rotate_speed - dir_y * sinl_rotate_speed
        player_dir_y = dir_x * sinl_rotate_speed + dir_y * cosl_rotate_speed
        # Rotate the camera plane
        p_x = plane_x
        plane_x = p_x * cosl_rotate_speed - plane_y * sinl_rotate_speed
        plane_y = p_x * sinl_rotate_speed + plane_y * cosl_rotate_speed
    if button(RIGHT): # Turn right
        # Rotate the player's direction vector
        player_dir_x = dir_x * cosr_rotate_speed - dir_y * sinr_rotate_speed
        player_dir_y = dir_x * sinr_rotate_speed + dir_y * cosr_rotate_speed
        # Rotate the camera plane
        p_x = plane_x
        plane_x = p_x * cosr_rotate_speed - plane_y * sinr_rotate_speed
        plane_y = p_x * sinr_rotate_speed + plane_y * cosr_rotate_speed
# @micropython.native # Uncomment to emit native instructions and speed up code slightly
def draw(tick):
    """Per-frame render: sky/floor fill, then one DDA raycast per screen column."""
    # Sky (top half) and floor (bottom half).
    pen(0, 0, 6)
    clear()
    pen(0, 6, 0)
    frect(0, 60, 120, 60)
    # Step through each ray,
    # these map to a vertical column on the screen from left to right.
    for x in range(rays):
        camera_x = ray_camera_x[x]
        ray_dir_x = player_dir_x + plane_x * camera_x
        ray_dir_y = player_dir_y + plane_y * camera_x
        map_x = int(player_x)
        map_y = int(player_y)
        # Distance the ray travels to cross one whole grid cell per axis
        # (1e30 stands in for infinity when the ray is axis-parallel).
        delta_dist_x = 1e30 if ray_dir_x == 0 else abs(1.0 / ray_dir_x)
        delta_dist_y = 1e30 if ray_dir_y == 0 else abs(1.0 / ray_dir_y)
        side_dist_x = 0
        side_dist_y = 0
        step_x = 0
        step_y = 0
        side = 0
        texture = 0
        # Based on the ray direction, figure out how far to
        # step over the map grid in the X/Y axes each iteration
        if ray_dir_x < 0:
            step_x = -1
            side_dist_x = (player_x - map_x) * delta_dist_x
        else:
            step_x = 1
            side_dist_x = (map_x + 1.0 - player_x) * delta_dist_x
        if ray_dir_y < 0:
            step_y = -1
            side_dist_y = (player_y - map_y) * delta_dist_y
        else:
            step_y = 1
            side_dist_y = (map_y + 1.0 - player_y) * delta_dist_y
        # Step the calculated X/Y distances until a wall is "hit"
        # This is an implementation of the DDA algorithm.
        # Excellent guide to how it works here: https://www.youtube.com/watch?v=NbSee-XM7WA
        while True:
            # Determine shortest axis and walk along it
            if side_dist_x < side_dist_y:
                side_dist_x += delta_dist_x
                map_x += step_x
                side = 0
            else:
                side_dist_y += delta_dist_y
                map_y += step_y
                side = 1
            # Grab the texture at the current file
            texture = get_tile_at(map_x, map_y)
            # If it's nonzero, it's a wall
            if texture > 0:
                texture -= 1 # Subtract 1 to give us a zero-based index into the texture map
                break
        perpendicular_wall_distance = 0
        wall_x = 0
        # Figure out how far along the surface of the wall we hit
        # by calculating the total travel distance of the ray
        # along the axis opposite to our collission.
        # If we hit the Y face of the wall we get the X distance
        if side == 0:
            perpendicular_wall_distance = side_dist_x - delta_dist_x
            wall_x = player_y + perpendicular_wall_distance * ray_dir_y
        else:
            perpendicular_wall_distance = side_dist_y - delta_dist_y
            wall_x = player_x + perpendicular_wall_distance * ray_dir_x
        # We only want the fractional part of the distance above
        # this gives us a scale for the X-coordinate into the wall texture
        wall_x %= 1.0
        # Wall column height is inversely proportional to distance.
        wall_height = int(H / perpendicular_wall_distance)
        screen_x = x * raysize
        screen_y = int(-wall_height / 2 + H / 2)
        if textured:
            # Select the 32px-wide texture in the atlas, then the 1px-wide
            # source column within it; blit stretches it to wall_height.
            texture_offset = texture * 32
            texture_x = int(wall_x * 32) + texture_offset
            texture_y = 0
            texture_src_w = 1
            texture_src_h = 32
            blit(WALLS, texture_x, texture_y, texture_src_w, texture_src_h, screen_x, screen_y, raysize, wall_height)
        else:
            pen(TEXTURES[texture])
            frect(screen_x, screen_y, raysize, wall_height)
# Hand control to the PicoSystem runtime main loop
# (which presumably drives the update()/draw() callbacks each frame).
start()
| StarcoderdataPython |
8149662 | <gh_stars>0
# -*- coding: utf-8 -*-
# Grupo 11:
# 83597 <NAME>
# 84715 <NAME>
from search import Problem, Node, Graph, astar_search, breadth_first_tree_search, \
depth_first_tree_search, greedy_search
import sys
import copy
class RRState:
    """Search-tree state: a snapshot of the robots' positions on a board."""

    # Monotonically increasing id, used only to break ties in ordered queues.
    state_id = 0

    def __init__(self, board):
        self.board = board
        self.id = RRState.state_id
        RRState.state_id += 1

    def __lt__(self, other):
        """Tie-breaker for the open list in informed searches:
        earlier-created states win."""
        return self.id < other.id

    def __eq__(self, other):
        """Two states are equal iff every robot occupies the same cell."""
        return self.board.robot_positions == other.board.robot_positions

    def __hash__(self):
        # Bug fix: the hash must agree with __eq__. The previous
        # `hash(self.board)` fell back to the Board object's identity hash,
        # so two equal states (same robot positions on distinct Board copies)
        # hashed differently, defeating set/dict-based duplicate detection
        # during search. Hash the robot positions themselves instead.
        return hash(frozenset(self.board.robot_positions.items()))
class Board:
    """Internal representation of a Ricochet Robots board.

    Parsed from a text description: line 0 is the board dimension, lines 1-4
    give robot colour + row/col, line 5 the target, and lines 7+ the inner
    walls as "row col side" (side in u/d/l/r).

    Attributes:
        robot_positions: dict colour -> (row, col)
        walls: dict (row, col) -> string of wall sides present in that cell
        dimension: board side length
        target: (colour, row, col)
    """
    def __init__(self, lines):
        self.robot_positions = {}
        self.walls = {}
        # First line is the board dimension (strip trailing newline).
        self.dimension = int(lines[0][:-1])
        # add robots
        for i in range(1,5):
            l_s = lines[i].split(' ')
            self.robot_positions[l_s[0]] = (int(l_s[1]), int(l_s[2]))
        # add target
        l_s = lines[5].split(' ')
        self.target = (l_s[0], int(l_s[1]), int(l_s[2]))
        # add inside walls
        for i in range(7, len(lines)):
            l_s = lines[i].split(' ')
            # Skip blank/short trailing lines.
            if l_s[0] == '\n' or l_s[1] == '\n': continue
            position = (int(l_s[0]), int(l_s[1]))
            # Strip the trailing newline from the wall side letter.
            wall = l_s[2][:-1]
            self.checkWalls(position, wall)
            # if there's a wall in one cell, there is on the one across the wall
            if wall == 'u': self.checkWalls((position[0] - 1, position[1]), 'd')
            elif wall == 'd': self.checkWalls((position[0] + 1, position[1]), 'u')
            elif wall == 'l': self.checkWalls((position[0], position[1] - 1), 'r')
            elif wall == 'r': self.checkWalls((position[0], position[1] + 1), 'l')
        # add outside walls (the border of the board, 1-indexed coordinates)
        for i in range(1, self.dimension + 1):
            self.checkWalls((1, i), 'u')
            self.checkWalls((self.dimension, i), 'd')
            self.checkWalls((i, 1), 'l')
            self.checkWalls((i, self.dimension), 'r')
    def setRobotPosition(self, robot, new_position):
        # Move *robot* (colour key) to *new_position* (row, col).
        self.robot_positions[robot] = new_position
    def getCellWall(self, position):
        # Return the wall-side letters for *position*, '' if the cell has none.
        if position not in self.walls: return ''
        return self.walls[position]
    def robot_position(self, robot: str):
        # Return the (row, col) of the robot with colour key *robot*.
        return self.robot_positions[robot]
    # checks if cell is in "walls"; if so, appends; if not, creates
    def checkWalls(self, position, value):
        if position not in self.walls:
            self.walls[position] = value
        else:
            self.walls[position] += value
def parse_instance(filename: str) -> Board:
    """Read the board description in *filename* and return a Board instance.

    :param filename: path to a text file in the expected board format
    :return: Board built from the file's lines
    """
    # Use a context manager so the handle is closed even if readlines()
    # raises (the original open/readlines/close sequence leaked the handle
    # on error).
    with open(filename, 'r') as file:
        lines = file.readlines()
    return Board(lines)
class RicochetRobots(Problem):
    """Ricochet Robots as a search Problem: robots slide until they hit a
    wall or another robot; the goal is the target-coloured robot reaching
    the target cell."""
    def __init__(self, board: Board):
        """The constructor specifies the initial state."""
        self.board = board
        self.initial = RRState(board)
    def actions(self, state: RRState):
        """Return the list of (robot, move) actions executable from *state*.

        A move letter (u/d/l/r) is dropped when the robot's own cell has a
        wall on that side, or when the adjacent cell is occupied by a robot.
        """
        possible_actions = {'R':'udlr','G':'udlr','B':'udlr','Y':'udlr'}
        robot_positions = state.board.robot_positions
        for robot in robot_positions:
            new_pos = ()
            # Remove the directions blocked by a wall in the robot's cell.
            for w in state.board.getCellWall(robot_positions[robot]):
                possible_actions[robot] = possible_actions[robot].replace(w, '')
            for m in possible_actions[robot]:
                # Generate the hypothetical next position.
                new_pos = calcMove(m, robot_positions[robot])
                # If there's a robot in the adjacent cell, that move is illegal.
                if new_pos in robot_positions.values():
                    possible_actions[robot] = possible_actions[robot].replace(m, '')
        result = []
        for r in possible_actions:
            for m in possible_actions[r]:
                result.append((r, m))
        return result
    def result(self, state: RRState, action):
        """Return the state produced by executing *action* on *state*.

        The action must be one returned by self.actions(state) and has the
        form (robot, move). The robot slides cell by cell until a wall or
        another robot stops it.
        """
        # Deep copy so the predecessor state's board is left untouched.
        state_copy = copy.deepcopy(state)
        old_pos = state_copy.board.robot_position(action[0])
        robot_positions = state_copy.board.robot_positions
        while True:
            # Stop if there's a wall on the travel side of the current cell.
            if (old_pos in state_copy.board.walls) and \
               (action[1] in state_copy.board.walls[old_pos]): break
            new_pos = calcMove(action[1], old_pos)
            # Stop just before an occupied cell.
            if new_pos in robot_positions.values(): break
            old_pos = new_pos
        state_copy.board.setRobotPosition(action[0], old_pos)
        return RRState(state_copy.board)
    def goal_test(self, state: RRState):
        """Return True iff *state* is a goal state, i.e. the robot with the
        target's colour occupies the target cell."""
        target = state.board.target
        position = state.board.robot_position(target[0])
        return position == target[1:]
    def h(self, node: Node):
        """Heuristic for A* search: Manhattan distance from the target
        robot to the target cell."""
        board = node.state.board
        target = board.target[1:]
        position = board.robot_positions[board.target[0]]
        return abs(target[0] - position[0]) + abs(target[1] - position[1])
def calcMove(action, prev):
    """Return the cell adjacent to *prev* in direction *action*.

    :param action: one of 'u', 'd', 'l', 'r'
    :param prev: (row, col) tuple
    :return: the neighbouring (row, col), or None for an unknown action
    """
    row, col = prev
    deltas = {'u': (-1, 0), 'd': (1, 0), 'l': (0, -1), 'r': (0, 1)}
    if action in deltas:
        d_row, d_col = deltas[action]
        return (row + d_row, col + d_col)
if __name__ == "__main__":
rr = RicochetRobots(parse_instance(sys.argv[1]))
result_node = astar_search(rr).solution()
print(len(result_node), end = '')
for i in result_node:
print('\n%s %s' % (i[0], i[1]), end = '')
| StarcoderdataPython |
6429854 | <gh_stars>0
from mycroft import MycroftSkill, intent_file_handler, util
from .data import events
class Mathformula(MycroftSkill):
    """Mycroft skill that responds to questions about math formulas."""

    def __init__(self):
        MycroftSkill.__init__(self)

    @intent_file_handler('formula.intent')
    def handle_formula(self, message):
        """Handle an utterance matching formula.intent.

        :param message: intent message; presumably carries the term the user
            asked about in its data under a 'term' slot -- TODO confirm
            against formula.intent.
        """
        self.speak_dialog("formula")
        # Bug fix: the original referenced an undefined name `term`
        # (NameError on every invocation) and built an unused, broken `key`
        # string; the log call also never filled its {} placeholder.
        term = message.data.get('term', '')
        self.log.info("asking about the formula of {}".format(term))
def create_skill():
    """Entry point used by the Mycroft loader to instantiate the skill."""
    # Bug fix: the def line was missing its trailing colon, which made the
    # whole module a SyntaxError.
    return Mathformula()
3560668 | <reponame>Duckie-town-isu/tulip-control
# Copyright (c) 2013-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
Convenience functions for plotting
WARNING: The public functions dimension, newax, dom2vec, quiver will
eventually be removed. Their use in new applications is discouraged.
They come from https://github.com/johnyf/pyvectorized
"""
from __future__ import division
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
from warnings import warn
try:
from itertools import zip_longest as izip_longest
except ImportError:
from itertools import izip_longest
import numpy as np
try:
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
except Exception as e:
plt = None
logger.error(e)
# from mayavi import mlab
def dimension(ndarray):
    """dimension of ndarray (DEPRECATED)

    - ndim <= 1: the array's own ndim
    - ndim >= 2: shape[0] (number of row vectors / coordinate rows)
    """
    return ndarray.ndim if ndarray.ndim < 2 else ndarray.shape[0]
def newax(subplots=(1, 1), fig=None,
          mode='list', dim=2):
    """Create (possibly multiple) new axes handles. (DEPRECATED)

    @param fig: attach axes to this figure
    @type fig: figure object,
        should be consistent with C{dim}

    @param subplots: number or layout of subplots
    @type subplots: int or
        2-tuple of subplot layout

    @param mode: return the axes shaped as a
        vector or as a matrix.
        This is a convenience for later iterations
        over the axes.
    @type mode: 'matrix' | ['list']

    @param dim: plot dimension:

        - if dim == 2, then use matplotlib
        - if dim == 3, then use mayavi

        So the figure type depends on dim.

    @return: C{(ax, fig)} where:
        - C{ax}: axes created
        - C{fig}: parent of ax
    @rtype: list or list of lists,
        depending on C{mode} above
    """
    assert_pyplot()
    # layout or number of axes ?
    # NOTE(review): bare except treats *any* failure of tuple() as
    # "subplots is an int"; narrow to TypeError if ever revised.
    try:
        subplot_layout = tuple(subplots)
    except:
        subplot_layout = (1, subplots)
    # reasonable layout ?
    if len(subplot_layout) != 2:
        raise Exception('newax:'
                        'subplot layout should be 2-tuple or int.')
    # which figure ?
    if fig is None:
        fig = plt.figure()
    # create subplot(s)
    (nv, nh) = subplot_layout
    n = np.prod(subplot_layout)
    # Normalise dim to one value per subplot.
    try:
        dim = tuple(dim)
    except:
        # all same dim
        dim = [dim] * n
    # matplotlib (2D) or mayavi (3D) ?
    ax = []
    for (i, curdim) in enumerate(dim):
        if curdim == 2:
            curax = fig.add_subplot(nv, nh, i + 1)
            ax.append(curax)
        else:
            curax = fig.add_subplot(nv, nh, i + 1, projection='3d')
            ax.append(curax)
            if curdim > 3:
                warn('ndim > 3, but plot limited to 3.')
    if mode == 'matrix':
        # Reshape the flat axes list into rows of nh axes each.
        ax = list(_grouper(nh, ax))
    # single axes ?
    if subplot_layout == (1, 1):
        ax = ax[0]
    return (ax, fig)
def dom2vec(domain, resolution):
    """Matrix of column vectors for meshgrid points. (DEPRECATED)

    Returns a matrix of column vectors for the meshgrid
    point coordinates over a parallelepiped domain
    with the given resolution.

    Example
    =======
    >>> domain = [0, 1, 0, 2]
    >>> resolution = [4, 5]
    >>> q = dom2vec(domain, resolution)

    @param domain: extremal values of parallelepiped
    @type domain: [xmin, xmax, ymin, ymax, ...]
    @param resolution: # points /dimension
    @type resolution: [nx, ny, ...]
    @return: q = matrix of column vectors (meshgrid point coordinates)
    @rtype: [#dim x #points]

    See also vec2meshgrid, domain2meshgrid, meshgrid2vec.
    """
    # Pair up (min, max) bounds per axis and sample each linearly.
    bounds = _grouper(2, domain)
    axis_grids = [np.linspace(lo, hi, res)
                  for (lo, hi), res in zip(bounds, resolution)]
    pnt_coor = np.meshgrid(*axis_grids)
    # Flatten each coordinate grid and stack them as rows.
    return np.vstack([grid.ravel() for grid in pnt_coor])
def quiver(x, v, ax=None, **kwargs):
    """Multi-dimensional quiver. (DEPRECATED)

    Plot v columns at points in columns of x
    in axes ax with plot formatting options in kwargs.

    >>> import numpy as np
    >>> import matplotlib as mpl
    >>> from pyvectorized import quiver, dom2vec
    >>> x = dom2vec([0, 10, 0, 11], [20, 20])
    >>> v = np.vstack(np.sin(x[1, :] ), np.cos(x[2, :] ) )
    >>> quiver(mpl.gca(), x, v)

    see also
        matplotlib.quiver, mayavi.quiver3

    @param x: points where vectors are based
        each column is a coordinate tuple
    @type x: 2d lil | numpy.ndarray
    @param v: vectors which to base at points x
    @type v: 2d lil | numpy.ndarray
    @param ax: axes handle, e.g., ax = gca())
    @param x: matrix of points where vectors are plotted
    @type x: [#dim x #points]
    @param v: matrix of column vectors to plot at points x
    @type v: [#dim x #points]
    @param kwargs: plot formatting

    @return: handle to plotted object(s)
    """
    assert_pyplot()
    # multiple axes ?  If ax is iterable, recurse once per axes.
    # NOTE(review): the bare except also swallows errors raised *inside*
    # the recursive calls, silently falling through to the single-axes path.
    try:
        fields = [quiver(x, v, i, **kwargs) for i in ax]
        return fields
    except:
        pass
    if not ax:
        ax = plt.gca()
    dim = dimension(x)
    if dim < 2:
        raise Exception('ndim < 2')
    elif dim < 3:
        h = ax.quiver(x[0, :], x[1, :],
                      v[0, :], v[1, :], **kwargs)
    else:
        # NOTE(review): everything after this raise is dead code -- the
        # mayavi 3D branch was disabled and never executes.
        raise NotImplementedError
        from mayavi.mlab import quiver3d
        if ax:
            print('axes arg ignored, mayavi used')
        h = quiver3d(x[0, :], x[1, :], x[2, :],
                     v[0, :], v[1, :], v[2, :], **kwargs)
        if dim > 3:
            warn('quiver:ndim #dimensions > 3,' +
                 'plotting only 3D component.')
    return h
def _grouper(n, iterable, fillvalue=None):
"""grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
"""
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
def assert_pyplot():
    """Raise ImportError if matplotlib.pyplot failed to import at module load."""
    if plt is None:
        raise ImportError('Failed to import `matplotlib.pyplot`')
| StarcoderdataPython |
9681784 | import redis
r = redis.Redis(password='<PASSWORD>')
# Task record: task type _ sender _ recipient _ content (underscore-separated).
task = '%s_%s_%s_%s'%('sendMail','<EMAIL>','<EMAIL>','hello world')
# Enqueue the task by pushing it onto the 'pylk1' Redis list.
r.lpush('pylk1',task)
| StarcoderdataPython |
import matplotlib.pyplot as plt
import numpy as np

# 20 evenly spaced sample points on [-10, 10].
t = np.linspace(-10, 10, 20)

plt.figure()

# Each subplot draws y = x**k for k = 1..4, gated by the boolean mask
# (t <= 10).  The original code multiplied by the constant (t*2, t*3, t*4)
# although the titles claim powers (x^2, x^3, x^4), and wrapped the mask
# in a one-element list ([t<=10]), which broadcast the data to shape
# (1, 20); both defects are fixed here.
plt.subplot(221)
y = t * (t <= 10)
plt.plot(t, y, color='red', linewidth=4)
plt.grid()
plt.title('y1=x')
plt.subplot(222)
y = (t ** 2) * (t <= 10)
plt.plot(t, y, color='blue', linewidth=4)
plt.grid()
plt.title('y1=x^2')
plt.subplot(223)
y = (t ** 3) * (t <= 10)
plt.plot(t, y, color='green', linewidth=4)
plt.grid()
plt.title('y1=x^3')
plt.subplot(224)
y = (t ** 4) * (t <= 10)
plt.plot(t, y, color='black', linewidth=4)
plt.grid()
plt.title('y1=x^4')
plt.show()
1928776 | import yaku.utils
def setup(ctx):
env = ctx.env
ctx.env["CC"] = ["clang"]
ctx.env["CC_TGT_F"] = ["-c", "-o"]
ctx.env["CC_SRC_F"] = []
ctx.env["CFLAGS"] = []
ctx.env["DEFINES"] = []
ctx.env["LINK"] = ["clang"]
ctx.env["LINKFLAGS"] = []
ctx.env["LINK_TGT_F"] = ["-o"]
ctx.env["LINK_SRC_F"] = []
ctx.env["SHAREDLIB_FMT"] = "lib%s.so"
ctx.env["SHLINK"] = ["clang", "-shared"]
ctx.env["SHLINKFLAGS"] = []
ctx.env["SHLINK_TGT_F"] = ["-o"]
ctx.env["SHLINK_SRC_F"] = []
ctx.env["MODLINK"] = ["clang", "-bundle", "-undefined", "dynamic_lookup"]
ctx.env["MODLINKFLAGS"] = []
ctx.env["MODLINK_TGT_F"] = ["-o"]
ctx.env["MODLINK_SRC_F"] = []
ctx.env["CPPPATH"] = []
ctx.env["CPPPATH_FMT"] = "-I%s"
ctx.env["LIBDIR"] = []
ctx.env["LIBS"] = []
ctx.env["LIB_FMT"] = "-l%s"
ctx.env["LIBDIR_FMT"] = "-L%s"
ctx.env["CC_OBJECT_FMT"] = "%s.o"
ctx.env["PROGRAM_FMT"] = "%s"
def detect(ctx):
    """Return True when a `clang` executable can be found on the PATH."""
    # Idiomatic replacement for the original if/else returning booleans.
    return yaku.utils.find_program("clang") is not None
| StarcoderdataPython |
4931945 | #!/usr/bin/python
# Copyright 2013 CereProc Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# Template for creating modules for the idlak build system - CHANGE ME
import sys, os.path, time
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.split(__file__)[1])[0]
DESCRIPTION = 'Template for idlak build module - CHANGE ME'
# Add to path
sys.path = sys.path + [SCRIPT_DIR + '/../utils']
sys.path = sys.path + [SCRIPT_DIR]
# import voice build utilities
import build_configuration
def main():
    """Run one idlak build module: configure, then process."""
    # process the options based on the default build configuration
    build_conf, parser = build_configuration.get_config(SCRIPT_NAME, DESCRIPTION, SCRIPT_NAME)
    # Parse the command line unconditionally.  The original guarded this
    # with `if __name__ == '__main__':` inside main(), which left `opts`
    # undefined (NameError) whenever main() was called after an import.
    opts, args = parser.parse_args()
    # and load custom configurations
    if opts.bldconf:
        build_conf.parse(opts.bldconf)
    if opts.spkconf:
        build_conf.parse(opts.spkconf)
    else:
        parser.error("Speaker configuration is required e.g. speaker_conf/bdl.xml")
    build_conf.updatefromopts(opts)
    # set up logging, check idlak-scratch, check dependencies and build as required
    build_conf.set_build_environment(SCRIPT_NAME)
    # MODULE SPECIFIC CODE
    # get required input files from idlak-data
    # get required directories from dependent modules
    # examine general settings and set as appropriate
    # process data
    # END OF MODULE SPECIFIC CODE
    build_conf.end_processing(SCRIPT_NAME)
if __name__ == '__main__':
main()
| StarcoderdataPython |
4800907 | <reponame>stvstnfrd/openedx-webhooks
"""
Get information about people, repos, orgs, pull requests, etc.
"""
import datetime
import re
from typing import Dict, Iterable, Optional, Union
import yaml
from iso8601 import parse_date
from openedx_webhooks.lib.github.models import PrId
from openedx_webhooks.oauth import get_github_session
from openedx_webhooks.types import PrDict, PrCommentDict
from openedx_webhooks.utils import (
memoize,
memoize_timed,
paginated_get,
retry_get,
)
@memoize_timed(minutes=15)
def _read_repotools_yaml_file(filename):
    """Read a YAML file from the repo-tools-data repo."""
    # Result is cached for 15 minutes to avoid re-fetching on every request.
    return yaml.safe_load(_read_repotools_file(filename))
def _read_repotools_file(filename):
    """
    Read the text of a repo-tools-data file.

    Raises requests.HTTPError if the file cannot be fetched.
    """
    github = get_github_session()
    # Interpolate the requested filename into the raw-content URL.  The
    # original had a literal "(unknown)" placeholder instead of {filename},
    # so every call fetched a nonexistent path regardless of the argument.
    resp = github.get(f"https://raw.githubusercontent.com/edx/repo-tools-data/master/{filename}")
    resp.raise_for_status()
    return resp.text
def get_people_file():
    # Mapping of GitHub username -> contributor metadata.
    return _read_repotools_yaml_file("people.yaml")
def get_orgs_file():
    # Mapping of organization name -> org metadata flags.
    return _read_repotools_yaml_file("orgs.yaml")
def get_labels_file():
    # Label configuration used when labeling pull requests.
    return _read_repotools_yaml_file("labels.yaml")
def get_orgs(key):
    """Return the set of orgs with a true `key`."""
    orgs = get_orgs_file()
    # Keep only orgs whose metadata marks `key` truthy (missing -> False).
    return {o for o, info in orgs.items() if info.get(key, False)}
def get_person_certain_time(person: Dict, certain_time: datetime.datetime) -> Dict:
    """
    Return person data structure for a particular time

    Arguments:
        person: dict of a Github user info from people.yaml in repo-tools-data
        certain_time: datetime.datetime object used to determine the state of the person
    """
    # Start from a shallow copy, then fold in every applicable "before"
    # clause, walking clause dates from newest to oldest and stopping at
    # the first clause that predates `certain_time`.
    snapshot = dict(person)
    clauses = person.get("before", {})
    for clause_date in sorted(clauses, reverse=True):
        if certain_time.date() > clause_date:
            break
        snapshot.update(clauses[clause_date])
    return snapshot
def is_internal_pull_request(pull_request: PrDict) -> bool:
    """
    Was this pull request created by someone who works for edX?
    """
    # Delegates to the shared flag lookup on the author / their org.
    return _is_pull_request(pull_request, "internal")
def is_contractor_pull_request(pull_request: PrDict) -> bool:
    """
    Was this pull request created by someone in an organization that does
    paid contracting work for edX? If so, we don't know if this pull request
    falls under edX's contract, or if it should be treated as a pull request
    from the community.
    """
    return _is_pull_request(pull_request, "contractor")
def is_bot_pull_request(pull_request: PrDict) -> bool:
    """
    Was this pull request created by a bot?
    """
    # GitHub reports machine accounts with the "Bot" user type.
    author_type = pull_request["user"]["type"]
    return author_type == "Bot"
def is_draft_pull_request(pull_request: PrDict) -> bool:
    """
    Is this a draft (or WIP) pull request?
    """
    # Either the API's draft flag, or a WIP marker anywhere in the title.
    draft_flag = pull_request.get("draft", False)
    wip_in_title = bool(re.search(r"\b(WIP|wip)\b", pull_request["title"]))
    return draft_flag or wip_in_title
def _pr_author_data(pull_request: PrDict) -> Optional[Dict]:
    """
    Get data about the author of the pull request, as of the
    creation of the pull request.

    Returns None if the author is not listed in people.yaml.
    """
    people = get_people_file()
    author = pull_request["user"]["login"]
    if author not in people:
        # We don't know this person!
        return None
    # Naive datetime so it compares cleanly with people.yaml dates.
    created_at = parse_date(pull_request["created_at"]).replace(tzinfo=None)
    # Resolve the person's state as of PR creation.  (The original also did
    # a redundant `person = people[author]` lookup that was immediately
    # overwritten; it has been removed.)
    person = get_person_certain_time(people[author], created_at)
    return person
def _is_pull_request(pull_request: PrDict, kind: str) -> bool:
    """
    Is this pull request of a certain kind?
    Arguments:
        pull_request: the dict data read from GitHub.
        kind (str): either "internal" or "contractor".
    Returns:
        bool
    """
    person = _pr_author_data(pull_request)
    if person is None:
        # Unknown author: no flags can apply.
        return False
    if person.get(kind, False):
        # This person has the flag personally.
        return True
    the_orgs = get_orgs(kind)
    if person.get("institution") in the_orgs:
        # This person's institution has the flag.
        return True
    return False
def is_committer_pull_request(pull_request: PrDict) -> bool:
    """
    Was this pull request created by a core committer for this repo
    or branch?
    """
    person = _pr_author_data(pull_request)
    if person is None:
        return False
    if "committer" not in person:
        return False
    # Identify the target of the PR: org, full repo name, and base branch.
    repo = pull_request["base"]["repo"]["full_name"]
    org = repo.partition("/")[0]
    branch = pull_request["base"]["ref"]
    commit_rights = person["committer"]
    if not commit_rights:
        return False
    # Rights may be granted at org, repo, or branch granularity.
    if "orgs" in commit_rights:
        if org in commit_rights["orgs"]:
            return True
    if "repos" in commit_rights:
        if repo in commit_rights["repos"]:
            return True
    if "branches" in commit_rights:
        for access_branch in commit_rights["branches"]:
            # A trailing "*" makes the entry a branch-name prefix wildcard.
            if access_branch.endswith("*") and branch.startswith(access_branch[:-1]):
                return True
            elif branch == access_branch:
                return True
    return False
def pull_request_has_cla(pull_request: PrDict) -> bool:
    """Does this pull request have a valid CLA?"""
    person = _pr_author_data(pull_request)
    if person is None:
        # Unknown author: no agreement on file.
        return False
    # Any agreement value other than "none" counts as a valid CLA.
    agreement = person.get("agreement", "none")
    return agreement != "none"
def get_blended_project_id(pull_request: PrDict) -> Optional[int]:
    """
    Find the blended project id in the pull request, if any.

    Returns:
        An int ("[BD-5]" returns 5, for example) found in the pull request, or None.
    """
    # Look for a "[BD-<number>]" marker (whitespace-tolerant) in the title.
    match = re.search(r"\[\s*BD\s*-\s*(\d+)\s*\]", pull_request["title"])
    return int(match[1]) if match else None
@memoize
def github_whoami():
    # Identity of the authenticated (bot) user; memoized for the process.
    self_resp = retry_get(get_github_session(), "/user")
    self_resp.raise_for_status()
    return self_resp.json()
def get_bot_username() -> str:
    """What is the username of the bot?"""
    me = github_whoami()
    return me["login"]
def get_bot_comments(prid: PrId) -> Iterable[PrCommentDict]:
    """Find all the comments the bot has made on a pull request."""
    my_username = get_bot_username()
    comment_url = f"/repos/{prid.full_name}/issues/{prid.number}/comments"
    for comment in paginated_get(comment_url, session=get_github_session()):
        # I only care about comments I made
        if comment["user"]["login"] == my_username:
            yield comment
def get_jira_issue_key(pr: Union[PrId, PrDict]) -> Optional[str]:
    """Find mention of a Jira issue number in bot-authored comments."""
    # Accept either an already-built PrId or a raw PR dict.
    if isinstance(pr, PrDict):
        prid = PrId.from_pr_dict(pr)
    else:
        prid = pr
    for comment in get_bot_comments(prid):
        # search for the first occurrence of a JIRA ticket key in the comment body
        match = re.search(r"\b([A-Z]{2,}-\d+)\b", comment["body"])
        if match:
            return match.group(0)
    return None
| StarcoderdataPython |
328501 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.residual_prediction_net import ResidualPredictionNet
torch.manual_seed(42)
class VGG(nn.Module):
    '''
    VGG-16 convolutional feature extractor with a configurable classifier
    head: a plain linear MLP ("linear") or a ResidualPredictionNet
    (any other classifier_type).
    '''
    def __init__(self, classifier_type, lr=1e-3, device="cpu", hidden=200, output=10, groups=5, depth=5, batch_norm=True):
        """
        Args:
            classifier_type: "linear" for an MLP head; anything else uses
                the residual prediction head.
            lr: Adam learning rate.
            device: "cpu" or "cuda".
            hidden: hidden width of the classifier head.
            output: number of output classes.
            groups, depth, batch_norm: ResidualPredictionNet options
                (ignored for the linear head).
        """
        super(VGG, self).__init__()
        # Parenthesized conditional expression: the original wrote
        # `"VGG 16" + "..." if cond else "..."`, which parses as
        # `("VGG 16" + "...") if cond else "..."` and silently dropped the
        # "VGG 16" prefix for the residual classifier.
        self.name = "VGG 16" + (" with a linear classifier"
                                if classifier_type == "linear"
                                else " with a residual classifier")
        self.classifier_type = classifier_type
        # Define the VGG architecture (five conv stages, each ending in 2x2
        # max-pooling, so 32x32 inputs reduce to 1x1x512).
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(128, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(256, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Initialize the chosen classifier head.
        if classifier_type == "linear":
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nn.Linear(512, hidden),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(hidden, hidden),
                nn.ReLU(True),
                nn.Linear(hidden, output)
            )
        else:
            self.classifier = ResidualPredictionNet(
                input_dim=512, hidden_dim=hidden,
                output_dim=output, residual_depth=depth,
                groups=groups, batch_norm=batch_norm,
                dropout=True, device=device)
        # He-style initialization for all conv weights; biases to zero.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                m.bias.data.zero_()
        # Move to GPU (with DataParallel over the feature extractor) if asked.
        if device == "cuda":
            self.features = torch.nn.DataParallel(self.features)
            self.cuda()
            if classifier_type == "residual":
                self.classifier.cuda()
        self.optimizer = torch.optim.Adam(self.parameters(), lr=lr, weight_decay=1e-3)
        self.total_params = sum(p.numel() for p in self.classifier.parameters() if p.requires_grad)
        print("Initiated a VGG net with ", self.total_params, " classifier parameters!\n")

    def forward(self, x):
        # Features -> flatten to (batch, 512) -> classifier logits.
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
4825216 | import pandas as pd
import argparse
import PyFloraBook.in_out.data_coordinator as dc
# ---------------- GLOBALS ----------------
WEBSITE = 'OregonFlora'
OUTPUT_SUFFIX = 'species'
# ---------------- INPUT ----------------
# Parse arguments
parser = argparse.ArgumentParser(
    description='Count the observed species'
)
parser.add_argument(
    "-f", "--families", nargs='+',
    help="Names of the families to be analyzed."
)
args = parser.parse_args()
families = args.families
subfolder = WEBSITE
# Raw observations in, cleansed per-species counts out.
raw_data_folder = dc.locate_raw_observations_folder() / subfolder
cleansed_data_folder = dc.locate_cleansed_data_folder() / subfolder
for family in families:
    input_file_name = family + ".csv"
    input_file_path = raw_data_folder / input_file_name
    try:
        data = pd.read_csv(str(input_file_path), encoding="ISO-8859-1")
    except FileNotFoundError:
        # Missing family file: report and move on to the next family.
        print(family, "not found!")
        continue
    # Sanity check: make sure the file contains data for the right family
    if data['family'][0] != family:
        print(data['family'][0], "!=", family)
        continue
    # Get rid of the weird "image" rows
    data = data[data['data_type'] != 'image']
    # Get rid of any data from the wrong counties
    excluded_counties = [
        "Baker", "Crook", "Gilliam", "Grant", "Harney", "Lake", "Malheur",
        "Morrow", "Sherman", "Umatilla", "Union", "Wallowa", "Wheeler"
    ]
    for county in excluded_counties:
        data = data[data['county'] != county]
    all_observed = data['taxon'].to_frame()
    # ---------------- ANALYSIS ----------------
    # Fix troublesome hybrid symbols
    all_observed = all_observed['taxon'].str.replace('× ', '×').to_frame()
    # Split into columns to get rid of variety information
    all_observed = all_observed['taxon'].str.split(' ', expand=True, n=2)
    if len(all_observed.columns) > 2:
        all_observed.drop(2, axis=1, inplace=True)
    # Rebuild "Genus species" binomials from the first two name parts.
    all_observed['binomial'] = all_observed[0] + ' ' + all_observed[1]
    # This is the number of observations of each species
    species_counts = all_observed['binomial'].value_counts().to_frame()
    species_counts.rename(columns={'binomial': 'count'}, inplace=True)
    species_counts.index.name = 'binomial'
    species_counts.sort_index(inplace=True)
    # ---------------- OUTPUT ----------------
    # Summary line: family, total observations, distinct species.
    print(family, '\t', len(all_observed), '\t', len(species_counts))
    cleansed_data_file_name = family + '_' + OUTPUT_SUFFIX + '.csv'
    species_counts.to_csv(str(cleansed_data_folder / cleansed_data_file_name))
| StarcoderdataPython |
12859514 | from dataclasses import dataclass, field
from typing import List
from itertools import chain
@dataclass
class Word:
    """A dictionary entry with its definitions and related word lists."""
    word: str
    wordtype: str
    shortdef: List[str] = field(default_factory=list)
    synonyms: List[str] = field(default_factory=list)
    antonyms: List[str] = field(default_factory=list)
    stems: List[str] = field(default_factory=list)
    @classmethod
    def from_response(cls, r: dict) -> object:
        """Build a Word from a dictionary-API response payload."""
        meta = r["meta"]
        # syns/ants arrive as lists of lists; flatten each to one list.
        return cls(
            word=meta["id"],
            wordtype=r["fl"],
            shortdef=r["shortdef"],
            synonyms=list(chain.from_iterable(meta["syns"])),
            antonyms=list(chain.from_iterable(meta["ants"])),
            stems=meta["stems"],
        )
| StarcoderdataPython |
1911635 | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Type
from cpo.lib.fyre.response_managers.json_response_manager import (
AbstractJSONResponseManager,
)
from cpo.lib.fyre.types.default_success_response import DefaultSuccessResponse
class DefaultResponseManager(AbstractJSONResponseManager):
    """JSON response manager for REST endpoints (PUT) returning a generic
    success response"""
    # All hooks delegate to the base class's default implementations,
    # except the response type, which is the generic success payload.
    # override
    def get_error_message(self, json_error_response: Any) -> Optional[str]:
        return self.get_default_error_message(json_error_response)
    # override
    def get_error_response_schema(self) -> Optional[Any]:
        return self.get_default_error_response_schema()
    # override
    def get_response_schema(self) -> Any:
        return self.get_default_success_response_schema()
    # override
    def get_response_type(self) -> Type:
        return DefaultSuccessResponse
| StarcoderdataPython |
9712573 | <reponame>vitruvianscience/OpenDeep
"""
This module defines isotropic gaussian log-likelihood loss.
"""
# standard libraries
import logging
# third party libraries
from theano.tensor import (log as Tlog, sqrt)
from numpy import pi
# internal references
from opendeep.optimization.loss import Loss
log = logging.getLogger(__name__)
class IsotropicGaussianLL(Loss):
    """
    This takes the negative log-likelihood of an isotropic Gaussian with estimated mean and standard deviation.
    Useful for continuous-valued costs.
    .. note::
        Use this cost, for example, on Generative Stochastic Networks when the input/output is continuous
        (alternative to mse cost).
    """
    def __init__(self, inputs, targets, std_estimated):
        """
        Initializes the :class:`IsotropicGaussianLL` loss function.
        Parameters
        ----------
        inputs : theano symbolic expression
            The symbolic tensor (or compatible) representing the means of the distribution estimated.
            In the case of Generative Stochastic Networks, for example, this would be the final reconstructed output x'.
        targets : theano symbolic variable
            The symbolic tensor (or compatible) target truth to compare the means_estimated against.
        std_estimated : theano symbolic expression
            The estimated standard deviation (sigma).
        """
        super(IsotropicGaussianLL, self).__init__(inputs=inputs, targets=targets, std_estimated=std_estimated)
    def get_loss(self):
        """
        Returns
        -------
        theano expression
            The loss function.
        """
        # The following definition came from the Conditional_nade project
        # the loglikelihood of isotropic Gaussian with
        # estimated mean and std
        std_estimated = self.args.get('std_estimated')
        target = self.targets[0]
        input = self.inputs[0]
        # A: squared-error exponent term; B: log of the normalization constant.
        A = -((target - input) ** 2) / (2 * (std_estimated ** 2))
        B = -Tlog(std_estimated * sqrt(2 * pi))
        # Sum the per-dimension log-likelihoods, then average over the batch.
        LL = (A + B).sum(axis=1).mean()
        # Negated so that minimizing the loss maximizes the likelihood.
        return -LL
| StarcoderdataPython |
3444519 | import tensorflow as tf
def createFullToken(voxel_shape, TOKEN):
    """Return a tensor of shape `voxel_shape` filled with TOKEN."""
    return tf.fill(voxel_shape, TOKEN)
| StarcoderdataPython |
3511422 | from __future__ import absolute_import
from __future__ import unicode_literals
import six
from corehq.util.translation import localize
from custom.ilsgateway.tanzania.exceptions import InvalidProductCodeException
from custom.ilsgateway.tanzania.handlers.generic_stock_report_handler import GenericStockReportHandler
from custom.ilsgateway.tanzania.handlers.ils_stock_report_parser import Formatter
from custom.ilsgateway.tanzania.reminders import STOCKOUT_CONFIRM, INVALID_PRODUCT_CODE, STOCKOUT_HELP
class StockoutFormatter(Formatter):
    # Rewrites an incoming stockout message into a zero-quantity SOH
    # (stock-on-hand) report: "stockout p1 p2" -> "soh p1 0 p2 0".
    def format(self, text):
        content = text.split(' ', 1)[1]
        products_codes = content.split()
        return 'soh {}'.format(' 0 '.join(products_codes)) + ' 0'
class StockoutHandler(GenericStockReportHandler):
    # SMS handler for stockout keyword messages.
    formatter = StockoutFormatter
    def help(self):
        # Keyword sent with no products: reply with usage instructions.
        self.respond(STOCKOUT_HELP)
        return True
    def get_message(self, data):
        # Confirmation message, localized to the reporting user's language.
        with localize(self.user.get_language_code()):
            return STOCKOUT_CONFIRM % {
                'contact_name': self.verified_contact.owner.full_name,
                'product_names': self.msg.text.split(' ', 1)[1],
                'facility_name': self.sql_location.name
            }
    def on_success(self):
        pass
    def on_error(self, data):
        # Only invalid product codes get a reply; other errors are ignored.
        for error in data['errors']:
            if isinstance(error, InvalidProductCodeException):
                self.respond(INVALID_PRODUCT_CODE, product_code=six.text_type(error))
| StarcoderdataPython |
1774224 | """
Ce script s'occupe de tout ce qui est lié au vocalisme.
"""
class VocalismeNonTonique:
    """Transformations applied to unstressed (atonic) vowels."""
    def __init__(self):
        return
    # NOTE(review): this method has no `self` parameter — when called on an
    # instance, the instance itself is bound to `object` (which also shadows
    # the builtin).  It only behaves as intended when called on the class
    # with an explicit string argument; confirm with callers before fixing.
    def vocalisme_atone(object):
        # Only the FIRST matching vowel pattern is handled; once one branch
        # matches, the remaining elif branches are skipped.  Every vowel is
        # deleted except "A", which becomes "e".
        changements = ''
        if ("I") in object:
            changements = object.replace("I", "")
        elif ("AU") in object:
            changements = object.replace("AU", "")
        elif ("E") in object:
            changements = object.replace("E", "")
        elif ("U") in object:
            changements = object.replace("U", "")
        elif ("A") in object:
            changements = object.replace("A", "e")
        elif ("O") in object:
            changements = object.replace("O", "")
        return changements
| StarcoderdataPython |
5084720 | # Copyright (c) 2010 <NAME>
from __future__ import absolute_import, with_statement, division
from twisted.trial import unittest
from .. import plugin
from . import plugins
def identity(*args, **kwargs):
    # Echo back positional and keyword arguments; used below as a stand-in
    # callable target for CheckerFactory.
    return args, kwargs
class PluginTest(unittest.TestCase):
    def testGetCheckerFactories(self):
        # At least one checker factory should be discoverable.
        self.failUnless(list(plugin.getCheckerFactories()))
    def testCheckerFactory(self):
        # The factory should forward call arguments to its dotted-path target.
        factory = plugin.CheckerFactory('aname',
            'opm.test.test_plugin.identity')
        self.assertEqual(((1,), dict(a=2)), factory(1, a=2))
    def testDuplicate(self):
        # Duplicate checker names in the plugins package raise KeyError.
        self.assertRaises(KeyError, plugin.getCheckerFactories, plugins)
| StarcoderdataPython |
5112032 | <filename>Python/sparse-matrix-multiplication.py
# Time: O(m * n * l), A is m x n matrix, B is n x l matrix
# Space: O(m * l)
class Solution(object):
    def multiply(self, A, B):
        """
        :type A: List[List[int]]
        :type B: List[List[int]]
        :rtype: List[List[int]]

        Sparse-aware matrix product: inner work is skipped entirely
        whenever A[i][k] is zero.
        """
        rows_a, cols_b = len(A), len(B[0])
        product = [[0] * cols_b for _ in range(rows_a)]
        for i, row in enumerate(A):
            for k, a_ik in enumerate(row):
                if not a_ik:
                    continue
                b_row = B[k]
                for j in range(cols_b):
                    product[i][j] += a_ik * b_row[j]
        return product
| StarcoderdataPython |
1961878 | <reponame>MatheusMullerGit/app_empresas<gh_stars>1-10
from django.db import models
from django.urls import reverse
from apps.empresas.models import Empresa
class Departamento(models.Model):
    # Department name plus its owning company.  PROTECT prevents deleting a
    # company that still has departments.
    nome = models.CharField(max_length=70)
    empresa = models.ForeignKey(Empresa, on_delete=models.PROTECT)
    def get_absolute_url(self):
        # Redirect target used by create/update views.
        return reverse('list_departamentos')
    def __str__(self):
        return self.nome
| StarcoderdataPython |
4887822 | from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
# Wrap verilator as a coala linter: warnings/errors are parsed from stderr
# with the severity/line/message regex below.
@linter(executable='verilator',
        output_format='regex',
        use_stderr=True,
        output_regex=r'\%(?:(?P<severity>Error|Warning.*?).*?):'
                     r'.+?:(?P<line>.+?): (?P<message>.+)')
class VerilogLintBear:
    """
    Analyze Verilog code using ``verilator`` and checks for all lint
    related and code style related warning messages. It supports the
    synthesis subset of Verilog, plus initial statements, proper
    blocking/non-blocking assignments, functions, tasks.
    It also warns about unused code when a specified signal is never sinked,
    and unoptimized code due to some construct, with which the
    optimization of the specified signal or block is disabled.
    This is done using the ``--lint-only`` command. For more information visit
    <http://www.veripool.org/projects/verilator/wiki/Manual-verilator>.
    """
    LANGUAGES = {'Verilog'}
    # How to install verilator per package manager (None = unavailable).
    REQUIREMENTS = {
        DistributionRequirement(
            apt_get='verilator',
            brew=None,
            dnf='verilator',
            portage=None,
            yum='verilator',
            zypper='verilator',
        ),
    }
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'<EMAIL>'}
    LICENSE = 'AGPL-3.0'
    ASCIINEMA_URL = 'https://asciinema.org/a/45275'
    CAN_DETECT = {'Formatting', 'Code Simplification', 'Syntax', 'Unused Code'}
    @staticmethod
    def create_arguments(filename, file, config_file):
        # Lint only; no code generation or simulation.
        return '--lint-only', filename
| StarcoderdataPython |
3330784 | import io
import os
import unittest
import chess.pgn
from puzzlemaker.puzzle_finder import find_puzzle_candidates
from puzzlemaker.analysis import AnalysisEngine
def pgn_file_path(pgn_filename) -> io.TextIOWrapper:
    # Open a PGN fixture next to the test directory; the caller is
    # responsible for closing the returned file handle.
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    return open(os.path.join(cur_dir, '..', 'fixtures', pgn_filename))
class TestPuzzleFinder(unittest.TestCase):
    # NOTE(review): these classmethods name their first parameter `self`,
    # but it receives the class (cls) — works, just misleadingly named.
    @classmethod
    def setUpClass(self):
        # Start the shared analysis engine once for the whole test class.
        AnalysisEngine.instance()
    @classmethod
    def tearDownClass(self):
        AnalysisEngine.quit()
    def test_finding_blunder(self):
        with pgn_file_path("carlsen-anand-blunder.wc2014.pgn") as f:
            game = chess.pgn.read_game(f)
        puzzles = find_puzzle_candidates(game, scan_depth=6)
        found_blunder = False
        # Expected puzzle: position before the known blunder, and the move.
        fen = '6rr/1k3p2/1pb1p1np/p1p1P2R/2P3R1/2P1B3/P1B2PP1/2K5 w - - 4 26'
        move = 'c1d2'
        for puzzle in puzzles:
            if (puzzle.initial_board.fen() == fen
                    and puzzle.initial_move.uci() == move):
                found_blunder = True
        self.assertTrue(found_blunder)
| StarcoderdataPython |
4802030 | """
Same as my_first_test.py, but without the asserts.
"""
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
    def test_basic(self):
        # Search the xkcd store, then exercise navigation on xkcd.com
        # (same flow as my_first_test.py, without assertions).
        self.open("https://store.xkcd.com/search")
        self.type('input[name="q"]', "xkcd book\n")
        self.open("https://xkcd.com/353/")
        self.click('a[rel="license"]')
        self.go_back()
        self.click_link_text("About")
| StarcoderdataPython |
class Solution:
    def canIWin(self, max_num: int, desiredTotal: int) -> bool:
        """Decide whether the first player can force a win in the "100 game":
        players alternately pick distinct integers from 1..max_num, and
        whoever pushes the running total to desiredTotal wins.
        """
        # The first player wins outright with a single pick.
        if desiredTotal <= max_num:
            return True
        # Even using every number the goal is unreachable: nobody can win.
        if max_num * (max_num + 1) // 2 < desiredTotal:
            return False
        # memo: (running total, bitmask of used numbers) -> mover wins?
        memo = {}

        def mover_wins(total, used_mask):
            if total >= desiredTotal:
                # The previous player already reached the goal.
                return False
            key = (total, used_mask)
            if key not in memo:
                result = False
                for pick in range(1, max_num + 1):
                    bit = 1 << pick
                    if used_mask & bit:
                        continue
                    # Winning move: the opponent loses from the next state.
                    if not mover_wins(total + pick, used_mask | bit):
                        result = True
                        break
                memo[key] = result
            return memo[key]

        return mover_wins(0, 0)
1913537 | <filename>unittests/test_Grapher.py<gh_stars>10-100
from dnnviewer.layers.Dense import Dense
from dnnviewer.Grapher import Grapher
import numpy as np
class TestGrapher:
    def test_clear_layers(self):
        # Fresh grapher starts empty.
        grapher = Grapher()
        assert len(grapher.layers) == 0
        assert grapher.structure_props['num_dense'] == 0
        assert grapher.structure_props['num_convo2d'] == 0
        # Adding a dense layer updates both the layer list and the counters.
        grads = weights = np.array([[1]])
        layer = Dense('test_1', '', 1, weights, grads)
        grapher.add_layer(layer)
        assert len(grapher.layers) == 1
        assert grapher.structure_props['num_dense'] == 1
        assert grapher.structure_props['num_convo2d'] == 0
        # reset() must return the grapher to its initial empty state.
        grapher.reset()
        assert len(grapher.layers) == 0
        assert grapher.structure_props['num_dense'] == 0
        assert grapher.structure_props['num_convo2d'] == 0
| StarcoderdataPython |
1842436 | <reponame>osoco/better-ways-of-thinking-about-software
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('email_marketing', '0004_emailmarketingconfiguration_welcome_email_send_delay'),
    ]
    # Adds a configurable timeout (in seconds) for waiting on Sailthru
    # cookie values during user registration.
    operations = [
        migrations.AddField(
            model_name='emailmarketingconfiguration',
            name='user_registration_cookie_timeout_delay',
            field=models.FloatField(default=1.5, help_text='The number of seconds to delay/timeout wait to get cookie values from sailthru.'),
        ),
    ]
| StarcoderdataPython |
4979198 | #!/usr/bin/env python3
import sys
def twos():
    """Walk powers of two up to 2**67 and, for each, print comparisons
    against two fixed constants, their difference and their sum."""
    # 16406297185095647658 - 8579294738944438520
    # 7827002446151209138
    cval = 16406297185095647658
    sval = 8579294738944438520
    dval = cval - sval
    sum1 = cval + sval
    val1 = 1
    last1 = val1
    for i1 in range(68):
        print("%s %s" % (str(i1), str(val1)))
        if val1 > cval:
            print("  cval  = %s" % str(cval))
            cval1 = val1 - cval
            print("  cval1 = %s" % str(cval1))
            cval2 = cval + last1
            print("  cval2 = %s" % str(cval2))
        if val1 > sval:
            print("  sval  = %s" % str(sval))
            sval1 = val1 - sval
            print("  sval1 = %s" % str(sval1))
            sval2 = sval + last1
            print("  sval2 = %s" % str(sval2))
            sval3 = val1 + sval
            print("  sval3 = %s" % str(sval3))
        if val1 > dval:
            print("  dval  = %s" % str(dval))
            dval1 = val1 - dval
            print("  dval1 = %s" % str(dval1))
            dval2 = dval + last1
            print("  dval2 = %s" % str(dval2))
        # NOTE(review): this guard repeats `val1 > sval`; the sum1-related
        # body below suggests `val1 > sum1` was intended — confirm.
        if val1 > sval:
            print("  sum1  = %s" % str(sum1))
            sum11 = val1 - sum1
            print("  sum11 = %s" % str(sum11))
            sum12 = sum1 + last1
            print("  sum12 = %s" % str(sum12))
            sum13 = val1 + sum1
            print("  sum13 = %s" % str(sum13))
            # NOTE(review): sum14 duplicates sum13 exactly — likely a
            # copy-paste remnant.
            sum14 = val1 + sum1
            print("  sum14 = %s" % str(sum14))
            sum15 = sum1 - val1
            print("  sum15 = %s" % str(sum15))
        last1 = val1
        val1 *= 2
def main():
    # Entry point: print a marker, then run the powers-of-two dump.
    print("main")
    twos()
main()
print("")
sys.stderr.write("\n")
| StarcoderdataPython |
8050354 | <reponame>LarmIg/Algoritmos-Python
# Compute and print the volume of an oil can using the formula
# VOLUME <- 3.14159 * R^2 * HEIGHT.
Alt = float(input('Informe a altura da lata: '))
R = float(input('informe o raio da lata: '))
volume = 3.14159 * R**2 * Alt
print('O volume da lata corresponde a: {}'.format(volume))
3333191 | <reponame>stanford-futuredata/sketchstore<filename>python/storyboard/eval.py
from typing import List, Tuple, Sequence, Mapping
import itertools
import numpy as np
from pandas import DataFrame
from storyboard.planner import WorkloadProperties, FreqGroup
from tqdm import tqdm
class StoryboardQueryExecutor:
    """Answers filtered frequency queries by summing matching segments."""
    def __init__(self, groups: List[FreqGroup]):
        self.groups = groups

    def exec_query(self, filter: Sequence):
        """Sum per-value counts over every group whose dims match `filter`.
        A None entry in `filter` acts as a wildcard for that dimension."""
        dim_count = len(filter)
        totals = dict()
        for segment in self.groups:
            mismatch = any(
                filter[d] is not None and filter[d] != segment.dims[d]
                for d in range(dim_count)
            )
            if mismatch:
                continue
            for value, count in segment.vals.items():
                totals[value] = totals.get(value, 0) + count
        return totals
class RawQueryExecutor:
    """Answers filtered frequency queries directly against a DataFrame."""
    def __init__(self, df: DataFrame, dim_names: Sequence[str], val_name: str):
        self.df = df
        self.dim_names = dim_names
        self.val_name = val_name

    def exec_query(self, filter: Sequence):
        """Value counts of `val_name` over rows matching `filter`.
        None entries in `filter` are wildcards for their dimension."""
        # Narrow the frame one dimension at a time instead of building a
        # single boolean mask up front.
        selected = self.df
        for d_idx in range(len(filter)):
            dim_value = filter[d_idx]
            if dim_value is not None:
                selected = selected[selected[self.dim_names[d_idx]] == dim_value]
        return dict(selected[self.val_name].value_counts())
class StoryboardVarianceEstimator:
    """Estimates query error of a storyboard summary under a random workload.

    Queries are sampled from the workload description (``wp``).  Error can be
    measured empirically against a raw executor (``eval_error``), estimated
    from stored groups (``est_error``), or computed in closed form over all
    predicate combinations (``calc_error``).
    """
    def __init__(self, wp: WorkloadProperties, seed: int):
        self.wp = wp
        self.seed = seed
        # Seed numpy's *global* RNG so sampled queries are reproducible.
        np.random.seed(self.seed)
    def sample_query(self):
        """Draw a random query: one optional predicate per dimension plus a
        time range ``(0, max_time)``.

        Each dimension is filtered with probability ``pred_weights[d]``; the
        filter value is drawn uniformly from ``range(pred_cardinalities[d])``.
        """
        p_weights = self.wp.pred_weights
        p_cards = self.wp.pred_cardinalities
        n_dims = len(p_cards)
        dim_values = []
        for dim_idx in range(n_dims):
            if np.random.uniform() < p_weights[dim_idx]:
                # filter on dimension
                cur_dim_value = np.random.choice(p_cards[dim_idx])
            else:
                # not filtering on dimension
                cur_dim_value = None
            dim_values.append(cur_dim_value)
        max_time = np.random.randint(0, self.wp.max_time_segments)
        time_range = (0, max_time)
        return dim_values, time_range
    def eval_error(self, sq: StoryboardQueryExecutor, rq: RawQueryExecutor, x_to_track, n_trials: int = 3):
        """Empirical error: run ``n_trials`` sampled queries against both the
        storyboard (``sq``) and the raw data (``rq``) and compare counts of
        the values in ``x_to_track``.

        Returns the root of the mean per-trial MSE normalized by the squared
        trial total (a relative error measure).
        """
        trial_mses = []
        trial_totals = []
        print("Evaluating")
        for trail_idx in tqdm(range(n_trials)):
            filter, _ = self.sample_query()
            n_dims = len(filter)
            sb_res = sq.exec_query(filter)
            raw_res = rq.exec_query(filter)
            # Missing keys count as zero occurrences.
            sb_counts = np.array([sb_res.get(i,0) for i in x_to_track])
            raw_counts = np.array([raw_res.get(i,0) for i in x_to_track])
            trial_total = sum(raw_res.values())
            trial_errors = (raw_counts - sb_counts)**2
            trial_mses.append(np.mean(trial_errors))
            trial_totals.append(trial_total)
        trial_mses = np.array(trial_mses)
        trial_totals = np.array(trial_totals)
        # print(np.sqrt(trial_mses))
        # print(trial_totals)
        return np.sqrt(np.mean(trial_mses/trial_totals**2))
    def est_error(self, groups: List[FreqGroup], n_trials: int = 3):
        """Estimate the same normalized error as ``eval_error`` from group
        metadata alone, without touching the raw data.

        For each sampled query, every matching group contributes
        ``.25 * cur_err**2`` to the trial variance.
        """
        trial_totals = []
        trial_mses = []
        for trail_idx in range(n_trials):
            cur_query_dim_values,_ = self.sample_query()
            n_dims = len(cur_query_dim_values)
            trial_total = 0
            trial_mse = 0
            # print("trial {}: {}".format(trail_idx, cur_query_dim_values))
            for cur_segment in groups:
                # A group matches when every filtered dimension agrees.
                matches = all((
                    cur_query_dim_values[i] is None or cur_query_dim_values[i] == cur_segment.dims[i]
                    for i in range(n_dims)
                ))
                if matches:
                    trial_total += cur_segment.size
                    # NOTE(review): this per-group error term uses the group's
                    # minimum value count, while calc_error below uses
                    # size/len(vals) (the commented line) — confirm which is intended.
                    cur_err = min(cur_segment.vals.values())
                    # cur_err = cur_segment.size / len(cur_segment.vals.keys())
                    trial_mse += .25*cur_err**2
            trial_mses.append(trial_mse)
            trial_totals.append(trial_total)
        trial_mses = np.array(trial_mses)
        trial_totals = np.array(trial_totals)
        # print(np.sqrt(trial_mses))
        # print(trial_totals)
        return np.sqrt(np.mean(trial_mses/(trial_totals**2)))
    def calc_error(self, groups: List[FreqGroup]):
        """Closed-form error: iterate over every subset of filtered dimensions
        (a "dim set"), compute its expected normalized MSE, and weight it by
        the workload probability of exactly that predicate combination.
        """
        num_dims = len(groups[0].dims)
        pred_cardinalities = self.wp.pred_cardinalities
        pred_weights = self.wp.pred_weights
        cs_dims = []
        cs_errors = []
        cs_weights = []
        for num_predicates in range(num_dims + 1):
            dim_sets = list(itertools.combinations(range(num_dims), num_predicates))
            for dim_set in dim_sets:
                # Aggregate group sizes/variances per combination of filtered
                # dimension values (groups collapse when a dim is unfiltered).
                group_totals = dict()
                group_variances = dict()
                for cur_segment in groups:
                    idx_tuple = tuple(cur_segment.dims[i] for i in dim_set)
                    group_totals[idx_tuple] = group_totals.get(idx_tuple, 0) + cur_segment.size
                    cur_group_err = cur_segment.size / len(cur_segment.vals)
                    group_variances[idx_tuple] = (
                        group_variances.get(idx_tuple, 0)
                        + .25*cur_group_err*cur_group_err
                    )
                dim_set_mse = 0
                group_errors = dict()
                for idx_tuple in group_totals.keys():
                    cur_group_total = group_totals[idx_tuple]
                    # Normalize variance by the squared total (relative error).
                    cur_group_mse = group_variances[idx_tuple] / (cur_group_total*cur_group_total)
                    dim_set_mse += cur_group_mse
                    group_errors[idx_tuple] = np.sqrt(cur_group_mse)
                # print('dimset')
                # print(dim_set)
                # print(group_errors)
                # Average over all possible queries with this dim set.
                num_queries = np.prod([pred_cardinalities[d] for d in dim_set])
                dim_set_mse /= num_queries
                # Probability that a sampled query filters exactly these dims.
                cur_dim_set_weight = 1
                for d_idx in range(num_dims):
                    if d_idx in dim_set:
                        cur_dim_set_weight *= pred_weights[d_idx]
                    else:
                        cur_dim_set_weight *= (1 - pred_weights[d_idx])
                cs_dims.append(dim_set)
                cs_errors.append(dim_set_mse)
                cs_weights.append(cur_dim_set_weight)
        cs_errors = np.array(cs_errors)
        cs_weights = np.array(cs_weights)
        # print("evaluator:")
        # print(np.sqrt(cs_errors))
        # print(cs_weights)
        return np.sqrt(np.sum(cs_errors * cs_weights))
| StarcoderdataPython |
4981085 | <filename>docs/source/examples/bar.py
# Documentation example: build a simple bar chart with five bars, labeled
# axes, and save it to ``bar.png``.  ``Plot`` and ``Bar`` come from the
# plotting library this example documents (imported by the docs build).
plot = Plot()
bar = Bar()
# x positions 0..4 with the corresponding bar heights
bar.xValues = range(5)
bar.yValues = [2, 8, 4, 6, 5]
plot.add(bar)
plot.xLabel = "Widget ID"
plot.yLabel = "# Widgets Sold"
plot.save("bar.png")
| StarcoderdataPython |
6456055 | <filename>CytoPy/flow/gating/density.py
from .utilities import kde, check_peak, find_local_minima
from .defaults import ChildPopulationCollection
from .base import Gate, GateError
from scipy.signal import find_peaks
import pandas as pd
import numpy as np
class DensityThreshold(Gate):
    """
    Threshold gating estimated using properties of a Probability Density Function of events data as estimated
    using Gaussian Kernel Density Estimation

    Parameters
    ----------
    kde_bw: float, (default=0.01)
        Bandwidth to use for gaussian kernel density estimation
    ignore_double_pos: bool, (default=False)
        if True, in the case that multiple peaks are detected, peaks to the right of
        the highest peak will be ignored in the local minima calculation
    q: float, optional, (default=0.95)
        if only 1 peak is found, quartile gating is performed using this argument as the quartile
    std: float, optional
        alternative to quartile gating, the number of standard deviations from the mean can be used to
        determine the threshold
    peak_threshold: float, optional
        If not None, this decimal value represents what the minimum height of a peak should be relevant to the highest
        peak found (e.g. if peak_threshold=0.05, then all peaks with a height < 0.05 of the heighest peak will be
        ignored)
    kwargs:
        Gate constructor arguments (see cytopy.flow.gating.base)
    """
    def __init__(self,
                 kde_bw: float = 0.01,
                 ignore_double_pos: bool = False,
                 std: float or None = None,
                 q: float or None = 0.95,
                 peak_threshold: float or None = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.kde_bw = kde_bw
        self.ignore_double_pos = ignore_double_pos
        self.std = std
        self.q = q
        self.peak_threshold = peak_threshold
        # Down-sample once up-front; None if the parent population is small
        self.sample = self.sampling(self.data, 5000)
    def _find_peaks(self,
                    probs: np.array) -> np.array:
        """
        Internal function. Perform peak finding (see scipy.signal.find_peaks for details)

        Parameters
        -----------
        probs: Numpy.array
            array of probability estimates generated using flow.gating.utilities.kde

        Returns
        --------
        Numpy.array
            array of indices specifying location of peaks in `probs`
        """
        # Find peaks
        peaks = find_peaks(probs)[0]
        if self.peak_threshold:
            # Drop peaks whose height is below peak_threshold * max height
            peaks = check_peak(peaks, probs, self.peak_threshold)
        return peaks
    def _evaluate_peaks(self,
                        data: pd.DataFrame,
                        peaks: np.array,
                        probs: np.array,
                        xx: np.array) -> float and str:
        """
        Internal function. Given the outputs of `__find_peaks` and `__smooth` calculate the threshold to generate
        for gating. If a single peak (one population) is found use quantile or standard deviation. If multiple peaks
        are found (multiple populations) then look for region of minimum density.

        Parameters
        ----------
        data: Pandas.DataFrame
            Events data
        peaks: Numpy.array
            array of indices specifying location of peaks in `probs`
        probs: Numpy.array
            array of probability estimates generated using flow.gating.utilities.kde
        xx: Numpy.array
            array of linear space kde calculated across

        Returns
        --------
        float and str
            (threshold, method used to generate threshold)
        """
        method = ''
        threshold = None
        # Evaluate peaks
        if len(peaks) == 1:
            # 1 peak found, use quantile or standard deviation to find threshold
            if self.q:
                threshold = data[self.x].quantile(self.q, interpolation='nearest')
                method = 'Quantile'
            elif self.std:
                u = data[self.x].mean()
                s = data[self.x].std()
                threshold = u + (s * self.std)
                method = 'Standard deviation'
            else:
                # No std or q provided so using default of 95th quantile
                threshold = data[self.x].quantile(0.95, interpolation='nearest')
                method = 'Quantile'
        if len(peaks) > 1:
            # Multiple peaks found, find the local minima between pair of highest peaks
            if self.ignore_double_pos:
                # Merge peaks of 'double positive' populations
                probs_peaks = probs[peaks]
                highest_peak = np.where(probs_peaks == max(probs_peaks))[0][0]
                if highest_peak < len(peaks):
                    # Discard peaks to the right of the highest peak
                    peaks = peaks[:highest_peak + 1]
            # Merging of peaks if ignore_double_pos might result in one peak
            if len(peaks) > 1:
                threshold = find_local_minima(probs, xx, peaks)
                method = 'Local minima between pair of highest peaks'
            else:
                threshold = data[self.x].quantile(0.95, interpolation='nearest')
                method = 'Quantile'
        # Note: if no peaks were found at all, threshold remains None and the
        # calling gate method raises a GateError.
        return threshold, method
    def _calc_threshold(self,
                        data: pd.DataFrame,
                        x: str) -> float and str:
        """
        Internal function Wrapper for calculating threshold for gating.

        data: Pandas.DataFrame
            Events data
        x: str
            feature of interest for threshold calculation

        Returns
        --------
        float and str
            (threshold, method used to generate threshold)
        """
        probs, xx = kde(data, x, self.kde_bw)
        peaks = self._find_peaks(probs)
        return self._evaluate_peaks(data, peaks, probs, xx)
    def gate_1d(self) -> ChildPopulationCollection:
        """
        Perform density based threshold gating in 1 dimensional space using the properties of a Probability
        Density Function of the events data as estimated using Gaussian Kernel Density Estimation.

        Returns
        --------
        ChildPopulationCollection
            Updated child population collection
        """
        # If parent is empty just return the child populations with empty index array
        if self.empty_parent:
            return self.child_populations
        if self.sample is not None:
            threshold, method = self._calc_threshold(self.sample, self.x)
        else:
            threshold, method = self._calc_threshold(self.data, self.x)
        # Explicit None check: a legitimate threshold of exactly 0.0 (possible
        # with transformed data spanning negative values) must not be
        # mistaken for a missing threshold.
        if threshold is None:
            raise GateError('Unexpected error whilst performing threshold gating. Calculated threshold is Null.')
        # Update child populations
        self.child_update_1d(threshold, method)
        return self.child_populations
    def gate_2d(self) -> ChildPopulationCollection:
        """
        Perform density based threshold gating in 2 dimensional space using the properties of a Probability
        Density Function of the events data as estimated using Gaussian Kernel Density Estimation. KDE and threshold
        calculation performed on each dimension separately.

        Returns
        --------
        ChildPopulationCollection
            Updated child population collection
        """
        # If parent is empty just return the child populations with empty index array
        if self.empty_parent:
            return self.child_populations
        if not self.y:
            raise GateError('For a 2D threshold gate a value for `y` is required')
        if self.sample is not None:
            x_threshold, x_method = self._calc_threshold(self.sample, self.x)
            y_threshold, y_method = self._calc_threshold(self.sample, self.y)
        else:
            x_threshold, x_method = self._calc_threshold(self.data, self.x)
            y_threshold, y_method = self._calc_threshold(self.data, self.y)
        # Explicit None checks (see gate_1d): 0.0 is a valid threshold.
        if x_threshold is None or y_threshold is None:
            raise GateError('Unexpected error whilst performing threshold gating. Calculated threshold is Null.')
        method = f'X: {x_method}, Y: {y_method}'
        self.child_update_2d(x_threshold, y_threshold, method)
        return self.child_populations
| StarcoderdataPython |
9699205 | <reponame>nitish771/pygorithm
'''
Author : <NAME> (nitish771)
25-06-21
'''
from inspect import getsource
def common_word(word1, word2):
    """Return the longest common prefix shared by ``word1`` and ``word2``."""
    prefix_chars = []
    # zip stops at the shorter word, so no explicit length bookkeeping needed
    for ch_a, ch_b in zip(word1, word2):
        if ch_a != ch_b:
            break
        prefix_chars.append(ch_a)
    return ''.join(prefix_chars)
def longest_prefix(words: list) -> str:
    """Return the longest common prefix of all strings in ``words``.

    Args:
        words : List of words (min lenght = 1)
    Return:
        str
    """
    if not words:
        return 'Please give me a list of words'
    if len(words) == 1:
        return words[0]
    prefix = words[0]
    for word in words:
        # Shrink the running prefix to what it shares with this word.
        matched = []
        for ch_a, ch_b in zip(prefix, word):
            if ch_a != ch_b:
                break
            matched.append(ch_a)
        prefix = ''.join(matched)
    return prefix
def get_code():
    """Return the source code of ``longest_prefix`` as a string
    (via :func:`inspect.getsource`) — presumably used by pygorithm to
    display the algorithm's implementation to the user."""
    return getsource(longest_prefix)
| StarcoderdataPython |
5155612 | import numpy as np
'''
Functions to specify how y_ex (exemplar -> outcome associations) are updated.
from_rtrv: Learning rates for exemplars are equal to retrieval strength
times a constant (lrate_par).
from_rtrv_indv_delta: Learning rates for exemplars are equal to retrieval strength
times a constant (lrate_par), and prediction errors are for each
individual exemplar rather than for common.
only_max: Only the most similar exemplar has a non-zero learning rate, which is constant.
ex_mean: Each y_ex is simply the mean of u when that exemplar is present.
'''
def from_rtrv(sim, rtrv, y, y_hat, y_lrn, y_ex, ex_counts, n_ex, n_y, sim_pars):
    """
    Learning rates for exemplars are equal to retrieval strength
    times a constant (lrate_par).

    Notes
    -----
    This (minus the 'humble teachers',
    and with retrieval strength equal to similarity) is the form of
    learning used in ALCOVE (Kruschke, 1992).
    """
    # One shared prediction error, distributed across exemplars in
    # proportion to how strongly each was retrieved.
    shared_delta = y_lrn*(y - y_hat)
    exemplar_lrates = sim_pars['lrate_par']*rtrv
    return np.outer(exemplar_lrates, shared_delta)
from_rtrv.par_names = ['lrate_par']
def from_rtrv_indv_delta(sim, rtrv, y, y_hat, y_lrn, y_ex, ex_counts, n_ex, n_y, sim_pars):
    """
    Learning rates for exemplars are equal to retrieval strength
    times a constant (lrate_par), and prediction errors are computed
    per individual exemplar rather than from the common prediction.
    """
    exemplar_lrates = sim_pars['lrate_par']*rtrv
    # Each exemplar moves toward the observed outcome from its *own*
    # current prediction (row of y_ex); broadcasting replaces the loop.
    per_ex_delta = y[None, :] - y_ex
    return exemplar_lrates[:, None]*per_ex_delta
from_rtrv_indv_delta.par_names = ['lrate_par']
def only_max(sim, rtrv, y, y_hat, y_lrn, y_ex, ex_counts, n_ex, n_y, sim_pars):
    """
    Only the most similar exemplar has a non-zero learning rate, which is constant.

    Notes
    -----
    This is the form of learning assumed by Ghirlanda (2015) when showing the equivalence
    between exemplar and RW family models.
    """
    # Zero learning rate everywhere except the single most similar exemplar.
    winner = np.argmax(sim)
    lrate = np.zeros(n_ex)
    lrate[winner] = sim_pars['lrate_par']
    return np.outer(lrate, y_lrn*(y - y_hat))
only_max.par_names = ['lrate_par']
def ex_mean(sim, rtrv, y, y_hat, y_lrn, y_ex, ex_counts, n_ex, n_y, sim_pars):
    """
    Each y_ex is simply the mean of u when that exemplar is present.

    Notes
    -----
    Combined with the right type of retrieval function, this is equivalent
    to instance based learning: instances with the same cues (x)
    are simply grouped together as a single exemplar.
    One can add a fixed initial value ('nu') to ex_counts.
    """
    current = np.argmax(sim)
    # Running-mean update: a step of 1/(count + nu) moves the matched
    # exemplar's prediction toward the newly observed outcome.
    step = 1/(ex_counts[current] + sim_pars['nu'])
    lrate = np.zeros(n_ex)
    lrate[current] = step
    return np.outer(lrate, y_lrn*(y - y_ex[current]))
ex_mean.par_names = ['nu']
51739 | <filename>components/gbf.py
import ssl
import re
import json
import zlib
from urllib import request
from datetime import datetime, timedelta
# ----------------------------------------------------------------------------------------------------------------
# GBF Component
# ----------------------------------------------------------------------------------------------------------------
# This component is the interface with Granblue Fantasy
#
# IMPORTANT
# Documentation will be limited on purpose to avoid possible misuses from people reading this
# ----------------------------------------------------------------------------------------------------------------
class GBF():
    """Interface with the Granblue Fantasy web game.

    Wraps HTTP requests to the game servers (with placeholder substitution
    for version, timestamps and user id) and manages the accounts stored in
    the bot's save data.  Each stored account is a list:
    [uid, cookie, user_agent, status, unknown_counter, last_refresh].
    """
    def __init__(self, bot):
        self.bot = bot
        self.data = None # bound later by init(), once the bot's data component exists
        self.ssl = ssl.create_default_context() # not sure if needed
        self.vregex = re.compile("Game\.version = \"(\d+)\";") # for the gbf version check
    def init(self):
        """Late initialization: bind the bot's data component."""
        self.data = self.bot.data
    def request(self, url, **options):
        """Send an HTTP request to the game and return the response body.

        Placeholders in ``url`` (VER, TS1, TS2, ID, PARAMS) are substituted
        before sending.  Recognized ``options``: no_base_headers, headers,
        account (index into stored accounts), force_down, check (fetch live
        version instead of saved one), payload (JSON POST body), decompress
        (gzip body), load_json.

        Returns the body (possibly decompressed / JSON-decoded), "Down" or
        "Maintenance" for unavailable states, or None on any error.
        """
        try:
            data = None
            headers = {}
            if not options.get('no_base_headers', False):
                # Default headers mimicking the in-game browser client
                headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
                headers['Accept-Encoding'] = 'gzip, deflate'
                headers['Accept-Language'] = 'en'
                headers['Connection'] = 'close'
                headers['Host'] = 'game.granbluefantasy.jp'
                headers['Origin'] = 'http://game.granbluefantasy.jp'
                headers['Referer'] = 'http://game.granbluefantasy.jp/'
            if "headers" in options:
                headers = {**headers, **options["headers"]}
            id = options.get('account', None)
            if id is not None:
                acc = self.get(id)
                # status 2 marks the account as down unless forced
                if not options.get('force_down', False) and acc[3] == 2: return "Down"
            if options.get('check', False):
                ver = self.version()
            else:
                ver = self.data.save['gbfversion']
                url = url.replace("PARAMS", "_=TS1&t=TS2&uid=ID")
            if ver == "Maintenance": return "Maintenance"
            elif ver is not None:
                url = url.replace("VER", "{}".format(ver))
            # Millisecond timestamps used by the game's cache-busting params
            ts = int(datetime.utcnow().timestamp() * 1000)
            url = url.replace("TS1", "{}".format(ts))
            url = url.replace("TS2", "{}".format(ts+300))
            if id is not None:
                if ver is None or acc is None:
                    return None
                url = url.replace("ID", "{}".format(acc[0]))
                if 'Cookie' not in headers: headers['Cookie'] = acc[1]
                if 'User-Agent' not in headers: headers['User-Agent'] = acc[2]
                if 'X-Requested-With' not in headers: headers['X-Requested-With'] = 'XMLHttpRequest'
                if 'X-VERSION' not in headers: headers['X-VERSION'] = ver
            payload = options.get('payload', None)
            if payload is None: req = request.Request(url, headers=headers)
            else:
                if not options.get('no_base_headers', False) and 'Content-Type' not in headers: headers['Content-Type'] = 'application/json'
                if 'user_id' in payload and payload['user_id'] == "ID": payload['user_id'] = acc[0]
                req = request.Request(url, headers=headers, data=json.dumps(payload).encode('utf-8'))
            url_handle = request.urlopen(req, context=self.ssl)
            if id is not None:
                # Keep the stored cookie fresh with the server's Set-Cookie
                self.refresh(id, url_handle.info()['Set-Cookie'])
            if options.get('decompress', False): data = zlib.decompress(url_handle.read(), 16+zlib.MAX_WBITS) # 16+MAX_WBITS: gzip container
            else: data = url_handle.read()
            url_handle.close()
            if options.get('load_json', False): data = json.loads(data)
            return data
        except:
            try: url_handle.close()
            except: pass
            return None
    def get(self, id : int = 0):
        """Return the stored account at index ``id``, or None if absent."""
        try:
            return self.data.save['gbfaccounts'][id]
        except:
            return None
    def add(self, uid : int, ck : str, ua : str):
        """Append a new account (uid, cookie, user agent) to the save data."""
        with self.data.lock:
            if 'gbfaccounts' not in self.data.save:
                self.data.save['gbfaccounts'] = []
            self.data.save['gbfaccounts'].append([uid, ck, ua, 0, 0, None])
            self.data.pending = True
        return True
    def update(self, id : int, **options):
        """Update fields of account ``id`` (keywords: uid, ck, ua).

        Updating a field also resets its associated status/counter.
        Returns True on success, False on any error.
        """
        try:
            uid = options.pop('uid', None)
            ck = options.pop('ck', None)
            ua = options.pop('ua', None)
            with self.data.lock:
                if uid is not None:
                    self.data.save['gbfaccounts'][id][0] = uid
                    self.data.save['gbfaccounts'][id][4] = 0
                if ck is not None:
                    self.data.save['gbfaccounts'][id][1] = ck
                    self.data.save['gbfaccounts'][id][5] = None
                if ua is not None:
                    self.data.save['gbfaccounts'][id][2] = ua
                    self.data.save['gbfaccounts'][id][3] = 0
                self.data.pending = True
            return True
        except:
            return False
    def remove(self, id : int):
        """Remove account ``id`` from the save data. Returns True on success."""
        try:
            with self.data.lock:
                if id < 0 or id >= len(self.data.save['gbfaccounts']):
                    return False
                self.data.save['gbfaccounts'].pop(id)
                # NOTE(review): self.gbfcurrent is never initialized in __init__,
                # so this line raises AttributeError (silently swallowed by the
                # except below, making remove() return False) — confirm the
                # intended attribute (possibly self.data.save['gbfcurrent']).
                if self.gbfcurrent >= id and self.gbfcurrent >= 0: self.gbfcurrent -= 1
                self.data.pending = True
            return True
        except:
            return False
    def refresh(self, id : int, ck : str):
        """Merge a Set-Cookie header ``ck`` into account ``id``'s stored cookie.

        Each cookie in ``ck`` replaces the stored cookie with the same name;
        the account is then marked as refreshed (status 1, JST timestamp).
        """
        try:
            A = self.data.save['gbfaccounts'][id][1].split(';')
            B = ck.split(';')
            for c in B:
                tA = c.split('=')
                # strip a single leading space from the cookie name
                if tA[0][0] == " ": tA[0] = tA[0][1:]
                for i in range(0, len(A)):
                    tB = A[i].split('=')
                    if tB[0][0] == " ": tB[0] = tB[0][1:]
                    if tA[0] == tB[0]:
                        A[i] = c
                        break
            with self.data.lock:
                self.data.save['gbfaccounts'][id][1] = ";".join(A)
                self.data.save['gbfaccounts'][id][3] = 1
                self.data.save['gbfaccounts'][id][5] = self.bot.util.JST()
                self.data.pending = True
            return True
        except:
            return False
    def version(self): # retrieve the game version
        """Scrape the game's landing page for its version number.

        Returns the version as int, "Maintenance" if not found on the page,
        or None if the page could not be fetched.
        """
        res = self.request('http://game.granbluefantasy.jp/', headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36', 'Accept-Language':'en', 'Accept-Encoding':'gzip, deflate', 'Host':'game.granbluefantasy.jp', 'Connection':'keep-alive'}, decompress=True, no_base_headers=True)
        if res is None: return None
        try:
            return int(self.vregex.findall(str(res))[0])
        except:
            return "Maintenance" # if not found on the page, return "Maintenance"
    def updateVersion(self, v): # compare version with given value, then update and return a value depending on difference
        """Compare ``v`` to the saved game version and update it if different.

        Return codes: 0 unchanged, 2 value set (was None), 3 updated,
        -1 invalid (non-integer), 1 documented as "None given" (see note).
        """
        try:
            int(v)
            # NOTE(review): int(None) raises TypeError, so when v is None the
            # except branch returns -1 and the `return 1` below is unreachable
            # — confirm whether the None check was meant to come first.
            if v is None:
                return 1 # unchanged because of invalid parameter
            elif self.data.save['gbfversion'] is None:
                with self.data.lock:
                    self.data.save['gbfversion'] = v
                    # NOTE(review): every other mutation here sets
                    # self.data.pending; 'savePending' looks like a typo that
                    # silently skips marking the save dirty — confirm.
                    self.savePending = True
                return 2 # value is set
            elif self.data.save['gbfversion'] != v:
                with self.data.lock:
                    self.data.save['gbfversion'] = v
                    self.data.pending = True
                return 3 # update happened
            return 0 # unchanged
        except:
            return -1 # v isn't an integer
    def version2str(self, version_number): # convert gbf version number to its timestamp
        """Format a version number (a unix timestamp) as a JST date string,
        or '' if the value isn't a valid timestamp."""
        try: return "{0:%Y/%m/%d %H:%M} JST".format(datetime.fromtimestamp(int(version_number)) + timedelta(seconds=32400)) # JST
        except: return ""
    def isAvailable(self): # use the above to check if the game is up
        """Return True if the game responds and isn't under maintenance."""
        v = self.version()
        return ((v is not None) and (v != "Maintenance"))
8130351 | <reponame>domwillcode/home-assistant
"""The etherscan component."""
| StarcoderdataPython |
1898348 | #!/usr/bin/env python
"""Tests for `statdepth` package."""
import unittest
import pandas as pd
import numpy as np
from statdepth import *
from statdepth.testing import *
from statdepth.homogeneity import *
class TestStatdepth(unittest.TestCase):
    """Tests for `statdepth` package."""

    def setUp(self):
        """Set up test fixtures, if any."""

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def _assert_depth_api(self, bd):
        """Shared checks: a depth result and each of its query helpers
        (ordered/median/deepest/outlying) must all return a pandas Series."""
        self.assertIsInstance(bd, pd.Series)
        self.assertIsInstance(bd.ordered(), pd.Series)
        self.assertIsInstance(bd.median(), pd.Series)
        self.assertIsInstance(bd.deepest(n=2), pd.Series)
        self.assertIsInstance(bd.outlying(n=2), pd.Series)

    def test_functional(self):
        """Univariate functional depth (r2 containment), with and without K."""
        df = generate_noisy_univariate()
        self._assert_depth_api(FunctionalDepth([df], containment='r2'))
        self.assertIsInstance(FunctionalDepth([df], K=5, containment='r2'), pd.Series)

    def test_pointcloud_l1(self):
        """Pointcloud depth with L1 containment, with and without K."""
        df = generate_noisy_pointcloud(n=10, d=2)
        self._assert_depth_api(PointcloudDepth(df, containment='l1'))
        self.assertIsInstance(PointcloudDepth(df, K=2, containment='l1'), pd.Series)

    def test_pointcloud_simplex(self):
        """Pointcloud depth with simplex containment, with and without K."""
        df = generate_noisy_pointcloud(n=10, d=2)
        self._assert_depth_api(PointcloudDepth(df, containment='simplex'))
        self.assertIsInstance(PointcloudDepth(df, K=2, containment='simplex'), pd.Series)

    def test_pointcloud_oja(self):
        """Pointcloud depth with Oja containment (uses a larger cloud)."""
        df = generate_noisy_pointcloud(n=20, d=2)
        self._assert_depth_api(PointcloudDepth(df, containment='oja'))
        self.assertIsInstance(PointcloudDepth(df, K=2, containment='oja'), pd.Series)

    def test_multivariate(self):
        """Multivariate functional depth returns a Series with ordered()."""
        data = generate_noisy_multivariate()
        bd = FunctionalDepth(data, containment='simplex')
        self.assertIsInstance(bd, pd.Series)
        self.assertIsInstance(bd.ordered(), pd.Series)

if __name__ == "__main__":
    unittest.main()
4840878 | from gi.repository import Gtk, Gio, Gdk, GObject, Pango
from asciiplayback import *
from asciimation import *
class GtkASCIIPlayer(Gtk.Box):
    """Gtk widget that renders an ASCIImation frame-by-frame in a Pango label."""
    def __init__(self, player):
        """Build a label sized and styled from the asciimation, then start
        the animation loop."""
        Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL)
        self.player = player
        labelbox = Gtk.Box()
        label = Gtk.Label()
        label.set_alignment(0, 0)
        # Fix the label width to the asciimation's column count
        label.set_width_chars(self.player.asciimation.size[0])
        label.set_max_width_chars(self.player.asciimation.size[0])
        # Pango attributes carry the font; family falls back to monospace
        self._label_attrs = Pango.AttrList()
        self._label_attrs.insert(Pango.attr_size_new(self.player.asciimation.font_size*Pango.SCALE))
        self._label_attrs.insert(Pango.attr_family_new(self.player.asciimation.font_family + ",monospace"))
        label.set_attributes(self._label_attrs)
        labelbox.pack_start(label, True, False, 0)
        self.pack_start(labelbox, True, False, 0)
        self.do_animate(label)
    def do_animate(self, widget):
        """Render the next frame into ``widget`` and reschedule itself after
        the asciimation's frame delay."""
        asciiframe = self.player.next_frame()
        # Process the string to make each line the correct length, and ensure
        # that there is exactly the right number of lines.  Perhaps this
        # should be somewhere in the model code...
        text = []
        for line in asciiframe.text.split('\n'):
            text.append("{{: <{}s}}".format(self.player.asciimation.size[0]).format(line))
            text[-1] = text[-1][:self.player.asciimation.size[0]]
        while len(text) < self.player.asciimation.size[1]:
            text.append(' '*self.player.asciimation.size[0])
        text = '\n'.join(text[:self.player.asciimation.size[1]])
        # Draw the string and background
        fg = Gdk.Color.parse(asciiframe.foreground_color)[1].to_floats()
        fg_pango = (int(x * 65535) for x in fg)
        self._label_attrs.change(Pango.attr_foreground_new(*fg_pango))
        widget.set_attributes(self._label_attrs)
        widget.set_text(text)
        widget.modify_bg(Gtk.StateType.NORMAL, Gdk.Color.parse(asciiframe.background_color)[1])
        # The callback returns None (falsy), so the old timeout is removed
        # and a fresh one is scheduled each frame.
        GObject.timeout_add(self.player.asciimation.speed, self.do_animate, widget)
class GtkASCIIControls(Gtk.Box):
    """Transport-control bar (skip/seek/play-pause) for an ASCIImation player."""
    def __init__(self, player):
        """Create the five linked buttons and wire them to the player."""
        Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL)
        Gtk.StyleContext.add_class(self.get_style_context(), "linked")
        self.player = player
        btn_previous = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                                        name="media-skip-backward-symbolic"),
                                                Gtk.IconSize.BUTTON))
        btn_previous.connect("clicked", self.do_previous)
        self.add(btn_previous)
        btn_rewind = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                                        name="media-seek-backward-symbolic"),
                                                Gtk.IconSize.BUTTON))
        btn_rewind.connect("clicked", self.do_rewind)
        self.add(btn_rewind)
        # Play/pause button swaps its icon with the player's state
        self.btn_play = Gtk.Button()
        self.set_play_button_icon()
        self.btn_play.connect("clicked", self.do_play)
        self.add(self.btn_play)
        btn_forward = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                                        name="media-seek-forward-symbolic"),
                                                Gtk.IconSize.BUTTON))
        btn_forward.connect("clicked", self.do_forward)
        self.add(btn_forward)
        btn_next = Gtk.Button(image=Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                                                        name="media-skip-forward-symbolic"),
                                                Gtk.IconSize.BUTTON))
        btn_next.connect("clicked", self.do_next)
        self.add(btn_next)
    def do_previous(self, button):
        """Jump to the start of the asciimation."""
        self.player.to_start()
        self.set_play_button_icon()
    def do_rewind(self, button):
        """Play backwards / increase backward speed."""
        self.player.rewind()
        self.set_play_button_icon()
    def do_play(self, button):
        """Toggle between playing and paused."""
        self.player.toggle_playing()
        self.set_play_button_icon()
    def do_forward(self, button):
        """Play forwards faster / increase forward speed."""
        self.player.fast_forward()
        self.set_play_button_icon()
    def do_next(self, button):
        """Jump to the end of the asciimation."""
        self.player.to_end()
        self.set_play_button_icon()
    def set_play_button_icon(self):
        """Show the play icon when paused (speed 0), pause icon otherwise."""
        if self.player.speed == 0:
            self.btn_play.set_image(Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                name="media-playback-start-symbolic"), Gtk.IconSize.BUTTON))
        else:
            self.btn_play.set_image(Gtk.Image.new_from_gicon(Gio.ThemedIcon(
                name="media-playback-pause-symbolic"), Gtk.IconSize.BUTTON))
| StarcoderdataPython |
9752258 | from chispa import assert_df_equality
from cishouseholds.derive import assign_any_symptoms_around_visit
def test_assign_any_symptoms_around_visit(spark_session):
    """Round-trip check for ``assign_any_symptoms_around_visit``.

    Builds a dataframe that already contains the expected ``result`` column,
    feeds the function the same rows with ``result`` dropped, and asserts the
    recomputed column matches (ignoring row order and nullability).
    """
    expected_df = spark_session.createDataFrame(
        data=[
            # (participant id, symptoms flag, visit id, visit date, expected result)
            (1, "No", 1, "2020-07-20", "Yes"),
            (2, "No", 1, "2020-07-20", "No"),
            (3, "Yes", 2, "2020-02-18", "Yes"),
            (1, "Yes", 2, "2020-08-20", "Yes"),
            (2, "No", 3, "2020-08-20", "Yes"),
            (3, "No", 3, "2020-03-18", "Yes"),
            (1, "Yes", 3, "2020-09-20", "Yes"),
            (2, "Yes", 4, "2020-09-20", "Yes"),
            (3, "No", 4, "2020-04-18", "No"),
        ],
        schema="id integer, symptoms string, visit_id integer, visit_date string, result string",
    )
    output_df = assign_any_symptoms_around_visit(
        df=expected_df.drop("result"),
        column_name_to_assign="result",
        symptoms_bool_column="symptoms",
        id_column="id",
        visit_date_column="visit_date",
        visit_id_column="visit_id",
    )
    assert_df_equality(output_df, expected_df, ignore_nullable=True, ignore_row_order=True)
| StarcoderdataPython |
180071 | from .ddr import *
from .ndt import *
from .nnb import *
from .rnn import *
from .wnd import *
from .fcnn import *
from .linear import *
from .tree_dnn import *
from .transformer import *
from .base import ModelBase
| StarcoderdataPython |
8190806 | <gh_stars>10-100
from zipfile import ZipFile
import requests
from tcrdist import paths
# NOTE(review): __all__ omits list_available_zip_files, get_url and
# download_and_extract_directly_from_url even though they look public —
# confirm whether restricting `import *` to one name is intentional.
__all__ = ['download_and_extract_zip_file']
"""
python -c "from tcrdist.setup_tests import *; download_and_extract_zip_file('bulk.csv.zip')"
python -c "from tcrdist.setup_tests import *; download_and_extract_zip_file('dash.zip')"
python -c "from tcrdist.setup_tests import *; download_and_extract_zip_file('sant.csv.zip')"
"""
# Public-facing data files: L maps each zip filename to its download URL
# per host source ('dropbox' is populated; 'aws' entries are reserved).
L = {"dash.zip":
        {'dropbox': {
            'url' : "https://www.dropbox.com/s/pce3f9816ntzjki/dash.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "bulk.zip":
        {'dropbox': {
            'url' : "https://www.dropbox.com/s/4yy9110al33ckh7/bulk.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "olga_T_alpha_beta_1000K_simulated_cdr3.zip":
        {'dropbox': {
            'url' : "https://www.dropbox.com/s/6qcxs3ylmczyfk7/olga_T_alpha_beta_1000K_simulated_cdr3.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "cdr3_beta_500K.zip":
        {'dropbox': {
            'url' : "https://www.dropbox.com/s/yevk0rus1dqnzcg/cdr3_beta_500K.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "human_T_alpha_beta_sim200K.zip":
        {'dropbox': {
            'url' : "https://www.dropbox.com/s/jjnon2x8qt0qk4y/human_T_alpha_beta_sim200K.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "vdjDB_PMID28636592.zip":
        {'dropbox': {
            'url' : "https://www.dropbox.com/s/mmjyi8i3p1ps3qq/vdjDB_PMID28636592.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "sant.csv.zip":
        {'dropbox': {
            'url' : "https://www.dropbox.com/s/8p3djrdd270ad0n/sant.csv.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "bulk.csv.zip":
        {'dropbox': {
            'url' : "https://www.dropbox.com/s/g6k2h1ed5d5sabz/bulk.csv.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "wiraninha_sampler.zip":
        {'dropbox': {
            'url' : "https://www.dropbox.com/s/ily0td3tn1uc7bi/wiraninha_sampler.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "ruggiero_mouse_sampler.zip":
        {'dropbox':{
            'url' : "https://www.dropbox.com/s/yz8v1c1gf2eyzxk/ruggiero_mouse_sampler.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "ruggiero_human_sampler.zip":
        {'dropbox':{
            'url' : "https://www.dropbox.com/s/jda6qtemk65zlfk/ruggiero_human_sampler.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "britanova_human_beta_t_cb.tsv.sampler.tsv.zip":
        {'dropbox':{
            'url' : "https://www.dropbox.com/s/87n5v2by80xhy1q/britanova_human_beta_t_cb.tsv.sampler.tsv.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "emerson_human_beta_t_cmvneg.tsv.sampler.tsv.zip":
        {'dropbox':{
            'url' : "https://www.dropbox.com/s/04mxrzw7f5wkg1x/emerson_human_beta_t_cmvneg.tsv.sampler.tsv.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "ruggiero_human_alpha_t.tsv.sampler.tsv.zip":
        {'dropbox':{
            'url' : "https://www.dropbox.com/s/9h84bzhd0asfym7/ruggiero_human_alpha_t.tsv.sampler.tsv.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "ruggiero_human_beta_t.tsv.sampler.tsv.zip":
        {'dropbox':{
            'url' : "https://www.dropbox.com/s/onr5lntmlm4fivi/ruggiero_human_beta_t.tsv.sampler.tsv.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "ImmunoSeq_MIRA_matched_tcrdist3_ready.zip":
        {'dropbox':{
            'url' : "https://www.dropbox.com/s/1vma8opj0yqts9e/ImmunoSeq_MIRA_matched_tcrdist3_ready.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    "ImmunoSeq_MIRA_matched_tcrdist3_ready_2_files.zip":
        {'dropbox':{
            'url' : "https://www.dropbox.com/s/qrjawanmrklts70/ImmunoSeq_MIRA_matched_tcrdist3_ready_2_files.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    'bioRxiv_v2_metaclonotypes.tsv.zip':
        {'dropbox':{
            'url' : "https://www.dropbox.com/s/hpt1ropv7u02eqr/bioRxiv_v2_metaclonotypes.tsv.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    'ImmunoSEQhsTCRBV4b_tcrdist3.zip':
        {'dropbox':{
            'url' : "https://www.dropbox.com/s/22iyel9uyzy7zyq/ImmunoSEQhsTCRBV4b_tcrdist3.zip?dl=1"},
        'aws': {
            'url' : None}
        },
    '2021-04-02-Release_v2.1_metaclonotypes_concise.tsv.zip':
        {'dropbox':{
            'url': "https://www.dropbox.com/s/6to8fsga8k5twdr/2021-04-02-Release_v2.1_metaclonotypes_concise.tsv.zip?dl=1"},
        'aws':{
            'url' : None}
        },
    '2021-04-02-Release_v2.1_TCRs_concise_covid_only.tsv.zip':
        {'dropbox' : {
            'url' : "https://www.dropbox.com/s/60i0reiv7utr8hw/2021-04-02-Release_v2.1_TCRs_concise_covid_only.tsv.zip?dl=1"},
        'aws':{
            'url' : None}
        }
    }
def list_available_zip_files():
    """
    List all available zip files downloadable from tcrdist3.

    Returns
    -------
    List of zipfile names that can be passed to zipfile argument in download_and_extract_zip_file()
    """
    # The registry L maps filename -> per-source download urls.
    return list(L)
def get_url(zipfile, source = 'dropbox'):
    """
    Look up the url registered for a zipfile under the given host source.

    Returns
    -------
    url : str
    """
    return L[zipfile][source]['url']
def download_and_extract_zip_file(zipfile = None, source = 'dropbox', dest = paths.path_to_base):
    """
    Downloads and extracts a zip file to destination folder.

    Uses functions from **requests** and **Zipfile**, part of the Python Standard Library, to avoid the
    platform independent use of wget, curl, gunzip, etc.

    Parameters
    ----------
    zipfile : str
        Name of zip file see (list_available_zip_files() for current list)
    source : str
        The host source name where the file will be downloaded from. Currently 'dropbox'
        is the only aviable option but 'aws' will be available on release >= 1.0.0
    dest : str
        path where the files to be saved and unzipped

    Raises
    ------
    requests.HTTPError
        If the download URL responds with an error status.
    """
    url = get_url(zipfile, source = source)
    r = requests.get(url)
    # Fail fast on a bad HTTP status; otherwise the error page would be
    # written to disk and later surface as a confusing BadZipFile error.
    r.raise_for_status()
    # Context managers guarantee the handles are closed even on error.
    with open(zipfile, 'wb') as f:
        f.write(r.content)
    with ZipFile(zipfile, 'r') as zipObj:
        # Extract all the contents of zip file in different directory
        zipObj.extractall(dest)
def download_and_extract_directly_from_url(zipfile, url,dest = paths.path_to_base):
    """
    Advanced feature allowing downloads from URLs not prespecified in list above.
    Users who want to add downloads to public URLs may find this useful; However,
    this is not recommended unless you want something not yet available in package test files.
    You can check list_available_zip_files() to see if the name and url of the file
    you want is already prespecified as a test or demonstration file.
    Whereas if you use this direct option, filenames are not strictly enforced.

    Parameters
    ----------
    zipfile : str
        Name of zip file
    url : str
        User can directly provide the url they wish to use (advanced option)
    dest : str
        path where the files to be saved and unzipped

    Raises
    ------
    requests.HTTPError
        If the server responds with an error status code.

    Example
    -------
    from tcrdist.setup_tests import download_and_extract_directly_from_url
    download_and_extract_directly_from_url(zipfile= "2021-04-02-Release_v2.1_TCRs_concise_covid_only.tsv.zip",
        url = "https://www.dropbox.com/s/60i0reiv7utr8hw/2021-04-02-Release_v2.1_TCRs_concise_covid_only.tsv.zip?dl=1")
    """
    r = requests.get(url)
    # Fail loudly on HTTP errors; otherwise an HTML error page would be
    # written to disk and ZipFile would later raise a confusing BadZipFile.
    r.raise_for_status()
    with open(zipfile,'wb') as f:
        f.write(r.content)
    with ZipFile(zipfile, 'r') as zipObj:
        # Extract all the contents of zip file in different directory
        zipObj.extractall(dest)
| StarcoderdataPython |
3469143 | #!/usr/bin/env python3
'''
$ argument_group.py --help
> usage: argument_group.py --flag1 [--flag2] [-h] pos1 [pos2] [args [args ...]]
>
> positional arguments:
> args
>
> optional arguments:
> -h, --help show this help message and exit
>
> Group #1:
> First group.
>
> pos1 First positional argument
> --flag1 First flag
>
> Group #2:
> pos2 Second positional argument
> --flag2 Second flag
$ argument_group.py 123 # returncode=2 stderr=True
usage: argument_group.py --flag1 [--flag2] [-h] pos1 [pos2] [args [args ...]]
argument_group.py: error: one of the arguments --flag1 is required
$ argument_group.py 123 --noflag1 --flag2
pos1='123' pos2=None args=() flag1=False flag2=True
'''
from hashbang import command, Argument
class Group:
    """A named argument group that lazily materializes an argparse group.

    Instances register themselves on a command via the hashbang extension
    hook, then hand out a single shared argparse argument group.
    """

    def __init__(self, name, *, title=None, description=None):
        self.name = name
        self.title = title
        self.description = description
        self.group = None  # argparse group, created on first create_group()

    def apply_hashbang_extension(self, cmd):
        # Register this group on the command under its name; if a group
        # with the same name was registered earlier, keep the earlier one.
        registry = getattr(cmd, '_Group__groups', None) or {}
        registry.setdefault(self.name, self)
        cmd._Group__groups = registry

    def create_group(self, parser):
        # Memoize the argparse argument group so repeated calls return
        # the same container.
        if self.group is None:
            self.group = parser.add_argument_group(self.title, self.description)
        return self.group
class GroupArgument(Argument):
    """An Argument that is rendered inside a named Group's argparse group."""

    def __init__(self, name, *args, group=None, **kwargs):
        super().__init__(name, *args, **kwargs)
        if group is None:
            raise RuntimeError('Group name cannot be none')
        self.group = group

    def add_argument(self, cmd, parser, param):
        # Look up this argument's Group on the command and route the
        # argument into that group's argparse container instead of the
        # top-level parser.
        container = cmd._Group__groups[self.group].create_group(parser)
        super().add_argument(cmd, container, param)
@command(
    Group('one', title='Group #1', description='First group.'),
    Group('two', title='Group #2'),
    GroupArgument('pos1', group='one', help='First positional argument'),
    GroupArgument('pos2', group='two', help='Second positional argument'),
    GroupArgument('flag1', group='one', help='First flag', required=True),
    GroupArgument('flag2', group='two', help='Second flag'))
def main(pos1, pos2=None, *args, flag1=False, flag2=False):
    """Echo every received argument with its repr()."""
    print(f'pos1={pos1!r} pos2={pos2!r} args={args!r} '
          f'flag1={flag1!r} flag2={flag2!r}')
if __name__ == '__main__':
    # hashbang entry point: execute() parses sys.argv and dispatches to
    # main() (see the doctest examples in the module docstring).
    main.execute()
| StarcoderdataPython |
# Define a bazel macro that creates cc_test for re2.
def re2_test(name, deps=[]):
    """Declares a cc_test named `name` built from re2/testing/<name>.cc."""
    src = "re2/testing/%s.cc" % name
    native.cc_test(
        name = name,
        srcs = [src],
        deps = [":re2", ":test"] + deps,
    )
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.