id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3327528 | ############################################################################
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############################################################################
# pylint: disable=locally-disabled, invalid-name, missing-docstring
"""Intel win32 compiler configurations release
"""
from parts.config import ConfigValues, configuration
def map_default_version(env):
    """Return the Intel C compiler version recorded in the build environment."""
    version = env['INTELC_VERSION']
    return version
# Release-build configuration for the Intel win32 compiler, applied to all
# Intel compiler versions from 7 onward ("7-*").
config = configuration(map_default_version)
config.VersionRange("7-*",
                    append=ConfigValues(
                        CCFLAGS=[
                            # Compile using multiple processes
                            '/MP',
                            # NOTE(review): /GS- DISABLES stack-based buffer
                            # overrun detection; the original "SDL" comment
                            # suggested the opposite — confirm intent.
                            '/GS-',
                            # Optimize for minimum size
                            '/O1',
                            # allow non standard comment in C
                            '/wd991',
                            # typedef forward with the same name
                            '/wd344',
                            # disable language extensions (strict ANSI)
                            '/Za',
                            # Use multi-thread static libc
                            '/MT',
                            # treat all warnings as errors
                            '/Wall',
                            '/WX',
                            '/nologo'],
                        CXXFLAGS=[
                            # standard C++ exception handling
                            '/EHsc',
                            # disable RTTI
                            '/GR-'],
                        LINKFLAGS=[
                            # no default libraries
                            '/NODEFAULTLIB',
                            # prevent linker from referencing _main in dll
                            '/NOENTRY',
                            # eliminate unreferenced functions + data
                            '/OPT:REF',
                            # SDL: Data Execution Prevention
                            '/NXCOMPAT',
                            # SDL: Image Randomization (ASLR)
                            '/DYNAMICBASE',
                            # treat linker warnings as errors
                            '/WX',
                            '/nologo'
                        ],
                        # Standard release define: disables assert() etc.
                        CPPDEFINES=['NDEBUG']
                    )
                    )
| StarcoderdataPython |
3371982 | <filename>app/utils.py
import math
# from https://stackoverflow.com/questions/24727773/detecting-rectangle-collision-with-a-circle
def collision_rect_circle(rleft, rtop, width, height,
                          center_x, center_y, radius):
    """Return True if the axis-aligned rectangle and the circle overlap.

    Parameters:
    rleft, rtop      -- top-left corner of the rectangle
    width, height    -- rectangle dimensions (assumed non-negative)
    center_x, center_y -- circle center
    radius           -- circle radius

    Bug fix: the previous implementation only tested the rectangle's
    corners and whether the center lay inside the rectangle, so a circle
    overlapping only an *edge* (center outside, no corner within radius)
    was reported as no collision. Clamping the center onto the rectangle
    and measuring the distance to that nearest point covers every case.
    """
    # Nearest point of the rectangle to the circle's center.
    nearest_x = max(rleft, min(center_x, rleft + width))
    nearest_y = max(rtop, min(center_y, rtop + height))
    # Overlap iff that nearest point is within one radius of the center.
    return math.hypot(nearest_x - center_x, nearest_y - center_y) <= radius
def sigmoid(x):
    """Return the logistic sigmoid 1 / (1 + exp(-x)).

    Bug fix: the naive form overflows for large negative x because
    math.exp(-x) exceeds the float range (x < ~-709 raised OverflowError).
    Using the algebraically equivalent exp(x) / (1 + exp(x)) on the
    negative branch keeps the exponent argument non-positive, so exp can
    only underflow to 0.0, never overflow.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)
    return z / (1 + z)
| StarcoderdataPython |
1694415 | from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# View invoked for unresolved URLs (HTTP 404); Django resolves the dotted path.
handler404 = 'public_api_site.api.views.default'

# NOTE(review): `django.conf.urls.defaults`, `patterns('')` and string view
# references were removed in modern Django (patterns() gone in 1.10); this
# module targets a legacy release — confirm the deployment's Django version
# before upgrading.
urlpatterns = patterns('',
    # Documentation says location vs locations - adding until it's figured out
    # Judas (ston) 6/29
    (r'^api/location[/]$', 'public_api_site.api.views.locations'),
    (r'^api/speakers[/]$', 'public_api_site.api.views.speakers'),
    (r'^api/talks[/]$', 'public_api_site.api.views.talks'),
    (r'^api/interests[/]$', 'public_api_site.api.views.interests'),
    (r'^api/stats[/]$', 'public_api_site.api.views.stats'),
    (r'^api/users[/]$', 'public_api_site.api.views.users'),
    (r'^$','public_api_site.api.views.default'),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # (r'^admin/(.*)', admin.site.root),
)
| StarcoderdataPython |
1765505 | <filename>starminder.py<gh_stars>10-100
#!/usr/bin/env python
"""Main Starminder script."""
from datetime import datetime
import random
from typing import Callable, Optional, Union
import boto3
from emoji import emojize
from github import Github
from github.AuthenticatedUser import AuthenticatedUser
from github.Repository import Repository
from jinja2 import Template
from loguru import logger
import mistune # type: ignore
from constants import (
AWS_ACCESS_KEY_ID,
AWS_FROM,
AWS_SECRET_ACCESS_KEY,
GITHUB_FORK_URL,
GITHUB_TOKEN,
STARMINDER_COUNT,
STARMINDER_RECIPIENT,
STARMINDER_SUBJECT,
TEMPLATE_PATH,
)
# Human-readable current date, e.g. "Monday, 1 May, 2023"; used in the
# subject line and the greeting.
TODAY = datetime.utcnow().date().strftime("%A, %-d %B, %Y")
# Subject line built from the configured string.Template with today's date.
SUBJECT = STARMINDER_SUBJECT.substitute(today=TODAY)
# Type aliases: per-repository dicts rendered into the email template.
StarData = list[dict[str, Optional[Union[str, int]]]]
EmailData = dict[str, Union[str, object]]
# Signature of the (pluggable) email sender: (text, html, subject, recipient).
SendFunction = Callable[[str, str, str, str], None]
def gh_init() -> Github:
    """Create and return an authenticated GitHub API client."""
    logger.info("Initializing GitHub connection")
    client = Github(GITHUB_TOKEN)
    logger.debug("Initialized GitHub connection successfully")
    return client
def get_user(gh: Github) -> AuthenticatedUser:
    """Fetch the user the given GitHub client is authenticated as."""
    logger.info("Fetching user")
    current_user = gh.get_user()
    logger.debug(f"Fetched user {current_user.login} successfully")
    return current_user
def get_stars(user: AuthenticatedUser) -> list[Repository]:
    """Return every repository the given user has starred."""
    logger.info("Fetching stars")
    starred = [repo for repo in user.get_starred()]
    logger.debug(f"Fetched {len(starred)} stars successfully")
    return starred
def reconcile_count(stars: list[Repository], user_count: int) -> int:
    """Return how many stars to pick: the configured count, capped at availability."""
    logger.info("Reconciling count")
    available = len(stars)
    if available >= user_count:
        logger.debug("Reconciled count to setting")
        result = user_count
    else:
        logger.debug("Reconciled count to number of stars")
        result = available
    logger.debug(f"Reconciled count: {result}")
    return result
def randomize_stars(stars: list[Repository], count: int) -> list[Repository]:
    """Return `count` stars sampled uniformly without replacement."""
    logger.info("Randomizing stars")
    sampled = random.sample(stars, count)
    logger.debug(f"Randomized {count} stars")
    return sampled
def generate_star_data(stars: list[Repository]) -> StarData:
    """Build the per-repository dictionaries that feed the email template."""
    logger.info("Generating star email data")
    data: StarData = []
    for repo in stars:
        # Empty descriptions collapse to None after emoji substitution.
        description = emojize(repo.description or "", use_aliases=True) or None
        data.append(
            {
                "full_name": repo.full_name,
                "description": description,
                "url": repo.html_url,
                "homepage": repo.homepage or None,
                "stargazers_count": repo.stargazers_count,
                # watchers are really subscribers, thanks to GitHub API weirdness:
                # https://developer.github.com/changes/2012-09-05-watcher-api/
                "watchers_count": repo.subscribers_count,
            }
        )
    logger.debug(f"Generated star email data: {data}")
    return data
def generate_name(user: AuthenticatedUser) -> str:
    """Return the greeting name: "Full Name (login)" when a name is set, else login."""
    logger.info("Generating user name")
    if user.name:
        user_name = f"{user.name} ({user.login})"
    else:
        user_name = user.login
    logger.debug(f"Generated user name: {user_name}")
    return user_name
def generate_email_data(user: AuthenticatedUser, star_data: StarData) -> EmailData:
    """Assemble the complete context dictionary for the email template."""
    logger.info("Generating email data")
    context: EmailData = dict(
        user_name=generate_name(user),
        today=TODAY,
        stars=star_data,
        fork_url=GITHUB_FORK_URL,
    )
    logger.debug(f"Generated email data: {context}")
    return context
def generate_email_md(data: EmailData) -> str:
    """Render the Jinja template at TEMPLATE_PATH into Markdown."""
    logger.info("Generating email Markdown")
    rendered = Template(TEMPLATE_PATH.read_text()).render(**data)
    logger.debug(f"Generated email Markdown: {rendered}")
    return rendered
def generate_email_html(markdown: str) -> str:
    """Convert the Markdown email body to HTML via mistune."""
    logger.info("Generating email HTML")
    converted: str = mistune.html(markdown)
    logger.debug(f"Generated email HTML: {converted}")
    return converted
def send_email(text: str, html: str, subject: str, recipient: str) -> None:
    """Send a multipart (plain-text + HTML) email through AWS SES."""
    logger.info("Sending email via SES")
    ses = boto3.client(
        "ses",
        region_name="us-east-1",
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
    )
    ses.send_email(
        Source=AWS_FROM,
        Destination={"ToAddresses": [recipient]},
        Message={
            "Subject": {"Data": subject},
            "Body": {"Text": {"Data": text}, "Html": {"Data": html}},
        },
        ReplyToAddresses=[AWS_FROM],
    )
    logger.debug("Sent email via SES")
def reconcile_send_email_function() -> SendFunction:
    """Prefer a user-supplied custom.send_email; fall back to the built-in one."""
    logger.info("Reconciling send_email function")
    try:
        from custom import send_email as custom_send_email  # type: ignore
    except (ImportError, ModuleNotFoundError):
        logger.debug("Reconciled send_email to built-in")
        return send_email
    logger.debug("Reconciled send_email to custom")
    return custom_send_email
def starminder() -> None:
    """Fetch the user's stars, sample a random subset, and email a reminder."""
    logger.info("Running Starminder")
    # Authenticate and resolve the user.
    user = get_user(gh_init())
    # Gather all stars and decide how many to show.
    all_stars = get_stars(user)
    count = reconcile_count(all_stars, STARMINDER_COUNT)
    chosen = randomize_stars(all_stars, count)
    # Build the template context and render Markdown + HTML bodies.
    email_data = generate_email_data(user, generate_star_data(chosen))
    email_md = generate_email_md(email_data)
    email_html = generate_email_html(email_md)
    # Dispatch through the built-in or user-provided sender.
    sender = reconcile_send_email_function()
    sender(email_md, email_html, SUBJECT, STARMINDER_RECIPIENT)
if __name__ == "__main__":
    # loguru interpolates "{}" placeholders lazily; the previous
    # %-formatting built the string eagerly before loguru ever saw it.
    logger.info("Running script: {}", __file__)
    starminder()
    logger.debug("Finished running script")
| StarcoderdataPython |
2480 | <reponame>XiaoboLinlin/scattering
import itertools as it
import numpy as np
import mdtraj as md
from progressbar import ProgressBar
from scattering.utils.utils import get_dt
from scattering.utils.constants import get_form_factor
def compute_van_hove(trj, chunk_length, water=False,
                     r_range=(0, 1.0), bin_width=0.005, n_bins=None,
                     self_correlation=True, periodic=True, opt=True, partial=False):
    """Compute the total van Hove function of a trajectory.

    The total function is assembled from element-element partial van Hove
    functions, weighted by X-ray form factors and element concentrations.

    Parameters
    ----------
    trj : mdtraj.Trajectory
        trajectory on which to compute the Van Hove function
    chunk_length : int
        length of time between restarting averaging
    water : bool
        use X-ray form factors for water that account for polarization
    r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
        Minimum and maximum radii.
    bin_width : float, optional, default=0.005
        Width of the bins in nanometers.
    n_bins : int, optional, default=None
        The number of bins. If specified, this will override the `bin_width`
        parameter.
    self_correlation : bool, default=True
        Whether or not to include the self-self correlations
    periodic : bool, default=True
        Passed through to the distance computation.
    opt : bool, default=True
        Use the optimized distance routines (passed through).
    partial : bool, default=False
        If True, return the dict of partial functions keyed by element pair
        instead of the combined result.

    Returns
    -------
    r : numpy.ndarray
        r positions generated by histogram binning
    t : numpy.ndarray
        times (first `chunk_length` frame times) for the rows of g_r_t
    g_r_t : numpy.ndarray
        Van Hove function at each time and position

    If ``partial=True``, the dict of partial van Hove functions is
    returned instead of the (r, t, g_r_t) tuple.
    """
    # Atoms with zero mass (e.g. virtual sites) are excluded throughout.
    n_physical_atoms = len([a for a in trj.top.atoms if a.element.mass > 0])
    unique_elements = list(set([a.element for a in trj.top.atoms if a.element.mass > 0]))
    # One partial van Hove function per unordered element pair.
    partial_dict = dict()
    for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2):
        print('doing {0} and {1} ...'.format(elem1, elem2))
        r, g_r_t_partial = compute_partial_van_hove(trj=trj,
                                                    chunk_length=chunk_length,
                                                    selection1='element {}'.format(elem1.symbol),
                                                    selection2='element {}'.format(elem2.symbol),
                                                    r_range=r_range,
                                                    bin_width=bin_width,
                                                    n_bins=n_bins,
                                                    self_correlation=self_correlation,
                                                    periodic=periodic,
                                                    opt=opt)
        partial_dict[(elem1, elem2)] = g_r_t_partial
    if partial:
        return partial_dict
    # Combine partials, each weighted by form factor x concentration of
    # both elements in the pair; `norm` accumulates the total weight.
    norm = 0
    g_r_t = None
    for key, val in partial_dict.items():
        elem1, elem2 = key
        concentration1 = trj.atom_slice(trj.top.select('element {}'.format(elem1.symbol))).n_atoms / n_physical_atoms
        concentration2 = trj.atom_slice(trj.top.select('element {}'.format(elem2.symbol))).n_atoms / n_physical_atoms
        form_factor1 = get_form_factor(element_name=elem1.symbol, water=water)
        form_factor2 = get_form_factor(element_name=elem2.symbol, water=water)
        coeff = form_factor1 * concentration1 * form_factor2 * concentration2
        if g_r_t is None:
            g_r_t = np.zeros_like(val)
        g_r_t += val * coeff
        norm += coeff
    # Reshape g_r_t to better represent the discretization in both r and t:
    # average every chunk_length-th row so row i is the mean at lag i.
    g_r_t_final = np.empty(shape=(chunk_length, len(r)))
    for i in range(chunk_length):
        g_r_t_final[i, :] = np.mean(g_r_t[i::chunk_length], axis=0)
    g_r_t_final /= norm
    t = trj.time[:chunk_length]
    return r, t, g_r_t_final
def compute_partial_van_hove(trj, chunk_length=10, selection1=None, selection2=None,
                             r_range=(0, 1.0), bin_width=0.005, n_bins=200,
                             self_correlation=True, periodic=True, opt=True):
    """Compute the partial van Hove function of a trajectory

    Parameters
    ----------
    trj : mdtraj.Trajectory
        trajectory on which to compute the Van Hove function
    chunk_length : int
        length of time between restarting averaging
    selection1 : str
        selection to be considered, in the style of MDTraj atom selection
    selection2 : str
        selection to be considered, in the style of MDTraj atom selection
    r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
        Minimum and maximum radii.
    bin_width : float, optional, default=0.005
        Width of the bins in nanometers.
    n_bins : int, optional, default=200
        The number of bins. If specified, this will override the `bin_width`
        parameter.
    self_correlation : bool, default=True
        Whether or not to include the self-self correlations

    Raises
    ------
    UserWarning
        If either selection matches atoms of more than one element.

    Returns
    -------
    r : numpy.ndarray
        r positions generated by histogram binning
    g_r_t : numpy.ndarray
        Van Hove function at each time and position
    """
    # Elements present in each selection; a mixed-element selection makes
    # the form-factor weighting in compute_van_hove ill-defined.
    unique_elements = (
        set([a.element for a in trj.atom_slice(trj.top.select(selection1)).top.atoms]),
        set([a.element for a in trj.atom_slice(trj.top.select(selection2)).top.atoms]),
    )
    if any([len(val) > 1 for val in unique_elements]):
        raise UserWarning(
            'Multiple elements found in a selection(s). Results may not be '
            'direcitly comprable to scattering experiments.'
        )
    # Don't need to store it, but this serves to check that dt is constant
    dt = get_dt(trj)
    pairs = trj.top.select_pairs(selection1=selection1, selection2=selection2)
    # Partial chunks at the end of the trajectory are discarded.
    n_chunks = int(trj.n_frames / chunk_length)
    g_r_t = None
    pbar = ProgressBar()
    for i in pbar(range(n_chunks)):
        # Frame pairs (chunk start, chunk start + lag) for every lag in the chunk.
        times = list()
        for j in range(chunk_length):
            times.append([chunk_length*i, chunk_length*i+j])
        r, g_r_t_frame = md.compute_rdf_t(
            traj=trj,
            pairs=pairs,
            times=times,
            r_range=r_range,
            bin_width=bin_width,
            n_bins=n_bins,
            period_length=chunk_length,
            self_correlation=self_correlation,
            periodic=periodic,
            opt=opt,
        )
        if g_r_t is None:
            g_r_t = np.zeros_like(g_r_t_frame)
        # Accumulate over chunks; the caller averages/normalizes.
        g_r_t += g_r_t_frame
    return r, g_r_t
| StarcoderdataPython |
121582 | """Clowder API
This module provides simple wrappers around the clowder Collections API
"""
import json
import logging
import requests
from pyclowder.utils import StatusMessage
def create_empty(connector, host, key, collectionname, description, parentid=None, spaceid=None):
    """Create a new collection in Clowder and return its id.

    Keyword arguments:
    connector -- connector information, used to get missing parameters and send status updates
    host -- the clowder host, including http and port, should end with a /
    key -- the secret key to login to clowder
    collectionname -- name of new dataset to create
    description -- description of new dataset
    parentid -- id of parent collection (optional)
    spaceid -- id of the space to add dataset to (optional)

    Refactor: the original duplicated the POST call four times for the
    parentid/spaceid combinations; the request body and endpoint are now
    built once, which keeps the four cases behaviorally identical.
    """
    logger = logging.getLogger(__name__)

    body = {"name": collectionname, "description": description}
    if parentid:
        # Creating under a parent uses a dedicated endpoint.
        url = '%sapi/collections/newCollectionWithParent?key=%s' % (host, key)
        body["parentId"] = [parentid]
    else:
        url = '%sapi/collections?key=%s' % (host, key)
    if spaceid:
        body["space"] = spaceid

    result = requests.post(url, headers={"Content-Type": "application/json"},
                           data=json.dumps(body),
                           verify=connector.ssl_verify if connector else True)
    result.raise_for_status()

    collectionid = result.json()['id']
    logger.debug("collection id = [%s]", collectionid)

    return collectionid
def get_child_collections(connector, host, key, collectionid):
    """Return the child collections of a collection, parsed from JSON.

    Keyword arguments:
    connector -- connector information, used to get missing parameters and send status updates
    host -- the clowder host, including http and port, should end with a /
    key -- the secret key to login to clowder
    collectionid -- the collection to get children of
    """
    url = "%sapi/collections/%s/getChildCollections?key=%s" % (host, collectionid, key)
    verify = connector.ssl_verify if connector else True
    response = requests.get(url, verify=verify)
    response.raise_for_status()
    return json.loads(response.text)
def get_datasets(connector, host, key, collectionid):
    """Return the datasets contained in a collection, parsed from JSON.

    Keyword arguments:
    connector -- connector information, used to get missing parameters and send status updates
    host -- the clowder host, including http and port, should end with a /
    key -- the secret key to login to clowder
    collectionid -- the collection to get datasets of
    """
    url = "%sapi/collections/%s/datasets?key=%s" % (host, collectionid, key)
    verify = connector.ssl_verify if connector else True
    response = requests.get(url, verify=verify)
    response.raise_for_status()
    return json.loads(response.text)
# pylint: disable=too-many-arguments
def upload_preview(connector, host, key, collectionid, previewfile, previewmetadata):
    """Upload preview to Clowder and return the new preview id.

    Keyword arguments:
    connector -- connector information, used to get missing parameters and send status updates
    host -- the clowder host, including http and port, should end with a /
    key -- the secret key to login to clowder
    collectionid -- the file that is currently being processed
    previewfile -- the file containing the preview
    previewmetadata -- any metadata to be associated with preview; this can
        contain a 'section_id' key to indicate the section this preview
        should be associated with.

    Bug fix: the original indexed previewmetadata['section_id'] directly,
    raising KeyError whenever metadata was supplied without a section_id;
    .get() treats a missing key the same as a falsy value.
    """
    connector.status_update(StatusMessage.processing, {"type": "collection", "id": collectionid},
                            "Uploading collection preview.")

    logger = logging.getLogger(__name__)
    headers = {'Content-Type': 'application/json'}

    # upload preview
    url = '%sapi/previews?key=%s' % (host, key)
    with open(previewfile, 'rb') as filebytes:
        result = requests.post(url, files={"File": filebytes},
                               verify=connector.ssl_verify if connector else True)
        result.raise_for_status()
    previewid = result.json()['id']
    logger.debug("preview id = [%s]", previewid)

    # associate uploaded preview with original collection, unless the
    # metadata targets a specific section instead
    if collectionid and not (previewmetadata and previewmetadata.get('section_id')):
        url = '%sapi/collections/%s/previews/%s?key=%s' % (host, collectionid, previewid, key)
        result = requests.post(url, headers=headers, data=json.dumps({}),
                               verify=connector.ssl_verify if connector else True)
        result.raise_for_status()

    # associate metadata with preview
    if previewmetadata is not None:
        url = '%sapi/previews/%s/metadata?key=%s' % (host, previewid, key)
        result = requests.post(url, headers=headers, data=json.dumps(previewmetadata),
                               verify=connector.ssl_verify if connector else True)
        result.raise_for_status()

    return previewid
| StarcoderdataPython |
56880 | <filename>prodigy.py
#!/usr/bin/python3
import re
import argparse
if __name__ == '__main__':
    # Command-line interface: input FASTA file, output file, and which
    # protease to simulate.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", metavar = "FILE", help = "Please enter the file (with extension) that you wish to parse", default = "orfs.fa")
    parser.add_argument("-o", "--output", metavar = "FILE", help = "This shall provide an output for observation provided the given input")
    parser.add_argument("-c", "--choice", type=str, help = "Choose which enzyme to use out of Trypsin, Endoproteinase_LysC, Endoproteinase_Arg and V8_Proteinase", choices = ['Trypsin', 'Endoproteinase_LysC', 'Endoproteinase_Arg', 'V8_Proteinase'])
    args = parser.parse_args()
    # NOTE(review): the function definitions and processing loop below use
    # `args` but appear at top level in this copy — confirm original nesting.
def read_fasta(file):
    """Yield (name, sequence) pairs from a FASTA file or iterable of lines.

    The header line (starting with '>') is yielded verbatim, including the
    '>' character; the sequence lines between headers are concatenated.

    Bug fix: this copy of the generator was whitespace-mangled — the
    `for line in file:` loop and the final yield had been fused into
    comment lines, so the function never iterated its input nor emitted
    the last record. Reconstructed from the surviving statements.
    """
    name, seq = None, []
    for line in file:
        line = line.rstrip()
        if re.search(r">", line):
            # A new header: emit the previous record, if any.
            if name:
                yield (name, ''.join(seq))
            name, seq = line, []
        else:
            seq.append(line)
    # Emit the final record.
    if name:
        yield (name, ''.join(seq))
def Trypsin(sequence):
    """Digest `sequence` with trypsin: cleave after K or R, unless the
    following residue is P.

    Returns a list of peptide fragments whose concatenation equals the
    input sequence.

    Bug fixes versus the original:
    - `(protein != 'P') & (Lysine_K) or (Arginine_R)` — `&` binds tighter
      than `or`, so the proline exception was never applied to arginine;
    - cut positions were recorded one residue too early, placing the K/R
      in the following fragment instead of ending the preceding one;
    - the fragment after the last cleavage site was silently dropped.
    """
    fragments = []
    start = 0
    for i, residue in enumerate(sequence):
        if residue in ('K', 'R'):
            # Proline immediately C-terminal blocks cleavage.
            if i + 1 < len(sequence) and sequence[i + 1] == 'P':
                continue
            fragments.append(sequence[start:i + 1])
            start = i + 1
    # Keep the trailing fragment after the last cleavage site.
    if start < len(sequence):
        fragments.append(sequence[start:])
    return fragments
def Endoproteinase_LysC(sequence):
    """Digest `sequence` with endoproteinase Lys-C: cleave after K, unless
    the following residue is P.

    Returns a list of peptide fragments whose concatenation equals the
    input sequence.

    Bug fixes versus the original: cut positions were one residue too
    early (K ended up in the following fragment) and the fragment after
    the last cleavage site was dropped.
    """
    fragments = []
    start = 0
    for i, residue in enumerate(sequence):
        if residue == 'K':
            # Proline immediately C-terminal blocks cleavage.
            if i + 1 < len(sequence) and sequence[i + 1] == 'P':
                continue
            fragments.append(sequence[start:i + 1])
            start = i + 1
    # Keep the trailing fragment after the last cleavage site.
    if start < len(sequence):
        fragments.append(sequence[start:])
    return fragments
def Endoproteinase_Arg(sequence):
    """Digest `sequence` with endoproteinase Arg-C: cleave after R, unless
    the following residue is P.

    Returns a list of peptide fragments whose concatenation equals the
    input sequence.

    Bug fixes versus the original: cut positions were one residue too
    early (R ended up in the following fragment) and the fragment after
    the last cleavage site was dropped.
    """
    fragments = []
    start = 0
    for i, residue in enumerate(sequence):
        if residue == 'R':
            # Proline immediately C-terminal blocks cleavage.
            if i + 1 < len(sequence) and sequence[i + 1] == 'P':
                continue
            fragments.append(sequence[start:i + 1])
            start = i + 1
    # Keep the trailing fragment after the last cleavage site.
    if start < len(sequence):
        fragments.append(sequence[start:])
    return fragments
def V8_Proteinase(sequence):
    """Digest `sequence` with V8 proteinase (Glu-C): cleave after E, unless
    the following residue is P.

    Returns a list of peptide fragments whose concatenation equals the
    input sequence.

    Bug fixes versus the original: cut positions were one residue too
    early (E ended up in the following fragment) and the fragment after
    the last cleavage site was dropped. (The original comments called E
    "glutamine"; E is glutamate/glutamic acid.)
    """
    fragments = []
    start = 0
    for i, residue in enumerate(sequence):
        if residue == 'E':
            # Proline immediately C-terminal blocks cleavage.
            if i + 1 < len(sequence) and sequence[i + 1] == 'P':
                continue
            fragments.append(sequence[start:i + 1])
            start = i + 1
    # Keep the trailing fragment after the last cleavage site.
    if start < len(sequence):
        fragments.append(sequence[start:])
    return fragments
# Map each CLI enzyme name to its digestion function. (The original comment
# said "dictionary" but actually built an unused set; the original dispatch
# was an if-chain containing syntax errors: a dangling `+ "\n"` after the
# for-statement, unbalanced parentheses in the Trypsin branch, and output
# files opened once per fragment and never closed.)
enzymes = {
    "Trypsin": Trypsin,
    "Endoproteinase_LysC": Endoproteinase_LysC,
    "Endoproteinase_Arg": Endoproteinase_Arg,
    "V8_Proteinase": V8_Proteinase,
}

with open(args.input) as infile, open(args.output, "a+") as outfile:
    digest = enzymes[args.choice]
    for name, seq in read_fasta(infile):
        # One "name\nfragment\n" record per peptide fragment, matching the
        # original output format.
        for fragment in digest(seq):
            outfile.write(name + "\n" + fragment + "\n")
| StarcoderdataPython |
3231030 | <gh_stars>10-100
# NOTE(review): this file appears to be a *fixture* for testing a Python
# parser / code-folding tool: duplicate function names, Python 2 `print`
# statements, and decorator variations all look intentional. Code left
# byte-identical; nesting below was reconstructed from context because the
# source copy had its indentation stripped — confirm against the original.
def a():
    pass
# asdfasdf
def b():
    pass
@dec1
@dec2
def a():
    pass
# Foo
# Bar
def b():
    pass
class Foo:
    b = 0
    def bar():
        pass
    def bar2():
        pass
@decoratedclass
class Baz:
    def zorp():
        pass
    def testing345():
        pass
def b(n):
    pass
def a():
    pass
def b(n):
    pass
def testing123():
    pass
@decorator
def a():
    print "testing 1"
    # test comment
    print "testing 2"
    print "testing 3"
foo = 7
bar = 2
# Number of folds to use for cross-validation.
N_FOLDS = 5
# Small epsilon added to denominators to avoid division by zero.
EPSILON = 1e-10
| StarcoderdataPython |
164294 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from iceberg.avro.codecs import DeflateCodec
from iceberg.avro.file import AvroFileHeader
def get_deflate_compressor():
    # NOTE(review): the name does not start with "test_", so pytest will NOT
    # collect this; it was likely meant to be test_deflate_compressor.
    # A header declaring "avro.codec": "deflate" should resolve to DeflateCodec.
    header = AvroFileHeader(bytes(0), {"avro.codec": "deflate"}, bytes(16))
    assert header.compression_codec() == DeflateCodec
def get_null_compressor():
    # NOTE(review): the name does not start with "test_", so pytest will NOT
    # collect this; it was likely meant to be test_null_compressor.
    # A header declaring "avro.codec": "null" should resolve to no codec.
    header = AvroFileHeader(bytes(0), {"avro.codec": "null"}, bytes(16))
    assert header.compression_codec() is None
def test_unknown_codec():
    """An unrecognized avro.codec value raises a descriptive ValueError."""
    header = AvroFileHeader(bytes(0), {"avro.codec": "unknown"}, bytes(16))
    with pytest.raises(ValueError) as excinfo:
        header.compression_codec()
    assert "Unsupported codec: unknown" in str(excinfo.value)
def test_missing_schema():
    """A header without schema metadata raises a descriptive ValueError."""
    header = AvroFileHeader(bytes(0), {}, bytes(16))
    with pytest.raises(ValueError) as excinfo:
        header.get_schema()
    assert "No schema found in Avro file headers" in str(excinfo.value)
| StarcoderdataPython |
"""Decrypt a Fernet-encrypted file to disk."""
from cryptography.fernet import Fernet

# TODO(review): the key literal was redacted (and left unterminated) in this
# copy; restore the real 32-byte urlsafe-base64 Fernet key before running.
key = b'<KEY>'

input_file = 'test-criptato.txt'
output_file = 'test-decriptato.txt'

# Read the encrypted payload.
with open(input_file, 'rb') as f:
    data = f.read()

fernet = Fernet(key)
# Bug fix: the original bound the *decrypted* bytes to a variable named
# `encrypted`, which was misleading; this is a decryption script.
decrypted = fernet.decrypt(data)

with open(output_file, 'wb') as f:
    f.write(decrypted)
1687361 | <filename>pub_data_visualization/production/load/entsoe/paths.py
"""
Folders where the raw production data provided by ENTSO-E
and the transformed dataframes are saved.
"""
import os
#
from .... import global_var
# Directory holding the raw ENTSO-E "Actual Generation Output per Unit" data.
folder_production_entsoe_raw = os.path.join(global_var.path_public_data,
                                            '11_ENTSOE',
                                            'ActualGenerationOutputPerUnit',
                                            )
# Path template for the transformed dataframes; the '{map_code}'
# placeholder is filled in with an ENTSO-E area/map code at save time.
fpath_production_entsoe_tmp = os.path.join(global_var.path_transformed,
                                           'ENTSOE',
                                           'ActualGenerationOutputPerUnit',
                                           'ActualGenerationOutputPerUnit_{map_code}',
                                           )
4838373 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.port.base import Port
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
_log = logging.getLogger(__name__)
class ApplePort(Port):
    """Shared logic between all of Apple's ports.

    Concrete subclasses supply VERSION_FALLBACK_ORDER and ARCHITECTURES;
    this base class implements the common port-name/version bookkeeping
    (including the '-wk2' suffix used for WebKitTestRunner variants).
    """
    # This is used to represent the version of an operating system
    # corresponding to the "mac" or "win" base LayoutTests/platform
    # directory. I'm not sure this concept is very useful,
    # but it gives us a way to refer to fallback paths *only* including
    # the base directory.
    # This is mostly done because TestConfiguration assumes that self.version()
    # will never return None. (None would be another way to represent this concept.)
    # Apple supposedly has explicit "future" results which are kept in an internal repository.
    # It's possible that Apple would want to fix this code to work better with those results.
    FUTURE_VERSION = 'future' # FIXME: This whole 'future' thing feels like a hack.
    # overridden in subclasses
    VERSION_FALLBACK_ORDER = []
    ARCHITECTURES = []
    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        """Expand a bare port name ('mac'/'win') to a fully versioned one,
        appending '-wk2' when options request WebKitTestRunner."""
        options = options or {}
        if port_name in (cls.port_name, cls.port_name + '-wk2'):
            # If the port_name matches the (badly named) cls.port_name, that
            # means that they passed 'mac' or 'win' and didn't specify a version.
            # That convention means that we're supposed to use the version currently
            # being run, so this won't work if you're not on mac or win (respectively).
            # If you're not on the o/s in question, you must specify a full version or -future (cf. above).
            assert host.platform.os_name in port_name, "%s is not in %s!" % (host.platform.os_name, port_name)
            if port_name == cls.port_name and not getattr(options, 'webkit_test_runner', False):
                port_name = cls.port_name + '-' + host.platform.os_version
            else:
                port_name = cls.port_name + '-' + host.platform.os_version + '-wk2'
        elif getattr(options, 'webkit_test_runner', False) and '-wk2' not in port_name:
            port_name += '-wk2'
        return port_name
    def _strip_port_name_prefix(self, port_name):
        """Drop the leading '<port_name>-' prefix, leaving the version part."""
        # Callers treat this return value as the "version", which only works
        # because Apple ports use a simple name-version port_name scheme.
        # FIXME: This parsing wouldn't be needed if port_name handling was moved to factory.py
        # instead of the individual port constructors.
        return port_name[len(self.port_name + '-'):]
    def __init__(self, host, port_name, **kwargs):
        """Validate port_name against the allowed fallback versions and cache
        the parsed version string (with any '-wk2' suffix removed first)."""
        super(ApplePort, self).__init__(host, port_name, **kwargs)
        allowed_port_names = self.VERSION_FALLBACK_ORDER + [self.operating_system() + "-future"]
        port_name = port_name.replace('-wk2', '')
        self._version = self._strip_port_name_prefix(port_name)
        assert port_name in allowed_port_names, "%s is not in %s" % (port_name, allowed_port_names)
    def _skipped_file_search_paths(self):
        """Return the Skipped-file search paths minus the most recent version."""
        # We don't have a dedicated Skipped file for the most recent version of the port;
        # we just use the one in platform/{mac,win}
        most_recent_name = self.VERSION_FALLBACK_ORDER[-1]
        return set(filter(lambda name: name != most_recent_name, super(ApplePort, self)._skipped_file_search_paths()))
    # FIXME: A more sophisticated version of this function should move to WebKitPort and replace all calls to name().
    # This is also a misleading name, since 'mac-future' gets remapped to 'mac'.
    def _port_name_with_version(self):
        """Return the name with '-future' and '-wk2' decorations removed."""
        return self.name().replace('-future', '').replace('-wk2', '')
    def _generate_all_test_configurations(self):
        """Cross-product of allowed versions, build types and architectures."""
        configurations = []
        allowed_port_names = self.VERSION_FALLBACK_ORDER + [self.operating_system() + "-future"]
        for port_name in allowed_port_names:
            for build_type in self.ALL_BUILD_TYPES:
                for architecture in self.ARCHITECTURES:
                    configurations.append(TestConfiguration(version=self._strip_port_name_prefix(port_name), architecture=architecture, build_type=build_type))
        return configurations
| StarcoderdataPython |
57076 | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import tensor_util
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import dtypes
from intel_quantization.transform_graph.graph_transform_base import GraphTransformBase
class FuseColumnWiseMul(GraphTransformBase):
    """Graph transform that folds a column-wise ``Mul`` by a ``Const`` into
    the weights of the preceding Conv2D/DepthwiseConv2dNative/MatMul node,
    removing the ``Mul`` from the graph.
    """

    def __init__(self, input_pb):
        """:param input_pb: the input GraphDef protobuf to transform."""
        super(FuseColumnWiseMul, self).__init__(input_pb)

    def get_fuse_index(self, input_node_map, input_name_list):
        """Return ``{producer_node_name: mul_node_name}`` for every ``Mul``
        whose first input is a fuseable op and whose second input is a Const.
        """
        fuseable_op_list = ['Conv2D', 'DepthwiseConv2dNative', 'MatMul']
        fuse_op_name = {}
        for node_name in input_name_list:
            node = input_node_map[node_name]
            if node.op != "Mul":
                continue
            if (input_node_map[node.input[0]].op in fuseable_op_list
                    and input_node_map[node.input[1]].op == "Const"):
                fuse_op_name[node.input[0]] = node_name
        return fuse_op_name

    def parse_input_graph(self, input_graph_def):
        """Index the graph.

        :return: (name -> NodeDef map, list of ops, list of names).
        Duplicate node names are reported; the first occurrence is kept.
        (The original also accumulated each node's inputs into a throwaway
        local that was never used; that dead code has been removed.)
        """
        node_type_list = []
        node_name_list = []
        input_node_map = {}
        for node in input_graph_def.node:
            node_name_list.append(node.name)
            node_type_list.append(node.op)
            if node.name not in input_node_map:
                input_node_map[node.name] = node
            else:
                print('Duplicate node name {}'.format(node.name))
        return input_node_map, node_type_list, node_name_list

    def generate_output_graph(self, input_graph_def, input_node_map,
                              fuse_op_name):
        """Emit a new GraphDef in which each matched ``Mul`` is removed and
        its constant is multiplied into the producer's weights column-wise.
        """
        output_graph_def = graph_pb2.GraphDef()
        skip_list = []
        skip_node_name = []
        for index, node in enumerate(input_graph_def.node):
            if node.name in fuse_op_name:
                # Assumes the Mul node immediately follows its producer in
                # the node list; remember its position so it gets dropped.
                skip_list.append(index + 1)
                original_node = input_node_map[node.name]
                mul_node = input_node_map[fuse_op_name[node.name]]
                weights_node_name = original_node.input[1]
                weights_node = input_node_map[weights_node_name]
                mul_value_node_name = mul_node.input[1]
                mul_value_node = input_node_map[mul_value_node_name]
                # The fused node keeps the producer's op/inputs but takes
                # over the Mul's name so downstream consumers still resolve.
                new_node = node_def_pb2.NodeDef()
                new_node.op = original_node.op
                new_node.name = mul_node.name
                for value in node.input:
                    new_node.input.append(value)
                # Number of output columns/channels the Mul is expected to
                # scale, derived from the weight tensor's shape.
                dims = weights_node.attr['value'].tensor.tensor_shape.dim
                if original_node.op == "DepthwiseConv2dNative":
                    weights_col = dims[2].size * dims[3].size
                elif original_node.op == "Conv2D":
                    weights_col = dims[3].size
                else:  # MatMul
                    weights_col = dims[1].size
                mul_value_node_tensor = mul_value_node.attr['value'].tensor
                weights_node_tensor = weights_node.attr['value'].tensor
                if len(mul_value_node_tensor.tensor_shape.dim) != 1 or \
                        mul_value_node_tensor.tensor_shape.dim[0].size != weights_col:
                    # NOTE(review): kept as a warning print to preserve the
                    # original behaviour; consider raising instead.
                    print("Invalid Mul OP fusion.")
                mul_value_node_list = [
                    v for v in tensor_util.MakeNdarray(mul_value_node_tensor).flat
                ]
                # Scale each weight by the constant of its output column.
                # The loop index is named w_idx so it no longer shadows the
                # outer ``index`` used for the skip bookkeeping above.
                new_weights = []
                for w_idx, w in enumerate(
                        tensor_util.MakeNdarray(weights_node_tensor).flat):
                    new_weights.append(
                        w * mul_value_node_list[w_idx % len(mul_value_node_list)])
                weights_node.attr['value'].CopyFrom(
                    attr_value_pb2.
                    AttrValue(tensor=tensor_util.make_tensor_proto(
                        new_weights, dtypes.float32,
                        tensor_util.MakeNdarray(weights_node_tensor).shape)))
                skip_node_name.append(weights_node.name)
                output_graph_def.node.extend([weights_node])
                for key in original_node.attr:
                    new_node.attr[key].CopyFrom(original_node.attr[key])
                output_graph_def.node.extend([new_node])
            elif index in skip_list or node.name in skip_node_name:
                # Node was fused away or already emitted with updated weights.
                continue
            else:
                new_node = node_def_pb2.NodeDef()
                new_node.CopyFrom(node)
                output_graph_def.node.extend([new_node])
        return output_graph_def

    def do_transformation(self):
        """
        Execute the Conv2D/DepthwiseConv2dNative/Matmul + Mul fusion.
        :return: Transformed graph
        """
        input_node_map, _, node_name_list = self.parse_input_graph(
            self.input_graph)
        fuse_op_name = self.get_fuse_index(input_node_map, node_name_list)
        return self.generate_output_graph(self.input_graph, input_node_map,
                                          fuse_op_name)
| StarcoderdataPython |
1654572 | """Tests for the views of the sprints app."""
from django.test import TestCase, RequestFactory # NOQA
from mock import MagicMock
from .. import views
class BacklogViewTestCase(object):
    """Tests for the ``BacklogView`` view class.

    NOTE(review): this class subclasses ``object`` rather than the
    ``TestCase`` imported above with ``# NOQA``, so standard test runners
    will not collect it -- confirm whether it was disabled on purpose.
    """
    # unittest flag: include the custom msg= text in assertion failures.
    longMessage = True
    def setUp(self):
        """Stub the Trello client: a board with two lists, where the
        'Backlog' list holds cards whose names may carry a time estimate
        in parentheses, e.g. 'Something (5)'."""
        super(BacklogViewTestCase, self).setUp()
        self.mock_board = MagicMock()
        mock_list = MagicMock()
        mock_list.name = 'Foobar'
        self.mock_backlog = MagicMock()
        self.mock_backlog.name = 'Backlog'
        self.mock_lists = [mock_list, self.mock_backlog]
        mock_card1 = MagicMock()
        mock_card1.id = 1
        mock_card1.name = 'Foobar'
        mock_card2 = MagicMock()
        mock_card2.id = 2
        mock_card2.name = 'Something (5)'
        mock_card3 = MagicMock()
        mock_card3.id = 3
        mock_card3.name = 'Something else (120)'
        self.mock_backlog.list_cards.return_value = [
            mock_card1, mock_card2, mock_card3]
        # Patch at class level so the view under test sees the mock board.
        views.TrelloClient.get_board = MagicMock(return_value=self.mock_board)
        self.mock_board.get_lists.return_value = self.mock_lists
    def test_view(self):
        """View renders with and without query params; estimates add up."""
        req = RequestFactory().get('/')
        resp = views.BacklogView.as_view()(req)
        self.assertEqual(resp.status_code, 200, msg=('Should be callable'))
        req = RequestFactory().get('/?board=foo&rate=100')
        resp = views.BacklogView.as_view()(req)
        self.assertEqual(resp.status_code, 200, msg=('Should be callable'))
        # 5 + 120 from the two estimated cards above.
        self.assertEqual(resp.context_data['total_time'], 125, msg=(
            'Should iterate through all cards and add up the estimated time'))
class SprintViewTestCase(object):
    """Tests for the ``SprintView`` view class.

    NOTE(review): subclasses ``object`` rather than the ``TestCase``
    imported above with ``# NOQA``, so standard test runners will not
    collect it -- confirm whether it was disabled on purpose.
    """
    # unittest flag: include the custom msg= text in assertion failures.
    longMessage = True
    def setUp(self):
        """Stub the Trello board (with a sprint list of cards) and the
        Freckle time-tracking entries the view aggregates."""
        super(SprintViewTestCase, self).setUp()
        self.mock_board = MagicMock()
        mock_list = MagicMock()
        mock_list.name = 'Foobar'
        self.mock_sprint = MagicMock()
        self.mock_sprint.name = 'Sprint-2014-10-20'
        self.mock_lists = [mock_list, self.mock_sprint]
        mock_card1 = MagicMock()
        mock_card1.id = 1
        mock_card1.short_id = 1
        mock_card1.name = 'Foobar'
        mock_card2 = MagicMock()
        mock_card2.id = 2
        mock_card2.name = 'Something (5)'
        mock_card2.short_id = 2
        mock_card3 = MagicMock()
        mock_card3.id = 3
        mock_card3.short_id = 3
        mock_card3.name = 'Something else (120)'
        self.mock_sprint.list_cards.return_value = [
            mock_card1, mock_card2, mock_card3]
        views.TrelloClient.get_board = MagicMock(return_value=self.mock_board)
        self.mock_board.get_lists.return_value = self.mock_lists
        mock_entry1 = MagicMock()
        # Entry 2 behaves like a real dict: 'c2' in the description ties it
        # to card short_id 2, and it carries 10 tracked minutes.
        entry2_dict = {'description': 'blabla c2 blabla', 'minutes': 10}
        def getitem(name):
            return entry2_dict[name]
        mock_entry2 = MagicMock(spec_set=dict)
        mock_entry2.__getitem__.side_effect = getitem
        mock_entry3 = MagicMock()
        self.mock_entries = [mock_entry1, mock_entry2, mock_entry3]
        views.Freckle.get_entries = MagicMock(return_value=self.mock_entries)
    def test_view(self):
        """View renders and sums the actual minutes from Freckle entries."""
        req = RequestFactory().get('/')
        resp = views.SprintView.as_view()(req)
        self.assertEqual(resp.status_code, 200, msg=('Should be callable'))
        req = RequestFactory().get(
            '/?board=foobar&sprint=Sprint-2014-10-20&project=foobar&rate=100')
        resp = views.SprintView.as_view()(req)
        self.assertEqual(resp.status_code, 200, msg=('Should be callable'))
        # Only mock_entry2 contributes real minutes (10).
        self.assertEqual(resp.context_data['total_actual_time'], 10, msg=(
            'Should iterate through all Freckle items and calculate actual'
            ' time'))
3307494 | <filename>functions.py
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from imports import (os, sns, pd, np, plt)
def createFolder(directory):
    """Create *directory* (and any missing parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` to avoid the check-then-create
    race of the original ``exists()``/``makedirs()`` pair. Any other OSError
    is reported with a message (matching the original behaviour) instead of
    propagating.
    """
    try:
        os.makedirs(directory, exist_ok=True)
    except OSError:
        print ('Error: Creating directory. ' + directory)
def find_yx(lat, lon, point_lat, point_lon):
    """Find the grid cell nearest (in Chebyshev distance, degrees) to
    (point_lat, point_lon).

    Assumes ``lat`` and ``lon`` are 2-D xarray DataArrays on the same grid
    carrying projection coordinates ``x`` and ``y`` -- TODO confirm.
    Returns (xx, yy): the ``x`` coordinate taken from ``lat`` and the ``y``
    coordinate taken from ``lon`` at the matched cell.
    NOTE(review): reading ``.x`` from ``lat`` but ``.y`` from ``lon`` looks
    asymmetric -- verify this is intended.
    """
    abs_lat = abs(lat - point_lat)
    abs_lon = abs(lon - point_lon)
    # Chebyshev (max of the two axis distances) field over the whole grid.
    c = np.maximum(abs_lat, abs_lon)
    # Indices of the minimising cell; ties resolved by taking the first.
    y, x = np.where(c == c.min())
    y = y[0]
    x = x[0]
    xx = lat[y, x].x
    yy = lon[y, x].y
    return(xx, yy)
# plot style
def plot_style():
sns.set_context('paper', font_scale=1.6)
sns.set(font = 'Serif', font_scale = 1.6, )
sns.set_style('ticks',
{'font.family':'serif', #'font.serif':'Helvetica'
'grid.linestyle': '--',
'axes.grid': True,
},
)
# Set the palette to the "pastel" default palette:
sns.set_palette("colorblind")
plot_style()
def concat_profile_all_days(df, Date, observation, _pres, _temp, _dwpt, _xwind, _ywind):
    """Bin a sounding into 50-unit-wide pressure layers centred on
    1000, 975, ..., 0 and append the per-layer means as a new column
    (labelled *Date*) to each accumulator frame.

    Parameters
    ----------
    df : DataFrame with columns PRES, TEMP, DWPT, x_wind, y_wind.
    Date : label used for the new column in every returned frame.
    observation : unused -- NOTE(review): kept only for interface
        compatibility with existing callers; confirm before removing.
    _pres, _temp, _dwpt, _xwind, _ywind : accumulator DataFrames, one
        column per previously processed date.

    Returns
    -------
    (_pres, _temp, _dwpt, _xwind, _ywind) with the new column appended.

    The original implementation grew ``_averaged`` with ``pd.concat``
    inside the loop (quadratic); the means are now collected in a list
    and concatenated once.
    """
    levels = np.arange(1000, -25, -25)
    layer_means = []
    for level in levels:
        # Rows whose pressure falls in (level-25, level+25].
        in_layer = (df.PRES > level - 25) & (df.PRES <= level + 25)
        col = df.where(in_layer).mean()
        col.name = level  # becomes the row label after the transpose
        layer_means.append(col)
    _averaged = pd.concat(layer_means, axis=1).T
    # Append this date's profile to each accumulator.
    _pres = pd.concat([_pres, _averaged.PRES], axis=1).rename(columns={'PRES': Date})
    _temp = pd.concat([_temp, _averaged.TEMP], axis=1).rename(columns={'TEMP': Date})
    _dwpt = pd.concat([_dwpt, _averaged.DWPT], axis=1).rename(columns={'DWPT': Date})
    _xwind = pd.concat([_xwind, _averaged.x_wind], axis=1).rename(columns={'x_wind': Date})
    _ywind = pd.concat([_ywind, _averaged.y_wind], axis=1).rename(columns={'y_wind': Date})
    return (_pres, _temp, _dwpt, _xwind, _ywind)
def plt_skewT(fig, skew, meps_run, p, T, Td, u, v,profile_time):
    """Plot temperature/dewpoint profiles and wind barbs for up to three
    MEPS runs on a skew-T diagram.

    Assumes ``skew`` is a metpy.plots.SkewT and p/T/Td/u/v are mappings
    run-name -> {profile_time -> profile series} -- TODO confirm shapes.
    """
    # One colorblind-palette colour per run (supports up to three runs,
    # matching the three barb x-locations below).
    cc = [sns.color_palette("colorblind",5)[2],
          sns.color_palette("colorblind",5)[1],
          sns.color_palette("colorblind",5)[0]]
    for meps, k, xloc in zip(meps_run,cc,[0.8, 0.9, 1.]):
        skew.plot(p[meps][profile_time], T[meps][profile_time], color= k, label = meps)
        skew.plot(p[meps][profile_time], Td[meps][profile_time], color = k)
        skew.plot_barbs(p[meps][profile_time], u[meps][profile_time], v[meps][profile_time],color=k, xloc=xloc)
    skew.ax.set_ylim(1000, 100)
    # Add the relevant special lines
    skew.plot_dry_adiabats()
    skew.plot_moist_adiabats()
    skew.plot_mixing_lines()
    # Good bounds for aspect ratio
    skew.ax.set_xlim(-35, 40)
    skew.ax.text(0.05, 1, 'Vertical profile mean - Stavanger: {} UTC'.format(profile_time), transform=skew.ax.transAxes,
                 fontsize=14, verticalalignment='bottom',)# bbox='fancy')
    plt.legend(loc = 'lower left', fancybox = True, facecolor = 'white', title_fontsize = 16)
| StarcoderdataPython |
1760726 | <filename>run_realtime.py
import argparse
import logging
import matplotlib.pyplot as plt
import numpy as np
import torch.utils.data
from hardware.camera import RealSenseCamera
from hardware.device import get_device
from inference.post_process import post_process_output
from utils.data.camera_data import CameraData
from utils.visualisation.plot import save_results, plot_results
import time
logging.basicConfig(level=logging.INFO)
def parse_args():
    """Parse the command-line options for network evaluation.

    Returns an argparse.Namespace with the fields ``network``,
    ``use_depth``, ``use_rgb``, ``n_grasps`` and ``force_cpu``.
    """
    parser = argparse.ArgumentParser(description='Evaluate network')
    # (flags, keyword arguments) for each supported option.
    option_table = [
        (['--network'],
         dict(type=str,
              default='trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch32/epoch_19_iou_0.98',
              help='Path to saved network to evaluate')),
        (['--use-depth'],
         dict(type=int, default=1,
              help='Use Depth image for evaluation (1/0)')),
        (['--use-rgb'],
         dict(type=int, default=1,
              help='Use RGB image for evaluation (1/0)')),
        (['--n-grasps'],
         dict(type=int, default=1,
              help='Number of grasps to consider per image')),
        (['--cpu'],
         dict(dest='force_cpu', action='store_true', default=False,
              help='Force code to run in CPU mode')),
    ]
    for flags, kwargs in option_table:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()

    # Connect to the RealSense camera.
    logging.info('Connecting to camera...')
    cam = RealSenseCamera(device_id=846112071703)
    cam.connect()
    cam_data = CameraData(include_depth=args.use_depth, include_rgb=args.use_rgb)

    # Load the trained grasp network.
    logging.info('Loading model...')
    net = torch.load(args.network)
    logging.info('Done')

    # Pick the compute device (GPU unless --cpu was given).
    device = get_device(args.force_cpu)

    # Latest prediction state. Initialised so the final save below can tell
    # whether at least one frame was fully processed; the original code
    # raised NameError in the finally block (masking the real error) when
    # the very first iteration failed before these names were bound.
    rgb = depth = q_img = ang_img = width_img = None

    try:
        fig = plt.figure(figsize=(20, 20))
        while True:
            # Grab an aligned RGB + depth frame and preprocess it.
            image_bundle = cam.get_image_bundle()
            rgb = image_bundle['rgb']
            depth = image_bundle['aligned_depth']
            x, depth_img, rgb_img = cam_data.get_data(rgb=rgb, depth=depth)

            with torch.no_grad():
                xc = x.to(device)
                pred = net.predict(xc)

                # Quality, angle and width maps for grasp candidates.
                q_img, ang_img, width_img = post_process_output(pred['pos'], pred['cos'], pred['sin'], pred['width'])
                plot_results(fig=fig,
                             rgb_img=cam_data.get_rgb(rgb, False),
                             depth_img=np.squeeze(cam_data.get_depth(depth)),
                             grasp_q_img=q_img,
                             grasp_angle_img=ang_img,
                             no_grasps=args.n_grasps,
                             grasp_width_img=width_img)
                save_results(
                    rgb_img=cam_data.get_rgb(rgb, False),
                    depth_img=np.squeeze(cam_data.get_depth(depth)),
                    grasp_q_img=q_img,
                    id=time.time(),
                    grasp_angle_img=ang_img,
                    no_grasps=args.n_grasps,
                    grasp_width_img=width_img
                )
    finally:
        # Persist the last complete prediction (if any) on shutdown.
        if q_img is not None:
            save_results(
                rgb_img=cam_data.get_rgb(rgb, False),
                depth_img=np.squeeze(cam_data.get_depth(depth)),
                grasp_q_img=q_img,
                grasp_angle_img=ang_img,
                no_grasps=args.n_grasps,
                grasp_width_img=width_img
            )
| StarcoderdataPython |
42330 | <filename>Algorithms_medium/0034. Find First and Last Position of Element in Sorted Array.py
"""
0034. Find First and Last Position of Element in Sorted Array
Medium
Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value.
If target is not found in the array, return [-1, -1].
Follow up: Could you write an algorithm with O(log n) runtime complexity?
Example 1:
Input: nums = [5,7,7,8,8,10], target = 8
Output: [3,4]
Example 2:
Input: nums = [5,7,7,8,8,10], target = 6
Output: [-1,-1]
Example 3:
Input: nums = [], target = 0
Output: [-1,-1]
Constraints:
0 <= nums.length <= 105
-109 <= nums[i] <= 109
nums is a non-decreasing array.
-109 <= target <= 109
"""
class Solution:
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        """Return [first, last] indices of target in nums, or [-1, -1].

        Single linear scan: record the first match once, keep updating
        the last match on every subsequent hit.
        """
        first = last = -1
        for position, value in enumerate(nums):
            if value == target:
                if first == -1:
                    first = position
                last = position
        return [first, last]
# O(logn) solution
class Solution:
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        """Return [first, last] indices of target in nums, or [-1, -1].

        O(log n): binary-search the leftmost position of ``target`` and of
        ``target + 1``; the latter minus one is the last occurrence.
        """
        def leftmost(value):
            # Smallest index i with nums[i] >= value (i.e. bisect_left).
            left, right = 0, len(nums)
            while left < right:
                middle = (left + right) // 2
                if nums[middle] < value:
                    left = middle + 1
                else:
                    right = middle
            return left

        start = leftmost(target)
        # Slice avoids an IndexError when start == len(nums).
        if nums[start:start + 1] != [target]:
            return [-1, -1]
        return [start, leftmost(target + 1) - 1]
| StarcoderdataPython |
3249654 | <gh_stars>0
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
class NormalUserViewSet(viewsets.ModelViewSet):
    """
    A simple ViewSet for objects filtered by their 'owner' attribute.
    To use it, at minimum you'll need to provide the `serializer_class` attribute and
    the `model` attribute shortcut.

    NOTE(review): no ``get_queryset``/filtering is implemented here, so the
    "filtered by their 'owner' attribute" claim above is not enforced by
    this base class -- subclasses must supply it, or the docstring should
    be corrected.
    """
    # Only authenticated users may access these endpoints at all.
    permission_classes = [IsAuthenticated]
| StarcoderdataPython |
3265629 | from jno.commands.command import Command
from jno.commands.setdefault import SetDefault
from jno.commands.init import Init
from jno.commands.jnoserial import JnoSerial
from jno.commands.build import Build
from jno.commands.upload import Upload
from jno.commands.boards import Boards
from jno.commands.ports import Ports
from jno.commands.clean import Clean
from jno.util import formatted_help_string, JnoException
import os
from colorama import Fore
# Registry of user-facing commands, in the order they appear in `jno help`.
command_list = [Init, Build, Upload, JnoSerial, Boards, Ports, Clean, SetDefault]
class JnoHelp(Command):
    """The `jno help` command: prints usage/description for one command
    (when a name is supplied) or for every registered command."""
    help_name = "Help"
    help_usage = "jno help [command_name]"
    help_description = "Without arguments, prints usage and description for all supported commands. With a command name supplied, prints usage and description for specific command."
    def run(self,argv,location):
        """Print help for the command named by the last element of argv,
        or for all commands (plus this Help command) when argv is empty.

        Raises JnoException when the named command is unknown.
        """
        if len(argv) > 0:
            query_name = argv[-1]
            found_command = None
            # 'setlocal'/'setglobal' are both handled by SetDefault, whose
            # help_name would not match either alias directly.
            if query_name in ["setlocal", "setglobal"]:
                found_command = SetDefault
            else:
                for command in command_list:
                    if command.help_name.lower() == query_name.lower():
                        found_command = command
                        break
            if not found_command:
                # NOTE(review): stray apostrophe in this message
                # ("displayed';") -- cosmetic, left unchanged here.
                raise JnoException("help for command '{}' cannot be displayed'; command not found".format(query_name))
            print(formatted_help_string(found_command,surround=True))
            return
        # No argument: dump help for every command, framed by separators.
        print(Fore.CYAN+"======================")
        for command in command_list:
            print(formatted_help_string(command))
        print(Fore.CYAN+"----------------------")
        print(formatted_help_string(self))
        print(Fore.CYAN+"======================"+Fore.RESET)
| StarcoderdataPython |
3369398 | import os
from scrapy.utils.misc import load_object
from scrapyd.config import Config
def get_application(config=None):
    """Override the default Scrapyd get_application for Heroku.

    Reads the listening port from the $PORT environment variable and,
    when $PORT is set, binds to 0.0.0.0 instead of the configured address.
    Returns the Twisted application built by the configured app factory.
    """
    if config is None:
        config = Config()
    # Override http_port by $PORT environment variable in Heroku.
    # Override bind_address to 0.0.0.0 if $PORT exists
    # Note that the http_port has to be a string instead of int.
    config.cp['scrapyd'].update(
        http_port=os.environ.get('PORT', config.get('http_port')),
        bind_address='0.0.0.0' if os.environ.get('PORT') else config.get('bind_address')
    )
    apppath = config.get('application', 'scrapyd.app.application')
    appfunc = load_object(apppath)
    return appfunc(config)
# Module-level Twisted application object, picked up by twistd/scrapyd.
application = get_application()
| StarcoderdataPython |
3241197 | <reponame>felixkam93/climateObservation
import requests
import json
import csv
import datetime
import logging
import sys
# Read climate samples (humidity, temperature, timestamp) from a CSV file
# and POST each one to the climate API.
logging.basicConfig(filename='/home/pi/climate/climate.log',level=logging.DEBUG)

url = 'http://192.168.1.131:3000/api/climates'
ROOM_ID='581f6102c34ccfe0154577f7'
CSVFILE='/home/pi/climate/file.csv'

# Python 3 csv expects text mode; the original opened the file 'rb',
# which only worked on Python 2. newline='' is the documented way to
# hand a file to csv.reader.
with open(CSVFILE, 'r', newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    for row in csvreader:
        if len(row) > 0:
            humidity = float(row[0])
            temperature = float(row[1])
            # Timestamp in ctime-like form, e.g. 'Mon Jan  2 15:04:05 2006'.
            time = row[2].strip()
            time = datetime.datetime.strptime(time, "%a %b %d %H:%M:%S %Y")

            data = {}
            data['time'] = time.strftime("%Y-%m-%d %H:%M:%S")
            data['temperature'] = temperature
            data['humidity'] = humidity
            data['roomId'] = ROOM_ID

            # NOTE(review): headers is built but never passed to
            # requests.post, and the payload goes form-encoded (data=),
            # not as JSON -- confirm against the API before changing either.
            headers = {'Accept': 'application/json'}
            try:
                response = requests.post(url, data=data)
            except requests.exceptions.RequestException as e:
                # Abort on any connection/HTTP-layer failure.
                logging.error(e)
                sys.exit(1)

            # For a successful API call, the response code will be 2xx (OK).
            if response.ok:
                # Loading the response data into a dict variable.
                jData = json.loads(response.content)
                print("The response contains {0} properties".format(len(jData)))
                print("\n")
                print(response.content)
                for key in jData:
                    # BUG FIX: Python 3 print function; the original used a
                    # Python 2 print statement (SyntaxError on Python 3).
                    print(key + " : " + str(jData[key]))
| StarcoderdataPython |
3368459 | <gh_stars>1-10
""" Aula - 022 - Módulos e Pacotes
"""
'''
Modularização:
-> Surgiu no início da década de 60.
-> Sistemas ficando cada vez maiores.
-> Foco: dividir um programa grande.
-> Foco: aumentar a legibilidade.
-> Foco: facilitar a manutenção.
'''
# Teoria:
'''
def fatorial(n):
f = 1
for c in range(1, n+1):
f*=c -------------------uteis.py-----------------
return f |def fatorial(n): |
| f = 1 |
| for c in range(1, n+1): |
def dobro(n): | f*=c |
return n * 2 | return f |
| def dobro(n): |
| return n * 2 |
def triplo(n): | def triplo(n): |
return n * 3 | return n * 3 |
--------------------------------------------
^ Criação de um módulo!
num = int(input("Digite um valor"))
fat = fatorial(num)
print(f"O fatorial de {num} é {fat}")
Com o módulo uteis.py
import uteis
num = int(input("Digite um valor"))
fat = uteis.fatorial(num)
print(f"O fatorial de {num} é {fat}")
print(f'O dobro de {num} é {uteis.dobro(num)}')
print(f'O triplo de {num} é {uteis.triplo(num)}')
Vantagens de se usar modularização:
-> Organização do código
-> Facilidade na manutenção
-> Ocutação de código detalhado
-> Reutilização em outros projetos
>>> PACÓTES:
Junção de varios módulos, separados por assunto!
'''
| StarcoderdataPython |
82634 | <reponame>skandupmanyu/facet<gh_stars>0
import numpy as np
import pandas as pd
import pytest
from joblib import Parallel, delayed
from pandas.testing import assert_frame_equal
from facet.data import Sample
def test_sample_init(boston_df: pd.DataFrame, boston_target: str) -> None:
    """Sample() must reject malformed constructor arguments.

    Covers: non-DataFrame observations, missing target, unknown feature
    or target columns, a column used as both feature and target, and an
    unknown weight column.
    """
    # check handling of various invalid inputs

    # 1. sample parameter
    # 1.1 None
    with pytest.raises(ValueError):
        # noinspection PyTypeChecker
        Sample(observations=None, target_name=boston_target)

    # 1.2 not a DF
    with pytest.raises(ValueError):
        # noinspection PyTypeChecker
        Sample(observations=[], target_name=boston_target)

    # 2. no features and no target specified
    with pytest.raises(TypeError):
        # noinspection PyTypeChecker
        Sample(observations=boston_df, target_name=None)

    # store list of feature columns:
    f_columns = list(boston_df.columns)
    f_columns.remove(boston_target)

    # 2.1 invalid feature column specified
    with pytest.raises(KeyError):
        f_columns_invalid = f_columns.copy()
        f_columns_invalid.append("doesnt_exist")
        Sample(
            observations=boston_df,
            feature_names=f_columns_invalid,
            target_name=boston_target,
        )

    # 2.2 invalid target column specified
    with pytest.raises(KeyError):
        Sample(
            observations=boston_df,
            feature_names=f_columns,
            target_name="doesnt_exist",
        )

    # 3. column is target and also feature
    with pytest.raises(KeyError):
        f_columns_invalid = f_columns.copy()
        f_columns_invalid.append(boston_target)
        Sample(
            observations=boston_df,
            feature_names=f_columns_invalid,
            target_name=boston_target,
        )

    # 4. weight column is not defined
    with pytest.raises(KeyError):
        Sample(
            observations=boston_df,
            target_name=boston_target,
            weight_name="doesnt_exist",
        )
def test_sample(boston_df: pd.DataFrame, boston_target: str) -> None:
    """Happy-path checks of Sample accessors, subsetting, and the
    determinism of the feature-column ordering across worker processes."""
    # define various assertions we want to test:
    def run_assertions(sample: Sample):
        # Target/weight expose the configured column; features exclude it.
        assert sample.target.name == boston_target
        assert sample.weight.name == boston_target
        assert boston_target not in sample.feature_names
        assert len(sample.feature_names) == len(boston_df.columns) - 1
        assert type(sample.target) == pd.Series
        assert type(sample.weight) == pd.Series
        assert type(sample.features) == pd.DataFrame
        assert len(sample.target) == len(sample.features)
    # test explicit setting of all fields
    feature_columns = list(boston_df.drop(columns=boston_target).columns)
    s = Sample(
        observations=boston_df,
        target_name=boston_target,
        feature_names=feature_columns,
        weight_name=boston_target,
    )
    # _rank_learners the checks on s:
    run_assertions(s)
    # test implicit setting of features by only giving the target
    s2 = Sample(
        observations=boston_df, target_name=boston_target, weight_name=boston_target
    )
    # _rank_learners the checks on s2:
    run_assertions(s2)
    # test numerical features
    features_numerical = s.features.select_dtypes(np.number).columns
    assert "LSTAT" in features_numerical
    # test categorical features
    features_non_numerical = s.features.select_dtypes(object).columns
    assert len(features_non_numerical) == 0
    # assert feature completeness: numeric + non-numeric covers everything
    assert (
        len(
            set(features_numerical)
            .union(set(features_non_numerical))
            .difference(s.feature_names)
        )
        == 0
    )
    # test length
    assert len(s) == len(boston_df)
    # test select_observations
    sub = s2.subsample(iloc=[0, 1, 2, 3])
    assert len(sub) == 4
    # test subset of features
    assert_frame_equal(
        s2.keep(feature_names=s2.feature_names[:10]).features, s2.features.iloc[:, :10]
    )
    with pytest.raises(ValueError):
        s2.keep(feature_names=["does not exist"])
    # test that s.features is a deterministic operation that does not depend on the
    # global python environment variable PYTHONHASHSEED
    parallel = Parallel(n_jobs=-3)
    def get_column(sample: Sample):
        return list(sample.features.columns)
    columns1, columns2 = parallel(delayed(get_column)(sample) for sample in [s, s])
    assert columns1 == columns2
| StarcoderdataPython |
38992 | # -*- coding: utf-8 -*-
"""Module where all interfaces, events and exceptions live."""
from . import _
from plone.app.vocabularies.catalog import CatalogSource
from plone.namedfile.field import NamedBlobImage
from plone.supermodel import model
from z3c.relationfield.schema import RelationChoice
from zope import schema
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
# Catalog source restricting the slide-link picker to pages and news items.
launches = CatalogSource(portal_type=("Document", "News Item"))
class IParrucFlexsliderLayer(IDefaultBrowserLayer):
    """Marker interface that defines a browser layer (active only when
    this add-on is installed on the site)."""
class ISlide(model.Schema):
    """Schema for a single slider slide: an image, an optional
    black-and-white conversion flag, and an optional link target.
    (Field titles are Italian i18n message strings; left untranslated.)
    """
    # Slide image; the 1200x300 size mentioned in the description is only
    # advisory -- nothing enforces it.
    image = NamedBlobImage(
        title=_("Immagine slide"),
        description=_(u"Dimensione consigliata 1200x300"),
        required=True,
    )
    # Whether to render the image in black and white.
    bw = schema.Bool(
        title=_(u"Convertire in bianco e nero?"),
        default=True,
    )
    # Optional content item (Document/News Item, per `launches`) the
    # slide links to.
    link = RelationChoice(
        title=_(u"Contenuto dal linkare nella slide"),
        source=launches,
        required=False,
    )
    # Mark `image` as the schema's primary field.
    model.primary('image')
| StarcoderdataPython |
3367895 | from rest_framework import generics
from rest_framework import permissions
from permissions import IsOwnerOrReadOnly
from bilgecode.apps.passage_planner.models import Passage
from serializers import PassageSerializer
from django.contrib.auth.models import User
from serializers import UserSerializer
class PassageList(generics.ListCreateAPIView):
    """GET lists all passages; POST creates one. Anonymous users get
    read-only access (IsAuthenticatedOrReadOnly)."""
    queryset = Passage.objects.all()
    serializer_class = PassageSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class PassageDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single passage; writes additionally
    require the requester to be the passage's owner (IsOwnerOrReadOnly)."""
    queryset = Passage.objects.all()
    serializer_class = PassageSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly,)
| StarcoderdataPython |
3245195 | from skeleton import SkeletonClass
def main():
    """Entry point: instantiate SkeletonClass and issue its greeting."""
    SkeletonClass().greet()
if __name__ == "__main__":
main()
| StarcoderdataPython |
# Demonstrates Python scoping rules: a plain assignment inside a function
# creates a *local* name, while `global` rebinds the module-level one.
my_name = 'Ryu'
print(my_name)
def print_name():
    # Plain assignment -> new local variable; the module-level my_name is
    # untouched, so the print after this call still shows 'Ryu'.
    my_name = "Crystal"
    print(f'Name is {my_name}')
print_name()
print(my_name)
def print_name_again():
    # `global` makes the assignment below rebind the module-level my_name.
    global my_name
    my_name = "yoshi"
    print(f'Name is {my_name}')
print_name_again()
print(my_name)
1668449 | <reponame>SousaPedro11/fail2ban-telegram
from flask import Flask
from flask_restful import Api
# Shared Flask-RESTful Api instance, bound to the app in create_app().
api = Api()
def create_app(config_name='config.Config'):
    """Application factory: build the Flask app, register the blueprints
    and attach the shared Api instance.

    :param config_name: dotted path of the configuration object to load.
    :return: the configured Flask application.
    """
    app = Flask(__name__)
    app.config.from_object(config_name)

    # Register the HTTPAuth blueprint.
    from app.authorization import http_auth
    app.register_blueprint(http_auth)

    # Register the core application blueprint.
    from app.telegram import telegram_restful
    app.register_blueprint(telegram_restful)

    # Register the error-handler blueprint.
    from app.errors import errors_bp
    app.register_blueprint(errors_bp)

    api.init_app(app)

    return app
| StarcoderdataPython |
1678625 | from .transforms import *
from .readers import *
from .outputs import *
from .evaluations import *
| StarcoderdataPython |
1699075 | <reponame>Fenghuapiao/PyLeetcode<gh_stars>1-10
# The read4 API is already defined for you.
# @param buf, a list of characters
# @return an integer
# def read4(buf):
class Solution(object):
    def read(self, buf, n):
        """Read up to *n* characters into *buf* via the read4 API.

        :type buf: Destination buffer (List[str]), filled in place
        :type n: Maximum number of characters to read (int)
        :rtype: The number of characters actually read (int)
        """
        total = 0
        chunk = [""] * 4
        while total < n:
            fetched = read4(chunk)
            if not fetched:
                # Source exhausted before n characters were read.
                break
            # Copy at most what the caller still wants.
            usable = min(fetched, n - total)
            for offset in range(usable):
                buf[total + offset] = chunk[offset]
            total += usable
        return total
3235831 | # Write a function named collatz() that has one parameter named number. If number is even, then collatz() should print number // 2 and return this value. If number is odd, then collatz() should print and return 3 * number + 1.
#
# Then write a program that lets the user type in an integer and that keeps calling collatz() on that number until the function returns the value 1. (Amazingly enough, this sequence actually works for any integer—sooner or later, using this sequence, you’ll arrive at 1! Even mathematicians aren’t sure why. Your program is exploring what’s called the Collatz sequence, sometimes called “the simplest impossible math problem.”)
#
# Remember to convert the return value from input() to an integer with the int() function; otherwise, it will be a string value.
# The output of this program could look something like this:
#
#
# Enter number:
# 3
# 10
# 5
# 16
# 8
# 4
# 2
# 1
import sys
def collatz(number):
    """Return the next Collatz value: number // 2 if even, else 3*number + 1.

    Uses floor division so the result stays an int; the original ``/``
    returned a float on Python 3, which callers had to re-cast for display.
    """
    if number % 2 == 0:
        return number // 2
    return 3 * number + 1
def run():
    """Interactively drive the Collatz sequence: read an integer, then
    repeatedly apply collatz() and print each value until 1 is reached.

    NOTE(review): inputs <= 1 are rejected as invalid, so entering 1
    itself exits immediately -- confirm that matches the intended spec.
    """
    print('Enter a non zero positive number')
    input_num = 0
    try:
        input_num = int(input())
        if input_num <= 1:
            print('Invalid Input Entered')
            sys.exit()
        while True:
            input_num = collatz(input_num)
            # Cast to int for display (collatz may return a float).
            print(int(input_num))
            if input_num == 1:
                print('You have reached 1.')
                break
    except ValueError:
        # Non-integer input from the user.
        print('Invalid Input entered.')
# Run the interactive loop at import/execution time.
run()
| StarcoderdataPython |
49113 | <filename>setup.py
from setuptools import setup
# Minimal packaging stub: only the version is declared here; the remaining
# metadata presumably lives in setup.cfg — TODO confirm.
setup(version='1.5')
| StarcoderdataPython |
4842819 | """
@author : <NAME>
@version : 1.0
"""
from django.db import models
class Class(models.Model):
    """A course offering that students can enrol in."""
    # Short unique course code, e.g. "CS101".
    code = models.CharField(max_length=10, unique=True)
    title = models.CharField(max_length=150)
    description = models.TextField(blank=True)
    def __iter__(self):
        # Must return an *iterator*; the original returned a bare list,
        # which makes iter(instance) raise TypeError.
        return iter([self.code, self.title])
class Student(models.Model):
    """A student who may be enrolled in any number of classes."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=70)
    # `null=True` was dropped: null has no effect on ManyToManyField
    # (Django warns with fields.W340).
    enrolled_to = models.ManyToManyField(Class, blank=True, related_name='enrolled_students')
    def __iter__(self):
        # Must return an *iterator*; the original returned a bare list,
        # which makes iter(instance) raise TypeError.
        return iter([self.first_name, self.last_name])
138645 | from PIL import Image
import pytesseract
import sys
from pdf2image import convert_from_path
import os
# Where the source PDFs live and where the extracted text is written.
DATASET_DIR = "../../adil-dataset"
TXT_DIR = os.path.join(DATASET_DIR, "txt")
if os.path.exists(TXT_DIR):
    print("Folder already exist")
else:
    os.mkdir(TXT_DIR)
    print("Txt folder created")
for filename in os.listdir(DATASET_DIR):
    if not filename.endswith(".pdf"):
        continue
    # os.listdir() yields bare names; join with the dataset dir so the PDF
    # is found regardless of the current working directory (the original
    # passed the bare name to convert_from_path, which fails unless the
    # script happens to run from inside DATASET_DIR).
    pdf_path = os.path.join(DATASET_DIR, filename)
    base_filename = os.path.splitext(filename)[0]
    # Rasterize every page at 500 DPI to temporary JPEGs for OCR.
    pages = convert_from_path(pdf_path, 500)
    page_files = []
    for count, page in enumerate(pages, start=1):
        page_file = "page_" + str(count) + ".jpg"
        page.save(page_file, 'JPEG')
        page_files.append(page_file)
    outfile = base_filename + ".txt"
    print(outfile)
    # OCR each page image, join words hyphenated across line breaks,
    # append to the per-document text file, then delete the temp image.
    # (Opened once with `with`; the original's trailing f.close() after
    # the with-block was redundant.)
    with open(os.path.join(TXT_DIR, outfile), "a") as f:
        for img_name in page_files:
            text = str(pytesseract.image_to_string(Image.open(img_name)))
            text = text.replace('-\n', '')
            f.write(text)
            os.remove(img_name)
| StarcoderdataPython |
3372186 | <reponame>JasXSL/ExiWoW-VH
# Used for MS & Config reading/writing
# Reads from screen and harddrive
from ctypes import windll, Structure, c_long, byref
import sys, os, json, subprocess, psutil, pyperclip
class vhWindows:
	"""Windows-side helper for the VibHub/WoW bridge.

	Reads a pixel colour from the screen (written by an in-game WeakAura),
	detects whether WoW is running, and persists user settings in conf.json.
	NOTE(review): these are *class* attributes, so a mutable like `cursor`
	is shared between instances; fine for the single-instance use here.
	"""
	cursor = {"x":0,"y":0}
	server = "vibhub.io"
	deviceID = "TestDevice"
	appName = "VH-WoW-Python"
	# Device context for the whole screen (ctypes / user32).
	dc = windll.user32.GetDC(0)
	# Last sampled pixel colour channels.
	r = 0
	g = 0
	b = 0
	# PID of the running WoW process, 0 when not found.
	wowPid = 0
	# Max intensity of output
	maxIntensity = 255
	minIntensity = 30
	# Percent of max intensity to add from taking damage
	hpRatio = 5
	# Event raised when WoW is started or stopped
	# Takes 1 argument which is true/false
	onWowStatus = None
	def init(self):
		"""Load persisted settings from conf.json (if present)."""
		self.getConfig()
	# Screen reader
	def updatePixelColor(self):
		"""Sample the pixel at self.cursor and split it into r/g/b."""
		parse = windll.gdi32.GetPixel(self.dc,self.cursor["x"],self.cursor["y"])
		# GetPixel returns COLORREF: 0x00BBGGRR.
		self.r = parse & 0xFF
		self.g = (parse >> 8) & 0xFF
		self.b = (parse >> 16) & 0xFF
		#print("Parse at ", self.cursor["x"], self.cursor["y"], "=", self.r, self.g, self.b)
	# Checks if WoW is running or not
	def processScan(self):
		"""Poll for the WoW process; fire onWowStatus on start/stop transitions."""
		#Scan for WoW
		if not self.wowPid:
			# Query WMIC for either the 64-bit or 32-bit client.
			cmd = 'WMIC PROCESS where "name=\'Wow-64.exe\' or name=\'Wow.exe\'" get Caption,Processid'
			proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
			for line in proc.stdout:
				spl = line.split()
				if len(spl) > 1 and (spl[0] == b'Wow-64.exe' or spl[0] == b'Wow.exe'):
					self.wowPid = int(spl[1])
					if self.onWowStatus:
						self.onWowStatus(True)
		# Make sure WoW process still exists
		elif not psutil.pid_exists(self.wowPid):
			self.wowPid = 0
			if self.onWowStatus:
				self.onWowStatus(False)
	def saveConfig(self):
		"""Persist the current settings to conf.json in the working directory."""
		confFile = open("conf.json", "w")
		confFile.write(json.dumps({
			"cursor" : [self.cursor["x"],self.cursor["y"]],
			"server" : self.server,
			"deviceID" : self.deviceID,
			"maxIntensity" : self.maxIntensity,
			"hpRatio" : self.hpRatio,
			"minIntensity" : self.minIntensity
		}))
		confFile.close()
	def getConfig(self):
		"""Load settings from conf.json; missing file or keys keep defaults."""
		try:
			confFile = open("conf.json", "r")
			js = json.loads(confFile.read())
			confFile.close()
			if "cursor" in js and isinstance(js["cursor"], list) and len(js["cursor"]):
				self.cursor["x"] = js["cursor"][0]
			if "cursor" in js and isinstance(js["cursor"], list) and len(js["cursor"]) > 1:
				self.cursor["y"] = js["cursor"][1]
			if "server" in js:
				self.server = js["server"]
			if "deviceID" in js:
				self.deviceID = js["deviceID"]
			if "maxIntensity" in js:
				self.maxIntensity = js["maxIntensity"]
			if "hpRatio" in js:
				self.hpRatio = js["hpRatio"]
			if "minIntensity" in js:
				# Clamp so min can never exceed max.
				self.minIntensity = min(js["minIntensity"], self.maxIntensity)
			print("Loaded settings:")
			print("  DeviceID: ", self.deviceID)
			print("  Server: ", self.server)
			print("  Max Intens: ", self.maxIntensity)
			print("  Min Intens: ", self.minIntensity)
			print("  Cursor: ", self.cursor["x"], self.cursor["y"])
			print("Start the program with reset as an argument to reconfigure")
		except FileNotFoundError:
			pass
	def copyWeakaura(self):
		"""Copy the bundled WeakAura import string to the clipboard, if present."""
		try:
			confFile = open("weakaura.txt", "r")
			data = confFile.read()
			confFile.close()
			pyperclip.copy(data)
		except FileNotFoundError:
			pass
| StarcoderdataPython |
1633264 | <filename>server/analysis/tests/test_preprocessing.py
#
# OtterTune - test_preprocessing.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import unittest
import numpy as np
from analysis.preprocessing import DummyEncoder, consolidate_columnlabels
class TestDummyEncoder(unittest.TestCase):
    """Unit tests for DummyEncoder's one-hot handling of categorical columns."""
    def test_no_categoricals(self):
        # With no categorical features the data must pass through untouched.
        data = [[1, 2, 3], [4, 5, 6]]
        labels = ['a', 'b', 'c']
        enc = DummyEncoder([], [], [], labels)
        encoded = enc.fit_transform(data)
        self.assertTrue(np.all(data == encoded))
        self.assertEqual(labels, enc.new_labels)
    def test_simple_categorical(self):
        # One categorical column (index 0, 3 levels) expands into 3 one-hot columns.
        data = [[0, 1, 2], [1, 1, 2], [2, 1, 2]]
        enc = DummyEncoder([3], [0], ['label'], ['a', 'b'])
        encoded = enc.fit_transform(data)
        expected = [[1, 0, 0, 1, 2], [0, 1, 0, 1, 2], [0, 0, 1, 1, 2]]
        expected_labels = ['label____0', 'label____1', 'label____2', 'a', 'b']
        self.assertTrue(np.all(expected == encoded))
        self.assertEqual(expected_labels, enc.new_labels)
    def test_mixed_categorical(self):
        # Categorical column in the middle (index 1) is moved to the front.
        data = [[1, 0, 2], [1, 1, 2], [1, 2, 2]]
        enc = DummyEncoder([3], [1], ['label'], ['a', 'b'])
        encoded = enc.fit_transform(data)
        expected = [[1, 0, 0, 1, 2], [0, 1, 0, 1, 2], [0, 0, 1, 1, 2]]
        expected_labels = ['label____0', 'label____1', 'label____2', 'a', 'b']
        self.assertTrue(np.all(expected == encoded))
        self.assertEqual(expected_labels, enc.new_labels)
    def test_consolidate(self):
        # Dummy-encoded label names collapse back to their base names.
        labels = ['label1____0', 'label1____1', 'label2____0', 'label2____1', 'noncat']
        self.assertEqual(['label1', 'label2', 'noncat'],
                         consolidate_columnlabels(labels))
    def test_inverse_transform(self):
        # Round trip: inverse_transform must restore the original matrix.
        data = [[1, 0, 2], [1, 1, 2], [1, 2, 2]]
        enc = DummyEncoder([3], [1], ['label'], ['a', 'b'])
        encoded = enc.fit_transform(data)
        expected = [[1, 0, 0, 1, 2], [0, 1, 0, 1, 2], [0, 0, 1, 1, 2]]
        self.assertTrue(np.all(encoded == expected))
        self.assertTrue(np.all(data == enc.inverse_transform(encoded)))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
50309 | <reponame>mfomicheva/OpenNMT-tf
# -*- coding: utf-8 -*-
import os
import tensorflow as tf
from opennmt.utils import Vocab
from opennmt.tests import test_util
class VocabTest(tf.test.TestCase):
  # Tests for opennmt.utils.Vocab: counting, pruning, (de)serialization,
  # SentencePiece import, and padding to a multiple.
  def testSimpleVocab(self):
    """Frequency counting, index lookup, and pruning by size/frequency."""
    vocab = Vocab()
    self.assertEqual(0, vocab.size)
    vocab.add("toto")
    vocab.add("toto")
    vocab.add("toto")
    vocab.add("titi")
    vocab.add("titi")
    vocab.add("tata")
    self.assertEqual(3, vocab.size)
    self.assertEqual(1, vocab.lookup("titi"))
    self.assertEqual("titi", vocab.lookup(1))
    # Keep only the 2 most frequent tokens.
    pruned_size = vocab.prune(max_size=2)
    self.assertEqual(2, pruned_size.size)
    self.assertEqual(None, pruned_size.lookup("tata"))
    # Keep only tokens seen at least 3 times.
    pruned_frequency = vocab.prune(min_frequency=3)
    self.assertEqual(1, pruned_frequency.size)
    self.assertEqual(0, pruned_frequency.lookup("toto"))
  def testVocabWithSpecialTokens(self):
    """Special tokens occupy the first indices and survive pruning."""
    vocab = Vocab(special_tokens=["foo", "bar"])
    self.assertEqual(2, vocab.size)
    vocab.add("toto")
    vocab.add("toto")
    vocab.add("toto")
    vocab.add("titi")
    vocab.add("titi")
    vocab.add("tata")
    self.assertEqual(5, vocab.size)
    self.assertEqual(3, vocab.lookup("titi"))
    pruned_size = vocab.prune(max_size=3)
    self.assertEqual(3, pruned_size.size)
    self.assertEqual(0, pruned_size.lookup("foo"))
    self.assertEqual(1, pruned_size.lookup("bar"))
  def testVocabSaveAndLoad(self):
    """Serialize to a file and reload; lookups must be preserved."""
    vocab1 = Vocab(special_tokens=["foo", "bar"])
    vocab1.add("toto")
    vocab1.add("toto")
    vocab1.add("toto")
    vocab1.add("titi")
    vocab1.add("titi")
    vocab1.add("tata")
    vocab_file = os.path.join(self.get_temp_dir(), "vocab.txt")
    vocab1.serialize(vocab_file)
    vocab2 = Vocab(from_file=vocab_file)
    self.assertEqual(vocab1.size, vocab2.size)
    self.assertEqual(vocab1.lookup("titi"), vocab2.lookup("titi"))
  def testLoadSentencePieceVocab(self):
    """SentencePiece vocab import drops <unk>/<s>/</s> control tokens."""
    vocab_path = test_util.make_data_file(
        os.path.join(self.get_temp_dir(), "vocab_sp"),
        [
            "<unk>	0",
            "<s>	0",
            "</s>	0",
            ",	-3.0326",
            ".	-3.41093",
            "▁the	-3.85169",
            "s	-4.05468",
            "▁die	-4.15914",
            "▁in	-4.2419",
            "▁der	-4.36135"
        ])
    vocab = Vocab(from_file=vocab_path, from_format="sentencepiece")
    self.assertEqual(len(vocab), 7)
    self.assertNotIn("<unk>", vocab)
    self.assertNotIn("<s>", vocab)
    self.assertNotIn("</s>", vocab)
    self.assertIn("▁the", vocab)
  def testVocabPadding(self):
    """pad_to_multiple grows the vocab so size + OOV buckets is a multiple."""
    vocab = Vocab()
    vocab.add("toto")
    vocab.add("titi")
    vocab.add("tata")
    self.assertEqual(vocab.size, 3)
    vocab.pad_to_multiple(6, num_oov_buckets=1)
    self.assertEqual(vocab.size, 6 - 1)
  def testVocabNoPadding(self):
    """No padding needed when size + OOV buckets is already a multiple."""
    vocab = Vocab()
    vocab.add("toto")
    vocab.add("titi")
    vocab.add("tata")
    self.assertEqual(vocab.size, 3)
    vocab.pad_to_multiple(4, num_oov_buckets=1)
    self.assertEqual(vocab.size, 3)
if __name__ == "__main__":
  # Allow running this test module directly.
  tf.test.main()
| StarcoderdataPython |
1635346 | <gh_stars>10-100
from requests import get
from sys import argv as args
from threading import Thread
from time import sleep, time
def sendMessage(headers):
    """Poll the EWT time-ranking endpoint roughly every 5.5 s, forever.

    `headers` must carry the session Cookie; each response body (or the
    request error) is printed.
    """
    while True:
        startTime = time()
        try:
            res = get('https://web.ewt360.com/customerApi/api/studyprod/lessonCenter/getUserTimeRanking', headers=headers, timeout=5)
            print(res.text)
        except Exception as e:
            print('Error! Message: %s' % e)
        endTime = time()
        # Sleep out the remainder of the 5.5 s period. Clamp at 0: the
        # original passed `5.5 - elapsed` straight to sleep(), which raises
        # ValueError (and kills the polling thread) whenever one iteration
        # takes longer than 5.5 s.
        sleep(max(0.0, 5.5 - (endTime - startTime)))
def keepLogin(cookies):
    """Start the background keep-alive poller and run a tiny console loop."""
    request_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.56",
        "Cookie": cookies
    }
    poller = Thread(target=sendMessage, args=(request_headers,))
    poller.setDaemon(True)
    poller.start()
    # Minimal REPL: "quit" exits, "clear" resets the terminal.
    while True:
        cmd = input()
        if cmd == "quit":
            print("Exit!")
            break
        if cmd == "clear":
            print('\033c', end='')
        else:
            print("Enter \"quit\" to exit.\r\nEnter \"clear\" to clear console.")
if __name__ == '__main__':
    # argv normally begins with the script path; drop it so only
    # user-supplied arguments remain (frozen .exe builds may not have it).
    if len(args) >= 1:
        if args[0].split('.')[-1] in ['py', 'pyw']:
            args.pop(0)
    if len(args) > 1:
        print("Args > 1!")
    elif len(args) == 0:
        # No cookie on the command line: prompt interactively.
        keepLogin(input("Please enter your cookies: "))
    elif len(args) == 1:
        if args[0] == "--help" or args[0] == "-h" or args[0] == "-?":
            print("EwtKeepLogin.exe \"you cookies...\"")
        else:
            # The single argument is the cookie string itself.
            keepLogin(args[0])
| StarcoderdataPython |
3256545 | <filename>speechrecog/dashmain.py<gh_stars>0
from jesica4 import create_dashboard
from jesica4 import command_light
from jesica4 import command_SoundSystem
from jesica4 import command_Door
from jesica4 import command_detectsound
# Build the Dash dashboard app and serve it (debug disabled).
app = create_dashboard()
app.run_server(debug=False) | StarcoderdataPython |
1630691 | import io
import os
import re
import shutil
import unittest
from orderedattrdict import AttrDict
from nose.tools import ok_
from . import folder
from gramex import variables
from gramex.install import init, _ensure_remove
from shutilwhich import which
class TestInit(unittest.TestCase):
    # Runs `gramex init` inside a scratch app directory and verifies the
    # generated scaffolding.
    # NOTE(review): setUp/tearDown are unittest *instance* hooks; decorating
    # them with @classmethod still works (the bound classmethod is callable)
    # but is unconventional — confirm intent.
    @classmethod
    def setUp(cls):
        cls.appname = 'test-gramex-init'
        cls.app_dir = os.path.join(folder, cls.appname)
        # Remember cwd so tearDown can restore it after test_init chdirs.
        cls.cwd = os.getcwd()
    def test_init(self):
        if os.path.exists(self.app_dir):
            shutil.rmtree(self.app_dir, onerror=_ensure_remove)
        os.makedirs(self.app_dir)
        os.chdir(self.app_dir)
        init([], AttrDict())
        # Ensure files are present
        source = os.path.join(variables['GRAMEXPATH'], 'apps', 'init')
        for path in os.listdir(source):
            path = path.replace('appname', self.appname.replace('-', '_'))
            ok_(os.path.exists(os.path.join(self.app_dir, path)), path + ' in init')
        # Ensure templates work
        with io.open(os.path.join(self.app_dir, 'gramex.yaml'), encoding='utf-8') as handle:
            line = handle.readline().strip()
            ok_('don\'t delete this line' in line)
            ok_(re.match(r'# Generated by gramex init 1\.\d+', line))
        # If Git LFS is present, ensure that it's set up to track assets/**
        if which('git-lfs'):
            path = os.path.join(self.app_dir, '.gitattributes')
            ok_(os.path.exists(path), 'Git LFS worked')
            with open(path, encoding='utf-8') as handle:
                ok_('assets/**' in handle.read(), 'Git LFS tracks assets/**')
            path = os.path.join(self.app_dir, '.gitignore')
            with open(path, encoding='utf-8') as handle:
                ok_('assets/**' not in handle.read(), '.gitignore allows assets/**')
        # Else, check that .gitignore does not commit assets/**
        else:
            path = os.path.join(self.app_dir, '.gitignore')
            with open(path, encoding='utf-8') as handle:
                ok_('assets/**' in handle.read(), '.gitignore allows assets/**')
    @classmethod
    def tearDown(cls):
        # Restore the working directory changed by test_init.
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.app_dir, onerror=_ensure_remove)
        except OSError:
            # Ideally, we should clean up the app_dir
            # But on Windows, npm / git may prevent this for some time. Ignore this
            pass
| StarcoderdataPython |
155188 | import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar
from pyzbar.pyzbar import ZBarSymbol
import sqlite3
from sqlite3 import Error
import time
class TimeStamp:
    """Clock employees in/out by scanning QR badges and update payroll.

    Each scan toggles the employee's state: the first scan records an
    entry time, the second computes hours worked and the salary due.
    """
    def __init__(self, db='payroll.db'):
        # Path to the SQLite payroll database file.
        self.db = db
    def scan_qr(self):
        """Read webcam frames until a QR code is decoded.

        Returns the raw QR payload (bytes), or None if the user presses
        ESC before a code is seen.
        """
        cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        try:
            while True:
                _, frame = cap.read()
                cv2.normalize(frame, frame, 0, 255, cv2.NORM_MINMAX)
                decodedObjects = pyzbar.decode(frame, symbols=[ZBarSymbol.QRCODE])
                for obj in decodedObjects:
                    return obj.data
                cv2.imshow("Frame", frame)
                if cv2.waitKey(1) == 27:  # ESC aborts the scan
                    break
        finally:
            # Always release the camera and close the preview window
            # (the original leaked both).
            cap.release()
            cv2.destroyAllWindows()
    def get_timeStamp(self):
        """Scan a badge and toggle the employee's clock-in/clock-out state."""
        try:
            emp_id = int(self.scan_qr())
            time.sleep(2)  # debounce: avoid re-reading the same badge
        except (TypeError, ValueError):
            # Scan aborted (None payload) or payload was not an integer id.
            return
        conn = None  # bound before try so the finally below is always safe
        try:
            conn = sqlite3.connect(self.db)
            c = conn.cursor()
            c.execute('select isWorking, entry_time from employee where emp_id = ?', (emp_id,))
            record = c.fetchone()
            if record is None:
                # Unknown badge id: nothing to update (the original raised
                # TypeError here).
                return
            if record[0] == 0:
                # Clocking in: store the current timestamp.
                c.execute("select datetime('now');")
                entry_time = c.fetchone()[0]
                c.execute("update employee \
                    set entry_time = ?, isWorking = ? \
                    where emp_id = ?", (entry_time, 1, emp_id))
                conn.commit()
            else:
                # Clocking out: whole hours elapsed since entry_time.
                entry_time = record[1]
                c.execute("Select Cast (( JulianDay('now') - JulianDay(?) ) * 24 As Integer);", (entry_time,))
                hours_worked = c.fetchone()[0]
                c.execute("update employee \
                    set hours_worked = ?, isWorking = 0, entry_time = NULL \
                    where emp_id = ?", (int(hours_worked) if hours_worked else 0, emp_id))
                self.compute_salary(emp_id, c)
                conn.commit()
        except Error as e:
            print(e)
        finally:
            # Close even when sqlite3.connect itself failed; the original
            # referenced `conn` unconditionally and raised NameError there.
            if conn is not None:
                conn.close()
    def compute_salary(self, id, c):
        """Compute pay for employee `id` from hours_worked and role rates.

        Hours up to the role's expected_hours are paid at `salary`, any
        excess at `ot_salary`. Writes employee.computed_salary through
        cursor `c`; the caller commits. (Parameter kept as `id` for
        backward compatibility despite shadowing the builtin.)
        """
        try:
            c.execute("select employee.hours_worked, roles.salary, roles.ot_salary, roles.expected_hours \
                from employee inner join roles \
                on employee.role = roles.role \
                where employee.emp_id = ?", (id,))
            hours_worked, salary, ot_salary, expected_hours = c.fetchone()
            overtime = 0 if hours_worked < expected_hours else (hours_worked - expected_hours)
            regular_hours = hours_worked - overtime
            computed_salary = salary * regular_hours + ot_salary * overtime
            c.execute("update employee \
                set computed_salary = ? \
                where emp_id = ?", (computed_salary, id))
        except Error as e:
            print(e)
| StarcoderdataPython |
83343 | import jiwer
import jiwer.transforms as tr
from jiwer import compute_measures
from typing import List
def compute_wer(predictions=None, references=None, concatenate_texts=False):
    """Compute the corpus word error rate with jiwer.

    If `concatenate_texts`, score everything in one call and return the
    full measures dict. Otherwise accumulate edit counts pair by pair and
    return the aggregate WER (incorrect / total).
    """
    if concatenate_texts:
        return compute_measures(references, predictions)
    incorrect = 0
    total = 0
    for prediction, reference in zip(predictions, references):
        measures = compute_measures(reference, prediction)
        incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
        total += measures["substitutions"] + measures["deletions"] + measures["hits"]
    # The original returned only the LAST pair's measures dict, silently
    # discarding the accumulated counts; return the aggregate rate instead,
    # mirroring compute_cer in this module.
    return incorrect / total
class SentencesToListOfCharacters(tr.AbstractTransform):
    """Transform that explodes sentence strings into individual characters."""
    def process_string(self, s):
        # A string splits directly into its characters.
        return list(s)
    def process_list(self, inp: List[str]):
        # Flatten: the characters of every sentence, in order.
        return [ch for sentence in inp for ch in self.process_string(sentence)]
# Pipeline applied to both truth and hypothesis before CER scoring:
# collapse runs of spaces, strip, then explode sentences into characters
# so jiwer's word-level machinery effectively counts character edits.
cer_transform = tr.Compose(
    [
        tr.RemoveMultipleSpaces(),
        tr.Strip(),
        SentencesToListOfCharacters(),
    ]
)
def compute_cer(predictions, references, concatenate_texts=False):
    """Corpus character error rate (uses cer_transform on both sides)."""
    if concatenate_texts:
        return jiwer.wer(
            references,
            predictions,
            truth_transform=cer_transform,
            hypothesis_transform=cer_transform,
        )
    error_count = 0
    reference_count = 0
    # Accumulate character-level edit counts over every (hyp, ref) pair.
    for hyp, ref in zip(predictions, references):
        m = jiwer.compute_measures(
            ref,
            hyp,
            truth_transform=cer_transform,
            hypothesis_transform=cer_transform,
        )
        error_count += m["substitutions"] + m["deletions"] + m["insertions"]
        reference_count += m["substitutions"] + m["deletions"] + m["hits"]
    return error_count / reference_count
if __name__ == "__main__":
    # Quick smoke test on a single prediction/reference pair.
    print(compute_wer(['my name is'],['my name']))
| StarcoderdataPython |
3296920 | import json
import os
import sys
import argparse
import numpy as np
import tensorflow as tf
import model, encoder
def score_tokens(*, hparams, tokens):
    """Build TF ops scoring `tokens` (1-D int tensor) under the GPT-2 model.

    Returns a 1-D tensor of log-probabilities, one per token after the
    first: entry i is log P(tokens[i+1] | tokens[:i+1]).
    """
    # tokens is 1d, but model expects a batch of token-lists, so make a batch of 1
    x = tf.stack([tokens])
    lm_output = model.model(hparams=hparams, X=x, past=None, reuse=tf.AUTO_REUSE)
    # lm_output['logits'] should have shape [batch_size, tokens_length, vocab_size],
    # but from the slice in sample.py, it seemed like this might not always be the case?
    assert lm_output['logits'].shape[2] == hparams.n_vocab
    # take the first tensor, since batch size is fixed at 1
    logits = lm_output['logits'][0]
    # logits has shape [tokens_length, vocab_size]
    # get actual probabilities, in same shape as logits
    probs = model.softmax(logits)
    # The probabilities are for its guesses about the next token after each position.
    # We want to look up the probability that it gave for what actually turned out to be the "true"
    # next token.
    next_tokens = tokens[1:]
    tokens_range = tf.range(tf.shape(next_tokens)[0])
    indices = tf.stack([tokens_range, next_tokens], axis=-1)
    # indices has shape [next_tokens_length, 2]. it is a list of [pos, token] that we want to lookup in probs
    probs_next = tf.gather_nd(probs, indices)
    # probs_next has shape [tokens_length-1], and has the predicted probability of each input token (after the first one)
    # Get log probabilities
    ln_probs_next = tf.log(probs_next)
    return ln_probs_next
def score_texts(*, model_name, texts, exclude_end, models_dir='models'):
    """Print per-token and total log-probabilities for each text.

    Each text is BPE-encoded, prefixed with <|endoftext|> as a start token
    (and suffixed with it too unless `exclude_end`), then scored in one
    session. Output: "<text>\t<total>" followed by one "<token>\t<logprob>"
    line per token and a blank separator.
    """
    models_dir = os.path.expanduser(os.path.expandvars(models_dir))
    enc = encoder.get_encoder(model_name, models_dir)
    hparams = model.default_hparams()
    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))
    end_token = enc.encoder['<|endoftext|>']
    start_token = end_token # it does double duty
    with tf.Session(graph=tf.Graph()) as sess:
        tokens_tensor = tf.placeholder(tf.int32, [None])
        output = score_tokens(hparams=hparams, tokens=tokens_tensor)
        saver = tf.train.Saver()
        ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
        saver.restore(sess, ckpt)
        for text in texts:
            # prepend the start token so that we get a probability for the first "real" token
            tokens = enc.encode(text)
            if not exclude_end:
                tokens += [end_token]
            tokens_with_start = [start_token] + tokens
            logprobs = sess.run(output, feed_dict={
                tokens_tensor: tokens_with_start,
            })
            logprobs_list = logprobs.tolist()
            assert len(logprobs_list) == len(tokens) # sanity check
            print('%s\t%.5g' % (text, sum(logprobs_list)))
            for t, lp in zip(tokens, logprobs_list):
                print('%s\t%.5g' % (enc.decoder[t], lp))
            print()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file')
    parser.add_argument('--model', default='124M')
    parser.add_argument('--exclude-end', action='store_true')
    args = parser.parse_args()
    # "-" means read the texts from stdin, one per line.
    if args.input_file == '-':
        input_f = sys.stdin
    else:
        # NOTE(review): the handle is never closed explicitly; it is
        # released on process exit.
        input_f = open(args.input_file, 'r')
    texts = []
    for line in input_f:
        sline = line.strip()
        # Skip blank lines.
        if not sline:
            continue
        texts.append(sline)
    score_texts(model_name=args.model, texts=texts, exclude_end=args.exclude_end)
| StarcoderdataPython |
3235566 | <filename>machine-learning/ml-algos/linear_regression.py
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
def abline(slope, intercept):
    """Plot a line from slope and intercept"""
    # Span the current x-axis limits so the line crosses the whole plot.
    x_span = np.array(plt.gca().get_xlim())
    y_span = intercept + slope * x_span
    plt.plot(x_span, y_span, '--')
def main():
    """Prompt for (x, y) points, fit y = m*x + c by least squares, and plot."""
    n = int(input("Enter number of data points"))
    xs = []
    ys = []
    prompt = "Enter the {value}th example in the training set."
    for i in range(n):
        xi, yi = input(prompt.format(value=i)).split()
        xs.append(int(xi))
        ys.append(int(yi))
    print("The dataset contains the values")
    print(xs)
    print(ys)
    # Closed-form simple linear regression (normal equations).
    sum_x = sum(xs)
    sum_x2 = sum(v * v for v in xs)
    sum_y = sum(ys)
    sum_xy = sum(a * b for a, b in zip(xs, ys))
    m = ((n * sum_xy) - (sum_x * sum_y)) / ((n * sum_x2) - (sum_x * sum_x))
    c = (sum_y - m * sum_x) / n
    print("y = mx + c with c and m values respectively are:")
    print(c, m)
    plt.scatter(xs, ys)
    abline(m, c)
    plt.show()
if __name__ == '__main__':
    # Run the interactive fit when executed as a script.
    main()
| StarcoderdataPython |
1756048 | # coding=utf-8
# Copyright 2021 Pandora Media, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scooch import Configurable
from scooch import Param
class Augmenter(Configurable):
    """
    Base class for feature augmenters; subclasses implement _get_augmentation.
    """
    _augmentations_per_sample = Param(int, default=3, doc="The number of augmentations returned for each input sample")
    def augment(self, sample):
        # Draw the configured number of independent augmentations.
        results = []
        for _ in range(self._augmentations_per_sample):
            results.append(self._get_augmentation(sample))
        return results
    def _get_augmentation(self, sample):
        raise NotImplementedError(f"The augmenter class {self.__class__.__name__} has no defined method to augment a feature.")
class DCOffsetAugmenter(Augmenter):
    """
    Augments a sample by shifting it with a random DC offset.
    """
    _offset_variance = Param(float, default=1.0, doc="The variance of random offset values applied as data augmentations")
    def _get_augmentation(self, sample):
        # Gaussian offset with standard deviation sqrt(variance).
        dc_shift = np.random.normal(scale=np.sqrt(self._offset_variance))
        return sample + dc_shift
class NoiseAugmenter(Augmenter):
    """
    Takes in audio samples and augments them by adding noise, distributed uniformly on
    a logarithmic scale between the minimum and maximum provided noise values.
    """
    _noise_min = Param(float, default=-10.0, doc="Minimum RMS power of noise to be added to an audio sample (in dB)")
    # Declared as float (the original said `int`): the value is a dB level
    # whose default is already the float 10.0, matching _noise_min above.
    _noise_max = Param(float, default=10.0, doc="Maximum RMS power of noise to be added to an audio sample (in dB)")
    def _get_augmentation(self, sample):
        # Produce a random dB value for the noise
        power_db = np.random.rand()*(self._noise_max - self._noise_min) + self._noise_min
        # Convert to linear
        power_linear = 10.0**(power_db/10.0)
        # Synthesize and add the noise to the signal
        noise_data = np.random.normal(scale=power_linear, size=sample.shape)
        return sample + noise_data
| StarcoderdataPython |
3213076 | <filename>01-Featurization/featurizeRMSD.py
### Featurization based on RMSD from the native state for the protein folding trajectories
### Required packages: mdtraj, numpy
### @<NAME>, <EMAIL>
import mdtraj as md
import numpy as np
# Read the list of MD trajectories to featurize
# NOTE(review): the file handle from open("List") is never closed; it is
# released by the GC / on process exit.
trajnames = [ line.rstrip() for line in open("List","r") ]
# Read the native crystal structure
reference = md.load("native.pdb")
top = reference.topology
# Calculate RMSDs of all frames in MD trajectories.
# stride=8 keeps every 8th frame; atoms selected are the backbone (N, CA,
# C, O) plus CB. Each trajectory's RMSD array is saved as <trajname>.npy.
for trajname in trajnames:
    traj = md.load( trajname + ".xtc", top=top, stride=8)
    rmsd = md.rmsd(traj, reference, atom_indices=top.select("name 'CA' or name 'N' or name 'O' or name 'CB' or name 'C'"))
    np.save(trajname + ".npy", rmsd)
| StarcoderdataPython |
1733537 | import pandas as pd
from sklearn.linear_model import LogisticRegression
import pickle
if __name__ == '__main__':
    # create df
    # Titanic passenger data fetched straight from the seaborn-data repo.
    train = pd.read_csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/titanic.csv') # change file path
    # drop null values
    train.dropna(inplace=True)
    # features and target
    target = 'survived'
    features = ['pclass', 'age', 'sibsp', 'fare']
    # X matrix, y vector
    X = train[features]
    y = train[target]
    # model
    model = LogisticRegression()
    model.fit(X, y)
    # NOTE(review): this training accuracy is computed but never printed
    # or stored — presumably a notebook leftover.
    model.score(X, y)
    # Persist the fitted model for later serving.
    pickle.dump(model, open('model.pkl', 'wb'))
| StarcoderdataPython |
52438 | <gh_stars>0
"""
Given two binary strings a and b, return their sum as a binary string.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
Constraints:
1 <= a.length, b.length <= 104
a and b consist only of '0' or '1' characters.
Each string does not contain leading zeros except for the zero itself.
"""
#ALGORITHM -> TC (O(N + M)) & SpaceComplexity (O(max(N, M))) where N and M are length of input
"""
Convert a and b into integers x and y, x will be used to keep an answer, and y for the carry.
While carry is nonzero: y != 0:
Current answer without carry is XOR of x and y: answer = x^y.
Current carry is left-shifted AND of x and y: carry = (x & y) << 1.
Job is done, prepare the next loop: x = answer, y = carry.
Return x in the binary form.
"""
class Solution(object):
    def addBinary(self, a, b):
        """
        :type a: str
        :type b: str
        :rtype: str
        """
        x, y = int(a, 2), int(b, 2)
        # Ripple the carry until it dies out: XOR adds without carrying,
        # AND shifted left by one is the carry to fold back in.
        while y:
            x, y = x ^ y, (x & y) << 1
        return bin(x)[2:]
| StarcoderdataPython |
123718 | <reponame>zoek1/Birdwatching<filename>aave-server/app/scripts/get_contracts.py
import web3
import json
import os
# Connection settings come from the environment; ADDRESS_NETWORK falls back
# to a known deployment address.
network = os.getenv('NETWORK_URL')
address = os.getenv('ADDRESS_NETWORK', '0x9C6C63aA0cD4557d7aE6D9306C06C093A2e35408')
contract_path = os.getenv('CONTRACT_JSON_PATH')
# Load the compiled-contract JSON with a context manager so the file handle
# is closed (the original `json.load(open(...))` leaked it).
with open(contract_path) as abi_file:
    abi = json.load(abi_file)
w3 = web3.Web3(web3.HTTPProvider(network))
contract = w3.eth.contract(address=address, abi=abi["abi"])
# Static metadata for known reserve tokens, keyed by contract address.
# The inner keys must be string literals: the original used bare
# identifiers `name`/`abbrv`, which raises NameError at import time.
reserves = {
    '0x1BCe8A0757B7315b74bA1C7A731197295ca4747a': {
        'name': 'LEND',
        'abbrv': 'LEND'
    }
}
# Resolve the LendingPool address from the addresses-provider contract.
lending_pool = contract.functions.getLendingPool().call()
| StarcoderdataPython |
24159 | import os
import shutil
import pychemia
import tempfile
import unittest
class MyTestCase(unittest.TestCase):
    # End-to-end tests for pychemia's VASP input/output helpers, driven by
    # fixture files under tests/data/vasp_* (paths are cwd-relative, so the
    # suite must be launched from the repository root).
    def test_incar(self):
        """
        Test (pychemia.code.vasp) [INCAR parsing and writing]
        """
        # NOTE(review): debug leftover — prints the working directory.
        print(os.getcwd())
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_01/INCAR')
        self.assertEqual(len(iv), 12)
        self.assertEqual(iv.EDIFF, 1E-7)
        # Round-trip through a temporary file.
        # NOTE(review): reopening a NamedTemporaryFile by name fails on
        # Windows while it is still open — confirm platform expectations.
        wf = tempfile.NamedTemporaryFile()
        iv.write(wf.name)
        wf.close()
        # Reading a directory resolves its INCAR file.
        iv4dir = pychemia.code.vasp.read_incar('tests/data/vasp_01')
        self.assertEqual(iv, iv4dir)
        self.assertRaises(ValueError, pychemia.code.vasp.read_incar, 'tests/data')
        iv3 = pychemia.code.vasp.VaspInput(variables={'EDIFF': 1E-6})
        self.assertEqual(iv3['EDIFF'], 1E-6)
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_02')
        iv.EDIFF *= 1.3
        td = tempfile.mkdtemp()
        pychemia.code.vasp.write_incar(iv, td)
        # Unknown INCAR keys must be rejected.
        self.assertRaises(ValueError, iv.write_key, 'EDIF')
        shutil.rmtree(td)
    def test_bad_outcar(self):
        """
        Test (pychemia.code.vasp) [corrupted VASP OUTCAR]
        """
        vo = pychemia.code.vasp.VaspOutput('tests/data/vasp_04/OUTCAR')
        self.assertTrue(vo.is_finished)
    def test_encut_setup(self):
        """
        Test (pychemia.code.vasp) [ENCUT setup]
        """
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_06')
        # ENCUT given as a factor (1.2x) of the POTCAR's maximum ENMAX.
        iv.set_encut(ENCUT=1.2, POTCAR='tests/data/vasp_06/POTCAR')
        self.assertEqual(iv.ENCUT, 307)
        iv.set_rough_relaxation()
        self.assertEqual(iv.EDIFFG, -1E-2)
        iv.set_mit_settings()
    def test_vaspjob(self):
        """
        Test (pychemia.code.vasp) [VaspJob]
        """
        td = tempfile.mkdtemp()
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_06')
        kp = pychemia.code.vasp.read_kpoints('tests/data/vasp_06')
        self.assertEqual(kp.number_of_kpoints, 693)
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_06')
        vj = pychemia.code.vasp.VaspJob(workdir=td,)
        vj.initialize(st, kpoints=kp)
        vj.set_input_variables(iv)
        vj.write_poscar()
        vj.write_kpoints()
        vj.write_incar()
        shutil.rmtree(td)
    def test_outcar(self):
        """
        Test (pychemia.code.vasp) [outcar]
        """
        vo = pychemia.code.vasp.VaspOutput('tests/data/vasp_06/OUTCAR')
        self.assertEqual(vo.get_memory_used()['grid'], (1028.0, 'kBytes'))
        self.assertAlmostEqual(vo.to_dict['energy'], -19.67192646)
        print(vo)
        self.assertTrue(vo.has_forces_stress_energy())
    def test_poscar(self):
        """
        Test (pychemia.code.vasp) [poscar]
        """
        # Temporal directory for outputs
        tmpdir = tempfile.mkdtemp()
        # Read a POSCAR by directory
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_06')
        self.assertEqual(st.natom, 4)
        # Opening old format POSCAR without POTCAR
        with self.assertRaises(ValueError) as context:
            st = pychemia.code.vasp.read_poscar('tests/data/vasp_07/POSCAR')
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_08/POSCAR_old')
        self.assertEqual(st.natom, 2)
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_08/POSCAR_new')
        self.assertEqual(st.natom, 2)
        with self.assertRaises(ValueError) as context:
            pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='/no/existing/path')
        with self.assertRaises(ValueError) as context:
            pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='tests/data')
        # Reading with a relative path from inside the data directory.
        cwd = os.getcwd()
        os.chdir('tests/data/vasp_07')
        st = pychemia.code.vasp.read_poscar('POSCAR_new')
        os.chdir(cwd)
        self.assertEqual(st.natom, 44)
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_07/POSCAR_alt')
        # Write in three formats (direct, cartesian, legacy) and verify
        # each round-trips to the same volume and space group.
        pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR1')
        pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR2', direct=False)
        pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR3', newformat=False)
        st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR1')
        self.assertAlmostEqual(st.volume, 584.47161926043907)
        sym = pychemia.crystal.CrystalSymmetry(st)
        self.assertEqual(sym.symbol(), 'C2/c')
        st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR2')
        self.assertAlmostEqual(st.volume, 584.47161926043907)
        sym = pychemia.crystal.CrystalSymmetry(st)
        self.assertEqual(sym.symbol(), 'C2/c')
        st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR3')
        self.assertAlmostEqual(st.volume, 584.47161926043907)
        sym = pychemia.crystal.CrystalSymmetry(st)
        self.assertEqual(sym.symbol(), 'C2/c')
        pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='tests/data')
        pychemia.code.vasp.get_potcar_info(tmpdir + os.sep + 'POTCAR')
        shutil.rmtree(tmpdir)
| StarcoderdataPython |
3242560 | #!/usr/bin/env python
import roslib
roslib.load_manifest('lg_common')
import rospy
import urllib
import json
from urlparse import urlparse
from std_msgs.msg import String
from std_msgs.msg import Bool
from lg_common.srv import USCSMessage, USCSMessageResponse, InitialUSCS, InitialUSCSResponse
from interactivespaces_msgs.msg import GenericMessage
from lg_common import USCSService
from lg_common.helpers import check_www_dependency
from lg_common.helpers import run_with_influx_exception_handler
# TODO implement this in the ros_cms side of things so
# that the initial scene can be set from the web interface
# instead of via hacky url
INITIAL_STATE = "http://lg-head:8088/director_api/initial_scene"
ON_OFFLINE_STATE = "http://lg-head:8088/director_api/on_offline_scene"
ON_ONLINE_STATE = "http://lg-head:8088/director_api/on_online_scene"
ON_INACTIVE_STATE = "http://lg-head:8088/director_api/on_inactive_scene"
ON_ACTIVE_STATE = "http://lg-head:8088/director_api/on_active_scene"
NODE_NAME = 'uscs_service'
def main():
    """Entry point for the uscs_service ROS node.

    Reads topic names and scene URLs from ROS parameters, optionally waits
    for the scene-repository web service to become reachable, then wires a
    USCSService instance to the director/offline/activity topics and exposes
    the current and initial USCS state as ROS services.
    """
    def set_url(param):
        # Resolve a scene URL from a ROS parameter. When the parameter is
        # set, block until the serving host is reachable (subject to
        # depend_on_scene_repository). NOTE: this closure reads
        # depend_on_scene_repository / global_dependency_timeout, which are
        # assigned below but before the first call to set_url.
        url = rospy.get_param(param, '')
        if url:
            scheme = urlparse(url)
            if not scheme.port:
                port = 80
            else:
                port = scheme.port
            check_www_dependency(depend_on_scene_repository,
                                 scheme.hostname,
                                 port, param[1:],
                                 global_dependency_timeout)
        return url

    rospy.init_node(NODE_NAME, anonymous=False)

    # Topic names and dependency-check configuration.
    director_topic = rospy.get_param('~director_topic', '/director/scene')
    message_topic = rospy.get_param('~message_topic', '/uscs/message')
    offline_topic = rospy.get_param('~offline_topic', '/lg_offliner/offline')
    activity_topic = rospy.get_param('~activity_topic', '/activity/active')
    depend_on_scene_repository = rospy.get_param('~depend_on_scene_repository', True)
    global_dependency_timeout = rospy.get_param('/global_dependency_timeout', 15)

    # Scene URLs for each lifecycle transition (empty string when unset).
    initial_state_scene_url = set_url('~initial_state_scene_url')
    on_online_state_scene_url = set_url('~on_online_state_scene_url')
    on_offline_state_scene_url = set_url('~on_offline_state_scene_url')
    on_active_state_scene_url = set_url('~on_active_state_scene_url')
    on_inactive_state_scene_url = set_url('~on_inactive_state_scene_url')

    director_scene_publisher = rospy.Publisher(
        director_topic, GenericMessage, queue_size=3)

    us = USCSService(
        initial_state_scene_url=initial_state_scene_url,
        on_online_state_scene_url=on_online_state_scene_url,
        on_offline_state_scene_url=on_offline_state_scene_url,
        on_active_state_scene_url=on_active_state_scene_url,
        on_inactive_state_scene_url=on_inactive_state_scene_url,
        director_scene_publisher=director_scene_publisher
    )

    # Track the current scene and react to connectivity/activity changes.
    rospy.Subscriber(director_topic, GenericMessage, us.update_uscs_message)
    rospy.Subscriber(offline_topic, Bool, us.handle_offline_message)
    rospy.Subscriber(activity_topic, Bool, us.handle_activity_message)

    # Services exposing the current and initial USCS state.
    rospy.Service(message_topic, USCSMessage, us.current_uscs_message)
    rospy.Service('/initial_state', InitialUSCS, us.initial_state)

    rospy.spin()


if __name__ == "__main__":
    # Run under the handler that reports uncaught exceptions to InfluxDB.
    run_with_influx_exception_handler(main, NODE_NAME)
| StarcoderdataPython |
3214001 | <filename>exec_date_aggs.py
'''
- This py script executes time aggregations for 1 year history of a data source, comprised of 12 separate tables.
- When combined with its corresponding string, it allowed for easy automation of many taks such as adding suffixes to new generated attributes.
- It also generates a log file specifying execution variables and runtime.
In practice it would be used mostly in 2 ways, setting P_FIRST and P_LAST during execution:
1. For monthly updates, simply set both values to the monthly date.
2. For reprocessing of several periods, set P_FIRST and P_LAST accordingly.
'''
# Libraries and Modeler API
import datetime
diagram = modeler.script.diagram()
stream = modeler.script.stream()
p_first = stream.getParameterValue("P_FIRST")
p_last = stream.getParameterValue("P_LAST")
# Data Directory
direc = "C:/.."
# Periods List
def periods_between(p_first, p_last):
    """Return monthly period codes 'YYMM' from p_first stepping BACK to p_last.

    Both arguments may be ints or strings in YYMM form (years are assumed
    to be 20xx). p_last must be the same month as, or earlier than, p_first.
    """
    first, last = str(p_first), str(p_last)
    current = datetime.date(2000 + int(first[0:2]), int(first[2:4]), 1)
    codes = []
    while True:
        code = "{:02d}{:02d}".format(current.year % 100, current.month)
        codes.append(code)
        if code == last:
            break
        # Step to the first day of the previous month.
        current = (current - datetime.timedelta(days=1)).replace(day=1)
    return codes
# Periods (YYMM codes) to process, from P_FIRST back to P_LAST.
periods_list = periods_between(p_first,p_last)

# Renaming on Filter Node:
# append the horizon suffix (_3M/_6M/_9M/_12M) to every attribute
# produced by the matching filter node, except the join key.
ID_FIELD = "IDFIELD"
renamers = [
    '_3M',
    '_6M',
    '_9M',
    '_12M',
]
for ren in renamers:
    node = diagram.findByType("filternode",ren)
    for field in node.getInputDataModel().nameIterator():
        if field == ID_FIELD:
            pass
        else:
            node.setKeyedPropertyValue("new_name", field, field + ren)

# Rename Aggregations: strip the aggregation suffix Modeler adds
# (_Sum / _Mean_Sum) and tag the field with its aggregation + horizon.
for period in ('_3M','_6M','_9M','_12M'):
    # DX (sums per horizon)
    node = diagram.findByType("filternode",'_DX' + period)
    for field in node.getInputDataModel().nameIterator():
        if field == ID_FIELD:
            pass
        else:
            field_cut = field.replace('_Sum','')
            node.setKeyedPropertyValue("new_name", field, field_cut + '_DX' + period)
    # AVGDX (averages per horizon)
    node = diagram.findByType("filternode",'_AVGDX' + period)
    for field in node.getInputDataModel().nameIterator():
        if field == ID_FIELD:
            pass
        else:
            field_cut = field.replace('_Mean_Sum','')
            node.setKeyedPropertyValue("new_name", field, field_cut + '_AVGDX' + period)

# Historia Rename & Run:
# for each requested period, compute the trailing 12 monthly codes,
# point the 12 import nodes at the matching monthly .sav files, point the
# export node at '<period>_12M.sav', and execute the stream.
periods = 12
iteration = 0
for period in periods_list:
    iteration = iteration + 1
    historia_list = []
    date = datetime.date(int('20'+str(period)[0:2]),int(str(period)[2:4]),1)
    h_iter = 0
    while True:
        if h_iter == 0:
            dxdate = date
        else:
            # First day of the previous month.
            dxdate = dxdate - datetime.timedelta(days=1)
            dxdate = dxdate.replace(day=1)
        dxperiod_year = str(dxdate.year)[2:4]
        if len(str(dxdate.month)) == 1:
            dxperiod_month = '0' + str(dxdate.month)
        else:
            dxperiod_month = str(dxdate.month)
        historia_list.insert(h_iter, str(dxdate.year)[2:4] + dxperiod_month)
        h_iter = h_iter + 1
        if h_iter == periods:
            break
    # Rename In/Out
    diagram.findByType("statisticsexport","out").setPropertyValue("full_filename", direc + period + "_12M.sav")
    for p in range(periods):
        diagram.findByType("statisticsimport",str(p + 1)).setPropertyValue("full_filename", direc + historia_list[p] + ".sav")
    # Run Stream
    diagram.findByType("dataaudit","QUALI").run(None)
    diagram.findByType("statisticsexport","out").run(None)
1623861 | <gh_stars>1-10
from typing import Union
from ansitable import ANSITable
from mathpad.val import Val
from mathpad.equation import Equation
def tabulate(*entities: Union[Val, Equation]):
    """Render each value or equation as a row of an ANSI table.

    Each row shows the entity next to its units.
    TODO: emit LaTeX instead when running in a capable IPython frontend.
    """
    table = ANSITable(" Entity ", " Units ", border="thick", bordercolor="blue")
    for item in entities:
        if isinstance(item, Val):
            cell = item.val
        else:
            cell = f"{item.lhs} == {item.rhs}"
        table.row(cell, item.units)
    table.print()
3336201 | <reponame>ctralie/IsometryBlindTimeWarping<filename>CoverSongSync.py
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import scipy.ndimage
import sys
import time
import pyrubberband as pyrb
from SlidingWindowVideoTDA.VideoTools import *
from Alignment.AlignmentTools import *
from Alignment.DTWGPU import *
from Alignment.AllTechniques import *
from GeometricCoverSongs.CSMSSMTools import *
from GeometricCoverSongs.BlockWindowFeatures import *
from GeometricCoverSongs.pyMIRBasic.AudioIO import *
from GeometricCoverSongs.pyMIRBasic.Onsets import *
from GeometricCoverSongs.SimilarityFusion import *
def getSimilarityFusedSSMs(Features):
    """Fuse the MFCC and chroma self-similarity matrices for one song.

    Args:
        Features: dict holding the block features under 'MFCCs' and 'Chromas'.

    Returns:
        dict with the fused SSM under 'D' plus the individual SSMs
        'DMFCCs' (via getCSM) and 'DChromas' (via cosine CSM).
    """
    X = Features['MFCCs']
    DMFCCs = getCSM(X, X)
    X = Features['Chromas']
    DChromas = getCSMCosine(X, X)
    Ds = [DMFCCs, DChromas]
    # Similarity network fusion across the two feature-specific SSMs.
    D = doSimilarityFusion(Ds, K = 5, NIters = 10, reg = 1, PlotNames = ['MFCCs', 'Chromas'])
    return {"D":D, "DMFCCs":DMFCCs, "DChromas":DChromas}
def doSync(FeatureParams, filename1, filename2, hopSize = 512, winFac = 2):
    """Compute and save similarity-fused SSMs for two audio files.

    No beat tracking is done: "beats" are evenly spaced hop indices
    (every winFac hops), so both songs are analyzed on a fixed grid.
    Results are written to D1.mat and D2.mat.
    NOTE: this module is Python 2 (print statements).
    """
    print "Loading %s..."%filename1
    (XAudio1, Fs) = getAudioLibrosa(filename1)
    print("Fs = ", Fs)
    print "Loading %s..."%filename2
    (XAudio2, Fs) = getAudioLibrosa(filename2)
    print("Fs = ", Fs)
    # Dummy tempo: features are blocked by hop count, not by actual beats.
    tempo = 120
    # Compute features in intervals evenly spaced by the hop size.
    nHops = int((XAudio1.size-hopSize*winFac*FeatureParams['MFCCBeatsPerBlock'])/hopSize)
    beats1 = np.arange(0, nHops, winFac)
    nHops = int((XAudio2.size-hopSize*winFac*FeatureParams['MFCCBeatsPerBlock'])/hopSize)
    beats2 = np.arange(0, nHops, winFac)
    print("Getting Features1...")
    (Features1, O1) = getBlockWindowFeatures((XAudio1, Fs, tempo, beats1, hopSize, FeatureParams))
    print("Getting Features2...")
    (Features2, O2) = getBlockWindowFeatures((XAudio2, Fs, tempo, beats2, hopSize, FeatureParams))
    D1 = getSimilarityFusedSSMs(Features1)
    D2 = getSimilarityFusedSSMs(Features2)
    sio.savemat("D1.mat", D1)
    sio.savemat("D2.mat", D2)
if __name__ == '__main__':
hopSize = 512
winFac = 2
zoom = 1
filename1 = "MJ.mp3"
filename2 = "AAF.mp3"
Fs = 22050
"""
print "Loading %s..."%filename1
(XAudio1, Fs) = getAudioLibrosa(filename1)
print "Loading %s..."%filename2
(XAudio2, Fs) = getAudioLibrosa(filename2)
initParallelAlgorithms()
"""
D1 = sio.loadmat("D1.mat")["D"]
D2 = sio.loadmat("D2.mat")["D"]
#D1 = D1[153:990, 153:990]
#D2 = D2[1:786, 1:786]
#offset1 = 153
#offset2 = 1
offset1 = 0
offset11 = int(D1.shape[0]/2)#1725
offset2 = 0
offset22 = int(D2.shape[0]/2)#1427
D1 = D1[offset1:offset11, offset1:offset11]
D2 = D2[offset2:offset22, offset2:offset22]
[I, J] = np.meshgrid(np.arange(D1.shape[0]), np.arange(D1.shape[1]))
D1[np.abs(I - J) < 8] = 0
[I, J] = np.meshgrid(np.arange(D2.shape[0]), np.arange(D2.shape[1]))
D2[np.abs(I - J) < 8] = 0
D1 = scipy.ndimage.interpolation.zoom(D1, zoom)
D2 = scipy.ndimage.interpolation.zoom(D2, zoom)
(D2, D1) = matchSSMDist(D2, D1)
pD1 = scipy.ndimage.interpolation.zoom(D1, 0.2)
pD2 = scipy.ndimage.interpolation.zoom(D2, 0.2)
floor = 1e-3
pD1[pD1 < floor] = floor
pD2[pD2 < floor] = floor
pD1 = np.log(pD1/floor)
pD2 = np.log(pD2/floor)
fac = (1.0/0.2)*float(hopSize*winFac)/Fs
plt.figure(figsize=(12, 3))
plt.subplot(131)
plt.imshow(pD1, extent = (0, pD1.shape[0]*fac, pD1.shape[1]*fac, 0), cmap = 'afmhot', interpolation = 'none')
plt.xlabel("Time (Sec)")
plt.ylabel("Time (Sec)")
plt.title("<NAME>")
plt.subplot(132)
plt.imshow(pD2, extent = (0, pD2.shape[0]*fac, pD2.shape[1]*fac, 0), cmap = 'afmhot', interpolation = 'none')
plt.title("Alien Ant Farm")
plt.xlabel("Time (Sec)")
plt.ylabel("Time (Sec)")
"""
CSWM = doIBDTWGPU(D1, D2, returnCSM = True)
(DAll, CSSM, backpointers, path) = DTWCSM(CSWM)
"""
"""
print("Doing IBSMWat...")
#matchfn = lambda x: np.exp(-x/(0.3**2))-0.6
hvPenalty = -0.4
#CSWM = doIBSMWatGPU(D1, D2, hvPenalty, True)
CSWM = sio.loadmat("PCSWM_winFac2.mat")["CSWM"]
#CSWM = doIBSMWat(D1, D2, matchfn, hvPenalty, Verbose = True)
CSWM = CSWM - np.median(CSWM)
CSWM = CSWM/np.max(np.abs(CSWM))
matchfn = lambda x: x
hvPenalty = -0.4
res = SMWat(CSWM, matchfn, hvPenalty, backtrace = True)
path = res['path']
path = np.flipud(path)
sio.savemat("CSWM.mat", {"CSWM":CSWM, "path":path})
"""
Saved = sio.loadmat("CSWM.mat")
CSWM = Saved['CSWM']
path = Saved['path']
#path = makePathStrictlyIncrease(path)
fac = float(hopSize*winFac)/Fs
plt.subplot(133)
plt.imshow(CSWM.T, extent = (0, CSWM.shape[0]*fac, CSWM.shape[1]*fac, 0), cmap = 'afmhot', interpolation = 'none')
plt.scatter(path[:, 0]*fac, path[:, 1]*fac, 5, 'm', edgecolor = 'none')
plt.title("PCSWM")
plt.ylabel("AAF Time (sec)")
plt.xlabel("MJ Time (sec)")
plt.savefig("CoverSongAlignment.png", bbox_inches = 'tight', dpi = 200)
"""
XFinal = np.array([[0, 0]])
fileprefix = ""
for i in range(path.shape[0]-1):
[j, k] = [path[i, 0], path[i, 1]]
[j2, k2] = [path[i+1, 0], path[i+1, 1]]
print("i = %i, j = %i, j2 = %i, k = %i, k2 = %i"%(i, j, j2, k, k2))
t1 = int(j*winFac*hopSize/zoom) + offset1*hopSize
t2 = int(j2*winFac*hopSize/zoom) + offset1*hopSize
s1 = int(k*winFac*hopSize/zoom) + offset2*hopSize
s2 = int(k2*winFac*hopSize/zoom) + offset2*hopSize
x1 = XAudio1[t1:t2]
x2 = XAudio2[s1:s2]
#Figure out the time factor by which to stretch x2 so it aligns
#with x1
fac = float(len(x1))/len(x2)
print "fac = ", fac
x2 = pyrb.time_stretch(x2, Fs, 1.0/fac)
print "len(x1) = %i, len(x2) = %i"%(len(x1), len(x2))
N = min(len(x1), len(x2))
x1 = x1[0:N]
x2 = x2[0:N]
X = np.zeros((N, 2))
X[:, 0] = x1
X[:, 1] = x2
if len(fileprefix) > 0:
filename = "%s_%i.mp3"%(fileprefix, i)
sio.wavfile.write("temp.wav", Fs, X)
subprocess.call(["avconv", "-i", "temp.wav", filename])
XFinal = np.concatenate((XFinal, X))
sio.wavfile.write("Synced.wav", Fs, XFinal)
"""
if __name__ == '__main__2':
FeatureParams = {'MFCCBeatsPerBlock':20, 'MFCCSamplesPerBlock':200, 'ChromaBeatsPerBlock':20, 'ChromasPerBlock':40}
#filename1 = "ELPOrig.webm"
#filename2 = "ELPCover.m4a"
filename1 = "MJ.mp3"
filename2 = "AAF.mp3"
doSync(FeatureParams, filename1, filename2)
| StarcoderdataPython |
3202352 | """
Download data from APIs
"""
from mimetypes import MimeTypes
from pathlib import Path
from typing import Iterator, Optional
from flask import Response, send_from_directory, stream_with_context
from werkzeug.utils import secure_filename
from restapi.config import DATA_PATH
from restapi.exceptions import NotFound
from restapi.services.uploader import Uploader
from restapi.utilities.logs import log
DEFAULT_CHUNK_SIZE = 1048576  # 1 MB


class Downloader:
    """Static helpers for serving files to HTTP clients.

    Small/displayable files (images, videos, PDFs) go through
    ``send_file_content``; large files go through ``send_file_streamed``,
    which streams chunks and forces a download.
    """

    @staticmethod
    def guess_mime_type(path: Path) -> Optional[str]:
        """Return the best-effort MIME type of *path*, or None when unknown."""
        # guess_type is documented against URLs, so it takes a str, not a Path.
        guessed, _encoding = MimeTypes().guess_type(str(path))
        return guessed

    # Good for small files, in particular displayable ones (images, videos,
    # PDFs), and for media served with a Range header.
    @staticmethod
    def send_file_content(
        filename: str,
        subfolder: Path,
        mime: Optional[str] = None,
    ) -> Response:
        """Serve *filename* from *subfolder* inline (no forced download).

        Raises NotFound when the (sanitized) file does not exist.
        """
        Uploader.validate_upload_folder(subfolder)
        safe_name = secure_filename(filename)
        target = subfolder.joinpath(safe_name)
        if not target.is_file():
            raise NotFound("The requested file does not exist")
        mimetype = Downloader.guess_mime_type(target) if mime is None else mime
        log.info("Sending file content from {}", target)
        # Deliberately no as_attachment=True: displayable files should render
        # in the browser rather than download.
        return send_from_directory(subfolder, safe_name, mimetype=mimetype)

    @staticmethod
    def read_in_chunks(
        path: Path, chunk_size: int = DEFAULT_CHUNK_SIZE
    ) -> Iterator[bytes]:
        """Lazily yield the contents of *path* in ``chunk_size`` pieces."""
        with open(path, "rb") as handle:
            chunk = handle.read(chunk_size)
            while chunk:
                yield chunk
                chunk = handle.read(chunk_size)

    # Good for large files.
    @staticmethod
    def send_file_streamed(
        filename: str,
        subfolder: Path,
        mime: Optional[str] = None,
        out_filename: Optional[str] = None,
    ) -> Response:
        """Serve *filename* as a streamed attachment.

        Raises NotFound when the (sanitized) file does not exist.
        """
        Uploader.validate_upload_folder(subfolder)
        safe_name = secure_filename(filename)
        target = subfolder.joinpath(safe_name)
        if not target.is_file():
            raise NotFound("The requested file does not exist")
        if mime is None:
            mime = Downloader.guess_mime_type(target)
        log.info("Providing streamed content from {} (mime={})", target, mime)
        body = stream_with_context(Downloader.read_in_chunks(target))
        response = Response(body, mimetype=mime)
        attachment_name = out_filename if out_filename else target.name
        response.headers["Content-Disposition"] = f"attachment; filename={attachment_name}"
        response.headers["Content-Length"] = target.stat().st_size
        return response
| StarcoderdataPython |
1702125 | from a2ml.api.base_a2ml import BaseA2ML
from a2ml.api.utils.show_result import show_result
class A2MLExperiment(BaseA2ML):
    """Experiment operations, dispatched to the configured AutoML provider(s).

    Every public method is wrapped by ``@show_result`` and returns a
    per-provider mapping of the form::

        {<provider>: {'result': <bool>, 'data': <payload or error>}}
    """

    def __init__(self, ctx, provider=None):
        """Initialize a new a2ml experiment.

        Args:
            ctx (object): An instance of the a2ml Context.
            provider (str): The automl provider(s) to run, for example
                'auger,azure,google'. The default None uses the provider
                set in the config.

        Returns:
            A2MLExperiment object

        Examples:
            .. code-block:: python

                ctx = Context()
                experiment = A2MLExperiment(ctx, 'auger, azure')
        """
        super(A2MLExperiment, self).__init__(ctx, 'experiment')
        # The runner fans every execute() call out to each provider.
        self.runner = self.build_runner(ctx, provider)

    @show_result
    def list(self):
        """List all of the experiments for the project specified in the .yaml.

        Note:
            Use the `iter <https://www.programiz.com/python-programming/methods/built-in/iter>`_
            function to access the experiment elements.

        Returns:
            Per-provider results::

                {'auger': {'result': True, 'data': {'experiments': <object>}}}

        Examples:
            .. code-block:: python

                ctx = Context()
                experiment_list = A2MLExperiment(ctx, 'auger, azure').list()
                for provider in ['auger', 'azure']:
                    if experiment_list[provider].result is True:
                        for experiment in iter(experiment_list[provider].data.experiments):
                            ctx.log(experiment.get('name'))
                    else:
                        ctx.log('error %s' % experiment_list[provider].data)
        """
        return self.runner.execute('list')

    @show_result
    def start(self):
        """Start experiment(s) for the selected dataset.

        If the experiment name is not set in the context config a new
        experiment is created, otherwise the existing one is run.

        Returns:
            Per-provider results::

                {'auger': {'result': True,
                           'data': {'experiment_name': <name>,
                                    'session_id': <session_id>}}}

        Examples:
            .. code-block:: python

                ctx = Context()
                experiment = A2MLExperiment(ctx, providers).start()
        """
        return self.runner.execute('start')

    @show_result
    def stop(self, run_id=None):
        """Stop running experiment(s).

        Args:
            run_id (str): The run id of a training session (a unique run id
                is created for every train). None stops the last train.

        Returns:
            Per-provider results::

                {'auger': {'result': True, 'data': {'stopped': <experiment_name>}}}

        Examples:
            .. code-block:: python

                ctx = Context()
                experiment = A2MLExperiment(ctx, providers).stop()
        """
        return self.runner.execute('stop', run_id)

    @show_result
    def leaderboard(self, run_id):
        """Leaderboard of a running or previously completed experiment run.

        Args:
            run_id (str): The run id of a training session. None uses the
                last experiment train.

        Returns:
            Per-provider results whose ``data`` contains:

            * ``run_id`` — the inspected training session,
            * ``leaderboard`` — list of dicts with 'model id', the score
              metric (e.g. 'rmse') and 'algorithm',
            * ``trials_count``, ``status`` and ``provider_status``.

            Status values: ``preprocess`` (preparing data for training),
            ``started`` (search in progress), ``completed``,
            ``interrupted``, ``error``.

        Examples:
            .. code-block:: python

                ctx = Context()
                leaderboard = A2MLExperiment(ctx, 'auger, azure').leaderboard()
                for provider in ['auger', 'azure']:
                    if leaderboard[provider].result is True:
                        for entry in iter(leaderboard[provider].data.leaderboard):
                            ctx.log(entry['model id'])
                        ctx.log('status %s' % leaderboard[provider].data.status)
                    else:
                        ctx.log('error %s' % leaderboard[provider].data)
        """
        return self.runner.execute('leaderboard', run_id)

    @show_result
    def history(self):
        """History of the running or previously completed experiment(s).

        Note:
            Use the `iter <https://www.programiz.com/python-programming/methods/built-in/iter>`_
            function to access the history elements.

        Returns:
            Per-provider results::

                {'auger': {'result': True, 'data': {'history': <object>}}}

        Examples:
            .. code-block:: python

                ctx = Context()
                history = A2MLExperiment(ctx, 'auger, azure').history()
                for provider in ['auger', 'azure']:
                    if history[provider].result is True:
                        for run in iter(history[provider].data.history):
                            ctx.log("run id: {}, status: {}".format(
                                run.get('id'),
                                run.get('status')))
                    else:
                        ctx.log('error %s' % history[provider].data)
        """
        return self.runner.execute('history')
| StarcoderdataPython |
1613945 | <filename>src/services/rp/rp.py<gh_stars>1-10
#! /usr/bin/env python3
import ssl
import jinja2
import yaml
from flask.app import Flask
from flask.globals import request, current_app
from flask.json import jsonify
from flask.templating import render_template
from jwkest.jwk import keyrep
from oic.oic.message import AuthorizationResponse
from werkzeug.utils import redirect
from oidc_fed.relying_party import RP
def init_oidc_fed_rp(cnf):
    """Create the federated OIDC Relying Party from the Flask config.

    Loads the YAML file named by RELYING_PARTY_CONFIG, which must provide
    the RP's root key ('root_key_jwk'), its software statements and the
    federations' verification keys ('federations_jwk').
    """
    name = cnf["SERVER_NAME"]
    with open(cnf["RELYING_PARTY_CONFIG"]) as f:
        rp_config = yaml.safe_load(f)
    root_key = keyrep(rp_config["root_key_jwk"])
    federation_keys = [keyrep(jwk) for jwk in rp_config["federations_jwk"]]
    # Final argument: the URL where this RP publishes its signed JWKS
    # (served by the /signed_jwks route below).
    return RP(name, root_key, rp_config["software_statements"], federation_keys, name + "/signed_jwks")
def init_app():
    """Build the Flask app: load config and templates, attach the RP."""
    app = Flask(__name__)
    # The config file path is taken from the environment.
    app.config.from_envvar("OIDCFED_RELYING_PARTY_CONFIG")
    template_loader = jinja2.FileSystemLoader(["templates", "../templates"])
    app.jinja_loader = template_loader
    # Share the OIDC-federation relying party through the app object.
    app.rp = init_oidc_fed_rp(app.config)
    return app


# Module-level app instance used by the route decorators below.
app = init_app()
@app.route("/")
def index():
return render_template("index.html", software_statements=[ss.jwt.headers["kid"] for ss in
current_app.rp.software_statements])
@app.route("/start", methods=["post"])
def make_authn():
issuer = request.form.get("issuer")
software_statement = request.form.get("software_statement")
response_type = request.form.get("response_type")
registration_data = {"response_types": [response_type]}
if software_statement:
registration_data["software_statements"] = current_app.rp.software_statements_jws[
int(software_statement)]
client_software_statement = current_app.rp.register_with_provider(issuer, registration_data)
args = {
"scope": ["openid profile"],
"response_type": response_type,
"redirect_uri": client_software_statement.msg["redirect_uris"][0],
"response_mode": "query",
}
auth_req = current_app.rp.client.construct_AuthorizationRequest(request_args=args)
login_url = auth_req.request(current_app.rp.client.authorization_endpoint)
return redirect(login_url)
@app.route("/signed_jwks")
def signed_jwks():
return current_app.rp.signed_jwks
@app.route("/finish")
def handle_authn_response():
# parse authn response
authn_response = current_app.rp.client.parse_response(AuthorizationResponse,
info=request.query_string.decode("utf-8"),
sformat="urlencoded")
auth_code = None
if "code" in authn_response:
auth_code = authn_response["code"]
# make token request
args = {
"code": auth_code,
"client_id": current_app.rp.client.client_id,
"client_secret": current_app.rp.client.client_secret
}
token_response = current_app.rp.client.do_access_token_request(scope="openid", request_args=args)
access_token = token_response["access_token"]
id_token = token_response["id_token"].to_dict()
# TODO do userinfo req
else:
id_token = authn_response["id_token"].to_dict()
access_token = authn_response.get("access_token")
return jsonify(dict(auth_code=auth_code, token=access_token, id_token=id_token))
if __name__ == "__main__":
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(app.config["HTTPS_CERT"], app.config["HTTPS_KEY"])
app.run(debug=True, ssl_context=context)
| StarcoderdataPython |
import webbrowser

# Open the author's Instagram profile in the user's default browser.
# webbrowser.open is portable (Linux/macOS/Windows) and avoids shelling
# out to the Linux-only `xdg-open` through os.system.
webbrowser.open('https://www.instagram.com/shubhamg0sai')
| StarcoderdataPython |
160662 | # [351] Android Unlock Patterns
# Description
# Given an Android 3x3 key lock screen and two integers m and n, where 1 <= m <= n <= 9,
# count the total number of unlock patterns of the Android lock screen, which consist
# of minimum of m keys and maximum n keys.
# Rules for a valid pattern:
# 1) Each pattern must connect at least m keys and at most n keys.
# 2) All the keys must be distinct.
# 3) If the line connecting two consecutive keys in the pattern passes through any other keys,
# the other keys must have previously selected in the pattern. No jumps through non selected key is allowed.
# 4) The order of keys used matters.
# Explanation:
# | 1 | 2 | 3 |
# | 4 | 5 | 6 |
# | 7 | 8 | 9 |
# Invalid move: 4 - 1 - 3 - 6
# Line 1 - 3 passes through key 2 which had not been selected in the pattern.
# Invalid move: 4 - 1 - 9 - 2
# Line 1 - 9 passes through key 5 which had not been selected in the pattern.
# Valid move: 2 - 4 - 1 - 3 - 6
# Line 1 - 3 is valid because it passes through key 2, which had been selected in the pattern
# Valid move: 6 - 5 - 4 - 1 - 9 - 2
# Line 1 - 9 is valid because it passes through key 5, which had been selected in the pattern.
# Example
# Example 1
# Input: m = 1, n = 1
# Output: 9
# Example 2
# Input: m = 1, n = 2
# Output: 65
class Solution:
    """LintCode 351: count Android 3x3 unlock patterns of length m..n.

    Keys are numbered 0..8 row-major, so key k sits at row k // 3,
    column k % 3. A consecutive pair of keys is a legal move when it is
    adjacent (horizontally, vertically or diagonally), a knight move, or
    a jump whose skipped-over key was already used in the pattern.
    """

    def __init__(self):
        self.count = 0  # number of valid patterns found by the last search
        self.res = []   # the patterns themselves (kept for inspection)

    def numberOfPatterns(self, m, n):
        """Return the number of valid patterns using between m and n keys.

        Resets the per-search state first so the same Solution instance can
        be reused; the original implementation accumulated `count` across
        calls and returned wrong totals on a second invocation.
        """
        self.count = 0
        self.res = []
        nums = [i for i in range(9)]
        self.dfs(m, n, [], 0, nums)
        return self.count

    def dfs(self, m, n, curr, start, nums):
        # Depth-first enumeration of key sequences, pruned once too long.
        if len(curr) > n:
            return
        if m <= len(curr) <= n:
            self.count += 1
            self.res.append(curr[:])
        for i in range(start, len(nums)):
            if nums[i] not in curr:
                curr.append(nums[i])
                # Only extend sequences whose latest move is legal.
                if self.valid(curr):
                    self.dfs(m, n, curr, 0, nums)
                curr.pop()

    def valid(self, points):
        """Check that every consecutive pair of keys in `points` is a legal move."""
        prev_x, prev_y = points[0] // 3, points[0] % 3
        record = set([(prev_x, prev_y)])  # keys visited so far
        for i in range(1, len(points)):
            x, y = points[i] // 3, points[i] % 3
            # Adjacent, diagonal-step and knight moves never pass over a key.
            if not ((abs(x - prev_x) == 1 and abs(y - prev_y) == 0)
                    or (abs(x - prev_x) == 0 and abs(y - prev_y) == 1)
                    or (abs(x - prev_x) == 1 and abs(y - prev_y) == 1)
                    or (abs(x - prev_x) == 1 and abs(y - prev_y) == 2)
                    or (abs(x - prev_x) == 2 and abs(y - prev_y) == 1)):
                # Same-row jump: the key in between must already be used.
                if abs(x - prev_x) == 0 and abs(y - prev_y) == 2:
                    if (x, max(y, prev_y) - 1) not in record:
                        return False
                # Same-column jump.
                if abs(x - prev_x) == 2 and abs(y - prev_y) == 0:
                    if (max(x, prev_x) - 1, y) not in record:
                        return False
                # Corner-to-corner diagonal jump passes over the centre key.
                if x + y == prev_x + prev_y or x - y == prev_x - prev_y:
                    if (1, 1) not in record:
                        return False
            record.add((x, y))
            prev_x, prev_y = x, y
        return True
| StarcoderdataPython |
1602164 | <gh_stars>0
# Interactive gradebook (Portuguese UI): read student names with two grades,
# store [name, [grade1, grade2], mean] per student, print a summary table,
# then let the user look up individual grade lists by index (999 quits).
registro = []  # all students: [name, [grade1, grade2], mean]
pessoa = []    # scratch record for the student currently being read
notas = []     # scratch list for the current student's two grades
while True:
    nome = str(input('Nome: ')).capitalize().strip()
    pessoa.append(nome)
    n1 = float(input('Nota 1: '))
    notas.append(n1)
    n2 = float(input('Nota 2: '))
    notas.append(n2)
    pessoa.append(notas[:])  # copy, because `notas` is cleared next
    notas.clear()
    media = (n1 + n2) / 2
    pessoa.append(media)
    registro.append(pessoa[:])  # copy, because `pessoa` is cleared next
    pessoa.clear()
    # NOTE(review): an empty answer raises IndexError on [0] — confirm
    # whether that is acceptable for this exercise.
    continuar = str(input('Deseja continuar? [S/N] ')).strip().upper()[0]
    while continuar not in 'SN':
        continuar = str(input('Tente novamente! Deseja continuar? [S/N]')).strip().upper()[0]
    if continuar == 'N':
        break
# Summary table: index, name and mean per student.
print('-=' * 30)
print('No. NOME MÉDIA')
print('--' * 15)
for c in range(0, len(registro)):
    print(f'{c} {registro[c][0] :<12} {registro[c][2] :.1f}')
print('--' * 15)
# Lookup loop: show the grade list of the chosen student (999 exits).
while True:
    aluno = int(input('Mostrar notas de qual aluno? (999 interrompe): '))
    if aluno == 999:
        break
    print(f'Notas de {registro[aluno][0]} são {registro[aluno][1]}')
    print('--' * 15)
print('FINALIZANDO...')
print('<<< VOLTE SEMPRE >>>')
| StarcoderdataPython |
4804053 | from modpy.optimize._constraints import Constraints, Bounds, LinearConstraint, NonlinearConstraint, prepare_bounds
from modpy.optimize._root_scalar import bisection_scalar, secant_scalar, newton_scalar
from modpy.optimize._lsq import lsq_linear
from modpy.optimize._nl_lsq import least_squares
from modpy.optimize._linprog import linprog
from modpy.optimize._quadprog import quadprog
from modpy.optimize._nlprog import nlprog
from modpy.optimize._cma_es import cma_es
from modpy.optimize._mmo import mmo
from modpy.optimize._bayesian import bayesian_proposal | StarcoderdataPython |
3398875 | <filename>mobile/mobile_app/admin.py
from django.contrib import admin
from .models import TwoFactor

# Expose TwoFactor records in the Django admin with the default ModelAdmin.
admin.site.register(TwoFactor)
89616 | <gh_stars>0
from .dataset import LRWDataset
from .dataset_lrw1000 import LRW1000_Dataset
from .dataset import AVDataset
from .cvtransforms import * | StarcoderdataPython |
1791780 | <reponame>qqsuhao/object-detection-GUI-QT
# -*- coding:utf8 -*-
# @TIME : 2021/12/17 17:03
# @Author : <NAME>
# @File : test.py
import random
import math
import time
import threading
from PyQt5.QtChart import (QAreaSeries, QBarSet, QChart, QChartView,
QLineSeries, QPieSeries, QScatterSeries, QSplineSeries,
QStackedBarSeries, QValueAxis)
from PyQt5.QtCore import (
pyqtSlot, QPoint, QPointF, Qt
)
from PyQt5.Qt import (QApplication, QWidget, QPushButton,
QThread, QMutex, pyqtSignal)
from PyQt5.QtGui import QColor, QPainter, QPalette
from PyQt5.QtWidgets import (QCheckBox, QComboBox, QGridLayout, QHBoxLayout,
QLabel, QSizePolicy, QWidget, QPushButton)
class TestChart(QChart):
    """Scrolling line chart: fills up to `xRange` samples, then shifts the
    existing points left and appends new samples at the right edge.
    """

    def __init__(self, parent=None):
        super(TestChart, self).__init__(parent)
        self.xRange = 500      # number of samples visible on the x axis
        self.sampleRate = 1    # samples consumed per handleUpdate call
        self.counter = 0       # total samples consumed so far
        self.seriesList = []
        self.temp_y = []       # y history used to autoscale while filling
        self.legend().setVisible(False)

        self.axisX = QValueAxis()
        self.axisX.setRange(0, self.xRange)
        self.addAxis(self.axisX, Qt.AlignBottom)

        self.axisY = QValueAxis()
        self.axisY.setRange(0, 1)
        self.addAxis(self.axisY, Qt.AlignLeft)

        self.series = QLineSeries()
        self.addSeries(self.series)
        self.series.attachAxis(self.axisX)
        self.series.attachAxis(self.axisY)

    def handleUpdate(self, ydata):
        """Append `ydata`; once the window is full, scroll the series left.

        NOTE(review): `temp_y` grows without bound during the fill phase and
        the y axis is only rescaled while filling — confirm intent.
        """
        if self.counter < self.xRange:
            # Fill phase: append new points and autoscale the y axis.
            for i in range(self.sampleRate):
                self.series.append(self.counter + i, ydata)
                self.temp_y.append(ydata)
            self.axisY.setRange(min(self.temp_y), max(self.temp_y))
        else:
            # Scroll phase: shift y values left by `sampleRate` slots and put
            # the new sample(s) at the right edge; x coordinates stay fixed.
            # (Removed: a per-frame debug print of points[0] and a dead
            # `y_temp` buffer that only fed commented-out code.)
            points = self.series.pointsVector()
            for i in range(len(points) - self.sampleRate):
                points[i].setY(points[i + self.sampleRate].y())
            for i in range(self.sampleRate):
                points[len(points) - (self.sampleRate - i)].setY(ydata)
            self.series.replace(points)
        self.counter += self.sampleRate
class WorkThread(QThread):
    """Background thread emitting an update signal roughly 100x per second."""

    # Emitted on every tick; connected to the GUI-thread chart update slot.
    _signal_updateUI = pyqtSignal()

    def __init__(self, parent=None):
        super(WorkThread, self).__init__(parent)
        self.qmut = QMutex()   # protects `isexit`
        self.isexit = False    # set True by stop() to request termination

    def run(self):
        # Tick until stop() flips `isexit`.
        while (True):
            self.qmut.lock()
            if (self.isexit):
                break  # exits the loop still holding the lock ...
            self.qmut.unlock()
            self._signal_updateUI.emit()
            time.sleep(0.01)
        self.qmut.unlock()  # ... which is released here after the break

    def stop(self):
        # Change the thread state, then block until run() has returned.
        self.qmut.lock()
        self.isexit = True
        self.qmut.unlock()
        self.wait()
class MyWidget(QWidget):
    """Demo widget: start/stop buttons driving a background chart ticker."""

    def __init__(self, parent=None):
        super(MyWidget, self).__init__(parent)
        self.mythread = WorkThread()  # background ticker, started on demand
        self.label = QLabel()
        self.pushubutton = QPushButton()
        self.pushubutton.setText('start')
        self.pushubutton_stop = QPushButton()
        self.pushubutton_stop.setText('stop')
        self.connectSignals()
        # Create the layout.
        baseLayout = QGridLayout()
        settingsLayout = QHBoxLayout()
        settingsLayout.addWidget(self.pushubutton)
        settingsLayout.addWidget(self.pushubutton_stop)
        settingsLayout.addStretch()
        baseLayout.addLayout(settingsLayout, 0, 0, 1, 3)
        self.myChart = TestChart()
        chartView = QChartView(self.myChart)
        chartView.setRenderHint(QPainter.Antialiasing)  # anti-aliasing
        baseLayout.addWidget(chartView)
        self.setLayout(baseLayout)
        self.pushubutton_stop.setEnabled(False)  # nothing to stop yet

    def pushubutton_clicked(self):
        # Start ticking and flip the two buttons' enabled states.
        self.mythread.start()
        self.pushubutton.setEnabled(False)
        self.pushubutton_stop.setEnabled(True)

    def pushubutton_stop_clicked(self):
        # Stop the worker, then rearm the flag so it can start again.
        self.mythread.stop()
        self.mythread.isexit = False
        self.pushubutton.setEnabled(True)
        self.pushubutton_stop.setEnabled(False)

    def connectSignals(self):
        # Wire button clicks and the worker's tick signal to their slots.
        self.pushubutton.clicked.connect(self.pushubutton_clicked)
        self.pushubutton_stop.clicked.connect(self.pushubutton_stop_clicked)
        self.mythread._signal_updateUI.connect(self.threading_slot)

    def threading_slot(self):
        # Runs in the GUI thread on every worker tick.
        yint = 0  # random.randint(300, 1000)
        self.myChart.handleUpdate(yint)
        # Refresh the chart in real time.
        self.myChart.update()
        # QApplication.processEvents()
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication, QMainWindow

    # Manual test harness: embed MyWidget in a main window and run the
    # Qt event loop until the window is closed.
    app = QApplication(sys.argv)
    window = QMainWindow()
    widget = MyWidget()
    window.setCentralWidget(widget)
    window.resize(900, 600)
    window.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
1745091 | <reponame>rcmckee/BPT
from torch.utils.data import Dataset
from scipy.sparse import coo_matrix
from torchtext import datasets
from .base import GraphBatcher, Batch
import numpy as np
import torch as th
import dgl
def get_nli_dataset(name='snli'):
if name == 'snli':
return datasets.SNLI
elif name == 'mnli':
return datasets.MultiNLI
else:
raise KeyError('invalid dataset name')
class NLIBatcher(GraphBatcher):
    """Collate function for NLI examples.

    For each (premise, hypothesis, label) example it numericalizes the two
    sentences, builds one hierarchical graph per sentence (via the inherited
    ``_get_graph``), and additionally builds a fully-connected bipartite
    "inter" graph between premise and hypothesis leaf tokens.  All graphs in
    the batch are merged into a single dgl.DGLGraph by shifting node ids.
    """

    def __init__(self, TEXT, LABEL, graph_type='bpt', **kwargs):
        """Store the torchtext fields used for numericalization."""
        super(NLIBatcher, self).__init__(triu=True, graph_type=graph_type, **kwargs)
        self.TEXT = TEXT
        self.LABEL = LABEL
        self._cache = {}

    def __call__(self, batch):
        """Collate a list of (premise, hypothesis, label) into a Batch.

        Returns a Batch with the merged intra-sentence graph ``g``, the
        premise<->hypothesis ``g_inter`` graph, root node ids for readout,
        leaf node ids carrying token features, and the label tensor ``y``.
        """
        data = []
        labels = []
        # v_shift/e_shift track the node/edge id offset of the next sub-graph.
        v_shift, e_shift = 0, 0
        row, col = [], []
        root_ids, leaf_ids = [], []
        pos_arr = []
        etypes = []
        row_inter, col_inter = [], []
        for premise, hypo, label in batch:
            premise = self.TEXT.numericalize([premise]).view(-1)
            hypo = self.TEXT.numericalize([hypo]).view(-1)
            label = self.LABEL.numericalize([label]).view(-1)
            data.append(th.cat([premise, hypo], -1))
            labels.append(label)
            # building premise graph
            length = len(premise)
            # get graph
            g = self._get_graph(length)
            # get pos
            pos_arr.append(th.from_numpy(g.get_pos()))
            # gather leaf nodes
            root_ids.append(g.root_id(v_shift=v_shift))
            leaf_ids.append(th.from_numpy(g.leaf_ids(v_shift=v_shift)))
            # gather edges
            src, dst, etype = g.get_edges(v_shift=v_shift)
            row.append(src)
            col.append(dst)
            etypes.append(th.from_numpy(etype))
            # update shift
            nid_premise_leaf = np.arange(v_shift, v_shift + length)
            v_shift += g.number_of_nodes
            e_shift += g.number_of_edges
            # building hypo graph
            length = len(hypo)
            # get graph
            g = self._get_graph(length)
            # get pos
            pos_arr.append(th.from_numpy(g.get_pos()))
            # gather leaf nodes
            root_ids.append(g.root_id(v_shift=v_shift))
            leaf_ids.append(th.from_numpy(g.leaf_ids(v_shift=v_shift)))
            # gather edges
            src, dst, etype = g.get_edges(v_shift=v_shift)
            row.append(src)
            col.append(dst)
            etypes.append(th.from_numpy(etype))
            # update shift
            nid_hypo_leaf = np.arange(v_shift, v_shift + length)
            v_shift += g.number_of_nodes
            e_shift += g.number_of_edges
            # building inter graph: dense edges in both directions between
            # premise leaves and hypothesis leaves.
            row_inter.append(np.repeat(nid_premise_leaf, len(nid_hypo_leaf)))
            col_inter.append(np.tile(nid_hypo_leaf, len(nid_premise_leaf)))
            row_inter.append(np.repeat(nid_hypo_leaf, len(nid_premise_leaf)))
            col_inter.append(np.tile(nid_premise_leaf, len(nid_hypo_leaf)))
        # Total node count across all per-sentence graphs.
        n = v_shift
        root_ids = th.tensor(root_ids)
        leaf_ids = th.cat(leaf_ids)
        pos_arr = th.cat(pos_arr)
        etypes = th.cat(etypes)
        row, col = map(np.concatenate, (row, col))
        row_inter, col_inter = map(np.concatenate, (row_inter, col_inter))
        coo = coo_matrix((np.zeros_like(row), (row, col)), shape=(n, n))
        g = dgl.DGLGraph(coo, readonly=True)
        coo_inter = coo_matrix((np.zeros_like(row_inter), (row_inter, col_inter)), shape=(n, n))
        g_inter = dgl.DGLGraph(coo_inter, readonly=True)
        g.set_n_initializer(dgl.init.zero_initializer)
        g.set_e_initializer(dgl.init.zero_initializer)
        g_inter.set_n_initializer(dgl.init.zero_initializer)
        g_inter.set_e_initializer(dgl.init.zero_initializer)
        data = th.cat(data)
        labels = th.cat(labels)
        g.edata['etype'] = etypes
        g.ndata['pos'] = pos_arr
        # Only leaf nodes carry token ids; internal nodes get zero-initialized data.
        g.nodes[leaf_ids].data['x'] = data
        return Batch(g=g, g_inter=g_inter, readout_ids=root_ids, leaf_ids=leaf_ids, y=labels)
class NLIDataset(Dataset):
    """Thin Dataset wrapper over a torchtext NLI dataset's examples.

    Each item is the (premise, hypothesis, label) triple of one example.
    """

    def __init__(self, nli_dataset):
        """Keep a reference to the underlying examples list."""
        self.data = nli_dataset.examples

    def __len__(self):
        """Number of examples."""
        return len(self.data)

    def __getitem__(self, index):
        """Return the (premise, hypothesis, label) triple at *index*."""
        example = self.data[index]
        return example.premise, example.hypothesis, example.label
| StarcoderdataPython |
3377248 | """.. include:: README.md"""
from .abstract_action_space import AbstractActionSpace
from .composite import Composite
from .grid import Grid
from .vertical_grid import VerticalGrid
from .horizontal_grid import HorizontalGrid
from .joystick import Joystick
from .set_position import SetPosition
| StarcoderdataPython |
3329822 | <filename>web_logic/enums.py
import enum
@enum.unique
class publicationStatus(enum.Enum):
    """Lifecycle state of a publication; values are single-letter DB codes.

    ``@enum.unique`` guards against accidentally aliasing two states to the
    same code if members are added later.
    """
    # NOTE(review): member name misspells "Submitted"; kept for backward compatibility.
    Submited = "s"
    Accepted = "a"
    Published = "p"
@enum.unique
class studentTypes(enum.Enum):
    """Degree-program type of a student; values are the short DB codes.

    ``@enum.unique`` ensures no two programs share a code.
    """
    FirstDegree = "b"
    SecondDegreeProject = "mp"
    SecondDegreeThesis = "mt"
    ThirdDegree = "p"
@enum.unique
class buttonTypes(enum.Enum):
    """Kind of action button on a publication; values are single-letter codes."""
    Download = "d"
    View = "v"
    Cite = "c"
@enum.unique
class githubLoginType(enum.Enum):
    """How a GitHub login is performed: personal access token or credentials."""
    TOKEN = "t"
    # NOTE(review): member name misspells "CREDENTIALS"; kept for backward compatibility.
    CRADENTIOALS = "c"
| StarcoderdataPython |
90919 | <gh_stars>0
from django.shortcuts import render
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from users.serializers import UserSerializer
class CreateuserView(generics.CreateAPIView):
    ''' Create a new user in the system '''
    serializer_class = UserSerializer
class ManageUserView(generics.RetrieveUpdateAPIView):
    ''' Retrieve and update the authenticated user's own profile. '''
    serializer_class = UserSerializer
    # Token auth required; only authenticated users may access this view.
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)

    def get_object(self):
        ''' Retrieve and return the authenticated user making the request. '''
        return self.request.user
1611408 | <filename>tests/i18n/urls.py
from django.conf.urls.i18n import i18n_patterns
from django.http import HttpResponse, StreamingHttpResponse
from django.urls import path
from django.utils.translation import gettext_lazy as _
# Locale-prefixed URL patterns exercised by the i18n test suite.
urlpatterns = i18n_patterns(
    path('simple/', lambda r: HttpResponse()),
    # Streaming response whose body parts are lazily translated strings.
    path('streaming/', lambda r: StreamingHttpResponse([_('Yes'), '/', _('No')])),
)
| StarcoderdataPython |
187345 | <reponame>samuelstanton/lambo
import hydra
import wandb
import pandas as pd
import time
import numpy as np
import torch
import random
from torch.nn import functional as F
from pymoo.factory import get_performance_indicator
from botorch.utils.multi_objective import infer_reference_point
from lambo.models.mlm import sample_tokens, evaluate_windows
from lambo.optimizers.pymoo import pareto_frontier, Normalizer
from lambo.models.shared_elements import check_early_stopping
from lambo.utils import weighted_resampling, DataSplit, update_splits, str_to_tokens, tokens_to_str, safe_np_cat
from lambo.models.lanmt import corrupt_tok_idxs
class LaMBO(object):
def __init__(self, bb_task, tokenizer, encoder, surrogate, acquisition, num_rounds, num_gens,
             lr, num_opt_steps, concentrate_pool, patience, mask_ratio, resampling_weight,
             encoder_obj, optimize_latent, position_sampler, entropy_penalty,
             window_size, latent_init, **kwargs):
    """Configure the LaMBO optimizer and instantiate its sub-models.

    ``bb_task``, ``encoder`` and ``surrogate`` are hydra configs that are
    instantiated here; ``acquisition`` is kept as a config and instantiated
    fresh each round inside ``optimize``.
    """
    self.tokenizer = tokenizer
    self.num_rounds = num_rounds
    self.num_gens = num_gens
    self.concentrate_pool = concentrate_pool
    # Hypervolume reference value, set on the first call to _log_optimizer_metrics.
    self._hv_ref = None
    # Placeholder reference point; replaced in optimize() via infer_reference_point.
    self._ref_point = np.array([1] * bb_task.obj_dim)
    self.max_num_edits = bb_task.max_num_edits
    self.bb_task = hydra.utils.instantiate(bb_task, tokenizer=tokenizer, candidate_pool=[])
    self.encoder_config = encoder
    self.encoder = hydra.utils.instantiate(encoder, tokenizer=tokenizer)
    self.encoder_obj = encoder_obj
    self.surrogate_config = surrogate
    self.surrogate_model = hydra.utils.instantiate(surrogate, tokenizer=self.encoder.tokenizer,
                                                   encoder=self.encoder)
    self.acquisition = acquisition
    self.lr = lr
    self.num_opt_steps = num_opt_steps
    self.patience = patience
    self.mask_ratio = mask_ratio
    self.resampling_weight = resampling_weight
    self.optimize_latent = optimize_latent
    self.position_sampler = position_sampler
    self.entropy_penalty = entropy_penalty
    self.window_size = window_size
    self.latent_init = latent_init
    # Active pool of candidates/targets, populated in optimize().
    self.active_candidates = None
    self.active_targets = None
    # Rolling surrogate-training splits, updated each round.
    self.train_split = DataSplit()
    self.val_split = DataSplit()
    self.test_split = DataSplit()
def optimize(self, candidate_pool, pool_targets, all_seqs, all_targets, log_prefix=''):
    """Run the full LaMBO loop for ``self.num_rounds`` rounds.

    Each round: (1) maintain an active candidate pool (contract to the Pareto
    frontier, then pad with historical Pareto points and random pool points),
    (2) refit the surrogate on all observations so far, (3) instantiate the
    acquisition function, (4) generate mutated candidate batches by masking
    token windows and optimizing latent token features, (5) query the
    black-box objective on the best batch and update the pools / Pareto
    frontier / wandb logs.

    Returns the metrics dict from the last completed round.
    NOTE(review): targets appear to be minimized (``.min()`` summaries,
    acquisition values negated) -- confirm objective sign convention.
    """
    batch_size = self.bb_task.batch_size
    # Map raw targets into the unit hypercube for hypervolume bookkeeping.
    target_min = all_targets.min(axis=0).copy()
    target_range = all_targets.max(axis=0).copy() - target_min
    hypercube_transform = Normalizer(
        loc=target_min + 0.5 * target_range,
        scale=target_range / 2.,
    )
    new_seqs = all_seqs.copy()
    new_targets = all_targets.copy()
    # Drop infeasible candidates from the starting pool.
    is_feasible = self.bb_task.is_feasible(candidate_pool)
    pool_candidates = candidate_pool[is_feasible]
    pool_targets = pool_targets[is_feasible]
    pool_seqs = np.array([p_cand.mutant_residue_seq for p_cand in pool_candidates])
    self.active_candidates, self.active_targets = pool_candidates, pool_targets
    self.active_seqs = pool_seqs
    pareto_candidates, pareto_targets = pareto_frontier(self.active_candidates, self.active_targets)
    pareto_seqs = np.array([p_cand.mutant_residue_seq for p_cand in pareto_candidates])
    pareto_cand_history = pareto_candidates.copy()
    pareto_seq_history = pareto_seqs.copy()
    pareto_target_history = pareto_targets.copy()
    norm_pareto_targets = hypercube_transform(pareto_targets)
    # Negations adapt botorch's maximization convention to minimized targets.
    self._ref_point = -infer_reference_point(-torch.tensor(norm_pareto_targets)).numpy()
    print(self._ref_point)
    rescaled_ref_point = hypercube_transform.inv_transform(self._ref_point.copy())
    # logging setup
    total_bb_evals = 0
    start_time = time.time()
    round_idx = 0
    self._log_candidates(pareto_candidates, pareto_targets, round_idx, log_prefix)
    metrics = self._log_optimizer_metrics(norm_pareto_targets, round_idx, total_bb_evals, start_time, log_prefix)
    print('\n best candidates')
    obj_vals = {f'obj_val_{i}': pareto_targets[:, i].min() for i in range(self.bb_task.obj_dim)}
    print(pd.DataFrame([obj_vals]).to_markdown(floatfmt='.4f'))
    for round_idx in range(1, self.num_rounds + 1):
        metrics = {}
        # contract active pool to current Pareto frontier
        if (self.concentrate_pool > 0 and round_idx % self.concentrate_pool == 0) or self.latent_init == 'perturb_pareto':
            self.active_candidates, self.active_targets = pareto_frontier(
                self.active_candidates, self.active_targets
            )
            self.active_seqs = np.array([a_cand.mutant_residue_seq for a_cand in self.active_candidates])
            print(f'\nactive set contracted to {self.active_candidates.shape[0]} pareto points')
        # augment active set with old pareto points
        if self.active_candidates.shape[0] < batch_size:
            num_samples = min(batch_size, pareto_cand_history.shape[0])
            num_backtrack = min(num_samples, batch_size - self.active_candidates.shape[0])
            _, weights, _ = weighted_resampling(pareto_target_history, k=self.resampling_weight)
            hist_idxs = np.random.choice(
                np.arange(pareto_cand_history.shape[0]), num_samples, p=weights, replace=False
            )
            # Skip historical points already in the active set.
            is_active = np.in1d(pareto_seq_history[hist_idxs], self.active_seqs)
            hist_idxs = hist_idxs[~is_active]
            if hist_idxs.size > 0:
                hist_idxs = hist_idxs[:num_backtrack]
                backtrack_candidates = pareto_cand_history[hist_idxs]
                backtrack_targets = pareto_target_history[hist_idxs]
                backtrack_seqs = pareto_seq_history[hist_idxs]
                self.active_candidates = np.concatenate((self.active_candidates, backtrack_candidates))
                self.active_targets = np.concatenate((self.active_targets, backtrack_targets))
                self.active_seqs = np.concatenate((self.active_seqs, backtrack_seqs))
                print(f'active set augmented with {backtrack_candidates.shape[0]} backtrack points')
        # augment active set with random points
        if self.active_candidates.shape[0] < batch_size:
            num_samples = min(batch_size, pool_candidates.shape[0])
            num_rand = min(num_samples, batch_size - self.active_candidates.shape[0])
            _, weights, _ = weighted_resampling(pool_targets, k=self.resampling_weight)
            rand_idxs = np.random.choice(
                np.arange(pool_candidates.shape[0]), num_samples, p=weights, replace=False
            )
            is_active = np.in1d(pool_seqs[rand_idxs], self.active_seqs)
            rand_idxs = rand_idxs[~is_active][:num_rand]
            rand_candidates = pool_candidates[rand_idxs]
            rand_targets = pool_targets[rand_idxs]
            rand_seqs = pool_seqs[rand_idxs]
            self.active_candidates = np.concatenate((self.active_candidates, rand_candidates))
            self.active_targets = np.concatenate((self.active_targets, rand_targets))
            self.active_seqs = np.concatenate((self.active_seqs, rand_seqs))
            print(f'active set augmented with {rand_candidates.shape[0]} random points')
        print(rescaled_ref_point)
        print(self.active_targets)
        for seq in self.active_seqs:
            if hasattr(self.tokenizer, 'to_smiles'):
                print(self.tokenizer.to_smiles(seq))
            else:
                print(seq)
        print('\n---- fitting surrogate model ----')
        # acquisition fns assume maximization so we normalize and negate targets here
        z_score_transform = Normalizer(all_targets.mean(0), all_targets.std(0))
        tgt_transform = lambda x: -z_score_transform(x)
        transformed_ref_point = tgt_transform(rescaled_ref_point)
        new_split = DataSplit(new_seqs, new_targets)
        holdout_ratio = self.surrogate_model.holdout_ratio
        all_splits = update_splits(
            self.train_split, self.val_split, self.test_split, new_split, holdout_ratio,
        )
        self.train_split, self.val_split, self.test_split = all_splits
        X_train, Y_train = self.train_split.inputs, tgt_transform(self.train_split.targets)
        X_val, Y_val = self.val_split.inputs, tgt_transform(self.val_split.targets)
        X_test, Y_test = self.test_split.inputs, tgt_transform(self.test_split.targets)
        records = self.surrogate_model.fit(
            X_train, Y_train, X_val, Y_val, X_test, Y_test,
            encoder_obj=self.encoder_obj, resampling_temp=None
        )
        # log result
        last_entry = {key.split('/')[-1]: val for key, val in records[-1].items()}
        best_idx = last_entry['best_epoch']
        best_entry = {key.split('/')[-1]: val for key, val in records[best_idx].items()}
        print(pd.DataFrame([best_entry]).to_markdown(floatfmt='.4f'))
        metrics.update(dict(
            test_rmse=best_entry['test_rmse'],
            test_nll=best_entry['test_nll'],
            test_s_rho=best_entry['test_s_rho'],
            test_ece=best_entry['test_ece'],
            test_post_var=best_entry['test_post_var'],
            test_perplexity=best_entry['test_perplexity'],
            round_idx=round_idx,
            num_bb_evals=total_bb_evals,
            num_train=X_train.shape[0],
            time_elapsed=time.time() - start_time,
        ))
        metrics = {
            '/'.join((log_prefix, 'opt_metrics', key)): val for key, val in metrics.items()
        }
        wandb.log(metrics)
        # Acquisition baseline: Pareto frontier of the active set in transformed space.
        baseline_seqs = np.array([cand.mutant_residue_seq for cand in self.active_candidates])
        baseline_targets = self.active_targets
        baseline_seqs, baseline_targets = pareto_frontier(baseline_seqs, baseline_targets)
        baseline_targets = tgt_transform(baseline_targets)
        acq_fn = hydra.utils.instantiate(
            self.acquisition,
            X_baseline=baseline_seqs,
            known_targets=torch.tensor(baseline_targets).to(self.surrogate_model.device),
            surrogate=self.surrogate_model,
            ref_point=torch.tensor(transformed_ref_point).to(self.surrogate_model.device),
            obj_dim=self.bb_task.obj_dim,
        )
        print('\n---- optimizing candidates ----')
        if self.resampling_weight is None:
            weights = np.ones(self.active_targets.shape[0]) / self.active_targets.shape[0]
        else:
            _, weights, _ = weighted_resampling(self.active_targets, k=self.resampling_weight)
        base_cand_batches = []
        new_seq_batches = []
        new_seq_scores = []
        batch_entropy = []
        for gen_idx in range(self.num_gens):
            # select candidate sequences to mutate
            base_idxs = np.random.choice(np.arange(weights.shape[0]), batch_size, p=weights, replace=True)
            base_candidates = self.active_candidates[base_idxs]
            base_seqs = np.array([cand.mutant_residue_seq for cand in base_candidates])
            base_tok_idxs = str_to_tokens(base_seqs, self.encoder.tokenizer)
            base_mask = (base_tok_idxs != self.encoder.tokenizer.padding_idx)
            base_lens = base_mask.float().sum(-1).long()
            tgt_lens = None if self.bb_task.allow_len_change else base_lens
            with torch.no_grad():
                window_mask_idxs, window_entropy = evaluate_windows(
                    base_seqs, self.encoder, self.window_size, replacement=True, encoder_obj=self.encoder_obj
                )
            # select token positions to mutate
            if self.position_sampler == 'entropy_method':
                mask_idxs = self.sample_mutation_window(window_mask_idxs, window_entropy)
            elif self.position_sampler == 'uniform':
                mask_idxs = np.concatenate([
                    random.choice(w_idxs) for w_idxs in window_mask_idxs.values()
                ])
            else:
                raise ValueError
            with torch.no_grad():
                src_tok_idxs = base_tok_idxs.clone().to(self.surrogate_model.device)
                if self.latent_init == 'perturb_pareto':
                    opt_features, src_mask = self.encoder.get_token_features(src_tok_idxs)
                    opt_features += 1e-3 * torch.randn_like(opt_features)
                elif self.encoder_obj == 'lanmt':
                    src_tok_idxs = corrupt_tok_idxs(
                        src_tok_idxs, self.encoder.tokenizer, max_len_delta=None, select_idxs=mask_idxs
                    )
                    opt_features, src_mask = self.encoder.get_token_features(src_tok_idxs)
                elif self.encoder_obj == 'mlm':
                    # this line assumes padding tokens are always added at the end
                    np.put_along_axis(src_tok_idxs, mask_idxs, self.encoder.tokenizer.masking_idx, axis=1)
                    src_tok_features, src_mask = self.encoder.get_token_features(src_tok_idxs)
                    opt_features = np.take_along_axis(src_tok_features, mask_idxs[..., None], axis=1)
                else:
                    raise ValueError
            # initialize latent token-choice decision variables
            opt_params = torch.empty(
                *opt_features.shape, requires_grad=self.optimize_latent, device=self.surrogate_model.device
            )
            opt_params.copy_(opt_features)
            # optimize decision variables
            optimizer = torch.optim.Adam(params=[opt_params], lr=self.lr, betas=(0., 1e-2))
            lr_sched = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=self.patience)
            best_score, best_step = None, 0
            for step_idx in range(self.num_opt_steps):
                if self.encoder_obj == 'lanmt':
                    lat_tok_features, pooled_features = self.encoder.pool_features(opt_params, src_mask)
                    tgt_tok_logits, tgt_mask = self.encoder.logits_from_features(
                        opt_params, src_mask, lat_tok_features, tgt_lens
                    )
                    tgt_tok_idxs, logit_entropy = self.encoder.sample_tgt_tok_idxs(
                        tgt_tok_logits, tgt_mask, temp=1.
                    )
                elif self.encoder_obj == 'mlm':
                    current_features = src_tok_features.clone()
                    np.put_along_axis(current_features, mask_idxs[..., None], opt_params, axis=1)
                    lat_tok_features, pooled_features = self.encoder.pool_features(current_features, src_mask)
                    tgt_tok_logits, tgt_mask = self.encoder.logits_from_features(
                        current_features, src_mask, lat_tok_features, tgt_lens
                    )
                    new_tok_idxs, logit_entropy = sample_tokens(
                        base_tok_idxs, tgt_tok_logits, self.encoder.tokenizer, replacement=False
                    )
                    new_tok_idxs = np.take_along_axis(new_tok_idxs, mask_idxs, axis=1)
                    tgt_tok_idxs = src_tok_idxs.clone()
                    np.put_along_axis(tgt_tok_idxs, mask_idxs, new_tok_idxs, axis=1)
                    logit_entropy = np.take_along_axis(logit_entropy, mask_idxs, axis=1)
                else:
                    raise ValueError
                # Acquisition on latent features drives the gradient step;
                # entropy penalty discourages high-uncertainty token choices.
                lat_acq_vals = acq_fn(pooled_features.unsqueeze(0))
                loss = -lat_acq_vals.mean() + self.entropy_penalty * logit_entropy.mean()
                if self.optimize_latent:
                    loss.backward()
                    optimizer.step()
                    lr_sched.step(loss)
                tgt_seqs = tokens_to_str(tgt_tok_idxs, self.encoder.tokenizer)
                # Early stopping tracks the acquisition value of decoded sequences.
                act_acq_vals = acq_fn(tgt_seqs[None, :]).mean().item()
                best_score, best_step, _, stop = check_early_stopping(
                    model=None,
                    best_score=best_score,
                    best_epoch=best_step,
                    best_weights=None,
                    curr_score=-act_acq_vals,
                    curr_epoch=step_idx + 1,
                    patience=self.patience,
                    save_weights=False,
                )
                if (step_idx + 1) == best_step:
                    best_seqs = tgt_seqs.copy()
                    best_entropy = logit_entropy.mean().item()
                if stop:
                    break
            base_cand_batches.append(base_candidates.copy())
            new_seq_batches.append(best_seqs.copy())
            new_seq_scores.append(best_score)
            batch_entropy.append(best_entropy)
            # print(f'batch {gen_idx + 1}: score - {best_score:0.4f}, entropy - {logit_entropy.mean().item():0.4f}')
        # score all decoded batches, observe the highest value batch
        # (scores are negated acquisition values, hence argmin below)
        new_seq_batches = np.stack(new_seq_batches)
        new_seq_scores = np.stack(new_seq_scores)
        best_batch_idx = new_seq_scores.argmin()
        base_candidates = base_cand_batches[best_batch_idx]
        base_seqs = np.array([b_cand.mutant_residue_seq for b_cand in base_candidates])
        new_seqs = new_seq_batches[best_batch_idx]
        # new_tokens = new_tok_batches[best_batch_idx]
        # logging
        metrics = dict(
            acq_val=new_seq_scores[best_batch_idx].mean().item(),
            entropy=batch_entropy[best_batch_idx],
            round_idx=round_idx,
            num_bb_evals=total_bb_evals,
            time_elapsed=time.time() - start_time,
        )
        print(pd.DataFrame([metrics]).to_markdown(floatfmt='.4f'))
        metrics = {'/'.join((log_prefix, 'opt_metrics', key)): val for key, val in metrics.items()}
        wandb.log(metrics)
        print('\n---- querying objective function ----')
        new_candidates = self.bb_task.make_new_candidates(base_candidates, new_seqs)
        # filter infeasible candidates
        is_feasible = self.bb_task.is_feasible(new_candidates)
        base_candidates = base_candidates[is_feasible]
        base_seqs = base_seqs[is_feasible]
        new_seqs = new_seqs[is_feasible]
        new_candidates = new_candidates[is_feasible]
        # new_tokens = new_tokens[is_feasible]
        if new_candidates.size == 0:
            print('no new candidates')
            continue
        # filter duplicate candidates
        new_seqs, unique_idxs = np.unique(new_seqs, return_index=True)
        base_candidates = base_candidates[unique_idxs]
        base_seqs = base_seqs[unique_idxs]
        new_candidates = new_candidates[unique_idxs]
        # filter redundant candidates
        is_new = np.in1d(new_seqs, all_seqs, invert=True)
        base_candidates = base_candidates[is_new]
        base_seqs = base_seqs[is_new]
        new_seqs = new_seqs[is_new]
        new_candidates = new_candidates[is_new]
        if new_candidates.size == 0:
            print('no new candidates')
            continue
        new_targets = self.bb_task.score(new_candidates)
        all_targets = np.concatenate((all_targets, new_targets))
        all_seqs = np.concatenate((all_seqs, new_seqs))
        for seq in new_seqs:
            if hasattr(self.tokenizer, 'to_smiles'):
                print(self.tokenizer.to_smiles(seq))
            else:
                print(seq)
        assert base_seqs.shape[0] == new_seqs.shape[0] and new_seqs.shape[0] == new_targets.shape[0]
        for b_cand, n_cand, f_val in zip(base_candidates, new_candidates, new_targets):
            print(f'{len(b_cand)} --> {len(n_cand)}: {f_val}')
        pool_candidates = np.concatenate((pool_candidates, new_candidates))
        pool_targets = np.concatenate((pool_targets, new_targets))
        pool_seqs = np.concatenate((pool_seqs, new_seqs))
        # augment active pool with candidates that can be mutated again
        self.active_candidates = np.concatenate((self.active_candidates, new_candidates))
        self.active_targets = np.concatenate((self.active_targets, new_targets))
        self.active_seqs = np.concatenate((self.active_seqs, new_seqs))
        # overall Pareto frontier including terminal candidates
        pareto_candidates, pareto_targets = pareto_frontier(
            np.concatenate((pareto_candidates, new_candidates)),
            np.concatenate((pareto_targets, new_targets)),
        )
        pareto_seqs = np.array([p_cand.mutant_residue_seq for p_cand in pareto_candidates])
        print('\n new candidates')
        obj_vals = {f'obj_val_{i}': new_targets[:, i].min() for i in range(self.bb_task.obj_dim)}
        print(pd.DataFrame([obj_vals]).to_markdown(floatfmt='.4f'))
        print('\n best candidates')
        obj_vals = {f'obj_val_{i}': pareto_targets[:, i].min() for i in range(self.bb_task.obj_dim)}
        print(pd.DataFrame([obj_vals]).to_markdown(floatfmt='.4f'))
        # store good candidates for backtracking
        par_is_new = np.in1d(pareto_seqs, pareto_seq_history, invert=True)
        pareto_cand_history = safe_np_cat([pareto_cand_history, pareto_candidates[par_is_new]])
        pareto_seq_history = safe_np_cat([pareto_seq_history, pareto_seqs[par_is_new]])
        pareto_target_history = safe_np_cat([pareto_target_history, pareto_targets[par_is_new]])
        # logging
        norm_pareto_targets = hypercube_transform(pareto_targets)
        total_bb_evals += batch_size
        self._log_candidates(new_candidates, new_targets, round_idx, log_prefix)
        metrics = self._log_optimizer_metrics(
            norm_pareto_targets, round_idx, total_bb_evals, start_time, log_prefix
        )
    return metrics
def sample_mutation_window(self, window_mask_idxs, window_entropy, temp=1.):
    """Sample one mutation window per sequence, weighted by window entropy.

    :param window_mask_idxs: dict mapping sequence index to a list of
        candidate mask-index arrays (one array per window).
    :param window_entropy: dict mapping sequence index to the per-window
        entropy scores; higher entropy -> higher selection probability.
    :param temp: softmax temperature for the entropy weights.
    :return: np.ndarray of the selected mask indices, concatenated over sequences.
    """
    selected_mask_idxs = []
    for seq_idx, entropies in window_entropy.items():
        mask_idxs = window_mask_idxs[seq_idx]
        assert len(mask_idxs) == len(entropies)
        window_idxs = np.arange(len(mask_idxs)).astype(int)
        entropies = torch.tensor(entropies)
        # dim=0 made explicit: calling softmax without `dim` is deprecated
        # and emits a warning; entropies is 1-D so dim=0 preserves behavior.
        weights = F.softmax(entropies / temp, dim=0).cpu().numpy()
        selected_window = np.random.choice(window_idxs, 1, p=weights).item()
        selected_mask_idxs.append(mask_idxs[selected_window])
    return np.concatenate(selected_mask_idxs)
def _log_candidates(self, candidates, targets, round_idx, log_prefix):
    """Log one wandb record per (candidate, objective-vector) pair.

    Keys are namespaced as '<log_prefix>/candidates/<column>'.
    """
    table_cols = ['round_idx', 'cand_uuid', 'cand_ancestor', 'cand_seq']
    table_cols.extend([f'obj_val_{idx}' for idx in range(self.bb_task.obj_dim)])
    for cand, obj in zip(candidates, targets):
        new_row = [round_idx, cand.uuid, cand.wild_name, cand.mutant_residue_seq]
        new_row.extend([elem for elem in obj])
        record = {'/'.join((log_prefix, 'candidates', key)): val for key, val in zip(table_cols, new_row)}
        wandb.log(record)
def _log_optimizer_metrics(self, normed_targets, round_idx, num_bb_evals, start_time, log_prefix):
    """Compute and log hypervolume metrics for the current Pareto frontier.

    ``normed_targets`` must already be mapped into the unit hypercube.
    The first call fixes ``self._hv_ref`` so later rounds report a relative
    hypervolume against the starting frontier.  Returns the metrics dict.
    """
    hv_indicator = get_performance_indicator('hv', ref_point=self._ref_point)
    new_hypervol = hv_indicator.do(normed_targets)
    self._hv_ref = new_hypervol if self._hv_ref is None else self._hv_ref
    metrics = dict(
        round_idx=round_idx,
        hypervol_abs=new_hypervol,
        # max(1e-6, ...) guards against division by a zero initial hypervolume.
        hypervol_rel=new_hypervol / max(1e-6, self._hv_ref),
        num_bb_evals=num_bb_evals,
        time_elapsed=time.time() - start_time,
    )
    print(pd.DataFrame([metrics]).to_markdown(floatfmt='.4f'))
    metrics = {'/'.join((log_prefix, 'opt_metrics', key)): val for key, val in metrics.items()}
    wandb.log(metrics)
    return metrics
| StarcoderdataPython |
160863 | <filename>operations/sgRNAProcessing/UpdateGroupReferences.py
#!/bin/env python
# Take a set of sgRNA_Groups and attempt to map them
# to better references
import sys, string, argparse
import MySQLdb
import Config
import Database
from classes import Lookups
# Resolve sgRNA groups that still carry the placeholder reference '-' to a
# BioGRID gene id by looking up the original reference in quick_identifiers
# (human only, organism_id 9606).
with Database.db as cursor :
    cursor.execute( "SELECT sgrna_group_id, sgrna_group_reference_original, sgrna_group_reference_original_type FROM " + Config.DB_MAIN + ".sgRNA_groups WHERE sgrna_group_reference='-'" )
    for row in cursor.fetchall( ) :
        # See if we can find a match with an Official Symbol
        cursor.execute( "SELECT gene_id FROM " + Config.DB_QUICK + ".quick_identifiers WHERE quick_identifier_value=%s AND quick_identifier_type IN ('OFFICIAL SYMBOL') AND organism_id='9606' GROUP BY gene_id", [row['sgrna_group_reference_original']] )
        idInfo = ""
        if cursor.rowcount == 1 :
            idInfo = cursor.fetchone( )
        else :
            # Fall back to synonyms / ordered-locus names, again requiring an
            # unambiguous single-row match before accepting the mapping.
            cursor.execute( "SELECT gene_id FROM " + Config.DB_QUICK + ".quick_identifiers WHERE quick_identifier_value=%s AND quick_identifier_type IN ('SYNONYM', 'ORDERED LOCUS') AND organism_id='9606' GROUP BY gene_id", [row['sgrna_group_reference_original']] )
            if cursor.rowcount == 1 :
                idInfo = cursor.fetchone( )
        if idInfo != "" :
            # Unambiguous hit: repoint the group at the BioGRID gene id.
            cursor.execute( "UPDATE " + Config.DB_MAIN + ".sgRNA_groups SET sgrna_group_reference=%s, sgrna_group_reference_type=%s WHERE sgrna_group_id=%s", [idInfo['gene_id'], 'BIOGRID', row['sgrna_group_id']] )
        else :
            # No (or ambiguous) match: keep the original reference/type.
            cursor.execute( "UPDATE " + Config.DB_MAIN + ".sgRNA_groups SET sgrna_group_reference=%s, sgrna_group_reference_type=%s WHERE sgrna_group_id=%s", [row['sgrna_group_reference_original'], row['sgrna_group_reference_original_type'], row['sgrna_group_id']] )
        # NOTE(review): committing per row plus a trailing commit looks
        # redundant -- confirm the intended transaction granularity.
        Database.db.commit( )
    Database.db.commit( )
sys.exit( )
1722174 | <filename>lists/remove_even_numbers.py
#!/bin/env python3
# Path: python-dynamic-programming/lists/remove_even_numbers.py
# Create a funtion to remove even numbers from a list
'''
step 1: Define a function that takes a list as an argument
step 2: Create a new list
step 3: Iterate through the list
step 4: Filter - If the number is even, remove it from the list
'''
# lets call the function remove_even_numbers
def remove_even_numbers(numbers):
    """Return a new list containing only the odd numbers from *numbers*.

    The input list is not modified; order is preserved.
    """
    return [value for value in numbers if value % 2 != 0]

# Quick demo: prints [1, 3, 5, 7, 9]
print(remove_even_numbers([1,2,3,4,5,6,7,8,9]))
# pythonic way
'''
define function
return [expression(n) for n in old_list if n % 2 != 0]
'''
def remove_even(given_list: list) -> list:
    """Return a new list with only the odd elements of *given_list*, in order."""
    kept = []
    for item in given_list:
        if item % 2 != 0:
            kept.append(item)
    return kept

# Quick demo: prints [11, 21, 33, 55, 77, 99]
print(remove_even([11,21,33,44,55,66,77,88,99]))
| StarcoderdataPython |
75665 | from __future__ import absolute_import
from __future__ import print_function
from pysnptools.util.mapreduce1.runner import *
import logging
import fastlmm.pyplink.plink as plink
import pysnptools.util as pstutil
import pysnptools.util.pheno as pstpheno
import numpy as np
from fastlmm.inference import LMM
import scipy.stats as stats
from pysnptools.snpreader import Bed
from fastlmm.util.pickle_io import load, save
import time
import pandas as pd
from six.moves import range
def epistasis(test_snps,pheno,G0, G1=None, mixing=0.0, covar=None,output_file_name=None,sid_list_0=None,sid_list_1=None,
              log_delta=None, min_log_delta=-5, max_log_delta=10,
              cache_file = None,
              runner=None, count_A1=None):
    """
    Function performing epistasis GWAS with ML (never REML). See http://www.nature.com/srep/2013/130122/srep01099/full/srep01099.html.

    :param test_snps: SNPs from which to test pairs. If you give a string, it should be the base name of a set of PLINK Bed-formatted files.
    :type test_snps: a `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string

    :param pheno: A single phenotype: A 'pheno dictionary' contains an ndarray on the 'vals' key and a iid list on the 'iid' key.
      If you give a string, it should be the file name of a PLINK phenotype-formatted file.
    :type pheno: a 'pheno dictionary' or a string

    :param G0: SNPs from which to construct a similarity matrix.
      If you give a string, it should be the base name of a set of PLINK Bed-formatted files.
    :type G0: a `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string

    :param G1: SNPs from which to construct a second similarity kernel, optional. Also, see 'mixing').
      If you give a string, it should be the base name of a set of PLINK Bed-formatted files.
    :type G1: a `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string

    :param mixing: Weight between 0.0 (inclusive, default) and 1.0 (inclusive) given to G1 relative to G0.
      If you give no mixing number, G0 will get all the weight and G1 will be ignored.
    :type mixing: number

    :param covar: covariate information, optional: A 'pheno dictionary' contains an ndarray on the 'vals' key and a iid list on the 'iid' key.
      If you give a string, it should be the file name of a PLINK phenotype-formatted file.
    :type covar: a 'pheno dictionary' or a string

    :param sid_list_0: list of sids, optional:
      All unique pairs from sid_list_0 x sid_list_1 will be evaluated.
      If you give no sid_list_0, all sids in test_snps will be used.
    :type sid_list_0: list of strings

    :param sid_list_1: list of sids, optional:
      All unique pairs from sid_list_0 x sid_list_1 will be evaluated.
      If you give no sid_list_1, all sids in test_snps will be used.
    :type sid_list_1: list of strings

    :param output_file_name: Name of file to write results to, optional. If not given, no output file will be created. The output format is tab-delimited text.
    :type output_file_name: file name

    :param log_delta: A parameter to LMM learning, optional
      If not given will search for best value.
    :type log_delta: number

    :param min_log_delta: (default:-5)
      When searching for log_delta, the lower bounds of the search.
    :type min_log_delta: number

    :param max_log_delta: (default:10)
      When searching for log_delta, the upper bounds of the search.
    :type max_log_delta: number

    :param cache_file: Name of file to read or write cached precomputation values to, optional.
      If not given, no cache file will be used.
      If given and file does not exist, will write precomputation values to file.
      If given and file does exist, will read precomputation values from file.
      The file contains the U and S matrix from the decomposition of the training matrix. It is in Python's np.savez (\*.npz) format.
      Calls using the same cache file should have the same 'G0' and 'G1'
    :type cache_file: file name

    :param runner: a `Runner <http://fastlmm.github.io/PySnpTools/#util-mapreduce1-runner-runner>`__, optional: Tells how to run locally, multi-processor, or on a cluster.
      If not given, the function is run locally.
    :type runner: `Runner <http://fastlmm.github.io/PySnpTools/#util-mapreduce1-runner-runner>`__

    :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
      alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
    :type count_A1: bool

    :rtype: Pandas dataframe with one row per SNP pair. Columns include "PValue"

    :Example:

    >>> from __future__ import print_function #Python 2 & 3 compatibility
    >>> import logging
    >>> from pysnptools.snpreader import Bed
    >>> from fastlmm.association import epistasis
    >>> logging.basicConfig(level=logging.INFO)
    >>> test_snps = Bed('../../tests/datasets/all_chr.maf0.001.N300',count_A1=True)
    >>> pheno = '../../tests/datasets/phenSynthFrom22.23.N300.randcidorder.txt'
    >>> covar = '../../tests/datasets/all_chr.maf0.001.covariates.N300.txt'
    >>> results_dataframe = epistasis(test_snps, pheno, G0=test_snps, covar=covar,
    ...                               sid_list_0=test_snps.sid[:10], #first 10 snps
    ...                               sid_list_1=test_snps.sid[5:15], #Skip 5 snps, use next 10
    ...                               count_A1=False)
    >>> print(results_dataframe.iloc[0].SNP0, results_dataframe.iloc[0].SNP1,round(results_dataframe.iloc[0].PValue,5),len(results_dataframe))
    1_12 1_9 0.07779 85

    """
    # Default to running locally when no runner is supplied.
    if runner is None:
        runner = Local()

    # _Epistasis implements IDistributable, so the heavy lifting can be farmed
    # out by whatever runner the caller provides.
    epistasis = _Epistasis(test_snps, pheno, G0, G1, mixing, covar, sid_list_0, sid_list_1, log_delta, min_log_delta, max_log_delta, output_file_name, cache_file, count_A1=count_A1)
    logging.info("# of pairs is {0}".format(epistasis.pair_count))
    epistasis.fill_in_cache_file()
    result = runner.run(epistasis)
    return result
def write(sid0_list, sid1_list, pvalue_list, output_file):
    """
    Given three arrays of the same length [as per the output of epistasis(...)], writes a header
    and the values to the given output file as tab-separated text.

    :param sid0_list: first SNP id of each tested pair
    :param sid1_list: second SNP id of each tested pair
    :param pvalue_list: p-value of each tested pair
    :param output_file: path of the file to create (overwritten if it already exists)
    """
    with open(output_file, "w") as out_fp:
        out_fp.write("{0}\t{1}\t{2}\n".format("sid0", "sid1", "pvalue"))
        # zip iterates the three parallel lists together instead of indexing by position
        for sid0, sid1, pvalue in zip(sid0_list, sid1_list, pvalue_list):
            out_fp.write("{0}\t{1}\t{2}\n".format(sid0, sid1, pvalue))
# could this be written without the inside-out of IDistributable?
class _Epistasis(object) : #implements IDistributable
    """Distributable work item that tests pairs of SNPs for epistatic interaction.

    The full set of unordered SNP pairs (built from ``sid_list_0`` x ``sid_list_1``,
    each pair enumerated exactly once) is partitioned into blocks of at most
    ``block_size`` pairs; each block is one unit of work that a runner can execute
    locally, multi-threaded, or on a cluster. Results are p-values from a
    likelihood-ratio test comparing an additive-only model against a model that
    also includes the elementwise product of the two SNPs.
    """

    def __init__(self, test_snps, pheno, G0, G1=None, mixing=0.0, covar=None,sid_list_0=None,sid_list_1=None,
                 log_delta=None, min_log_delta=-5, max_log_delta=10, output_file=None, cache_file=None, count_A1=None):
        # Inputs may be file names or already-loaded objects; _run_once() resolves them lazily.
        self._ran_once = False
        self.test_snps = test_snps
        self.pheno = pheno
        self.output_file_or_none = output_file
        self.cache_file = cache_file
        self.count_A1 = count_A1
        self.covar = covar
        # Normalize sid lists to string arrays so set operations compare consistently.
        self.sid_list_0 = np.array(sid_list_0,dtype='str') if sid_list_0 is not None else None
        self.sid_list_1 = np.array(sid_list_1,dtype='str') if sid_list_1 is not None else None
        self.G0=G0
        self.G1_or_none=G1
        self.mixing=mixing
        self.external_log_delta=log_delta
        self.min_log_delta = min_log_delta
        self.max_log_delta = max_log_delta
        # Human-readable description used as the job name (see __str__).
        self._str = "{0}({1},{2},G0={6},G1={7},mixing={8},covar={3},output_file={12},sid_list_0={4},sid_list_1{5},log_delta={9},min_log_delta={10},max_log_delta={11},cache_file={13})".format(
            self.__class__.__name__, self.test_snps,self.pheno,self.covar,self.sid_list_0,self.sid_list_1,
            self.G0, self.G1_or_none, self.mixing, self.external_log_delta, self.min_log_delta, self.max_log_delta, output_file, cache_file)
        self.block_size = 1000  # maximum number of SNP pairs handled by one work item

    def order_by_test_snps(self, sid_sequence):
        # Return the given sids re-ordered to match their order of appearance in test_snps.
        return self.test_snps.sid[sorted(self.test_snps.sid_to_index(sid_sequence))]

    def set_sid_sets(self):
        # Split the two sid lists into three disjoint groups -- sids only in list 0,
        # sids in both lists, and sids only in list 1 -- so each unordered pair is
        # enumerated exactly once by pair_sequence_with_start().
        sid_set_0 = set(self.sid_list_0)
        self.intersect = self.order_by_test_snps(sid_set_0.intersection(self.sid_list_1))
        self.just_sid_0 = self.order_by_test_snps(sid_set_0.difference(self.intersect))
        self.just_sid_1 = self.order_by_test_snps(set(self.intersect).symmetric_difference(self.sid_list_1))
        # All cross-group pairs plus the within-intersection pairs (each counted once).
        self._pair_count = len(self.just_sid_0)*len(self.intersect) + len(self.just_sid_0)*len(self.just_sid_1) + len(self.intersect)*len(self.just_sid_1) + len(self.intersect) * (len(self.intersect)-1)//2
        self.test_snps, self.pheno, self.covar, self.G0, self.G1_or_none = pstutil.intersect_apply([self.test_snps, self.pheno, self.covar, self.G0, self.G1_or_none]) #should put G0 and G1 first

    def _run_once(self):
        # Lazy one-time initialization: load any inputs given as file names, default
        # the sid lists, build the covariate matrix, and pick a temp directory.
        if self._ran_once:
            return
        # NOTE(review): None appears to mark "initialization in progress"; it is set
        # to True at the end of this method -- confirm intent.
        self._ran_once = None
        if isinstance(self.test_snps, str):
            self.test_snps = Bed(self.test_snps,count_A1=self.count_A1)

        if isinstance(self.G0, str):
            self.G0 = Bed(self.G0,count_A1=self.count_A1)

        if isinstance(self.pheno, str):
            self.pheno = pstpheno.loadOnePhen(self.pheno,vectorize=True,missing='NaN')

        if self.covar is not None and isinstance(self.covar, str):
            self.covar = pstpheno.loadPhen(self.covar,missing='NaN')

        if self.G1_or_none is not None and isinstance(self.G1_or_none, str):
            self.G1_or_none = Bed(self.G1_or_none,count_A1=self.count_A1)

        # Defaulting a sid list to all test sids means "pair against everything".
        if self.sid_list_0 is None:
            self.sid_list_0 = self.test_snps.sid

        if self.sid_list_1 is None:
            self.sid_list_1 = self.test_snps.sid

        self.set_sid_sets()

        #!!Should fix up to add only of no constant columns - will need to add a test case for this
        # An all-ones bias column is always appended to the covariates.
        if self.covar is None:
            self.covar = np.ones((self.test_snps.iid_count, 1))
        else:
            self.covar = np.hstack((self.covar['vals'],np.ones((self.test_snps.iid_count, 1))))
        self.n_cov = self.covar.shape[1]

        if self.output_file_or_none is None:
            self.__tempdirectory = ".working"
        else:
            self.__tempdirectory = self.output_file_or_none + ".working"

        self._ran_once = True

    #start of IDistributable interface--------------------------------------

    @property
    def work_count(self):
        # Number of work items (blocks of pairs).
        self._run_once()
        block_count = self.div_ceil(self._pair_count, self.block_size)
        return block_count

    def work_sequence(self):
        self._run_once()
        return self.work_sequence_range(0,self.work_count)

    def work_sequence_range(self, start, end):
        # Yield one zero-argument callable per block; each callable runs do_work on
        # its block of pairs against a shared LMM loaded from the cache file.
        self._run_once()

        lmm = self.lmm_from_cache_file()
        lmm.sety(self.pheno['vals'])

        for sid0_list, sid1_list in self.pair_block_sequence_range(start,end):
            yield lambda lmm=lmm,sid0_list=sid0_list,sid1_list=sid1_list : self.do_work(lmm,sid0_list,sid1_list)  # the 'lmm=lmm,...' defaults bind the loop variables at definition time (avoids Python's late-binding closures)

    def reduce(self, result_sequence):
        # Concatenate the per-block dataframes, sort by p-value, and optionally dump to disk.
        #doesn't need "run_once()"
        frame = pd.concat(result_sequence)
        frame.sort_values(by="PValue", inplace=True)
        frame.index = np.arange(len(frame))

        if self.output_file_or_none is not None:
            frame.to_csv(self.output_file_or_none, sep="\t", index=False)

        return frame

        #!!Find a place to output info like this near the end of the run
        #logging.info("PhenotypeName\t{0}".format(pheno['header']))
        #logging.info("SampleSize\t{0}".format(test_snps.iid_count))
        #logging.info("SNPCount\t{0}".format(test_snps.sid_count))
        #logging.info("Runtime\t{0}".format(time.time()-t0))

    @property
    def tempdirectory(self):
        self._run_once()
        return self.__tempdirectory

    #optional override -- the str name of the instance is used by the cluster as the job name
    def __str__(self):
        #Doesn't need run_once
        return self._str

    def copyinputs(self, copier):
        # Tell the cluster copier which input files this job needs; Bed data is
        # three files (.bed/.bim/.fam) when given as a path.
        self._run_once()
        if isinstance(self.test_snps, str):
            copier.input(self.test_snps + ".bed")
            copier.input(self.test_snps + ".bim")
            copier.input(self.test_snps + ".fam")
        else:
            copier.input(self.test_snps)

        copier.input(self.pheno)
        copier.input(self.covar)

        if isinstance(self.G0, str):
            copier.input(self.G0 + ".bed")
            copier.input(self.G0 + ".bim")
            copier.input(self.G0 + ".fam")
        else:
            copier.input(self.G0)

        copier.input(self.G1_or_none)
        copier.input(self.cache_file)

    def copyoutputs(self,copier):
        #Doesn't need run_once
        copier.output(self.output_file_or_none)

    #end of IDistributable interface---------------------------------------

    @staticmethod
    def div_ceil(num, den): #!!move to utils?
        return -(-num//den) #The -/- trick makes it do ceiling instead of floor. "//" will do integer division even in the future and on floats.

    def pair_block_sequence_range(self,block_start,block_end):
        # Yield (sid0_list, sid1_list) for each block in [block_start, block_end).
        # Block boundaries are computed proportionally so blocks have near-equal size.
        self._run_once()
        assert 0 <= block_start and block_start <= block_end and block_end <= self.work_count, "real assert"

        block_index = block_start
        start = block_index * self.pair_count // self.work_count
        next_start = (block_index+1) * self.pair_count // self.work_count
        size_goal = next_start - start
        end = block_end * self.pair_count // self.work_count

        sid0_list = []
        sid1_list = []
        for sid0, sid1 in self.pair_sequence_range(start,end):
            sid0_list.append(sid0)
            sid1_list.append(sid1)
            if len(sid0_list) == size_goal:
                yield sid0_list, sid1_list
                block_index += 1
                if block_index == block_end:
                    return
                sid0_list = []
                sid1_list = []
                start = next_start
                next_start = (block_index+1) * self.pair_count // self.work_count
                size_goal = next_start - start
        assert len(sid0_list) == 0, "real assert"

    #If start == end, then returns without yielding anything
    def pair_sequence_range(self,start,end):
        # Yield exactly the pairs with global indexes in [start, end).
        self._run_once()
        assert 0 <= start and start <= end and end <= self._pair_count, "real assert"

        i = start
        for sid0, sid1 in self.pair_sequence_with_start(start):
            yield sid0, sid1
            i = i + 1
            if i == end:
                break
        assert i == end, "Not enough items found. Didn't get to the end"

    def pair_sequence_with_start(self,start):
        # Enumerate all pairs starting at global pair index 'start'. The four combo
        # generators cover: (only-0 x intersect), (only-0 x only-1),
        # (intersect x only-1), and pairs within the intersection.
        self._run_once()

        # skip_ref is a one-element list used as a mutable by-reference counter:
        # each combo generator decrements it by however many pairs it skips.
        skip_ref = [start]
        just_sid_0_list = list(self.just_sid_0)
        just_sid_1_list = list(self.just_sid_1)
        intersect_list = list(self.intersect)
        for sid0, sid1 in self.combo_distinct(just_sid_0_list, intersect_list, skip_ref):
            yield sid0, sid1
        for sid0, sid1 in self.combo_distinct(just_sid_0_list, just_sid_1_list, skip_ref):
            yield sid0, sid1
        for sid0, sid1 in self.combo_distinct(intersect_list, just_sid_1_list, skip_ref):
            yield sid0, sid1
        for sid0, sid1 in self.combo_same(intersect_list, skip_ref):
            yield sid0, sid1
        assert skip_ref[0] == 0, "real assert"

    def combo_distinct(self, distinct__list0, distinct__list1, skip_ref):
        # Cross product of two disjoint sid lists, skipping the first skip_ref[0]
        # pairs (row-major order). Consumes the skip count it uses.
        row_count = len(distinct__list0)
        col_count = len(distinct__list1)

        # If the whole product is skipped, just charge it against the skip count.
        if skip_ref[0] >= row_count * col_count:
            skip_ref[0] = skip_ref[0] - row_count * col_count
            assert skip_ref[0] >=0, "real assert"
            return

        row_start = skip_ref[0] // col_count
        skip_ref[0] = skip_ref[0] - row_start * col_count
        assert skip_ref[0] >=0, "real assert"

        for row_index in range(row_start, row_count):
            sid0 = distinct__list0[row_index]
            if row_index == row_start:
                col_start = skip_ref[0]
                skip_ref[0] = 0
            else:
                col_start = 0
            for col_index in range(col_start, col_count):
                sid1 = distinct__list1[col_index]
                yield sid0, sid1

    def combo_same(self, list, skip_ref):
        # All unordered pairs (i < j) within one sid list, skipping the first
        # skip_ref[0] pairs. (Parameter name 'list' shadows the builtin.)
        count = len(list)
        # NOTE(review): full_size uses count*(count+1)//2 while _pair_count uses
        # count*(count-1)//2 for the same pairs; the callers never pass a skip that
        # reaches this branch, but confirm which formula is intended.
        full_size = count * (count + 1) // 2
        if skip_ref[0] >= full_size:
            skip_ref[0] = skip_ref[0] - full_size
            assert skip_ref[0] >=0, "real assert"
            return

        # Closed-form inverse of the triangular-number index: find the first row
        # whose pairs are not entirely skipped.
        row_start = int((-1 + 2*count - np.sqrt(1 - 4*count + 4*count**2 - 8*skip_ref[0]))//2)
        skip_ref[0] = skip_ref[0] - (count*row_start - (row_start*(1 + row_start))//2)
        assert skip_ref[0] >=0, "real assert"

        for row_index in range(row_start, count):
            sid0 = list[row_index]
            if row_index == row_start:
                col_start = skip_ref[0]
                skip_ref[0] = 0
            else:
                col_start = 0
            for col_index in range(col_start + 1 + row_index, count):
                sid1 = list[col_index]
                assert sid0 is not sid1, "real assert"
                yield sid0, sid1

    @property
    def pair_count(self):
        self._run_once()
        return self._pair_count

    def lmm_from_cache_file(self):
        # Rebuild an LMM from the cached eigendecomposition (U, S) saved by
        # fill_in_cache_file via np.savez (default array names arr_0/arr_1).
        logging.info("Loading precomputation from {0}".format(self.cache_file))
        lmm = LMM()
        with np.load(self.cache_file) as data:
            lmm.U = data['arr_0']
            lmm.S = data['arr_1']
        return lmm

    def fill_in_cache_file(self):
        # Ensure the eigendecomposition cache file exists and that internal_delta
        # is set (searching for log_delta if the caller didn't supply one).
        self._run_once()

        logging.info("filling in the cache_file and log_delta, as needed")

        if self.G1_or_none is None:
            self.G1val_or_none = None
        else:
            self.G1val_or_none = self.G1_or_none.read().val

        # The S and U are always cached, in case they are needed for the cluster or for multi-threaded runs
        if self.cache_file is None:
            self.cache_file = os.path.join(self.__tempdirectory, "cache_file.npz")
            if os.path.exists(self.cache_file): # If there is already a cache file in the temp directory, it must be removed because it might be out-of-date
                os.remove(self.cache_file)

        lmm = None
        if not os.path.exists(self.cache_file):
            logging.info("Precomputing eigen")
            lmm = LMM()
            G0_standardized = self.G0.read().standardize()
            lmm.setG(G0_standardized.val, self.G1val_or_none, a2=self.mixing)
            logging.info("Saving precomputation to {0}".format(self.cache_file))
            pstutil.create_directory_if_necessary(self.cache_file)
            np.savez(self.cache_file, lmm.U,lmm.S) #using np.savez instead of pickle because it seems to be faster to read and write

        if self.external_log_delta is None:
            if lmm is None:
                lmm = self.lmm_from_cache_file()

            logging.info("searching for internal delta")
            lmm.setX(self.covar)
            lmm.sety(self.pheno['vals'])
            #log delta is used here. Might be better to use findH2, but if so will need to normalized G so that its K's diagonal would sum to iid_count
            result = lmm.find_log_delta(REML=False, sid_count=self.G0.sid_count, min_log_delta=self.min_log_delta, max_log_delta=self.max_log_delta ) #!!what about findA2H2? minH2=0.00001
            self.external_log_delta = result['log_delta']

        self.internal_delta = np.exp(self.external_log_delta) * self.G0.sid_count
        logging.info("internal_delta={0}".format(self.internal_delta))
        logging.info("external_log_delta={0}".format(self.external_log_delta))

    # Progress counters for logging; class-level initial values, updated per instance
    # (self.do_pair_count += 1 creates an instance attribute on first increment).
    do_pair_count = 0
    do_pair_time = time.time()

    def do_work(self, lmm, sid0_list, sid1_list):
        # Test each (sid0, sid1) pair: compare log-likelihoods of the additive model
        # (covariates + both SNPs) vs the model that adds their elementwise product,
        # and compute a chi-square p-value with 1 degree of freedom.
        dataframe = pd.DataFrame(
            index=np.arange(len(sid0_list)),
            columns=('SNP0', 'Chr0', 'GenDist0', 'ChrPos0', 'SNP1', 'Chr1', 'GenDist1', 'ChrPos1', 'PValue', 'NullLogLike', 'AltLogLike')
            )
        #!!Is this the only way to set types in a dataframe?
        dataframe['Chr0'] = dataframe['Chr0'].astype(np.float)
        dataframe['GenDist0'] = dataframe['GenDist0'].astype(np.float)
        dataframe['ChrPos0'] = dataframe['ChrPos0'].astype(np.float)
        dataframe['Chr1'] = dataframe['Chr1'].astype(np.float)
        dataframe['GenDist1'] = dataframe['GenDist1'].astype(np.float)
        dataframe['ChrPos1'] = dataframe['ChrPos1'].astype(np.float)
        dataframe['PValue'] = dataframe['PValue'].astype(np.float)
        dataframe['NullLogLike'] = dataframe['NullLogLike'].astype(np.float)
        dataframe['AltLogLike'] = dataframe['AltLogLike'].astype(np.float)

        #This is some of the code for a different way that reads and dot-products 50% more, but does less copying. Seems about the same speed
        #sid0_index_list = self.test_snps.sid_to_index(sid0_list)
        #sid1_index_list = self.test_snps.sid_to_index(sid1_list)
        #sid_index_union_dict = {}
        #sid0_index_index_list = self.create_index_index(sid_index_union_dict, sid0_index_list)
        #sid1_index_index_list = self.create_index_index(sid_index_union_dict, sid1_index_list)
        #snps0_read = self.test_snps[:,sid0_index_list].read().standardize()
        #snps1_read = self.test_snps[:,sid1_index_list].read().standardize()

        # Read each needed SNP only once (the union), then index into that read.
        sid_union = set(sid0_list).union(sid1_list)
        sid_union_index_list = sorted(self.test_snps.sid_to_index(sid_union))
        snps_read = self.test_snps[:,sid_union_index_list].read().standardize()

        sid0_index_list = snps_read.sid_to_index(sid0_list)
        sid1_index_list = snps_read.sid_to_index(sid1_list)

        products = snps_read.val[:,sid0_index_list] * snps_read.val[:,sid1_index_list] # in the products matrix, each column i is the elementwise product of sid i in each list
        X = np.hstack((self.covar, snps_read.val, products))
        UX = lmm.U.T.dot(X)
        k = lmm.S.shape[0]
        N = X.shape[0]
        if (k<N):
            UUX = X - lmm.U.dot(UX)
        else:
            UUX = None

        for pair_index, sid0 in enumerate(sid0_list):
            sid1 = sid1_list[pair_index]
            sid0_index = sid0_index_list[pair_index]
            sid1_index = sid1_index_list[pair_index]

            # Build the column-index list into X for this pair:
            # [covariate columns..., sid0, sid1, product] (product is last).
            index_list = np.array([pair_index]) #index to product
            index_list = index_list + len(sid_union_index_list) #Shift by the number of snps in the union
            index_list = np.hstack((np.array([sid0_index,sid1_index]),index_list)) # index to sid0 and sid1
            index_list = index_list + self.covar.shape[1] #Shift by the number of values in the covar
            index_list = np.hstack((np.arange(self.covar.shape[1]),index_list)) #indexes of the covar

            index_list_less_product = index_list[:-1] #index to everything but the product

            #Null -- the two additive SNPs
            lmm.X = X[:,index_list_less_product]
            lmm.UX = UX[:,index_list_less_product]
            if (k<N):
                lmm.UUX = UUX[:,index_list_less_product]
            else:
                lmm.UUX = None
            res_null = lmm.nLLeval(delta=self.internal_delta, REML=False)
            ll_null = -res_null["nLL"]

            #Alt -- now with the product feature
            lmm.X = X[:,index_list]
            lmm.UX = UX[:,index_list]
            if (k<N):
                lmm.UUX = UUX[:,index_list]
            else:
                lmm.UUX = None
            res_alt = lmm.nLLeval(delta=self.internal_delta, REML=False)
            ll_alt = -res_alt["nLL"]

            # Likelihood-ratio test: 2*(ll_alt - ll_null) ~ chi2(1) under the null.
            test_statistic = ll_alt - ll_null
            degrees_of_freedom = 1
            pvalue = stats.chi2.sf(2.0 * test_statistic, degrees_of_freedom)
            logging.debug("<{0},{1}>, null={2}, alt={3}, pvalue={4}".format(sid0,sid1,ll_null,ll_alt,pvalue))
            dataframe.iloc[pair_index] = [
                sid0, snps_read.pos[sid0_index,0], snps_read.pos[sid0_index,1], snps_read.pos[sid0_index,2],
                sid1, snps_read.pos[sid1_index,0], snps_read.pos[sid1_index,1], snps_read.pos[sid1_index,2],
                pvalue, ll_null, ll_alt]

            self.do_pair_count += 1
            if self.do_pair_count % 100 == 0:
                start = self.do_pair_time
                self.do_pair_time = time.time()
                logging.info("do_pair_count={0}, time={1}".format(self.do_pair_count,self.do_pair_time-start))

        return dataframe
if __name__ == "__main__":
    # When executed directly, run the module's doctests (e.g. the example in the
    # epistasis docstring) as a smoke test.
    import doctest
    doctest.testmod()
    print("done")
| StarcoderdataPython |
# Task 2. Variant 34
# A program that prints a favourite quotation whose author is Plato.
# The author must be mentioned on a separate line.
# <NAME>.
# 31.03.2016
print ('\nНикто не знает, что такое смерть и не есть ли она величайшее для человека добро. И однако, все ее страшатся как бы в сознании, что она - величайшее зло.')
print ('\n\t\t Платон')
input ('\n\n Нажмите Enter для выхода')
1626174 | <gh_stars>0
from .plugin import Include
| StarcoderdataPython |
104976 | from domain.entities.value_objects.cashback import Cashback
| StarcoderdataPython |
1712548 | <filename>batchflow/tests/research_test.py
""" Tests for Research and correspong classes. """
# pylint: disable=no-name-in-module, missing-docstring, redefined-outer-name
import os
from contextlib import ExitStack as does_not_raise
import pytest
import numpy as np
from batchflow import Dataset, Pipeline, B, V, C
from batchflow import NumpySampler as NS
from batchflow.models.torch import ResNet
from batchflow.opensets import CIFAR10
from batchflow.research import Experiment, Executor, Domain, Option, Research, E, EC, O, ResearchResults, Alias
class Model:
    """Bundle a CIFAR10 dataset with linked train/test pipelines for a ResNet model.

    Used by the ``research_with_controller`` fixture as a "controller" instance:
    the train pipeline creates the model, the test pipeline imports it and
    gathers classification metrics.
    """

    def __init__(self):
        self.dataset = CIFAR10()
        # Head layout/units come from the research config (C(...) placeholders).
        self.model_config = {
            'head/layout': C('layout'),
            'head/units': C('units'),
            'loss': 'ce',
            'device': 'cpu',
            'amp': False
        }
        self.create_train_ppl()
        self.create_test_ppl()

    def create_train_ppl(self):
        """Build the training pipeline (creates the 'model') on the train split."""
        ppl = (Pipeline()
               .init_model('model', ResNet, 'dynamic', config=self.model_config)
               .to_array(channels='first', src='images', dst='images')
               .train_model('model', B('images'), B('labels'))
               .run_later(batch_size=8, n_iters=1, shuffle=True, drop_last=True)
        )
        self.train_ppl = ppl << self.dataset.train

    def create_test_ppl(self):
        """Build the test pipeline, importing the model from the train pipeline."""
        test_ppl = (Pipeline()
                    .import_model('model', self.train_ppl)
                    .init_variable('metrics', None)
                    .to_array(channels='first', src='images', dst='images')
                    .predict_model('model', B('images'), fetches='predictions', save_to=B('predictions'))
                    .gather_metrics('classification', B('labels'), B('predictions'), fmt='logits', axis=-1,
                                    num_classes=10, save_to=V('metrics', mode='update'))
                    .run_later(batch_size=8, n_iters=2, shuffle=False, drop_last=False)
        )
        self.test_ppl = test_ppl << self.dataset.test

    def eval_metrics(self, metrics, **kwargs):
        """Evaluate the requested metrics gathered by the test pipeline."""
        return self.test_ppl.v('metrics').evaluate(metrics, **kwargs)
@pytest.fixture
def generator():
    """Provide a generator factory: for a given n, yield running sums of 0..n-1."""
    def _generator(n):
        total = 0
        for step in range(n):
            total += step
            yield total
    return _generator
@pytest.fixture
def simple_research(tmp_path):
    """A minimal Research: one callable summing two domain-supplied numbers.

    Domain is 2 x 3 = 6 configs; results are saved under the 'sum' key.
    """
    def f(x, y):
        return x + y

    experiment = (Experiment()
                  .add_callable('sum', f, x=EC('x'), y=EC('y'))
                  .save(O('sum'), 'sum')
    )
    domain = Domain(x=[1, 2], y=[2, 3, 4])
    # tmp_path keeps each test's research folder isolated.
    research = Research(name=os.path.join(tmp_path, 'research'), experiment=experiment, domain=domain)
    return research
@pytest.fixture
def research_with_controller(tmp_path):
    """A Research driven through a Model 'controller' instance.

    Pipelines and the metrics callable are referenced by attribute path on the
    controller ('controller.train_ppl', etc.). Domain is a 2 x 2 cross product
    of head layouts and unit counts, repeated twice.
    """
    domain = Domain({'layout': ['f', 'faf']}) @ Domain({'units': [[10], [100, 10]]})
    research = (Research(name=os.path.join(tmp_path, 'research'), domain=domain, n_reps=2)
                .add_instance('controller', Model)
                .add_pipeline('controller.train_ppl')
                .add_pipeline('controller.test_ppl', run=True, when='last')
                .add_callable('controller.eval_metrics', metrics='accuracy', when='last')
                .save(O('controller.eval_metrics'), 'accuracy', when='last')
    )
    return research
# Expected domain size for each binary Domain operator, used by
# TestDomain.test_operations to check len(domain) after combining two options.
SIZE_CALC = {
    '+': lambda x, y: x + y,
    '*': lambda x, y: x * y,
    '@': lambda x, y: x
}
class TestDomain:
    """Tests of Domain construction, combination operators, and iteration."""

    @pytest.mark.parametrize('op', ['+', '*', '@'])
    @pytest.mark.parametrize('a', [[0, 1, 2], [0, 1, 2, 4]])
    @pytest.mark.parametrize('b', [[2, 3, 4]])
    @pytest.mark.parametrize('n_reps', [1, 2])
    def test_operations(self, op, a, b, n_reps):
        """Combined domain size must match SIZE_CALC for each operator."""
        option_1 = Domain({'a': a}) #pylint:disable=unused-variable
        option_2 = Domain(b=b) #pylint:disable=unused-variable
        # '@' (elementwise pairing) is only defined for equal-length options.
        if not (op == '@' and len(a) != len(b)):
            domain = eval(f'option_1 {op} option_2') # pylint:disable=eval-used
            domain.set_iter_params(n_reps=n_reps)
            configs = list(domain.iterator)

            n_items = SIZE_CALC[op](len(a), len(b))
            assert len(domain) == n_items
            assert domain.size == n_items * n_reps
            assert len(configs) == n_items * n_reps

    @pytest.mark.parametrize('repeat_each', [None, 1, 2])
    @pytest.mark.parametrize('n_reps', [1, 2, 3])
    def test_repetitions_order(self, repeat_each, n_reps):
        """'repetition' index must reflect the chosen repetition ordering."""
        domain = Domain(a=[1, 2], b=[3, 4], c=NS('normal'))
        domain.set_iter_params(n_reps=n_reps, repeat_each=repeat_each)
        configs = list(domain.iterator)
        for i, config in enumerate(configs):
            if repeat_each is None:
                assert config.config()['repetition'] == i // len(domain)
            else:
                assert config.config()['repetition'] == i % (repeat_each * n_reps) // repeat_each

    def test_domain_update(self):
        """After update(), configs must carry an incremented 'updates' counter."""
        domain = Domain({'a': [1, 2]})
        def update():
            return Domain({'x': [3, 4]})
        domain.set_update(update, ['last'])
        configs = list(domain.iterator)
        domain = domain.update(len(domain), None)
        configs += list(domain.iterator)
        assert len(configs) == 4
        for i, config in enumerate(configs):
            assert config.config()['updates'] == (2 * i) // len(configs)

    def test_sample_options(self):
        """Sampler-backed options must be reproducible for a fixed seed."""
        domain = Domain({'a': NS('normal')})
        domain.set_iter_params(n_items=3, n_reps=2, seed=42)
        res = [config['a'] for config in domain.iterator]
        exp_res = [0.03, 0.96, 0.73] * 2
        assert np.allclose(res, exp_res, atol=0.01, rtol=0)

    def test_weights(self):
        """Weighted sums of domains must interleave options per their weights."""
        domain = (
            1. * Domain(a=[1,2]) +
            1. * Domain(b=[3,4]) +
            Domain(a=[5,6]) +
            0.3 * Domain(a=NS('normal')) +
            0.7 * Domain(b=NS('normal', loc=10))
        )
        domain.set_iter_params(n_items=8, n_reps=2, seed=41)
        res = [config['a'] if 'a' in config.config() else config['b'] for config in domain.iterator]
        exp_res = [1, 3, 4, 2, 5, 6, 9.87, -1.08] * 2
        assert np.allclose(res, exp_res, atol=0.01, rtol=0)
class TestExecutor:
    """Tests of Executor running single experiments: callables, generators,
    pipelines, configs/branches and error handling."""

    def test_callable(self):
        """A named callable's output is saved and retrievable from results."""
        experiment = (Experiment()
                      .add_callable('sum', sum, args=[range(10)])
                      .save(O('sum'), 'sum')
        )
        executor = Executor(experiment, target='f', n_iters=1)
        executor.run()
        assert executor.experiments[0].results['sum'][0] == sum(range(10))

    def test_generator(self, generator):
        """A generator unit yields once per iteration; last value is the full sum."""
        experiment = (Experiment()
                      .add_generator('sum', generator, n=10)
                      .save(O('sum'), 'sum')
        )
        executor = Executor(experiment, target='f', n_iters=10)
        executor.run()
        assert executor.experiments[0].results['sum'][9] == sum(range(10))

    def test_direct_callable(self):
        """Callables can be attached via attribute-style syntax on Experiment."""
        experiment = (Experiment()
                      .sum(range(10), save_to='sum')
        )
        executor = Executor(experiment, target='f', n_iters=1)
        executor.run()
        assert executor.experiments[0].results['sum'][0] == sum(range(10))

    def test_direct_generator(self, generator): #pylint: disable=unused-argument
        """Generators from a namespace can be attached with mode='generator'."""
        experiment = (Experiment()
                      .add_namespace(locals())
                      .generator(10, mode='generator')
                      .save(O('generator'), 'sum')
        )
        executor = Executor(experiment, target='f', n_iters=10)
        executor.run()
        assert executor.experiments[0].results['sum'][9] == sum(range(10))

    def test_units_without_name(self, generator):
        """Units added without explicit names get their callable's name."""
        experiment = (Experiment()
                      .add_callable(sum, args=[range(10)])
                      .add_generator(generator, n=10)
                      .save(O('sum'), 'sum')
        )
        executor = Executor(experiment, target='f', n_iters=1)
        executor.run()
        assert executor.experiments[0].results['sum'][0] == sum(range(10))

    def test_configs(self):
        """Values are merged from experiment configs, branch configs and executor config."""
        def f(x, y, z):
            return (x, y, z)

        experiment = (Experiment()
                      .add_callable('sum', f, x=EC('x'), y=EC('y'), z=EC('z'), save_to='sum')
        )
        executor = Executor(experiment, target='f', configs=[{'x': 10}, {'x': 20}],
                            branches_configs=[{'y': 20}, {'y': 30}], executor_config={'z': 5},
                            n_iters=1)
        executor.run()
        assert executor.experiments[0].results['sum'][0] == (10, 20, 5)
        assert executor.experiments[1].results['sum'][0] == (20, 30, 5)

    def test_root(self):
        """A root unit runs once and its output is shared by all branches."""
        def root():
            return 10

        experiment = (Experiment()
                      .add_callable('root', root, root=True)
                      .add_callable('sum', sum, args=[[EC('x'), O('root')]])
                      .save(E().outputs['sum'], 'sum')
        )
        executor = Executor(experiment, target='f', configs=[{'x': 10}, {'x': 20}], n_iters=1)
        executor.run()
        assert executor.experiments[0].results['sum'][0] == 20
        assert executor.experiments[1].results['sum'][0] == 30

    def test_instances(self):
        """Instance methods can be used as units via 'instance.method' paths."""
        class MyClass:
            def __init__(self, x):
                self.x = x

            def sum(self):
                return sum(range(self.x))

        experiment = (Experiment()
                      .add_instance('instance', MyClass, x=EC('x'))
                      .add_callable('instance.sum')
                      .save(O('instance.sum'), 'sum')
        )
        executor = Executor(experiment, target='f', configs=[{'x': 10}, {'x': 20}], n_iters=1)
        executor.run()
        assert executor.experiments[0].results['sum'][0] == sum(range(10))
        assert executor.experiments[1].results['sum'][0] == sum(range(20))

    def test_pipeline(self):
        """A pipeline unit advances one batch per iteration; variables are saveable."""
        ppl = (Dataset(10).p
               .init_variable('var', 0)
               .update(V('var'), V('var') + B().indices.sum())
               .run_later(1, n_epochs=1, shuffle=False)
        )
        experiment = (Experiment()
                      .add_pipeline('ppl', ppl)
                      .save(E('ppl').v('var'), dst='var', when=['last'])
        )
        executor = Executor(experiment, target='f', n_iters=10)
        executor.run()
        assert executor.experiments[0].results['var'][9] == sum(range(10))

    def test_pipeline_with_branches(self):
        """Root/branch pipeline split: root iterates data, branches consume configs."""
        root = Dataset(10).p.run_later(1, n_epochs=1, shuffle=False)
        ppl = (Pipeline()
               .init_variable('var', 0)
               .update(V('var'), V('var') + B().indices.sum() * C('x'))
        )
        experiment = (Experiment()
                      .add_pipeline('ppl', root, ppl)
                      .save(E('ppl').v('var'), dst='var', when=['last'])
        )
        executor = Executor(experiment, target='f', n_iters=10, configs=[{'x': 10}, {'x': 20}], )
        executor.run()
        assert executor.experiments[0].results['var'][9] == sum(range(10)) * 10
        assert executor.experiments[1].results['var'][9] == sum(range(10)) * 20

    def test_stop_iteration(self, generator):
        """An exhausted generator finalizes the experiment; 'last' saves still fire."""
        def inc(x):
            return x + 1

        experiment = (Experiment()
                      .add_generator('sum', generator, n=EC('n'))
                      .add_callable('func', inc, x=O('sum'))
                      .save(O('sum'), 'sum', when='last')
                      .save(O('func'), 'func', when='last')
        )
        executor = Executor(experiment, target='f', configs=[{'n': 10}, {'n': 20}], n_iters=30, finalize=True)
        executor.run()
        # Each branch stops at its own n; the 'last' save lands at iteration n.
        assert executor.experiments[0].results['sum'][10] == sum(range(10))
        assert executor.experiments[1].results['sum'][20] == sum(range(20))
        assert executor.experiments[0].results['func'][10] == sum(range(10)) + 1
        assert executor.experiments[1].results['func'][20] == sum(range(20)) + 1

        # With n_iters=None the run must also terminate when generators are exhausted.
        executor = Executor(experiment, target='f', configs=[{'n': 10}, {'n': 20}], n_iters=None)
        executor.run()

    @pytest.mark.parametrize('save_to', ['a', ['a'], ['a', 'b'], ['a', 'b', 'c']])
    def test_multiple_output(self, save_to):
        """Tuple outputs must match the number of save_to destinations, else error."""
        def func():
            return 1, 2, 3

        research = Research().add_callable(func, save_to=save_to)
        research.run(dump_results=False)

        if isinstance(save_to, list) and len(save_to) != 3:
            assert len(research.monitor.exceptions) != 0
        else:
            assert len(research.monitor.exceptions) == 0
class TestResearch:
    """Tests of full Research runs: parallelism, domain updates, pipelines,
    dumping/loading, debugging and profiling."""

    @pytest.mark.parametrize('parallel', [False, True])
    @pytest.mark.parametrize('dump_results', [False, True])
    @pytest.mark.parametrize('workers', [1, 3])
    @pytest.mark.parametrize('branches, target', [[1, 'f'], [3, 'f'], [3, 't']])
    def test_simple_research(self, parallel, dump_results, target, workers, branches, simple_research):
        """6 configs x 3 iterations = 18 result rows, with and without dumping."""
        n_iters = 3
        simple_research.run(n_iters=n_iters, workers=workers, branches=branches, parallel=parallel,
                            dump_results=dump_results, executor_target=target)
        assert len(simple_research.monitor.exceptions) == 0
        assert len(simple_research.results.df) == 18
        if dump_results:
            loaded_research = Research.load(simple_research.name)
            assert len(loaded_research.results.df) == 18

    def test_empty_domain(self):
        """A Research without a domain still runs its single experiment."""
        research = Research().add_callable('func', lambda: 100).save(O('func'), 'sum')
        research.run(n_iters=10, dump_results=False)

        assert len(research.monitor.exceptions) == 0
        assert len(research.results.df) == 10

    def test_domain_update(self):
        """update_domain must append new configs when its trigger conditions fire."""
        def update():
            return Option('x', [4, 5, 6])

        research = (Research(domain=Option('x', [1, 2, 3]), n_reps=2)
                    .add_callable('func', lambda x: x, x=EC('x'))
                    .save(O('func'), 'sum')
                    .update_domain(update, when=['%5', '%8'], n_reps=2)
        )
        research.run(n_iters=1, dump_results=False, bar=False)

        assert len(research.monitor.exceptions) == 0
        # 3 configs x 2 reps from the initial domain + updates triggered mid-run.
        assert len(research.results.df) == 15

    @pytest.mark.slow
    @pytest.mark.parametrize('workers', [1, 2])
    def test_research_with_controller(self, workers, research_with_controller):
        """Controller-driven research: 4 configs produce 4 accuracy rows."""
        research_with_controller.run(dump_results=True, parallel=True, workers=workers, bar=False, finalize=True)

        assert len(research_with_controller.monitor.exceptions) == 0
        assert len(research_with_controller.results.df) == 4

        loaded_research = Research.load(research_with_controller.name)
        assert len(loaded_research.results.df) == 4

    @pytest.mark.slow
    @pytest.mark.parametrize('branches', [False, True])
    def test_research_with_pipelines(self, branches):
        """End-to-end run with train/test pipelines, with and without a root/branch split."""
        dataset = CIFAR10()
        model_config = {
            'head/layout': C('layout'),
            'head/units': C('units'),
            'loss': 'ce',
            'device': 'cpu',
            'amp': False
        }

        root_ppl = (Pipeline()
                    .to_array(channels='first', src='images', dst='images')
                    .run_later(batch_size=8, n_iters=1, shuffle=True, drop_last=True)
        ) << dataset.train

        branch_ppl = (Pipeline()
                      .init_model('model', ResNet, 'dynamic', config=model_config)
                      .init_variable('loss', None)
                      .train_model('model', B('images'), B('labels'), fetches='loss', save_to=V('loss'))
        )

        test_ppl = (Pipeline()
                    .import_model('model', C('import_from'))
                    .init_variable('metrics', None)
                    .to_array(channels='first', src='images', dst='images')
                    .predict_model('model', B('images'), fetches='predictions', save_to=B('predictions'))
                    .gather_metrics('classification', B('labels'), B('predictions'), fmt='logits', axis=-1,
                                    num_classes=10, save_to=V('metrics', mode='update'))
                    .run_later(batch_size=8, n_iters=2, shuffle=False, drop_last=False)
        ) << dataset.test

        def eval_metrics(ppl, metrics, **kwargs):
            return ppl.v('metrics').evaluate(metrics, **kwargs)

        domain = Domain({'layout': ['f', 'faf']}) @ Domain({'units': [[10], [100, 10]]})
        # Either a (root, branch) pair or a single merged pipeline.
        args = (root_ppl, branch_ppl) if branches else (root_ppl+branch_ppl, )

        research = (Research(name='research', domain=domain, n_reps=2)
                    .add_pipeline('train_ppl', *args, variables='loss')
                    .add_pipeline('test_ppl', test_ppl, import_from=E('train_ppl'), run=True, when='last')
                    .add_callable(eval_metrics, ppl=E('test_ppl'), metrics='accuracy', when='last')
                    .save(O('eval_metrics'), 'accuracy', when='last')
        )
        research.run(dump_results=False)

        # Check the dataframe came out with the expected column dtypes.
        results = research.results.df.dtypes.values
        assert all(results == [np.dtype(i) for i in ['O', 'O', 'O', 'int64', 'float64', 'float64']])

    def test_remove(self, simple_research):
        """Research.remove must delete the dumped research folder."""
        simple_research.run(n_iters=1)
        assert os.path.exists(simple_research.name)
        Research.remove(simple_research.name, ask=False)
        assert not os.path.exists(simple_research.name)

    @pytest.mark.slow
    @pytest.mark.parametrize('debug,expectation',
                             list(zip([False, True], [does_not_raise(), pytest.raises(NotImplementedError)])))
    def test_debug(self, debug, expectation):
        """With debug=True exceptions propagate; otherwise they are captured."""
        def func():
            raise NotImplementedError

        with expectation:
            research = Research().add_callable(func)
            research.run(dump_results=False, executor_target='f', parallel=False, debug=debug)

    @pytest.mark.parametrize('profile, shape', list(zip([2, 1], [9, 6])))
    def test_profile(self, profile, shape, simple_research):
        """Profiling level controls how many columns profile_info collects."""
        simple_research.run(n_iters=3, dump_results=False, profile=profile)
        assert simple_research.profiler.profile_info.shape[1] == shape

    def test_coincided_names(self):
        """Two units built from the same callable must save to distinct keys."""
        def f(a):
            return a ** 10

        research = (Research()
                    .add_callable(f, a=2, save_to='a')
                    .add_callable(f, a=3, save_to='b')
        )
        research.run(dump_results=False)

        assert research.results.df.iloc[0].a == f(2)
        assert research.results.df.iloc[0].b == f(3)
class TestResults:
    """Tests of ResearchResults filtering and loading."""

    @pytest.mark.parametrize('parallel', [False, True])
    @pytest.mark.parametrize('dump_results', [False, True])
    def test_filter_by_config(self, parallel, dump_results, simple_research):
        """Filtering by a config value keeps only the matching rows."""
        simple_research.run(n_iters=3, parallel=parallel, dump_results=dump_results)
        df = simple_research.results.to_df(use_alias=False, config={'y': 2})
        # 2 x-values x 3 iterations for the single matching y.
        assert len(df) == 6
        assert (df.y.values == 2).all()

    @pytest.mark.parametrize('parallel', [False, True])
    @pytest.mark.parametrize('dump_results', [False, True])
    def test_filter_by_alias(self, parallel, dump_results, simple_research):
        """Filtering by a string alias behaves like filtering by the raw value."""
        simple_research.run(n_iters=3, parallel=parallel, dump_results=dump_results)
        df = simple_research.results.to_df(use_alias=False, alias={'y': '2'})
        assert len(df) == 6
        assert (df.y.values == 2).all()

    @pytest.mark.parametrize('parallel', [False, True])
    @pytest.mark.parametrize('dump_results', [False, True])
    def test_filter_by_domain(self, parallel, dump_results, simple_research):
        """Filtering by a sub-domain keeps all rows matching any of its configs."""
        simple_research.run(n_iters=3, parallel=parallel, dump_results=dump_results)
        df = simple_research.results.to_df(use_alias=False, domain=Option('y', [2, 3]))
        assert len(df) == 12

    def test_load(self, simple_research):
        """ResearchResults can be constructed from a dumped research folder."""
        simple_research.run(n_iters=3)
        df = ResearchResults(simple_research.name, domain=Option('y', [2, 3])).df
        assert len(df) == 12

    @pytest.mark.parametrize('use_alias', [False, True])
    def test_lists_in_config(self, use_alias):
        """List-valued config entries (via Alias) produce one row, with or without aliases."""
        def dummy():
            return 1

        domain = Domain({'a': [Alias([1, 2, 3], 'list')]})
        research = Research(domain=domain).add_callable(dummy, save_to='res')
        research.run(dump_results=False)
        df = research.results.to_df(use_alias=use_alias)
        assert len(df) == 1
# #TODO: logging tests, test that exceptions in one branch don't affect other branches,
# # devices splitting, ...
| StarcoderdataPython |
def solution(x, y, d):
    """Minimum number of jumps of fixed length ``d`` to get from ``x`` to at least ``y``.

    Raises:
        Exception: if the target ``y`` is behind ``x`` or ``d`` is not positive.
    """
    if y < x or d <= 0:
        raise Exception("Invalid argument")
    # Integer ceiling division: equals (y - x) // d when d divides evenly,
    # and one more jump otherwise.
    return (y - x + d - 1) // d


print(solution(10, 85, 30))
print(solution(10, 10, 2))
# print(solution(10, 5, 30))
# print(solution(10, 85, -30))
| StarcoderdataPython |
1759627 | from Products.validation import validation as validationService
from bika.lims.testing import BIKA_FUNCTIONAL_TESTING
from bika.lims.tests.base import BikaFunctionalTestCase
from plone.app.testing import login
from plone.app.testing import TEST_USER_NAME
import unittest
class Tests(BikaFunctionalTestCase):
    """Functional tests for bika.lims field validators.

    Each test walks one validator through its failure messages and at least
    one passing case, using content created by the functional test layer.
    A recurring idiom below: validators cache their verdict on the REQUEST
    under ``<object id> + <field name>``, so between cases the tests reset
    ``REQUEST['validated']`` and that cache flag to force re-validation.
    """

    layer = BIKA_FUNCTIONAL_TESTING

    def test_UniqueFieldValidator(self):
        # Values that must be unique site-wide are rejected when another
        # object already uses them.
        login(self.portal, TEST_USER_NAME)
        clients = self.portal.clients
        client1 = clients['client-2'] # not Happy Hills
        # NOTE(review): '<NAME>' is a redacted literal — presumably
        # 'Happy Hills', matching the expected message below; confirm.
        self.assertEqual(
            client1.schema.get('Name').validate('<NAME>', client1),
            u"Validation failed: 'Happy Hills' is not unique")
        # A value nobody else uses validates cleanly (None == no error).
        self.assertEqual(
            None,
            client1.schema.get('title').validate('Another Client', client1))

    def test_ServiceKeywordValidator(self):
        # Analysis keywords must be present, well-formed and unique across
        # both services and calculation interim fields.
        login(self.portal, TEST_USER_NAME)
        services = self.portal.bika_setup.bika_analysisservices
        service1 = services['analysisservice-1']
        self.assertEqual(
            service1.schema.get('Keyword').validate('', service1),
            u'Analysis Keyword is required, please correct.')
        self.assertEqual(
            service1.schema.get('Keyword').validate('&', service1),
            u'Validation failed: keyword contains invalid characters')
        self.assertEqual(
            service1.schema.get('Keyword').validate('Ca', service1),
            u"Validation failed: 'Ca': This keyword is already in use by service 'Calcium'")
        self.assertEqual(
            service1.schema.get('Keyword').validate('TV', service1),
            u"Validation failed: 'TV': This keyword is already in use by calculation 'Titration'")
        self.assertEqual(
            None,
            service1.schema.get('Keyword').validate('VALID_KW', service1))

    def test_InterimFieldsValidator(self):
        # Interim field rows must have valid, mutually-consistent keywords
        # and titles, with no duplicates.
        login(self.portal, TEST_USER_NAME)
        calcs = self.portal.bika_setup.bika_calculations
        # Titration
        calc1 = calcs['calculation-1']
        key = calc1.id + 'InterimFields'

        # Empty interim fields list is valid.
        interim_fields = []
        self.portal.REQUEST.form['InterimFields'] = interim_fields
        self.portal.REQUEST['validated'] = None
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        self.assertEqual(
            None,
            calc1.schema.get('InterimFields').validate(
                interim_fields, calc1, REQUEST=self.portal.REQUEST))

        # Invalid characters in a keyword.
        interim_fields = [{'keyword': '&',
                           'title': 'Titration Volume',
                           'unit': '',
                           'default': ''},
                          ]
        self.portal.REQUEST.form['InterimFields'] = interim_fields
        self.portal.REQUEST['validated'] = None
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        self.assertEqual(
            calc1.schema.get('InterimFields').validate(
                interim_fields, calc1, REQUEST=self.portal.REQUEST),
            u"Validation failed: keyword contains invalid characters")

        # A title already mapped to a known keyword must use that keyword.
        interim_fields = [
            {'keyword': 'XXX', 'title': 'Gross Mass', 'unit': '', 'default': ''},
            {'keyword': 'TV', 'title': 'Titration Volume', 'unit': '', 'default': ''}]
        self.portal.REQUEST.form['InterimFields'] = interim_fields
        self.portal.REQUEST['validated'] = None
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        self.assertEqual(
            calc1.schema.get('InterimFields').validate(
                interim_fields, calc1, REQUEST=self.portal.REQUEST),
            u"Validation failed: column title 'Gross Mass' must have keyword 'GM'")

        # And conversely, a known keyword must carry its own title.
        interim_fields = [
            {'keyword': 'GM', 'title': 'XXX', 'unit': '', 'default': ''},
            {'keyword': 'TV', 'title': 'Titration Volume', 'unit': '', 'default': ''}]
        self.portal.REQUEST.form['InterimFields'] = interim_fields
        self.portal.REQUEST['validated'] = None
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        self.assertEqual(
            calc1.schema.get('InterimFields').validate(
                interim_fields, calc1, REQUEST=self.portal.REQUEST),
            u"Validation failed: keyword 'GM' must have column title 'Gross Mass'")

        # Duplicate keywords are rejected.
        interim_fields = [
            {'keyword': 'TV', 'title': 'Titration Volume', 'unit': '', 'default': ''},
            {'keyword': 'TV', 'title': 'Titration Volume 1', 'unit': '', 'default': ''}]
        self.portal.REQUEST.form['InterimFields'] = interim_fields
        self.portal.REQUEST['validated'] = None
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        self.assertEqual(
            calc1.schema.get('InterimFields').validate(
                interim_fields, calc1, REQUEST=self.portal.REQUEST),
            u"Validation failed: 'TV': duplicate keyword")

        # Duplicate titles are rejected.
        interim_fields = [
            {'keyword': 'TV', 'title': 'Titration Volume', 'unit': '', 'default': ''},
            {'keyword': 'TF', 'title': 'Titration Volume', 'unit': '', 'default': ''}]
        self.portal.REQUEST.form['InterimFields'] = interim_fields
        self.portal.REQUEST['validated'] = None
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        self.assertEqual(
            calc1.schema.get('InterimFields').validate(
                interim_fields, calc1, REQUEST=self.portal.REQUEST),
            u"Validation failed: 'Titration Volume': duplicate title")

        # A consistent, duplicate-free set validates cleanly.
        interim_fields = [
            {'keyword': 'TV', 'title': 'Titration Volume', 'unit': '', 'default': ''},
            {'keyword': 'TF', 'title': 'Titration Factor', 'unit': '', 'default': ''}]
        self.portal.REQUEST.form['InterimFields'] = interim_fields
        self.portal.REQUEST['validated'] = None
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        self.assertEqual(
            None,
            calc1.schema.get('InterimFields').validate(
                interim_fields, calc1, REQUEST=self.portal.REQUEST))

    def test_UncertaintyValidator(self):
        # Uncertainty ranges must be numeric, ordered, and error values must
        # be non-negative (percentages within 0-100).
        login(self.portal, TEST_USER_NAME)
        services = self.portal.bika_setup.bika_analysisservices
        serv1 = services['analysisservice-1']
        v = validationService.validatorFor('uncertainties_validator')
        field = serv1.schema['Uncertainties']
        key = serv1.id + field.getName()

        uncertainties = [{'intercept_min': '100.01', 'intercept_max': '200', 'errorvalue': '200%'}]
        self.portal.REQUEST['Uncertainties'] = uncertainties
        res = v(uncertainties, instance=serv1, field=field, REQUEST=self.portal.REQUEST)
        self.failUnlessEqual(res, "Validation failed: Error percentage must be between 0 and 100")

        uncertainties = [{'intercept_min': 'a', 'intercept_max': '200', 'errorvalue': '10%'}]
        self.portal.REQUEST['Uncertainties'] = uncertainties
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        res = v(uncertainties, instance=serv1, field=field, REQUEST=self.portal.REQUEST)
        self.failUnlessEqual(res, "Validation failed: Min values must be numeric")

        uncertainties = [{'intercept_min': '100.01', 'intercept_max': 'a', 'errorvalue': '10%'}]
        self.portal.REQUEST['Uncertainties'] = uncertainties
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        res = v(uncertainties, instance=serv1, field=field, REQUEST=self.portal.REQUEST)
        self.failUnlessEqual(res, "Validation failed: Max values must be numeric")

        uncertainties = [{'intercept_min': '100.01', 'intercept_max': '200', 'errorvalue': 'a%'}]
        self.portal.REQUEST['Uncertainties'] = uncertainties
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        res = v(uncertainties, instance=serv1, field=field, REQUEST=self.portal.REQUEST)
        self.failUnlessEqual(res, "Validation failed: Error values must be numeric")

        uncertainties = [{'intercept_min': '200', 'intercept_max': '100', 'errorvalue': '10%'}]
        self.portal.REQUEST['Uncertainties'] = uncertainties
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        res = v(uncertainties, instance=serv1, field=field, REQUEST=self.portal.REQUEST)
        self.failUnlessEqual(res, "Validation failed: Max values must be greater than Min values")

        uncertainties = [{'intercept_min': '100', 'intercept_max': '200', 'errorvalue': '-5%'}]
        self.portal.REQUEST['Uncertainties'] = uncertainties
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        res = v(uncertainties, instance=serv1, field=field, REQUEST=self.portal.REQUEST)
        self.failUnlessEqual(res, "Validation failed: Error percentage must be between 0 and 100")

        uncertainties = [{'intercept_min': '100', 'intercept_max': '200', 'errorvalue': '-5'}]
        self.portal.REQUEST['Uncertainties'] = uncertainties
        if key in self.portal.REQUEST:
            self.portal.REQUEST[key] = False
        res = v(uncertainties, instance=serv1, field=field, REQUEST=self.portal.REQUEST)
        self.failUnlessEqual(res, "Validation failed: Error value must be 0 or greater")

    def test_FormulaValidator(self):
        # Formulas may only reference declared interim field keywords.
        login(self.portal, TEST_USER_NAME)
        v = validationService.validatorFor('formulavalidator')
        calcs = self.portal.bika_setup.bika_calculations
        calc1 = calcs['calculation-1']
        interim_fields = [
            {'keyword': 'TV', 'title': 'Titration Volume', 'unit': '', 'default': ''},
            {'keyword': 'TF', 'title': 'Titration Factor', 'unit': '', 'default': ''}]
        self.portal.REQUEST.form['InterimFields'] = interim_fields

        formula = "[TV] * [TF] * [Wrong]"
        self.failUnlessEqual(
            v(formula, instance=calc1,
              field=calc1.schema.get('Formula'), REQUEST=self.portal.REQUEST),
            "Validation failed: Keyword 'Wrong' is invalid")

        formula = "[TV] * [TF]"
        self.assertEqual(
            True,
            v(formula, instance=calc1,
              field=calc1.schema.get('Formula'), REQUEST=self.portal.REQUEST))

    def test_CoordinateValidator(self):
        # Degrees/minutes/seconds must be numeric and within range, with
        # bearing N/S for latitude and E/W for longitude.
        login(self.portal, TEST_USER_NAME)
        sp = self.portal.bika_setup.bika_samplepoints['samplepoint-1']

        latitude = {'degrees': '!', 'minutes': '2', 'seconds': '3', 'bearing': 'N'}
        self.portal.REQUEST.form['Latitude'] = latitude
        val = sp.schema.get('Latitude').validate(latitude, sp)
        self.assertEqual(True, u"Validation failed: degrees must be numeric" in val)

        latitude = {'degrees': '0', 'minutes': '!', 'seconds': '3', 'bearing': 'N'}
        self.portal.REQUEST.form['Latitude'] = latitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Latitude').validate(latitude, sp)
        self.assertEqual(True, u"Validation failed: minutes must be numeric" in val)

        latitude = {'degrees': '0', 'minutes': '0', 'seconds': '!', 'bearing': 'N'}
        self.portal.REQUEST.form['Latitude'] = latitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Latitude').validate(latitude, sp)
        self.assertEqual(True, u"Validation failed: seconds must be numeric" in val)

        latitude = {'degrees': '0', 'minutes': '60', 'seconds': '0', 'bearing': 'N'}
        self.portal.REQUEST.form['Latitude'] = latitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Latitude').validate(latitude, sp)
        self.assertEqual(True, u"Validation failed: minutes must be 0 - 59" in val)

        latitude = {'degrees': '0', 'minutes': '0', 'seconds': '60', 'bearing': 'N'}
        self.portal.REQUEST.form['Latitude'] = latitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Latitude').validate(latitude, sp)
        self.assertEqual(True, u"Validation failed: seconds must be 0 - 59" in val)

        # latitude specific
        latitude = {'degrees': '91', 'minutes': '0', 'seconds': '0', 'bearing': 'N'}
        self.portal.REQUEST.form['Latitude'] = latitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Latitude').validate(latitude, sp)
        self.assertEqual(True, u"Validation failed: degrees must be 0 - 90" in val)

        latitude = {'degrees': '90', 'minutes': '1', 'seconds': '0', 'bearing': 'N'}
        self.portal.REQUEST.form['Latitude'] = latitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Latitude').validate(latitude, sp)
        self.assertEqual(True, u"Validation failed: degrees is 90; minutes must be zero" in val)

        latitude = {'degrees': '90', 'minutes': '0', 'seconds': '1', 'bearing': 'N'}
        self.portal.REQUEST.form['Latitude'] = latitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Latitude').validate(latitude, sp)
        self.assertEqual(True, u"Validation failed: degrees is 90; seconds must be zero" in val)

        latitude = {'degrees': '90', 'minutes': '0', 'seconds': '0', 'bearing': 'E'}
        self.portal.REQUEST.form['Latitude'] = latitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Latitude').validate(latitude, sp)
        self.assertEqual(True, u"Validation failed: Bearing must be N/S" in val)

        # longitude specific
        longitude = {'degrees': '181', 'minutes': '0', 'seconds': '0', 'bearing': 'E'}
        self.portal.REQUEST.form['Longitude'] = longitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Longitude').validate(longitude, sp)
        self.assertEqual(True, u"Validation failed: degrees must be 0 - 180" in val)

        longitude = {'degrees': '180', 'minutes': '1', 'seconds': '0', 'bearing': 'E'}
        self.portal.REQUEST.form['Longitude'] = longitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Longitude').validate(longitude, sp)
        self.assertEqual(True, u"Validation failed: degrees is 180; minutes must be zero" in val)

        longitude = {'degrees': '180', 'minutes': '0', 'seconds': '1', 'bearing': 'E'}
        self.portal.REQUEST.form['Longitude'] = longitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Longitude').validate(longitude, sp)
        self.assertEqual(True, u"Validation failed: degrees is 180; seconds must be zero" in val)

        longitude = {'degrees': '0', 'minutes': '0', 'seconds': '0', 'bearing': 'N'}
        self.portal.REQUEST.form['Longitude'] = longitude
        self.portal.REQUEST['validated'] = None
        val = sp.schema.get('Longitude').validate(longitude, sp)
        self.assertEqual(True, u"Validation failed: Bearing must be E/W" in val)

        # A fully valid longitude produces no error.
        longitude = {'degrees': '1', 'minutes': '1', 'seconds': '1', 'bearing': 'E'}
        self.portal.REQUEST.form['Longitude'] = longitude
        self.portal.REQUEST['validated'] = None
        self.assertEqual(
            None,
            sp.schema.get('Longitude').validate(longitude, sp))
def test_suite():
    """Assemble the functional-test suite for this module.

    Uses ``TestLoader.loadTestsFromTestCase`` instead of the deprecated
    ``unittest.makeSuite`` (deprecated since Python 3.11, removed in 3.13);
    behaviour is otherwise identical.
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Tests))
    # Attach the Plone functional layer so the fixture is set up for the run.
    suite.layer = BIKA_FUNCTIONAL_TESTING
    return suite
| StarcoderdataPython |
1616839 | # Generated by Django 2.2.9 on 2020-01-01 20:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds ProfileIndicator.name and relaxes ProfileIndicator.universe.

    - ``name``: new optional CharField (blank allowed, max 60 chars).
    - ``universe``: FK made optional (blank/null allowed); CASCADE delete kept.
    """

    dependencies = [
        ('datasets', '0015_profileindicator_label'),
    ]

    operations = [
        migrations.AddField(
            model_name='profileindicator',
            name='name',
            field=models.CharField(blank=True, max_length=60),
        ),
        migrations.AlterField(
            model_name='profileindicator',
            name='universe',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='datasets.Universe'),
        ),
    ]
| StarcoderdataPython |
3321853 | import fractions
# Software by francote
print("Triangulacion de matrices 3x3")


def triangulate(A, B, C, D, E, F, G, H, I):
    """Return the upper-triangular form of [[A, B, C], [D, E, F], [G, H, I]].

    Plain Gaussian elimination with exact ``fractions.Fraction`` arithmetic
    and no pivoting, so A and the reduced (2, 2) pivot must be non-zero
    (ZeroDivisionError otherwise).  Returns the three rows as tuples whose
    entries are ints or Fractions.
    """
    Fr = fractions.Fraction
    # Zero out column 0 of rows 2 and 3.
    m21 = Fr(D, A)
    row2 = (D - m21 * A, E - m21 * B, F - m21 * C)
    m31 = Fr(G, A)
    row3 = (G - m31 * A, H - m31 * B, I - m31 * C)
    # Zero out column 1 of row 3 using the reduced second row.
    # (Bug fix: the original used F where I belonged when reducing the
    # third row, and subtracted factor*row3[2] instead of factor*row2[2],
    # so the printed "triangulation" was numerically wrong.)
    m32 = row3[1] / row2[1]
    row3 = (row3[0] - m32 * row2[0],
            row3[1] - m32 * row2[1],
            row3[2] - m32 * row2[2])
    return (A, B, C), row2, row3


def main():
    """Read a 3x3 integer matrix from the console and print its triangulation."""
    print("A B C")
    print("D E F")
    print("G H I")
    A = int(input("Valor de A : "))
    B = int(input("Valor de B : "))
    C = int(input("Valor de C : "))
    D = int(input("Valor de D : "))
    E = int(input("Valor de E : "))
    F = int(input("Valor de F : "))
    G = int(input("Valor de G : "))
    H = int(input("Valor de H : "))
    I = int(input("Valor de I : "))
    print("")
    for row in triangulate(A, B, C, D, E, F, G, H, I):
        print(row[0], row[1], row[2])


if __name__ == "__main__":
    # Guarded so importing this module does not block on input().
    main()
| StarcoderdataPython |
4816759 | <reponame>luerhard/edge_gravity
"Docstring yay"
__version__ = "0.0.3" | StarcoderdataPython |
41717 | import numpy as np
def bowl(vs, v_ref=1.0, scale=.1):
    """Evaluate a bowl-shaped penalty at each value in *vs*.

    Outside a +/-0.05 band around *v_ref* the penalty grows linearly
    (2*|v - v_ref| - 0.095); inside the band it is a small inverted
    Gaussian bump (-0.01 * N(v; v_ref, scale) + 0.04).  Returns a
    numpy array with one entry per input value.
    """
    def _gauss_pdf(value):
        variance = scale ** 2
        return 1 / np.sqrt(2 * np.pi * variance) * np.exp(-0.5 * np.square(value - v_ref) / variance)

    def _penalty(value):
        distance = np.abs(value - v_ref)
        if distance > 0.05:
            return 2 * distance - 0.095
        return -0.01 * _gauss_pdf(value) + 0.04

    return np.array([_penalty(value) for value in vs])
91854 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 5 12:38:45 2017
@author: abench
"""
import cv2
camera_num=0  # index of the capture device (0 = default webcam)
camera=cv2.VideoCapture(camera_num)
fourcc=cv2.VideoWriter_fourcc(*'XVID')  # XVID codec for the .avi container
out=cv2.VideoWriter('output.avi',fourcc,20.0,(640,480))  # 20 fps, 640x480 output
# Record until the device closes, a frame read fails, or 'q' is pressed.
while(camera.isOpened()):
    ret,frame=camera.read()
    if ret==True:
        frame=cv2.flip(frame,1)  # mirror horizontally before saving/preview
        out.write(frame)
        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
# Release the capture device and writer, then close the preview window.
camera.release()
out.release()
cv2.destroyAllWindows()
1635195 | <filename>backend/db/base/schemas.py
from pydantic import BaseModel
from tortoise.contrib.pydantic import PydanticModel, pydantic_model_creator
from db.base.models import File
from config import DOMAIN_BACKEND
class Status(BaseModel):
    """Generic response payload carrying a human-readable message."""
    message: str


class GetFile(PydanticModel):
    """Serialised File row; ``url`` is derived from ``path`` at construction."""
    id: int
    url: str = None
    path: str

    def __init__(self, **kwargs):
        super(GetFile, self).__init__(**kwargs)
        # Build the public download URL from the stored relative path.
        self.url = f'{DOMAIN_BACKEND}/{self.path}'

    class Config:
        # Tortoise model this pydantic schema mirrors.
        orig_model = File


class URL(PydanticModel):
    """Bare URL payload."""
    url: str
| StarcoderdataPython |
3236407 | from selenium import webdriver
import time
import requests
# (translated) These element IDs were found by inspecting the page HTML:
# locating the login switch with find() kept failing, so the element must be
# hidden or nested inside another frame.
login_url = 'http://xui.ptlogin2.qq.com/cgi-bin/xlogin?proxy_url=http%3A//qzs.qq.com/qzone/v6/portal/proxy.html' \
            '&daid=5&&hide_title_bar=1&low_login=0&qlogin_auto_login=1&no_verifyimg=1&link_target=blank&' \
            'appid=549000912&style=22&target=self&s_url=http%3A%2F%2Fqzs.qq.com%2Fqzone%2Fv5%2Floginsucc.html%3Fpara' \
            '%3Dizone%26specifyurl%3Dhttp%253A%252F%252Fuser.qzone.qq.com%252F1796246076&pt_qr_app=手机QQ空间' \
            '&pt_qr_link=http%3A//z.qzone.com/download.html&self_regurl=http%3A//qzs.qq.com/qzone/v6/reg/index.html' \
            '&pt_qr_help_link=http%3A//z.qzone.com/download.html'
login_name = input('请输入QQ号:')
login_password = input('请输入QQ密码:')
# (translated) Possibly a selenium version or platform issue: with current
# selenium installs, webdriver.Firefox()/webdriver.Chrome() may error and the
# IDE does not autocomplete webdriver.*.  If that happens, delete the selenium
# folder under the Python install directory so that `import selenium.webdriver`
# imports the module from site-packages instead.
driver = webdriver.Chrome(executable_path='D:/webdriver/chromedriver.exe')
driver.get(login_url)
time.sleep(3)
# Switch from QR-code login to account/password login.
login_type = driver.find_element_by_id('switcher_plogin')
login_type.click()
username = driver.find_element_by_id('u')
username.clear()
password = driver.find_element_by_id('p')
password.clear()
# NOTE(review): '<PASSWORD>' below is a redacted placeholder — originally the
# credentials read above (login_name / login_password); restore before running.
username.send_keys(<PASSWORD>)
password.send_keys(<PASSWORD>)
submit = driver.find_element_by_id('login_button')
submit.click()
time.sleep(5)
# Harvest the session cookies from the browser and build a Cookie header.
cookies = driver.get_cookies()
driver.close()
cookie = [item['name'] + "=" + item['value'] for item in cookies]
cookiestr = '; '.join(item for item in cookie)
headers = {'cookie': cookiestr}
# (translated) Verify that the cookie works.
myspace = 'http://user.qzone.qq.com/17962460'
content = requests.get(myspace, headers=headers)
print(content.text)
1760586 | <filename>2015/04/p1.py
import hashlib
puzzle_input = b"iwrupvqb"
number = 100000
while True:
key = puzzle_input + str(number).encode()
if hashlib.md5(key).hexdigest()[:5] == "00000":
break
number += 1
print(number)
# Runs way faster than I expected, lol
| StarcoderdataPython |
3385433 | import cv2
import numpy as np
img=cv2.imread("kare2.jpeg") # benim şeklim içerdeki kareleri neden bulmuyor, kare3 ün yamukluğunu düzelt(araştırma), en büyük kareyi bul(alandan yola çık(h*w))
frame=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(frame, (7, 7), 2)
edge = cv2.Canny(blur, 0, 50, 3)
contours, hierarchy = cv2.findContours(edge,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
try: hierarchy = hierarchy[0]
except: hierarchy = []
height, width = edge.shape
alanlar=[]
alan=0
for contour, hier in zip(contours, hierarchy):
(x,y,w,h) = cv2.boundingRect(contour)
# min_x, max_x = min(x, min_x), max(x+w, max_x)
# min_y, max_y = min(y, min_y), max(y+h, max_y)
alan=w*h
alanlar.append(alan)
kareDict = {
alan:{
"w":w,
"h":h
}}
kareDict.update({
alan: {"w":w,"h":h}
})
kareDict[alan]["w"]=w
kareDict[alan]["h"]=h
alanlar.sort(reverse=True)
enbuyuk=alanlar[0]
values= []
for item in kareDict[enbuyuk].values():
values.append(item)
w=values[0]
h=values[1]
cv2.rectangle(img, (x,y), (x+w,y+h), (0,0,255), 2)
cv2.imshow("Contour", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
#----------------------
# deneme
# import cv2 as cv
# import numpy as np
# import random as rng
# img = cv.imread('kare2.jpeg') # read picture
# imgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # BGR to grayscale
# imgray = cv.blur(imgray, (3,3))
# ret, threshold = cv.threshold(imgray, 200, 255, cv.THRESH_BINARY)
# canny_output = cv.Canny(threshold, 0, 50, 3)
# # edge = cv2.Canny(blur, 0, 50, 3)
# contours, hierarchy = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
# contours_poly = [None]*len(contours)
# boundRect = [None]*len(contours)
# # centers = [None]*len(contours)
# # radius = [None]*len(contours)
# for i, c in enumerate(contours):
# contours_poly[i] = cv.approxPolyDP(c, 3, True)
# boundRect[i] = cv.boundingRect(contours_poly[i])
# # centers[i], radius[i] = cv.minEnclosingCircle(contours_poly[i])
# drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
# for i in range(len(contours)):
# color = (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256))
# cv.drawContours(drawing, contours_poly, i, color)
# cv.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])), \
# (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)
# # cv.circle(drawing, (int(centers[i][0]), int(centers[i][1])), int(radius[i]), color, 2)
# cv.imshow('Contours', drawing)
# # epsilon = 0.1 * cv2.arcLength(countours[0], True)
# # approx = cv2.approxPolyDP(countours[0], epsilon, True)
# # cv2.drawContours(im, approx, -1, (0, 255, 0), 3)
# # cv2.imshow("Contour", im)
# cv.waitKey(0)
# cv.destroyAllWindows()
| StarcoderdataPython |
1600265 | <filename>LaGou/config.py
# MongoDB connection host.
MONGO_URL = 'localhost'
# Database name.
MONGO_DB = 'lagou'
# Collection (table) name.
MONGO_TABLE = 'Python'
# lagou.com username and password.
# NOTE(review): '<PASSWORD>' is a redacted placeholder — supply real credentials.
USERNAME = '123'
PASSWORD = '<PASSWORD>'
35991 | <filename>scripts/rpc/cmd_parser.py<gh_stars>1000+
args_global = ['server_addr', 'port', 'timeout', 'verbose', 'dry_run', 'conn_retries',
               'is_server', 'rpc_plugin', 'called_rpc_name', 'func', 'client']


def strip_globals(kwargs):
    """Remove every framework-level argument name from *kwargs*, in place.

    Names listed in ``args_global`` that are absent are silently ignored.
    """
    for global_name in args_global:
        kwargs.pop(global_name, None)
def remove_null(kwargs):
    """Delete every key whose value is None from *kwargs*, in place."""
    # Collect first so we never mutate the dict while iterating it.
    null_keys = [key for key, value in kwargs.items() if value is None]
    for key in null_keys:
        kwargs.pop(key, None)
def apply_defaults(kwargs, **defaults):
    """Copy each default into *kwargs* unless that key is already present.

    Present keys keep their value even when it is None.
    """
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
def group_as(kwargs, name, values):
    """Pop the listed keys out of *kwargs* into a sub-dict stored under *name*.

    Keys that are absent from *kwargs*, or whose value is None, are left
    untouched.  The (possibly empty) group is always assigned to
    ``kwargs[name]``.
    """
    group = {key: kwargs.pop(key)
             for key in values
             if kwargs.get(key) is not None}
    kwargs[name] = group
1674432 | <reponame>ChaosCodes/beta-recsys
"""isort:skip_file."""
import argparse
import os
import sys
sys.path.append("../")
from torch.utils.data import DataLoader
from tqdm import tqdm
from beta_rec.core.eval_engine import SeqEvalEngine
from beta_rec.core.train_engine import TrainEngine
from beta_rec.datasets.seq_data_utils import (
SeqDataset,
collate_fn,
create_seq_db,
dataset_to_seq_target_format,
load_dataset,
reindex_items,
)
from beta_rec.models.narm import NARMEngine
from beta_rec.utils.monitor import Monitor
def parse_args(argv=None):
    """Parse args from command line.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Added (backward-compatibly) so the parser can be driven
            programmatically and unit-tested.

    Returns:
        argparse.Namespace with the parsed options (unset options are None).
    """
    parser = argparse.ArgumentParser(description="Run NARM..")
    parser.add_argument(
        "--config_file",
        nargs="?",
        type=str,
        default="../configs/narm_default.json",
        help="Specify the config file name. Only accept a file from ../configs/",
    )
    # If the following settings are specified with command line,
    # these settings will be updated.
    parser.add_argument(
        "--dataset",
        nargs="?",
        type=str,
        help="Options are: tafeng, dunnhunmby and instacart",
    )
    parser.add_argument(
        "--data_split",
        nargs="?",
        type=str,
        help="Options are: leave_one_out and temporal",
    )
    parser.add_argument("--root_dir", nargs="?", type=str, help="working directory")
    parser.add_argument(
        "--n_sample", nargs="?", type=int, help="Number of sampled triples."
    )
    parser.add_argument("--sub_set", nargs="?", type=int, help="Subset of dataset.")
    parser.add_argument(
        "--temp_train",
        nargs="?",
        type=int,
        help="IF value >0, then the model will be trained based on the temporal feeding, else use normal trainning.",
    )
    parser.add_argument(
        "--emb_dim", nargs="?", type=int, help="Dimension of the embedding."
    )
    parser.add_argument(
        "--late_dim", nargs="?", type=int, help="Dimension of the latent layers.",
    )
    parser.add_argument("--lr", nargs="?", type=float, help="Intial learning rate.")
    parser.add_argument("--num_epoch", nargs="?", type=int, help="Number of max epoch.")
    parser.add_argument(
        "--batch_size", nargs="?", type=int, help="Batch size for training."
    )
    parser.add_argument("--optimizer", nargs="?", type=str, help="OPTI")
    parser.add_argument("--activator", nargs="?", type=str, help="activator")
    parser.add_argument("--alpha", nargs="?", type=float, help="ALPHA")
    return parser.parse_args(argv)
class NARM_train(TrainEngine):
    """Train/evaluate driver for NARM built on the TrainEngine base class."""

    def __init__(self, config):
        """Initialize the NARM_train class.

        Args:
            config (dict): All the parameters for the model.
        """
        self.config = config
        super(NARM_train, self).__init__(self.config)
        self.load_dataset_seq()
        self.build_data_loader()
        self.engine = NARMEngine(self.config)
        self.seq_eval_engine = SeqEvalEngine(self.config)

    def load_dataset_seq(self):
        """Load interactions and build a temporal train/validate/test split.

        Side effects: sets self.dataset, self.train_data, self.valid_data,
        self.test_data and fills n_users / n_items into the config.
        """
        # ml = Movielens_100k()
        # ml.download()
        # ml.load_interaction()
        # self.dataset = ml.make_temporal_split(n_negative=0, n_test=0)
        ld_dataset = load_dataset(self.config)
        ld_dataset.download()
        ld_dataset.load_interaction()
        self.dataset = ld_dataset.make_temporal_split(n_negative=0, n_test=0)
        self.train_data = self.dataset[self.dataset.col_flag == "train"]
        self.valid_data = self.dataset[self.dataset.col_flag == "validate"]
        self.test_data = self.dataset[self.dataset.col_flag == "test"]
        # self.dataset = Dataset(self.config)
        self.config["dataset"]["n_users"] = self.train_data.col_user.nunique()
        # +1 because items are later reindexed from 1 (0 is the padding id).
        self.config["dataset"]["n_items"] = self.train_data.col_item.nunique() + 1

    def build_data_loader(self):
        """Convert users' interactions to sequences.

        Returns:
            load_train_data (DataLoader): training set.
        """
        # reindex items from 1
        self.train_data, self.valid_data, self.test_data = reindex_items(
            self.train_data, self.valid_data, self.test_data
        )
        # data to sequences
        self.valid_data = create_seq_db(self.valid_data)
        self.test_data = create_seq_db(self.test_data)
        # convert interactions to sequences
        seq_train_data = create_seq_db(self.train_data)
        # convert sequences to (seq, target) format
        load_train_data = dataset_to_seq_target_format(seq_train_data)
        # define pytorch Dataset class for sequential datasets
        load_train_data = SeqDataset(load_train_data)
        # pad the sequences with 0
        self.load_train_data = DataLoader(
            load_train_data,
            batch_size=self.config["model"]["batch_size"],
            shuffle=False,
            collate_fn=collate_fn,
        )
        return self.load_train_data

    def _train(self, engine, train_loader, save_dir):
        """Train the model with epochs, early-stopping and per-epoch eval."""
        epoch_bar = tqdm(range(self.config["model"]["max_epoch"]), file=sys.stdout)
        for epoch in epoch_bar:
            print("Epoch {} starts !".format(epoch))
            print("-" * 80)
            # Early stop (and checkpointing) is decided before the epoch runs.
            if self.check_early_stop(engine, save_dir, epoch):
                break
            engine.train_an_epoch(train_loader, epoch=epoch)
            """evaluate model on validation and test sets"""
            # evaluation
            self.seq_eval_engine.train_eval_seq(
                self.valid_data, self.test_data, engine, epoch
            )

    def train(self):
        """Train and test NARM, recording run time via the resource Monitor."""
        self.monitor = Monitor(
            log_dir=self.config["system"]["run_dir"], delay=1, gpu_id=self.gpu_id
        )
        train_loader = self.load_train_data
        # NOTE(review): this replaces the engine built in __init__ with a
        # fresh one — presumably intentional (fresh weights per train call).
        self.engine = NARMEngine(self.config)
        self.narm_save_dir = os.path.join(
            self.config["system"]["model_save_dir"], self.config["model"]["save_name"]
        )
        self._train(self.engine, train_loader, self.narm_save_dir)
        self.config["run_time"] = self.monitor.stop()
        # Final evaluation on the held-out test split.
        self.seq_eval_engine.test_eval_seq(self.test_data, self.engine)
if __name__ == "__main__":
args = parse_args()
narm = NARM_train(args)
narm.train()
# narm.test() have already implemented in train()
| StarcoderdataPython |
26505 | version = "2018-04-26" | StarcoderdataPython |
1705139 | """rnnt is a python package for RNN-Transduction loss support in TensorFlow==2.0
This loss function is well described here - https://arxiv.org/pdf/1211.3711.pdf
"""
from .rnnt import (
rnnt_loss,
)
| StarcoderdataPython |
1730756 | <reponame>JoseArtur/phyton-exercices<filename>PyUdemy/Day3/PizzaOrder.py
print("Welcome to Python Pizza Deliveries!!")
size = input("What size pizza do you want? S, M or L\n").upper()
add_pepperoni = input("Do you want pepperoni? Y or N\n").upper()
extra_cheese = input("Do you want extra cheese? Y or N\n").upper()
bill = 0
if size=="S":
bill+=15
if add_pepperoni=="Y":
bill+=3
elif size=="M":
bill+=20
if add_pepperoni=="Y":
bill+=3
elif size=="L":
bill+=25
if add_pepperoni=="Y":
bill+=3
if extra_cheese=="Y":
bill+=1
print(f"The total bill is €{bill}") | StarcoderdataPython |
3345188 | <reponame>yoki31/aiopyarr<filename>example.py
"""Example usage of aiopyarr."""
import asyncio
from aiopyarr.models.host_configuration import PyArrHostConfiguration
from aiopyarr.radarr_client import RadarrClient
IP = "192.168.100.3"
TOKEN = "xxxxxxxxxxxxxxxx<PASSWORD>"
async def async_example():
"""Example usage of aiopyarr."""
host_configuration = PyArrHostConfiguration(ipaddress=IP, api_token=TOKEN)
async with RadarrClient(host_configuration=host_configuration) as client:
print(await client.async_get_system_status())
asyncio.get_event_loop().run_until_complete(async_example())
| StarcoderdataPython |
3384956 | """
glglobs are amalgamated lists, useful for drawing comparisons between lists.
renamed to glglob as it clashes with a matplotlib and python module name
"""
import sys, os, csv, string, math, numpy, pickle
from numpy import array, zeros, object_, arange
from copy import deepcopy
from operator import itemgetter
from statistics import pstdev, mean
from . import config, utils
from .flags import *
from .base_genelist import _base_genelist
from .draw import draw
from .errors import AssertionError, NotImplementedError, GlglobDuplicateNameError
from .location import location
from .progress import progressbar
from .genelist import genelist
from .expression import expression
from .data import positive_strand_labels, negative_strand_labels
import matplotlib.pyplot as plot
import matplotlib.cm as cm
from scipy.stats import spearmanr, pearsonr
if config.SKLEARN_AVAIL: # These are optional
from sklearn.cluster import DBSCAN
class glglob(_base_genelist): # cannot be a genelist, as it has no keys...
    def __init__(self, *args, **kargs):
        """
        **Purpose**
            A bundle of genelists to enable across-genelist meta comparisons.

        **Arguments**
            list of genelists
                The first argument must be a list of genelists.

            Optional arguments:
                None specified.
        """
        _base_genelist.__init__(self)
        # args should be a list of lists.
        # we then store them in the linearData set
        if args: # So we can have empty glglobs.
            self.linearData = args
            self._optimiseData()  # builds the name -> index lookup
        else:
            self.linearData = []
        self.draw = draw(self)  # drawing helper bound to this glglob
    def __repr__(self):
        # Short type tag used in logs; __str__ gives a content summary.
        return "glbase.glglob"
    def __str__(self):
        """Summarise the number of stored lists and their distinct repr() types."""
        # work out all of the types, preserving first-seen order
        types = []
        for item in self.linearData:
            if item.__repr__() not in types:
                types.append(item.__repr__())
        return "glglob contains: %s items of type(s): %s" % (len(self.linearData), ", ".join(types))
    def _optimiseData(self): # no keys, so would die.
        """
        (Override)
        glglob actually does have keys: the names of the stored genelists.
        Rebuilds the name -> index lookup used by __getitem__.

        Raises GlglobDuplicateNameError if two stored lists share a name.
        """
        self.__list_name_lookback = {} # name -> index location of each stored list.
        for index, item in enumerate(self.linearData):
            if item.name in self.__list_name_lookback:
                raise GlglobDuplicateNameError("self._optimiseData", item.name)
            else:
                self.__list_name_lookback[item.name] = index
    def loadCSV(self):
        # Unsupported for glglobs (they wrap multiple lists, not one table);
        # logs an error and reports failure instead of raising.
        config.log.error("glglobs cannot be represented as a CSV/TSV file, use glload() to load binary files")
        return(False)
    def saveCSV(self):
        # Unsupported for glglobs; mirrors loadCSV() above.
        config.log.error("glglobs cannot be represented as a CSV/TSV file, use .save() to save binary files")
        return False
def __getitem__(self, value):
"""
(Override)
glglobs should be accesible by name.
"""
if value in self.__list_name_lookback:
return(self.linearData[self.__list_name_lookback[value]]) # get the actual item
return None
def __setitem__(self, value):
"""
(Override)
"""
config.log.error("glglobs cannot be written to")
def compare(self, key=None, filename=None, method=None, delta=200, matrix_tsv=None,
mode='fast',
row_cluster=True, col_cluster=True, bracket=None,
pearson_tsv=None,
jaccard=False,
**kargs):
"""
**Purpose**
perform a square comparison between the genelists according
to some sort of criteria.
Performs Pearson correlation between the patterns of overlap
**Arguments**
key (string, required)
key to use when performing the comparison.
filename (string, required)
filename to save heatmap image as.
method (string, "overlap|collide|map", required)
method to use to compare the genelists
delta (integer, optional, default=200)
an optional command for delta expansion of the reads. This feeds into
"overlap" and "collide" methods. And is ignored by "map"
See the documentation for overlap and collide in genelist for
exactly what these mean
distance_score (string, "euclidean", optional defaults to "euclidean")
Scoring method for distance caluclations.
This is not implemented at the moment, and only uses a
euclidean distance.
output_pair_wise_correlation_plots (True|False)
This is a debug option to output graphs showing the
actual correlation matrices of the pair-wise correlations.
Probably best only to do this if you
know what you are doing!
matrix_tsv (filename)
If true save a tsv containing the overlap scores
row_cluster (optional, True|False, default=True)
cluster the rows of the heatmap
col_cluster (optional, True|False, default=True)
cluster the columns of the heatmap
row_font_size (Optional, default=guess suitable size)
the size of the row labels (in points). If set this will also override the hiding of
labels if there are too many elements.
col_font_size (Optional, default=8)
the size of the column labels (in points)
jaccard (Optional, default=False)
Use the Jaccard index https://en.wikipedia.org/wiki/Jaccard_index
instead of the (Classic) number of overlaps.
**Result**
returns the distance matrix if succesful or False|None if not.
Saves an image to 'filename' containing a grid heatmap
"""
valid_modes = ['fast', 'slow']
valid_methods = ["overlap", "collide", "map"]
valid_dist_score = ["euclidean"]
distance_score = "euclidean" # override for the moment
assert mode in valid_modes, "must use a valid method for comparison ({0})".format(", ".join(valid_modes))
assert method in valid_methods, "must use a valid method for comparison (%s)" % ", ".join(valid_methods)
assert filename, "Filename must be valid"
assert key in self.linearData[0].linearData[0], "key '%s' not found" % (key,) # just check one of the lists
assert distance_score in valid_dist_score, "%s is not a valid distance metric" % (distance_score,)
if mode == 'slow':
return self.__compare_slow(key=key, filename=filename, method=method, delta=delta,
matrix_tsv=matrix_tsv, pearson_tsv=pearson_tsv,
row_cluster=row_cluster, col_cluster=col_cluster, bracket=bracket,
jaccard=jaccard,
**kargs)
elif mode == 'fast':
return self.__compare_fast(key=key, filename=filename, method=method, delta=delta,
matrix_tsv=matrix_tsv, pearson_tsv=pearson_tsv,
row_cluster=row_cluster, col_cluster=col_cluster, bracket=bracket,
jaccard=jaccard,
**kargs)
    def __compare_fast(self, key=None, filename=None, method=None, delta=200, matrix_tsv=None,
        row_cluster=True, col_cluster=True, bracket=None, pearson_tsv=None, bin_size=2000,
        jaccard=False, **kargs):
        """
        **Purpose**
            The new style faster O(lin*lin/23) version.

            Bins the genome into fixed bin_size windows, marks which peak list
            occupies each bin, then scores every pair of lists by the number of
            co-occupied bins, normalises by the smaller list size and finally
            converts the normalised overlap patterns into a Pearson
            correlation heatmap.

            Note: jaccard=True and method='map' are rejected here (see the
            asserts below); use mode='slow' for those.
        """
        assert not jaccard, 'Jaccard not implemented for compare mode=fast'
        assert method != 'map', 'method=map not implemented when mode=fast'

        # mat: {chrom: {(bin_left, bin_right): [0/1 membership per list]}}
        mat = {}
        num_samples = len(self.linearData)

        # Default colour brackets differ for Jaccard (0..0.4) vs Pearson (-0.2..1).
        # (The jaccard branch is currently unreachable because of the assert above.)
        if jaccard:
            if not bracket:
                bracket = [0.0, 0.4]
        else: # Integers will do fine to store the overlaps
            if not bracket:
                bracket = [-0.2, 1]
        matrix = zeros( (len(self), len(self)), dtype=numpy.float64 ) # Must be float;

        config.log.info('Stage 1: Overlaps')
        p = progressbar(num_samples)
        # Prep the overlap table;
        for ia, this_peak_list in enumerate(self.linearData):
            for la in this_peak_list.linearData:
                chrom = la['loc']['chr']
                if chrom not in mat:
                    mat[chrom] = {}

                # check for an overlap;
                # NOTE(review): both bounds are derived from loc['left'] only
                # (not loc['right']) — so ctr == loc['left']; confirm this is
                # the intended centre for method='overlap'.
                left = la['loc']['left'] - delta # overlap;
                rite = la['loc']['left'] + delta
                ctr = (left + rite) // 2
                if method == 'collide':
                    left = ctr - delta
                    rite = ctr + delta

                hit = None

                # super-fast mode (genome is binned)
                # 'bin' shadows the builtin, kept as-is; the bin tuple is the
                # genomic window this peak centre falls into.
                bin = int(ctr / bin_size)*bin_size# still keep proper locations if you want an accurate matrix_tsv
                bin = (bin, bin+bin_size)
                if bin not in mat[chrom]:
                    mat[chrom][bin] = [0] * num_samples # no hit found
                mat[chrom][bin][ia] = 1
                # The string below is dead code: a disabled alternative
                # ("jiggle") algorithm kept for reference.
                '''
                # fast mode (jiggle algortihm)
                for l in mat[chrom]:
                    if rite >= l[0] and left <= l[1]:
                        newctr = ((l[0] + left) // 2 + (l[1] + rite) // 2 ) // 2
                        hit = l
                        #hit = (newctr-delta, newctr+delta)
                        #if hit != l:
                        #    mat[chrom][hit] = mat[chrom][l] # jiggle together the peak overlaps;
                        #    del mat[chrom][l] # delete the old one, and expand the coords to jiggle the peak
                        break

                if hit:
                    mat[chrom][hit][ia] = 1
                else:
                    mat[chrom][(left,rite)] = [0] * num_samples # no hit found
                '''
            p.update(ia)

        # output the full location matrix;
        if matrix_tsv:
            names = [i.name for i in self.linearData]
            oh = open(matrix_tsv, "w")
            oh.write("%s\n" % "\t".join(['loc',] + names))
            for chrom in mat:
                for bin in sorted(mat[chrom]):
                    line = ['chr{0}:{1}-{2}'.format(chrom, bin[0], bin[1]), ]
                    line += [str(i) for i in mat[chrom][bin]]
                    oh.write('{0}\n'.format('\t'.join(line)))
            oh.close()
            config.log.info('compare: save {0} matrix_tsv'.format(matrix_tsv))

        # Go through the table once more and merge overlapping peaks?
        # Only if the jiggle=True

        # You can now dispose of the location data and convert the matrices to numpy arrays
        config.log.info('Stage 2: Clean matrix')
        total_num_peaks = 0
        for chrom in mat:
            # rows = bins, columns = peak lists (0/1 membership).
            mat[chrom] = numpy.array([mat[chrom][loc] for loc in mat[chrom]])
            total_num_peaks += mat[chrom].shape[0]
        config.log.info('Total number of peaks = {0:,}'.format(total_num_peaks))

        # Now it's simpler, take the column sums;
        config.log.info('Stage 3: Collect overlaps')
        peak_lengths = [len(a) for a in self.linearData]
        # convert to the triangular matrix:
        p = progressbar(len(peak_lengths))
        for ia, la in enumerate(peak_lengths):
            for ib, lb in enumerate(peak_lengths):
                if ia == ib:
                    matrix[ia, ib] = peak_lengths[ia] # should be filled in with the maximum possible overlap.
                    continue
                elif ia < ib: # make triangular
                    continue
                # the overlap is each row sums to 2
                res = 1 # If two lists collide to produce 0 hits it eventually ends up with nan, so put in a pseudo overlap;
                for chrom in mat:
                    #print(mat[chrom][ia,:], mat[chrom][ib:,])
                    # A bin occupied by both lists sums to 2.
                    s = mat[chrom][:,ia] + mat[chrom][:,ib] # down, across
                    res += len(s[s>=2])

                # NOTE(review): unreachable here (jaccard asserted False above);
                # also la/lb are ints in this loop, so len(la) would TypeError.
                if jaccard:
                    res = (res / float(len(la) + len(lb) - res))
                matrix[ia, ib] = res
            p.update(ia)

        # fill in the gaps in the triangle
        for ia, la in enumerate(self.linearData):
            for ib, lb in enumerate(self.linearData):
                if ia < ib:
                    matrix[ia,ib] = matrix[ib,ia]

        config.log.info('Stage 4: Correlations')
        # Normalise
        if jaccard:
            result_table = matrix # normalised already;
        else:
            # data must be normalised to the maximum possible overlap.
            for ia, la in enumerate(peak_lengths):
                for ib, lb in enumerate(peak_lengths):
                    matrix[ia,ib] = (matrix[ia,ib] / min([la, lb]))
            #print(matrix)
            corr_result_table = zeros( (len(self), len(self)) ) # square matrix to store the data.
            # convert the data to a pearson score.
            for ia, this_col in enumerate(matrix):
                for ib, other_col in enumerate(matrix):
                    if ia != ib:
                        corr_result_table[ia,ib] = pearsonr(this_col, other_col)[0] # [0] = r score, [1] = p-value
                    else:
                        corr_result_table[ia,ib] = 1.0
            result_table = corr_result_table

        if pearson_tsv:
            names = [i.name for i in self.linearData]
            oh = open(pearson_tsv, "w")
            oh.write("%s\n" % "\t".join([] + names))
            for ia, la in enumerate(names):
                oh.write("%s" % la)
                for ib, lb in enumerate(names):
                    oh.write("\t%s" % corr_result_table[ia,ib])
                oh.write("\n")
            oh.close()
            config.log.info('compare: save {0} pearson_tsv'.format(pearson_tsv))

        # need to add the labels and serialise into a dict of lists.
        dict_of_lists = {}
        row_names = []
        for index, item in enumerate(self.linearData):
            dict_of_lists[item.name] = result_table[index]
            row_names.append(item.name) # preserve order of row names.

        if "output_pair_wise_correlation_plots" in kargs and kargs["output_pair_wise_correlation_plots"]:
            # output the plot matrices.
            for ia, la in enumerate(self.linearData):
                for ib, lb in enumerate(self.linearData):
                    plot_data = []
                    if ia != ib:
                        x = matrix[ia,]
                        y = matrix[ib,]
                        self.draw._scatter(x, y, xlabel=row_names[ia], ylabel=row_names[ib],
                            filename="dpwc_plot_%s_%s.png" % (row_names[ia], row_names[ib]))

        if "aspect" in kargs:
            aspect = kargs["aspect"]
        else:
            aspect = "normal"

        # respect heat_wid, hei if present
        square = True
        if "heat_hei" in kargs or "heat_wid" in kargs:
            square=False

        #print dict_of_lists
        if jaccard:
            colbar_label = 'Jaccard index'
        else:
            colbar_label = 'Pearson correlation'

        # draw the heatmap and save:
        ret = self.draw.heatmap(data=dict_of_lists, filename=filename,
            colbar_label=colbar_label, bracket=bracket,
            square=square, cmap=cm.RdBu_r, cluster_mode="euclidean", row_cluster=row_cluster, col_cluster=col_cluster,
            row_names=row_names, col_names=row_names, aspect=aspect, **kargs)

        config.log.info("compare: Saved Figure to '%s'" % ret["real_filename"])
        return dict_of_lists
    def __compare_slow(self, key=None, filename=None, method=None, delta=200, matrix_tsv=None,
        row_cluster=True, col_cluster=True, bracket=None, pearson_tsv=None,
        jaccard=False, **kargs):
        """
        **Purpose**
            The old-style all-vs-all intersect based comparison.

            Every pair of genelists is genuinely intersected with
            collide()/overlap()/map() (O(n*m) per pair), the counts are
            normalised (or converted to a Jaccard index), and the resulting
            overlap patterns are correlated and drawn as a heatmap.
        """
        config.log.info("This may take a while, all lists are intersected by '%s' with '%s' key" % (method, key))

        # Default colour brackets and matrix dtype differ for Jaccard vs counts.
        if jaccard:
            if not bracket:
                bracket = [0.0, 0.4]
            # I need a float matrix? I thought the default is a float64?
            matrix = zeros( (len(self), len(self)), dtype=numpy.float64) # 2D matrix.
        else: # Integers will do fine to store the overlaps
            if not bracket:
                bracket = [-0.2, 1]
            matrix = zeros( (len(self), len(self)) ) # 2D matrix.

        # Fill in the lower triangle only (the intersection is symmetric).
        for ia, la in enumerate(self.linearData):
            for ib, lb in enumerate(self.linearData):
                if ia == ib:
                    matrix[ia, ib] = len(la) # should be filled in with the maximum possible overlap.
                elif ia < ib: # make search triangular
                    pass
                else:
                    res = 0
                    if method == "collide":
                        res = la.collide(genelist=lb, loc_key=key, delta=delta)
                    elif method == "overlap":
                        res = la.overlap(genelist=lb, loc_key=key, delta=delta)
                    elif method == "map":
                        res = la.map(genelist=lb, key=key)
                    if res:
                        res = len(res)
                    else:
                        res = 1 # If two lists collide to produce 0 hits it eventually ends up with nan
                        # in the table which then buggers up the clustering below.
                    if jaccard:
                        # |A n B| / |A u B|
                        res = (res / float(len(la) + len(lb) - res))
                    matrix[ia, ib] = res

        #print matrix

        # fill in the gaps in the triangle
        for ia, la in enumerate(self.linearData):
            for ib, lb in enumerate(self.linearData):
                if ia < ib:
                    matrix[ia,ib] = matrix[ib,ia]

        if matrix_tsv:
            names = [i.name for i in self.linearData]
            oh = open(matrix_tsv, "w")
            oh.write("%s\n" % "\t".join([] + names))
            for ia, la in enumerate(names):
                oh.write("%s" % la)
                for ib, lb in enumerate(names):
                    oh.write("\t%s" % matrix[ia,ib])
                oh.write("\n")
            oh.close()

        if jaccard:
            # Jaccard indices are used directly; corr_result_table aliases the
            # same matrix so the pearson_tsv block below still works.
            result_table = matrix
            corr_result_table = matrix
        else:
            # data must be normalised to the maximum possible overlap.
            for ia, la in enumerate(self.linearData):
                for ib, lb in enumerate(self.linearData):
                    matrix[ia,ib] = (matrix[ia,ib] / min([len(la), len(lb)]))

            corr_result_table = zeros( (len(self), len(self)) ) # square matrix to store the data.
            # convert the data to a pearson score.
            for ia, this_col in enumerate(matrix):
                for ib, other_col in enumerate(matrix):
                    if ia != ib:
                        corr_result_table[ia,ib] = pearsonr(this_col, other_col)[0] # [0] = r score, [1] = p-value
                    else:
                        corr_result_table[ia,ib] = 1.0

            result_table = corr_result_table

        if pearson_tsv:
            names = [i.name for i in self.linearData]
            oh = open(pearson_tsv, "w")
            oh.write("%s\n" % "\t".join([''] + names))
            for ia, la in enumerate(names):
                oh.write("%s" % la)
                for ib, lb in enumerate(names):
                    oh.write("\t%s" % corr_result_table[ia,ib])
                oh.write("\n")
            oh.close()

        # need to add the labels and serialise into a dict of lists.
        dict_of_lists = {}
        row_names = []
        for index, item in enumerate(self.linearData):
            dict_of_lists[item.name] = result_table[index]
            row_names.append(item.name) # preserve order of row names.

        if "output_pair_wise_correlation_plots" in kargs and kargs["output_pair_wise_correlation_plots"]:
            # output the plot matrices (debug only).
            for ia, la in enumerate(self.linearData):
                for ib, lb in enumerate(self.linearData):
                    plot_data = []
                    if ia != ib:
                        x = matrix[ia,]
                        y = matrix[ib,]
                        self.draw._scatter(x, y, xlabel=row_names[ia], ylabel=row_names[ib],
                            filename="dpwc_plot_%s_%s.png" % (row_names[ia], row_names[ib]))

        if "aspect" in kargs:
            aspect = kargs["aspect"]
        else:
            aspect = "normal"

        # respect heat_wid, hei if present
        square = True
        if "heat_hei" in kargs or "heat_wid" in kargs:
            square=False

        #print dict_of_lists
        if jaccard:
            colbar_label = 'Jaccard index'
        else:
            colbar_label = 'Pearson correlation'

        # draw the heatmap and save:
        ret = self.draw.heatmap(data=dict_of_lists, filename=filename,
            colbar_label=colbar_label, bracket=bracket,
            square=square, cmap=cm.RdBu_r, cluster_mode="euclidean", row_cluster=row_cluster, col_cluster=col_cluster,
            row_names=row_names, col_names=row_names, aspect=aspect, **kargs)

        config.log.info("compare: Saved Figure to '%s'" % ret["real_filename"])
        return dict_of_lists
def venn(self, key=None, filename=None, mode='map', **kargs):
"""
**Purpose**
draw 2, 3 or 4 venn Diagrams
currently only equally size venndiagrams are supported.
(proportional venn diagrams are experimental only, enable them
using experimental_proportional_venn = True as an argument).
your glglob should be loaded with several genelist-like objects.
Note that you can do simple 2 overlap venn_diagrams using any
pair of genelists with this sort of code:
genelist.map(genelist=other_genelist, <...>, image_filename="venndiagram.png")
Note also that the data from each genelist will be converted to unique
values. So the final numbers may not match your original list sizes
**Arguments**
key
key to use to map() between the two lists.
filename
save the venn diagram to this filename
mode (Optional, default='map')
set to 'collide' if you prefer to use the glbase location
collide() to map the Venn.
Note that 'key' should point to a location key, and you can pass an optional
'delta' command
Also note that location Venn overlaps are only partly accurate, as it is
possible to have multiple overlaps .
title (Optional)
title for the figures
defaults to <list> vs <list> vs ...
**Returns**
A venn diagram saved in filename.
"""
valid_args = ["filename", "key", "title", "experimental_proportional_venn"]
for k in kargs:
if not k in valid_args:
raise ArgumentError(self.map, k)
assert len(self.linearData) <= 5, "currently glglob venn diagrams only support at most 5 overlaps"
assert len(self.linearData) >= 2, "you must send at least two lists"
assert filename, "no filename specified for venn_diagrams to save to"
assert key, "Must specify a 'key' to map the two lists"
if mode == 'collide': # Deflect to the collide routine
return(self.__venn_collide(key=key, filename=filename))
proportional = False
if "experimental_proportional_venn" in kargs and kargs["experimental_proportional_venn"]:
proportional=True
# The code below is needlessly verbose for clarity.
if len(self.linearData) == 2:
A = set(self.linearData[0][key])
B = set(self.linearData[1][key])
AB = A & B
realfilename = self.draw.venn2(len(A), len(B), len(AB),
self.linearData[0].name, self.linearData[1].name,
filename, **kargs)
return(None)
elif len(self.linearData) == 3:
A = set(self.linearData[0][key])
B = set(self.linearData[1][key])
C = set(self.linearData[2][key])
AB = A & B
AC = A & C
BC = B & C
ABC = A & B & C
# check for none's:
if AB:
AB = len(AB)
else:
AB = 0
if AC:
AC = len(AC)
else:
AC = 0
if BC:
BC = len(BC)
else:
BC = 0
if ABC:
ABC = len(ABC)
else:
ABC = 0
realfilename = self.draw.venn3(len(A), len(B), len(C), AB, AC, BC, ABC,
self.linearData[0].name, self.linearData[1].name, self.linearData[2].name,
filename, **kargs)
elif len(self.linearData) == 4:
A = set(self.linearData[0][key])
B = set(self.linearData[1][key])
C = set(self.linearData[2][key])
D = set(self.linearData[3][key])
print(len(A), len(B), len(C), len(D))
# Use set logic to work out the actual values:
# I'm pretty sure this is accurate at this point.
ABCD = A & B & C & D
ABC = (A & B & C) - ABCD
ABD = (A & B & D) - ABCD
ACD = (A & C & D) - ABCD
BCD = (B & C & D) - ABCD
AB = (((A & B) - ABC) - ABD) - ABCD
AC = (((A & C) - ABC) - ACD) - ABCD
AD = (((A & D) - ABD) - ACD) - ABCD
BC = (((B & C) - ABC) - BCD) - ABCD
BD = (((B & D) - ABD) - BCD) - ABCD
CD = (((C & D) - ACD) - BCD) - ABCD
A = A - ABCD - ABC - ABD - ACD - AB - AC - AD
B = B - ABCD - ABC - ABD - BCD - AB - BC - BD
C = C - ABCD - ABC - ACD - BCD - AC - BC - CD
D = D - ABCD - ABD - ACD - BCD - AD - BD - CD
# A, B, C, D, AB, AC, AD, BC, BD, CD, ABC, ABD, ACD, BCD, ABCD
lists = [len(A), len(B), len(C), len(D),
len(AB), len(AC), len(AD), len(BC), len(BD), len(CD),
len(ABC), len(ABD), len(ACD), len(BCD), len(ABCD)]
labs = [self.linearData[i].name for i in range(4)]
realfilename = self.draw.venn4(lists, labs, filename, **kargs)
elif len(self.linearData) == 5:
raise NotImplementedError
A = set(self.linearData[0][key])
B = set(self.linearData[1][key])
C = set(self.linearData[2][key])
D = set(self.linearData[3][key])
E = set(self.linearData[4][key])
AB = A & B
AC = A & C
AD = A & D
AE = A & E
BC = B & C
BD = B & D
BE = B & E
CD = C & D
CE = C & E
DE = D & E
ABC = A & B & C
ABD = A & B & D
ABE = A & B & E
ACD = A & C & D
ACE = A & C & E
ADE = A & D & E
BCD = B & C & D
BCE = B & C & E
BDE = B & D & E
CDE = C & D & E
ABCD = A & B & C & D
ABCE = A & B & C & E
ACDE = A & C & D & E
BCDE = B & C & D & E
ABCDE = A & B & C & D & E
# A, B, C, D, E,
# AB, AC, AD, AE, BC, BD, BE, CD, CE, DE,
# ABC, ABD, ABE, ACD, ACE, ADE, BCD, BCE, BDE, CDE
# ABCD, ABCE, ACDE, BCDE,
# ABCDE
lists = [len(A), len(B), len(C), len(D), len(E),
len(AB), len(AC), len(AD), len(AE), len(BC), len(BD), len(BE), len(CD), len(CE), len(DE),
len(ABC), len(ABD), len(ABE), len(ACD), len(ACE), len(ADE), len(BCD), len(BCE), len(BDE), len(CDE),
len(ABCD), len(ABCE), len(ACDE), len(BCDE), (ABCDE)]
labs = [self.linearData[i].name for i in range(5)]
realfilename = self.draw.venn5(lists, labs, filename, **kargs)
config.log.info("venn: Saved Figure to '%s'" % realfilename)
return None
def __venn_collide(self, key, filename, delta=200, **kargs):
#assert key in self.keys()
assert len(self.linearData) >= 3, "currently glglob venn diagrams only support at least 3-way overlaps"
assert len(self.linearData) <= 3, "currently glglob venn diagrams only support at most 3-way overlaps"
if len(self.linearData) == 3:
A = self.linearData[0]
B = self.linearData[1]
C = self.linearData[2]
AB = A.collide(genelist=B, key=key, delta=delta)
AC = A.collide(genelist=C, key=key, delta=delta)
BC = B.collide(genelist=C, key=key, delta=delta)
ABC = AB.collide(genelist=BC, key=key, delta=delta)
# check for none's:
AB = len(AB) if AB else 0
AC = len(AC) if AC else 0
BC = len(BC) if BC else 0
ABC = len(ABC) if ABC else 0
# You only need to provide the lengths, the overlaps are calculated in venn3:
realfilename = self.draw.venn3(len(A), len(B), len(C), AB, AC, BC, ABC,
self.linearData[0].name, self.linearData[1].name, self.linearData[2].name,
filename, **kargs)
return None
def moving_average_maps(self, mode="graph", compare_array=None, filename=None, key=None,
normalise=True, **kargs):
"""
**Purpose**
Draw moving average maps in a variety of formats.
**Arguments**
mode (Optional, defaults to "graph")
"graph"
draw a series of line graphs on the same graph.
"heatmap"
draw as a heatmap
"stacked_plots"
draw as a series of stacked line plots.
compare_array (Required)
The name of the array to use as a compare array. You have to draw the
moving average by comparing against some array already in
the glglob.
filename
filename to save the image as.
key
key to use to match between the compare_array and the rest of the
data in this glglob
normalise (Optional, default=True)
normalise the data, True or False.
**Returns**
The actual filename used to save and an image saved
to 'filename'
"""
# get the compare array:
compare_array = self[compare_array]
assert compare_array, "the compare array was not found in this glglob"
res = {}
last_val = 0
for pl in self.linearData:
if pl.name != compare_array.name: # a simple == will fail here, so I use the names to compare
res[pl.name] = []
last_val = 0
names = pl[key] # get the name column.
for i, v in enumerate(compare_array):
if v[key] in names:
#last_val += 1
#res[pl.name].append(last_val)
res[pl.name].append(1)
else:
res[pl.name].append(0)
res[pl.name] = utils.movingAverage(res[pl.name], int(len(res[pl.name]) * 0.2))[1] # keep only y,
typical_length = len(res[pl.name]) # measure length to generate x axis later.
# append the compare_array for comparison.
res[compare_array.name] = [i[0] for i in compare_array["conditions"]][0:typical_length] # this will be a list of lists (with 1 entry) - flatten the list.
# the arange fakes the x axis for plots.
# I have to slice the array as it's slightly shorter than the movingAverage plots...
# You will only notice this if the list is short and you can see the names
if normalise: #this will normalise the y-axis
for k, v_ in res.items():
# normalise to 0-->100
min_val = min(res[k])
max_val = max(res[k]) - min_val
for i, v in enumerate(v_):
res[k][i] = ((v-min_val) / max_val) * 100.0
if mode == "graph":
# fake the xdata:
xdata = arange(0,typical_length)
plot.cla()
fig = plot.figure(figsize=(8,5))
axis = fig.add_subplot(111)
for pl in res:
axis.plot(xdata, res[pl][1], label=pl)
axis.set_title("")
axis.legend(loc=2, markerscale=0.1)#, prop={"size": "xx-small"})
fig.savefig(filename)
real_filename = filename
elif mode == "heatmap":
# At the moment the data is in the form: [ [ (x,y), (x,y), ]:class ... []]
# I need to strip out the x data.
scalebar_name = "score"
if normalise:
scalebar_name = "normalised score"
# use key to build an array ready for draw._heatmap
colnames=[]
for k in res:
res[k] = res[k]
colnames.append(k)
real_filename = self.draw._heatmap(data=res, filename=filename, col_names=colnames, row_names=compare_array["name"],
row_cluster=False, col_cluster=True, colour_map=cm.Blues, #vmax=1,
colbar_label=scalebar_name, aspect="square")
config.log.info("moving_average_maps: Saved image to '%s'" % real_filename)
    def overlap_heatmap(self, filename=None, score_key=None, resolution=1000, optics_cluster=True):
        """
        **Purpose**
            Draw a heatmap from ChIP-seq binding data (or lists of genomic coordinates),
            showing one row for each binding site, particularly where the sites overlap.

            This will compare (and overlap) each of the lists, then produce a large heatmap
            with each row for a unique genomic location.

            For the purposes of this tool, the genome is divided up into blocks (based on
            resolution) and binding is then tested against these blocks.

        **Arguments**
            filename (Required)
                filename to save the heatmap to.

            score_key (Optional, default=None)
                by default each row will be scored in a binary manner. i.e. Is the binding site present or not?
                However, if your ChIP-seq lists have some sort of intensity score then this
                can be used instead to score the overlap.

            resolution (Optional, default=1000)
                Number of base pairs to divide the genome up into.

            optics_cluster (Optional, default=True)
                By default overlap_heatmap() will cluster the data based on OPTICS:
                https://en.wikipedia.org/wiki/OPTICS_algorithm
                If optics_cluster=False
                # NOTE(review): this argument is currently unused - the OPTICS/DBSCAN
                # path below is disabled and a hand-rolled grouping is used instead.

        **Returns**
            A heatmap and the heatmap data (as a Numpy array) for any further processing.
        """
        assert filename, "Must provide a filename"
        assert config.SKLEARN_AVAIL, "the Python package sklearn is required for overlap_heatmap()"

        # Iterate through each genelist and build a non-redundant table of genomic
        # blocks
        chr_blocks = {}
        total_rows = 0
        for index, gl in enumerate(self.linearData):
            for item in gl:
                # Add chromosome to the cache if not present.
                if item["loc"]["chr"] not in chr_blocks:
                    chr_blocks[item["loc"]["chr"]] = {}

                # NOTE(review): the `* 1000` is a hard-coded constant rather than
                # `* resolution`; harmless here since block_id is only an opaque
                # per-block identifier (bucketing is decided by the division).
                block_id = "bid:%s" % (math.floor(item["loc"]["left"] / resolution) * 1000, )
                if block_id not in chr_blocks[item["loc"]["chr"]]:
                    chr_blocks[item["loc"]["chr"]][block_id] = [0 for x in range(len(self.linearData))] # one for each gl
                    total_rows += 1

                # Score this list's membership of the block: intensity if
                # score_key was given, otherwise binary presence.
                if score_key:
                    chr_blocks[item["loc"]["chr"]][block_id][index] = item[score_key]
                else:
                    chr_blocks[item["loc"]["chr"]][block_id][index] = 1

        config.log.info("overlap_heatmap(): Found %s unique genomic regions" % total_rows)

        # Build the table for the heatmap: one column per unique block, one row
        # per genelist, then transpose to blocks-as-rows.
        tab = numpy.zeros([len(self.linearData), total_rows])
        crow = 0
        for c, value in chr_blocks.items():
            for bid in chr_blocks[c]:
                for i in range(len(value[bid])): # or len(self.linearData)
                    tab[i, crow] = chr_blocks[c][bid][i]
                crow += 1
        tab = tab.T

        # dendrogram dies here, so need other ways to cluster
        # DBSCAN consumes too much unnecessary memory.
        # (The string below is disabled reference code, kept verbatim.)
        """
        alg = DBSCAN(eps=0.2)
        print alg.fit(tab)
        print alg.labels_
        clusters = numpy.unique(alg.labels_)
        print clusters

        # reorder the list based on cluster membership
        newd = {}
        for index, c in enumerate(alg.labels_):
            if c not in newd:
                newd[c] = []
            newd[c].append(tab[index])

        # load it back into a numpy array
        tab = None
        for c in clusters:
            new = numpy.vstack(newd[c])
            if tab is None:
                tab = new
            else:
                tab = numpy.vstack([tab, new])
        """

        # Yay, roll my own clustering!
        # I already know how many possible clusters there will be.
        #num_clusters = math.factorial(len(self.linearData))

        # build a cluster table, containing all possible variants for this len(self.linearData)
        # Rows with the same presence/absence pattern form one group.
        clusters = {}
        for row in tab:
            # Make an identifier for the cluster:
            id = tuple(bool(i) for i in row)
            if id not in clusters:
                clusters[id] = []
            clusters[id].append(row)

        # I want to sort the clusters first (by how many lists share the block):
        sorted_clusters = [{"id": c, "score": sum(c)} for c in clusters]
        sorted_clusters = sorted(sorted_clusters, key=itemgetter("score"))

        # Flattent the arrays and load it back into a numpy array
        tab = None
        for c in sorted_clusters:
            new = numpy.vstack(clusters[c["id"]])
            tab = new if tab is None else numpy.vstack([tab, new])

        ret = self.draw.heatmap(data=tab, filename=filename, col_names=[gl.name for gl in self.linearData], row_names=None,
            row_cluster=False, col_cluster=True, colour_map=cm.Reds, heat_wid=0.7, heat_hei=0.7, bracket=[0,tab.max()])

        config.log.info("overlap_heatmap: Saved overlap heatmap to '%s'" % ret["real_filename"])
        return(tab)
    def __peak_cluster(self, list_of_peaks, merge_peaks_distance):
        """
        Merge overlapping peaks across all lists.

        Each peak is reduced to its centre and expanded by merge_peaks_distance
        on each side; windows that overlap an existing block are merged into it
        (preserving the membership 'binary' vector). Returns
        (total_rows, chr_blocks) where chr_blocks is
        {"chr<name>": {(left, right): {"binary": [...], "pil": [...]}}}.
        """
        # Merge overlapping peaks
        chr_blocks = {}
        total_rows = 0
        #merged_peaks = {}
        p = progressbar(len(list_of_peaks))
        for idx, gl in enumerate(list_of_peaks):
            for p1 in gl["loc"]:
                #p1 = p1.pointify().expand(merge_peaks_distance) # about 10% of the time is in __getitem__ from the loc, so unpack it;
                # centre point, then a fixed-width window around it:
                cpt = (p1.loc["left"] + p1.loc['right']) // 2
                p1_chr = 'chr{0}'.format(p1['chr'])
                p1_left = cpt - merge_peaks_distance
                p1_right = cpt + merge_peaks_distance
                if not p1_chr in chr_blocks:
                    chr_blocks[p1_chr] = {}

                binary = [0 for x in range(len(list_of_peaks))] # set-up here in case I need to modify it.
                for p2 in chr_blocks[p1_chr]: # p2 is now a block_id tuple
                    #if p1.qcollide(p2):
                    if p1_right >= p2[0] and p1_left <= p2[1]: # unfolded for speed.
                        binary = chr_blocks[p1_chr][p2]["binary"] # preserve the old membership
                        # remove the original entry
                        # (deleting inside the loop is safe only because we
                        # break immediately below - no further iteration occurs)
                        del chr_blocks[p1_chr][p2]
                        total_rows -= 1

                        # Add in a new merged peak: average the two windows'
                        # centres and re-expand to a fixed width.
                        cpt = (((p1_left+p2[0])//2) + ((p1_right+p2[1])//2)) // 2 # pointify()
                        p1_left=cpt-merge_peaks_distance
                        p1_right=cpt+merge_peaks_distance
                        # Don't get confused here, p1 is added onto the block heap below:
                        break

                # modify binary to signify membership for this peaklist
                binary[idx] = 1

                # Add p1 onto the blocklist
                block_id = (p1_left, p1_right)
                if block_id not in chr_blocks[p1_chr]:
                    chr_blocks[p1_chr][block_id] = {"binary": binary,
                        "pil": [0 for x in range(len(list_of_peaks))]} # one for each gl, load pil with dummy data.
                    total_rows += 1 # because the result is a dict of dicts {"<chrname>": {"bid": {data}}, so hard to keep track of the total size.
            p.update(idx)

        return total_rows, chr_blocks
def chip_seq_cluster(self, list_of_peaks, merge_peaks_distance=400, sort_clusters=True,
_get_chr_blocks=False, **kargs):
"""
**Purpose**
Combine and merge all peaks, extract the read pileups then categorize the peaks into
similar groupings. Return a new list of genelists, one genelist for each grouping
that contains the list of genomic locations in each group.
Return a glbase expression object with each row a merged (unique) peak, each
column is a peak
Be careful, the resulting objects can get very huge!
The order of the genomic locations and order of the groups must be maintained
between the heatmap and the returned data.
NOTE: I sort of named this function incorectly with the whole 'cluster' business.
Although it's not wrong to label the returned groups as clusters it is certainly
confusing and may imply that some sort of k-means or hierarchical clustering
is performed. No clustering is performed, instead groups are made based on a binary
determination from the list_of_peaks. So below, where I refer to 'cluster'
I really mean group. Later I may add k-means clustering, which may make things even more
confusing.
Here is a detailed explanation of this function:
1. Join all of the peaks into a redundant set of coordinates
2. Merge all of the genomic regions to produce a single list of unique genomic regions
(this is what it means by "chip_seq_cluster_heatmap(): Found <number> unique
genomic regions")
3. Build a table of all possible peak combinations:
e.g. for two chip-seq lists, A and B:
listA only: [True, False]
listB only: [False, True]
listA and listB: [True, True]
It is these that are the 'clusters' (or groups). In this case there would
be just 3 groups. The more lists the more possible groups.
Note that groups with no members are culled.
**Arguments**
list_of_peaks (Required)
A list of genelists of peaks from your ChIP-seq data to interrogate. The order of the libraries
Genomic location data should be stored in a 'loc' key in the genelist.
merge_peaks_distance (Optional, default=400)
Maximum distance that the centers of any two peaks can be apart before the two peaks are merged into
a single peak. (taking the mean of the peak centers)
sort_clusters (Optional, default=True)
sort the clusters from most complex to least complex.
Note that chip_seq_cluster_heatmap cannot preserve the order of the peaks
(it's impossible), so setting this to false will just randomise the order of the clusters
which may not be particularly helpful.
**Returns**
Returns a glbase expression object, with rows as unique genomic peaks and
columns as each peak list.
The values will be filled with 0 or 1, if it was a peak or not a peak.
"""
assert list_of_peaks, 'list_of_peaks is empty'
assert len(list_of_peaks[0]) > 0, 'list_of_peaks lists appear to be empty'
# get a non-redundant list of genomic regions based on resolution.
chr_blocks = {} # stores a binary identifier
pil_blocks = {}
total_rows = 0
peak_lengths = sum([len(p) for p in list_of_peaks])
config.log.info("chip_seq_cluster_heatmap: Started with {0:,} redundant peaks".format(peak_lengths))
total_rows, chr_blocks = self.__peak_cluster(list_of_peaks, merge_peaks_distance)
config.log.info("chip_seq_cluster: Found {0:,} unique genomic regions".format(total_rows))
if _get_chr_blocks:
return chr_blocks
# Convert the chr_blocks into a expression object
tab = []
for chrom in chr_blocks:
for loc in chr_blocks[chrom]:
l = location(chr=chrom, left=loc[0], right=loc[1])
cid = int("".join([str(i) for i in chr_blocks[chrom][loc]["binary"]]), 2)
#print cid
tab.append({'loc': l, 'conditions': chr_blocks[chrom][loc]['binary'], 'cid': cid})
e = expression(loadable_list=tab, cond_names=[p.name for p in list_of_peaks])
if sort_clusters:
e.sort('cid')
return e
def chip_seq_cluster_heatmap(self, list_of_peaks, list_of_trks, filename=None, norm_by_library_size=False, bins=20,
    pileup_distance=1000, merge_peaks_distance=400, sort_clusters=True, cache_data=False, bracket=None,
    range_bracket=None, frames=False, titles=None, read_extend=200, imshow=True, cmap=cm.plasma,
    log_pad=None, log=2,
    size=None, **kargs):
    """
    **Purpose**
        Combine and merge all peaks, extract the read pileups then categorize the peaks into
        similar groupings. Return a new dict of genelists, one genelist for each grouping
        that contains the list of genomic locations in each group. Finally, draw a
        heatmap to <filename>.

        The order of the genomic locations and order of the groups is maintained
        between the heatmap and the returned data.

        NOTE: despite the 'cluster' name no k-means/hierarchical clustering is
        performed; groups are made from the binary membership pattern of each
        merged genomic region across the peak lists.

        Strategy:
        1. Join all of the peaks into a redundant set of coordinates.
        2. Merge all of the genomic regions to produce a single list of unique genomic regions.
        3. Build a table of all possible peak combinations, e.g. for lists A and B:
           A only: [True, False]; B only: [False, True]; A and B: [True, True].
           These combinations are the 'clusters' (groups); empty groups are culled.
        4. For each genomic location get the pileup from the appropriate track, expanded
           by <pileup_distance>, binned into <bins> bins (keeps the heatmap tractable).
        5. Order the heatmap from the most complex group (peaks in all libraries)
           to the least complex (library-specific peaks) and draw.

    **Arguments**
        list_of_peaks (Required)
            A list of genelists of peaks, each with a 'loc' key. MUST be in the
            same order as list_of_trks.

        list_of_trks (Required)
            A list of trks to draw the sequence tag reads from, in the same order
            as list_of_peaks.

        filename (Optional, default=None)
            If set to a string a heatmap will be saved to filename.

        norm_by_library_size (Optional, default=False)
            Normalize the read pileup data within each library to the size of the
            library (reads per million) to assist cross-comparison of libraries.

        merge_peaks_distance (Optional, default=400)
            Maximum distance the centers of two peaks can be apart before the peaks
            are merged into a single peak (taking the mean of the peak centers).

        pileup_distance (Optional, default=1000)
            distance (bp) around each merged region to include in the pileup.

        read_extend (Optional, default=200)
            The size in base pairs to extend each read (3' of the read if stranded).

        bins (Optional, default=20)
            number of bins for the pileup. Keep conservative (10-50); large values
            consume large amounts of memory.

        sort_clusters (Optional, default=True)
            sort the clusters from most complex to least complex. If False the
            group order is the (arbitrary) discovery order.

        log (Optional, default=2)
            Log-transform the heatmap. Options: 2, 10, or None/False for no log.

        log_pad (Optional, default=0.1)
            pseudo-count added before log transforming.

        cmap (Optional, default=matplotlib cm.plasma)
            A colour map for the heatmap.

        titles (Optional, default=peaks.name)
            Supply your own titles for the top of the heatmap columns.

        range_bracket (Optional, default=None, exclusive with bracket)
            Bracket the heatmap colour range as fractions of max(data), i.e.
            [max(data)*range_bracket[0], max(data)*range_bracket[1]], applied
            AFTER log. Typical value: [0.4, 0.9]. If neither range_bracket nor
            bracket is given a guess is made from the mean and stdev.

        bracket (Optional, default=None, exclusive with range_bracket)
            Explicit [min, max] bracket for the (log-transformed) data. The
            suggested bracket is reported in the log output.

        cache_data (Optional, default=False)
            Filename to cache the pileup data to/from. Data is stored AFTER
            normalisation/binning but BEFORE sorting and drawing, so you can
            iterate on drawing options without re-doing the slow pileup. If the
            file exists it is loaded; otherwise it is created.

        frames (Optional, default=False)
            Draw black frames around the heatmaps and category maps.

        imshow (Optional, default=True)
            Embed the heatmap as a raster image inside the vector figure
            (allows very large matrices to be saved as svg).

    **Returns**
        A dict keyed by cluster id:
            {<cluster_id>: {"genelist": <genelist>, "cluster_membership": (True, ..., False)}, ...}
        "cluster_membership" is a tuple (same length/order as list_of_peaks)
        indicating binding (True) or not (False) in each original peak list.
        Note that genomic locations may not exactly match the originals as
        close peaks are merged to their mid point.
    """
    assert not (range_bracket and bracket), "You can't use both bracket and range_bracket"
    assert len(list_of_peaks) == len(list_of_trks), 'len(list_of_peaks) != len(list_of_trks)'
    # Confirm that all lists contain a 'loc' key
    assert False not in ['loc' in gl.keys() for gl in list_of_peaks], 'One of your peak data (list_of_peaks) does not contain a "loc" key'

    resolution = merge_peaks_distance # laziness hack!

    peak_lengths = sum([len(p) for p in list_of_peaks])
    config.log.info("chip_seq_cluster_heatmap: Started with {0:,} redundant peaks".format(peak_lengths))

    total_rows, chr_blocks = self.__peak_cluster(list_of_peaks, merge_peaks_distance)
    config.log.info("chip_seq_cluster: Found {0:,} unique genomic regions".format(total_rows))

    # I will need to go back through the chr_blocks data and add in the pileup data:
    # NOTE(review): bin_size uses one pileup_distance while block_len uses two;
    # looks inconsistent but preserved as-is — confirm against __peak_cluster's block widths.
    bin_size = int((resolution+resolution+pileup_distance) / bins)
    block_len = (resolution+resolution+pileup_distance+pileup_distance) # get the block size

    # sort out cached_data
    if cache_data and os.path.isfile(cache_data): # reload previously cached data.
        oh = open(os.path.realpath(cache_data), "rb")
        loaded_chr_blocks = pickle.load(oh)
        oh.close()
        # Sanity checking that the cache matches the current inputs:
        assert len(loaded_chr_blocks) == len(chr_blocks), 'Error in the {0} file, does not match inputs, needs to be regenerated'.format(cache_data)
        assert loaded_chr_blocks.keys() == chr_blocks.keys(), 'Error in the {0} file, does not match inputs, needs to be regenerated'.format(cache_data)
        first_key = list(chr_blocks.keys())[0]
        assert len(loaded_chr_blocks[first_key]) == len(chr_blocks[first_key]), 'Error in the {0} file, does not match inputs, needs to be regenerated'.format(cache_data)
        # Seems okay, proceed:
        chr_blocks = loaded_chr_blocks
        config.log.info("chip_seq_cluster_heatmap: Reloaded previously cached pileup data: '%s'" % cache_data)
    else:
        # No cached data, so we have to collect ourselves.
        config.log.info('chip_seq_cluster_heatmap: Collecting pileup data...')
        if norm_by_library_size:
            # get and store the read_counts for each library to reduce an sqlite hit.
            read_totals = [trk.get_total_num_reads()/float(1e6) for trk in list_of_trks] # i.e something like RPM

        p = progressbar(len(list_of_trks))
        # Grabs all data and does the calcs in memory; uses more memory but ~2-3x faster
        for pindex, trk in enumerate(list_of_trks):
            for index, chrom in enumerate(chr_blocks):
                for block_id in chr_blocks[chrom]:
                    left = block_id[0] - pileup_distance
                    right = block_id[1] + pileup_distance

                    dd = trk.mats[chrom][left:right] # reach into the flat track directly; faster here
                    if len(dd) != block_len: # This should be a very rare case...
                        if len(dd) < block_len:
                            config.log.warning('Block miss (short)')
                            num_missing = block_len - len(dd)
                            ad = numpy.zeros(num_missing, dtype=numpy.float32)
                            dd = numpy.append(dd, ad)
                        elif len(dd) > block_len:
                            config.log.warning('Block miss (long)')
                            dd = dd[0:block_len] # just grab the start. probably unreliable though?

                    if norm_by_library_size:
                        # Bug fix: previously the normalised values were computed into
                        # pil_data but then discarded — the raw dd was binned. Normalise
                        # dd in place (reads per million) before binning.
                        dd = dd / read_totals[pindex]

                    # Bin the pileup by summing bin_size-wide chunks:
                    chr_blocks[chrom][block_id]["pil"][pindex] = dd[:(dd.size // bin_size) * bin_size].reshape(-1, bin_size).sum(axis=1)
            p.update(pindex)

        if cache_data: # store the generated data for later.
            oh = open(cache_data, "wb")
            pickle.dump(chr_blocks, oh, -1)
            oh.close()
            config.log.info("chip_seq_cluster_heatmap: Saved pileup data to cache file: '{0}'".format(cache_data))

    # assign each item to a group and work out all of the possible groups
    cluster_ids = []
    for chrom in chr_blocks:
        for block_id in chr_blocks[chrom]:
            cluster_id = tuple([bool(i) for i in chr_blocks[chrom][block_id]["binary"]])
            if cluster_id not in cluster_ids:
                cluster_ids.append(cluster_id)
            chr_blocks[chrom][block_id]["cluster_membership"] = cluster_id

    # Sort the groups from the most complex to the least complex.
    if sort_clusters:
        sorted_clusters = []
        for c in cluster_ids:
            sorted_clusters.append({"id": c, "score": sum(c)})
        sorted_clusters = sorted(sorted_clusters, key=itemgetter("score"))
        # This result is actually least to most, but as the heatmap is drawn bottom to top
        # it makes sense to preserve this order.
    else:
        # Bug fix: previously sorted_clusters was left undefined when
        # sort_clusters=False, causing a NameError below. Keep the (arbitrary)
        # discovery order instead.
        sorted_clusters = [{"id": c, "score": sum(c)} for c in cluster_ids]

    # build the super big heatmap table, arranged by group:
    ret_data = {}
    list_of_tables = [None for i in list_of_peaks]
    groups = []
    pileup_data = {}

    for cluster_index, cluster_id in enumerate(sorted_clusters):
        for chrom in chr_blocks:
            for block_id in chr_blocks[chrom]:
                if chr_blocks[chrom][block_id]["cluster_membership"] == cluster_id["id"]:
                    for peaks in range(len(list_of_peaks)):
                        row = chr_blocks[chrom][block_id]["pil"][peaks]
                        if list_of_tables[peaks] is None:
                            # append together all pileup data in a long row and stick on the tab array.
                            list_of_tables[peaks] = [row,]
                        else:
                            list_of_tables[peaks].append(row)

                        # store the pileup_data for later (summed per group, averaged below).
                        if (cluster_index+1) not in pileup_data:
                            pileup_data[cluster_index+1] = [None for i in list_of_peaks]
                        if pileup_data[cluster_index+1][peaks] is None: # numpy testing.
                            pileup_data[cluster_index+1][peaks] = numpy.array(row, dtype=numpy.float64)
                        else:
                            pileup_data[cluster_index+1][peaks] += row

                    # Also add it into the return data.
                    if cluster_index+1 not in ret_data:
                        ret_data[cluster_index+1] = {"genelist": genelist(name="cluster_%s" % (cluster_index+1,)), "cluster_membership": cluster_id["id"]}
                    this_loc = location(loc="chr%s:%s-%s" % (chrom, int(block_id[0]), int(block_id[1]))) # does not include the pileup_distance
                    ret_data[cluster_index+1]["genelist"].linearData.append({"loc": this_loc})
                    groups.append(cluster_index+1)

    # finish off the pileup_data: convert group sums to means.
    for cid in pileup_data:
        for pid in range(len(pileup_data[cid])):
            pileup_data[cid][pid] /= len(ret_data[cid]["genelist"])

    # Stash pileup state for chip_seq_cluster_pileup():
    self.__pileup_data = pileup_data
    self.__pileup_names = [g.name for g in list_of_peaks] # names for each sample, taken from peaks.
    self.__pileup_groups_membership = sorted_clusters
    self.__pileup_group_sizes = [groups.count(i) for i in range(0, len(sorted_clusters)+1)]
    config.log.info("chip_seq_cluster_heatmap: There are %s groups" % len(sorted_clusters))

    # rebuild the genelist quickdata and make genelist valid:
    for cid in ret_data:
        ret_data[cid]["genelist"]._optimiseData()
        ret_data[cid]['binary_membership'] = ''.join([str(int(a)) for a in ret_data[cid]["cluster_membership"]])

    if log_pad is None: # Bug fix: 'if not log_pad' clobbered an explicit log_pad=0
        log_pad = 0.1

    for index in range(len(list_of_tables)):
        if log == 2:
            list_of_tables[index] = numpy.log2(numpy.array(list_of_tables[index])+log_pad)
            colbar_label = "Log2(Tag density)"
        elif log == 10:
            list_of_tables[index] = numpy.log10(numpy.array(list_of_tables[index])+log_pad)
            colbar_label = "Log10(Tag density)"
        else:
            list_of_tables[index] = numpy.array(list_of_tables[index])
            colbar_label = "Tag density"

    if norm_by_library_size:
        colbar_label = "Normalised %s" % colbar_label
    self.__pileup_y_label = "Tag density" # Trust me, you don't want to log them...

    tab_max = max([tab.max() for tab in list_of_tables]) # need to get new tab_max for log'd values.
    tab_min = min([tab.min() for tab in list_of_tables])
    tab_mean = mean([numpy.average(tab) for tab in list_of_tables])
    tab_stdev = numpy.std(numpy.array([tab for tab in list_of_tables]))
    config.log.info("chip_seq_cluster_heatmap: min=%.2f, max=%.2f, mean=%.2f, stdev=%.2f" % (tab_min, tab_max, tab_mean, tab_stdev))

    if range_bracket:
        bracket = [tab_max*range_bracket[0], tab_max*range_bracket[1]]
    elif bracket:
        bracket = bracket # Fussyness for clarity.
    else: # guess a range:
        if log:
            bracket = [tab_mean, tab_mean+(tab_stdev*2.0)]
        else: # better to make it thinner
            bracket = [tab_mean, tab_mean+(tab_stdev*1.0)]

    config.log.info("chip_seq_cluster_heatmap: suggested bracket = [{:.2f}, {:.2f}]".format(bracket[0], bracket[1]))

    if filename:
        if not titles:
            titles = [p.name for p in list_of_peaks]
        real_filename = self.draw.multi_heatmap(filename=filename, groups=groups, titles=titles, imshow=imshow, size=size,
            list_of_data=list_of_tables, colour_map=cmap, colbar_label=colbar_label, bracket=bracket, frames=frames,
            dpi=300, **kargs)
        config.log.info("chip_seq_cluster_heatmap: Saved overlap heatmap to '%s'" % real_filename)
    return ret_data
def chip_seq_cluster_pileup(self,
    filename=None,
    multi_plot=True,
    min_members=False,
    **kargs):
    """
    **Purpose**
        This is an addendum to chip_seq_cluster_heatmap(). You only need run this
        directly after chip_seq_cluster_heatmap() and it will draw aggregate pileup
        graphs either all on the same graph (with a legend) or will draw a multi_plot
        with many graphs (when multi_plot=True, the default).

        Note, you do not need to respecify the data for this, but you must run
        chip_seq_cluster_heatmap() first, before this function.

        By default the yscale is locked to the maximum value - so the plots are all to the same
        scale.

    **Arguments**
        filename (Required)
            A base file name to save the images to. This function will save multiple files, one
            for each cluster/group. The file name will be modified in this manner:
            if filename=="base.png", the modified versions will be: "base_bin_<membership>_cid[1..n].png"
            i.e. "_bin_<membership>_cid<num>" will be inserted before the final filetype (in this
            case a png file).

        multi_plot (Optional, default=True) ONLY True IS IMPLEMENTED
            If True, plot all pileups on separate graphs, plotted sequentially horizontally as part of the same
            figure.

            If False then plot them all on the same graph, and with a legend.

        min_members (Optional, default=False)
            Only save the image if the number of members in the group > min_members

    **Returns**
        The pileup_data as a dict in the form:
        {<cluster_id>: [array_data1, array_data2 ... array_dataN],
        <cluster_id>: [...],
        ...
        }
    """
    assert filename, "chip_seq_cluster_pileup(): you must provide a filename"
    # __pileup_names (and the other __pileup_* attributes) are populated by
    # chip_seq_cluster_heatmap(); absence means it was never run.
    assert self.__pileup_names, "chip_seq_cluster_pileup(): You must run chip_seq_cluster_heatmap() first"

    base_filename = ".".join(filename.split(".")[0:-1])

    # One subplot per ChIP-seq library; widen the figure to fit them all.
    num_plots = len(self.__pileup_data[1])
    if not "size" in kargs:
        kargs["size"] = (4*num_plots, 7)

    # All pileups share the same number of bins; use group 1 for the x extent.
    maxx = self.__pileup_data[1][0].shape[0]

    for cid in self.__pileup_data:
        # cid is 1-based; __pileup_groups_membership is 0-based, hence cid-1.
        binary_membership = ''.join([str(int(a)) for a in self.__pileup_groups_membership[cid-1]["id"]])
        this_filename = "{}_bin_{}_cid{}.png".format(base_filename, binary_membership, cid) # savefigure will modify png if needed.

        if min_members and self.__pileup_group_sizes[cid] < min_members:
            continue

        fig = self.draw.getfigure(**kargs)
        fig.suptitle("Group: %s #members: %s Membership: %s" % (cid, self.__pileup_group_sizes[cid], self.__pileup_groups_membership[cid-1]["id"]))

        # get the max x and y axes so every subplot shares the same y scale:
        maxy = max([a.max() for a in self.__pileup_data[cid]])
        miny = min([a.min() for a in self.__pileup_data[cid]])

        for cfig, data in enumerate(self.__pileup_data[cid]):
            ax = fig.add_subplot(1, len(self.__pileup_data[cid]), cfig+1)
            x = numpy.arange(len(data))
            ax.plot(x, data)

            ax.set_xlim([0, maxx-2]) # -2 to trim off the unsightly tail due to binning.
            [t.set_visible(False) for t in ax.get_xticklabels()]
            ax.set_ylim([miny, maxy])
            if cfig >= 1: # nice bodge to blank labels on subsequent graphs.
                [t.set_visible(False) for t in ax.get_yticklabels()]
            else:
                ax.set_ylabel(self.__pileup_y_label)
            ax.set_title("%s (%s)" % (self.__pileup_names[cfig], self.__pileup_groups_membership[cid-1]["id"][cfig]))
            self.draw.do_common_args(ax, **kargs)

        self.draw.savefigure(fig, this_filename)
        config.log.info('Saved {}'.format(this_filename))
    return self.__pileup_data
def genome_dist_radial(self, genome, layout, filename=None, randoms=None, **kargs):
    """
    **Purpose**
        Measure genome distributions of a list of genome coordinates relative to a list of TSS's.
        As seen in Hutchins et al., 2013 NAR Figure 1D. The version here is slightly generalised compared
        to the version used in that paper.

        Also, one disadvantage here is the lack of a key...

    **Arguments**
        genome (Required)
            A genome with a 'tss_loc' key to annotate against.

        layout (Required)
            A tuple describing the radial plot grid: (number of rows, number of columns).
            Should accommodate len(self) plots.

        randoms (Optional)
            A list of random peaks to treat as the background binding, i.e. the binding
            pattern expected by chance alone. Drawn as grey bars behind the data.

        filename (Required)
            filename to save the radial plots to

    **Returns**
        None. Saves the figure to filename.
    """
    assert genome[0], "genome_dist_radial: genome appears to be empty"
    assert "tss_loc" in list(genome.keys()), "genome_dist_radial: genome does not have a 'tss_loc' key"

    annotation = genome

    # Collect the genome distribution for each peak list:
    res = {}
    for p in self.linearData:
        data, back, back_err, cats = p.genome_distribution(annotation, randoms, filename=None)
        res[p.name] = {"data": data, "back": back, "err": back_err}

    fig = self.draw.getfigure(**kargs)
    fig.subplots_adjust(0.02, 0.02, 0.97, 0.97, wspace=0.1, hspace=0.1)

    # Work out the angular positions for the histogram segments.
    erad = 0.69813170079773 # each segment gets 0.69813170079773 (or thereabouts) rads
    # All lists are annotated against the same categories, so any entry gives
    # the number of segments (previously this leaked 'data' from the loop above).
    num_cats = len(next(iter(res.values()))["data"])
    theta = numpy.arange(0.0, 2*numpy.pi, 2*numpy.pi/num_cats)

    # colour for each segment
    colors = ["#FFF800", # (255, 248, 0)
        "#000E7C", # (0, 14, 177)
        "#001EFF", # (0, 30, 255)
        "#6275FF", # (98, 117, 255)
        "#B1BAFF", # (177, 186, 255)
        "#FFB7B1", # (255, 183, 177)
        "#FF6E62", # (255, 110, 98)
        "#FF1300", # (255, 19, 0)
        "#7C0900"] # (124, 9, 0)

    for i, k in enumerate(res):
        ax = fig.add_subplot(layout[0], layout[1], i+1, polar=True)
        if res[k]["back"]:
            # Bug fix: this previously called axes[k].bar(...) but 'axes' was
            # never defined, raising NameError whenever randoms were supplied.
            ax.bar(theta-0.10, res[k]["back"], width=erad, bottom=0.0, alpha=0.8, ec="none", color="grey")
        ax.bar(theta, res[k]["data"], width=erad-0.20, bottom=0.0, alpha=0.9, ec="none", color=colors)
        ax.set_title(k, size=7)
        ax.set_xticks(theta-0.10)
        ax.set_xticklabels("")

        l = ax.get_ylim()
        [t.set_fontsize(10) for t in ax.get_yticklabels()]
        # Re-label the radial ticks as percentages:
        ax.set_yticklabels(["%s%%" % i for i in range(int(l[0]), int(l[1]+5), int(l[1]//len(ax.get_yticklabels())))][1:])

    actual_filename = self.draw.savefigure(fig, filename)
    config.log.info("genome_dist_radial: Saved '%s'" % actual_filename)
def GO_heatmap(self, filename, p_value_limit=0.01, num_top=5, pvalue_key='pvalue',
    size=[8, 6], bracket=[1.3,4], row_cluster=True, col_cluster=False, # heatmap args
    heat_wid=0.15, cmap=cm.Reds, border=True, row_font_size=7,
    heat_hei='proportional', grid=True, ontology=None, draw_numbers_fmt='%.1f',
    draw_numbers=True, draw_numbers_threshold=2.0, draw_numbers_font_size=5, do_negative_log10=True,
    **kargs):
    '''
    **Purpose**
        Produce a heatmap of GO categories from a glglob of GO genelists (glgo's)

    **Arguments**
        filename (Required)
            filename to save the resulting heatmap to

        p_value_limit (Optional, default=0.01)
            minimum p-value to include in list

        pvalue_key (Optional, default='pvalue')
            The key in your GO lists that contains some sort of significance
            result.

        num_top (Optional, default=5)
            Generally these heatmaps do not have much space to contain that
            many categories, so you need to take the top N from each list.

            GO_heatmap will 'fill in' categories in other lists even if they do not
            fulfill the 'p_value_limit' and 'num_top' criteria.

            However, this only occurs if your GO lists contain all GO terms. If
            you have already truncated the lists for some p-value then glbase cannot
            fill in the missing data.

        ontology (Optional, default=None)
            DAVID will give you a table containing all GO categories. Use this to specify
            a single ontology to use. Assumes the genelists have an 'ontology' key.

        do_negative_log10 (Optional, default=True)
            By default convert the value in pvalue_key into the -log10().
            Set this to False if your values are already transformed.

        This function will also accept all glbase heatmap arguments (see expression.heatmap).
        A few args have altered defaults:

        heat_hei (Optional, default='proportional')
            Sets the heatmap to a fixed y-size for each row.
            Set to a normal heat_hei value if you prefer.

        bracket (Optional, default=[1.3, 4.0])
            the bracket for the min and max of the heatmap. The default
            assumes -log10 transformed data, i.e. p-values of 0.05 to 0.0001.

    **Returns**
        The resorted row names (as a list, top to bottom) and a heatmap in filename.
        Returns False if no categories passed the filters.
    '''
    number_of_clusters = len(self)
    go_store = {}      # {category_name: [value per list]}; -1 marks 'not measured'
    cond_names_idx = {} # list name -> column index

    # Pass 1: take the top N significant categories from each list.
    for idx, go in enumerate(self.linearData):
        cond_names_idx[go.name] = idx
        if not go:
            config.log.warning("GO_heatmap: GO list '%s' was empty, skipping" % go.name)
            continue

        go.sort(pvalue_key)

        if ontology:
            this_ont = go.getRowsByKey('ontology', ontology)
            topN = this_ont[0:num_top]
        else:
            topN = go[0:num_top]

        for item in topN:
            # Bug fix: previously the stored value hardcoded item['pvalue'];
            # now pvalue_key is used consistently.
            p = float(item[pvalue_key])
            if do_negative_log10:
                if p < p_value_limit:
                    if item['name'] not in go_store:
                        go_store[item['name']] = [-1] * number_of_clusters
                    # guard against -log10(0.0) raising a math domain error:
                    go_store[item['name']][idx] = -math.log10(max(p, 1e-300))
            else:
                # values are assumed already -log10 transformed:
                if p > -math.log10(p_value_limit): # i.e. 0.01
                    if item['name'] not in go_store:
                        go_store[item['name']] = [-1] * number_of_clusters
                    go_store[item['name']][idx] = p

    # Pass 2: fill in the holes. Every selected category gets its value from
    # every list that measured it, regardless of significance (see docstring).
    # Bug fix: this loop previously tested the stale 'item' variable left over
    # from pass 1 instead of the freshly fetched this_k[0].
    for go in self.linearData:
        for k in go_store:
            this_k = go.get(key='name', value=k, mode='lazy') # by default
            if this_k:
                p = float(this_k[0][pvalue_key])
                if do_negative_log10:
                    go_store[k][cond_names_idx[go.name]] = -math.log10(max(p, 1e-300))
                else:
                    go_store[k][cond_names_idx[go.name]] = p

    # Assemble into an expression object for heatmap drawing:
    newe = []
    for k in go_store:
        newe.append({'name': k.replace("~", ":"), 'conditions': go_store[k]}) # REPAIR DAVID GO names

    cond_names = sorted(zip(list(cond_names_idx.keys()), list(cond_names_idx.values())), key=itemgetter(1))
    cond_names = [i[0] for i in cond_names]

    goex = expression(loadable_list=newe, cond_names=cond_names)
    if len(goex) == 0:
        config.log.warning('GO list was empty, skipping')
        return False

    if heat_hei == 'proportional':
        heat_hei = 0.011*len(goex)

    res = goex.heatmap(filename=filename, size=size, bracket=bracket,
        row_cluster=row_cluster, col_cluster=col_cluster,
        heat_wid=heat_wid, cmap=cmap, border=border,
        row_font_size=row_font_size, heat_hei=heat_hei, grid=grid,
        draw_numbers=draw_numbers, colbar_label='-log10(%s)' % pvalue_key,
        draw_numbers_threshold = -math.log10(p_value_limit),
        draw_numbers_fmt=draw_numbers_fmt,
        draw_numbers_font_size=draw_numbers_font_size)
    config.log.info("GO_heatmap: Saved heatmap '%s'" % filename) # was log.warning; this is not a warning
    # Return a concrete list (heatmaps are drawn bottom-to-top, hence reversed):
    return list(reversed(res["reordered_rows"]))
def measure_density(self, trks, peaks, norm_by_library_size=True, log=False,
    read_extend=0, pointify=True, expand=1000,
    **kargs):
    """
    **Purpose**
        get the seq tag density from the trks, and return as an expression object

    **Arguments**
        trks (Required)
            a list of tracks/flats

        peaks (Required)
            a list of peaks, a genelist containing a 'loc' key

        read_extend (Optional, default=0)
            read extend the sequence tags in the tracks by xbp

        norm_by_library_size (Optional, default=True)
            normalise the result by the [library size/1e6]

        log (Optional, default=False)
            log2 transform the resulting matrix (with a 0.1 pad)

        pointify (Optional, default=True)
            convert the genomic locations to the center of the peak

        expand (Optional, default=1000)
            expand the left and right flanks of the genomic coordiantes by <expand> base pairs

            Performed AFTER pointify

    **Returns**
        an expression object, with the conditions as the tag density from the tracks
    """
    assert isinstance(trks, list), 'measure_density: trks must be a list'
    assert 'loc' in list(peaks.keys()), 'measure_density: no loc key found in peaks'
    # Duplicate track names would collapse into one expression condition:
    all_trk_names = [t["name"] for t in trks]
    assert len(set(all_trk_names)) == len(all_trk_names), 'track names are not unique. Please change the track.meta_data["name"] to unique names'

    # Work on a copy; peaks is modified (pointify/expand/sort and 'conditions' added).
    peaks = peaks.deepcopy()
    if pointify:
        peaks = peaks.pointify()
    if expand:
        peaks = peaks.expand('loc', expand)
    peaks.sort('loc')

    newl = []

    curr_chrom = None
    curr_data = None
    curr_n = 0

    # Pre-fill one density slot per track for every peak:
    for p in peaks:
        p["conditions"] = [0.0 for t in trks]

    # total chromosome loads, used only for the progress bar:
    all_chroms = len(set([i['chr'] for i in peaks['loc']])) * len(trks)
    # library sizes in millions of reads, for RPM-style normalisation:
    all_sizes = [t.get_total_num_reads() / 1e6 for t in trks]

    for it, t in enumerate(trks):
        pb = progressbar(all_chroms)
        curr_chrom = None
        for p in peaks:
            p_loc = p['loc']
            # peaks are sorted by loc, so each chromosome array is loaded once per track:
            if p_loc['chr'] != curr_chrom:
                del curr_data
                curr_data = t.get_array_chromosome(p_loc['chr'], read_extend=read_extend) # this is a numpy array
                curr_chrom = p_loc['chr']
                pb.update(curr_n)
                curr_n += 1

            d = curr_data[p_loc['left']:p_loc['right']]
            if len(d) == 0: # fell off edge of array
                p["conditions"][it] = 0 # Need to put a value in here
                continue

            if norm_by_library_size:
                p["conditions"][it] = numpy.average(d) / all_sizes[it]
            else:
                p["conditions"][it] = numpy.average(d)

    expn = expression(loadable_list=peaks.linearData, cond_names=[t["name"] for t in trks])
    if log:
        expn.log(2, .1)
    return(expn)
def measure_enrichment(self, trks, peaks, log=False,
    read_extend=0, peak_window=200, local_lambda=5000,
    **kargs):
    """
    **Purpose**
        get the seq tag enrichment from the trks,
        and return as an expression object.

        Enrichment is scored as a Z-like statistic:
        (peak max - local lambda mean) / local lambda stdev.

    **Arguments**
        trks (Required)
            a list of tracks/flats

        peaks (Required)
            a list of peaks, a genelist containing a 'loc' key

        read_extend (Optional, default=0)
            Accepted for API compatibility, but NOTE(review): this argument is
            currently unused — reads are taken directly from the flat track.

        log (Optional, default=False)
            log2 transform the resulting matrix (with a 0.1 pad)

        peak_window (Optional, default=200)
            window around the center of the peak to score the peak enrichment.

        local_lambda (Optional, default=5000)
            Number of base pairs around the peak to score the local lambda

    **Returns**
        an expression object, with the conditions as the enrichment Z scores from the tracks
    """
    assert isinstance(trks, list), 'measure_enrichment: trks must be a list'
    assert 'loc' in list(peaks.keys()), 'measure_enrichment: no loc key found in peaks'

    # Duplicate track names would collapse into one expression condition:
    all_trk_names = [t["name"] for t in trks]
    assert len(set(all_trk_names)) == len(all_trk_names), 'track names are not unique. Please change the track.meta_data["name"] to unique names'

    # Work on a copy; peaks is modified ('conditions' added).
    peaks = peaks.deepcopy()
    peaks.sort('loc')

    # Pre-fill one enrichment slot per track for every peak:
    for p in peaks:
        p["conditions"] = [0.0 for t in trks]

    lambda_window = local_lambda
    peak_window_half = peak_window // 2
    lambda_inner = lambda_window - peak_window_half

    prog = progressbar(len(trks))
    for it, t in enumerate(trks):
        for p in peaks:
            p_loc_chrom = 'chr{0}'.format(p['loc'].loc['chr'])
            p_loc = (p['loc'].loc['left'] + p['loc'].loc['right']) // 2

            # Grab the whole lambda window around the peak center directly from the flat:
            all_data = t.mats[p_loc_chrom][p_loc-lambda_window:p_loc+lambda_window]
            peak_data = all_data[lambda_inner:lambda_inner+peak_window]
            # The above can fail, as peaks can come from dense data, and then be tested against a sparse flat
            if len(peak_data) == 0:
                continue

            # Local lambda = everything in the window except the peak itself:
            left_flank = all_data[0:lambda_inner]
            rite_flank = all_data[lambda_inner+peak_window:]
            len_lambda = len(left_flank) + len(rite_flank)
            sum_lambda = float(left_flank.sum()) + float(rite_flank.sum()) # bug if pstdev kept as numpy numbers
            lam = sum_lambda / len_lambda # mean_lambda
            lam_std = max([0.001, left_flank.std(), rite_flank.std()]) # Bracket at 0.001; also makes division-by-zero impossible
            pea = max(peak_data) # NOTE(review): scores the single highest base — confirm max vs mean is intended

            if pea != 0:
                # Z-like score; peaks with zero signal keep the 0.0 default
                # rather than a (misleading) negative Z.
                p["conditions"][it] = (pea-lam) / lam_std # Z
        prog.update(it)

    expn = expression(loadable_list=peaks.linearData, cond_names=[t["name"] for t in trks])
    if log:
        expn.log(2, .1)
    return expn
def redefine_peaks(self,
    super_set_of_peaks,
    list_of_flats,
    filename = None,
    Z_threshold = 1.2,
    peak_window:int = 200,
    lambda_window:int = 5000,
    **kargs):
    """
    **Purpose**
        A strategy for re-calling peaks from some arbitrary set of flat files.

        It's a bit similar to the strategy published in Li et al., Cell Stem Cell 2017.
        However, this version uses a local lambda model drawn from a 10 kb window surrounding
        the peak. This has the advantage that you don't need to go to the fiddly
        trouble of generating a suitable pseudo-background for ATAC-seq. Additionally it
        should take into account the relative background of the library, allowing extraction
        of information even in poor quality libraries. The disadvantage - and this
        is theoretical - but if you have a lot of very large repeat elements then it
        might lead to erroneous calling of not-peaks.

        Briefly, the strategy is:

        Peak calling is conservative on any single ChIP-seq library. To get better sensitivity
        I pool information from other libraries by making a superset of peaks
        (all possible peaks in some set of ChIP-seq) and then 're-calling' the peaks
        in each library by modeling the enrichment. This allows me to rescue weak peaks.

        It then builds a model, constructs a Z-score and then only keeps those peaks
        that are greater than the threshold.

        This allows you to rescue weak peaks, and also to clean the false-negatives
        that are very common when cross-comparing peaks.

    **Arguments**
        super_set_of_peaks (Required)
            A genelist, containing a 'loc' key.

            This can be any list of peaks. I suggest you create this using the
            glglob.chip_seq_cluster() function. However, you can also use bedtools
            or your favourite grouping strategy.

            I will pointify and expand the genomic locations to make them all uniform in size.

        list_of_flats (Required)
            a list of flats, make sure each flat has a unique 'name' slot as that
            will be used to store the result.

            For each flat, the peaks will be re-called, and returned as a key in the
            returned dictionary.

        filename (Optional)
            the basefilename to save the model images to, one file for each flat.

        Z_threshold (Optional, default=1.2)
            The Z score cut off to call a peak or non-peak.

        peak_window (Optional, default=200)
            The window around the peak center, or summit, to measure the peak enrichment.

        lambda_window (Optional, default=5000)
            The window around the peak to estimate the local background (lambda).

            Note that the actual window will be from the peak_window to the extent:
                left flank  = -lambda_window <- lambda_window-peak_window
                right flank = peak_window -> lambda_window-peak_window
            i.e. with the default settings:
                    left flank       peak      right flank
                |-----4900 bp-----|-200bp-|-----4900 bp-----|

    **Returns**
        A dictionary, in the form:
        {flat[0]['name']: genelist(),
        flat[0]['name']: genelist(),
        ...
        flat[0]['name']: genelist()}

        where each genelist contains a 'loc' key, and several new keys, one for each flat:
        *_lam10 = the local lambda score
        *_lam10std = the local lambda standard deviation
        *_peak_score = the maximum peak height for this peak.
    """
    assert isinstance(list_of_flats, list), 'list_of_flats must be a list'
    assert False not in [i['name'] for i in list_of_flats], 'list_of_flats seems to not contain flats or flat-like objects'
    assert len(set([i['name'] for i in list_of_flats])) == len(list_of_flats), 'the "name" slots of the flats are not unique'
    assert 'loc' in super_set_of_peaks.keys(), 'super_set_of_peaks does not contain a "loc" (genomic location) key'

    peak_window = peak_window  # NOTE(review): no-op self-assignment, appears vestigial
    peak_window_half = peak_window //2
    # Width of each flank: everything inside lambda_window that is not the peak itself.
    lambda_inner = lambda_window - peak_window_half

    # One result list per flat, keyed by the flat's (unique) name.
    rets = {f['name']: [] for f in list_of_flats}

    super_set_of_peaks = super_set_of_peaks.pointify().expand('loc', peak_window) # Make peaks symmetric
    # Work with bare location dicts ({'chr', 'left', 'right'}) from here on.
    super_set_of_peaks = [p['loc'].loc for p in super_set_of_peaks]

    # First I estimate the local background
    for f in list_of_flats:
        sam_name = f['name'].replace('.flat', '')
        config.log.info('Doing {0}'.format(sam_name))

        this_chrom = None  # NOTE(review): this_chrom/this_data are never used below
        this_data = None

        prog = progressbar(len(super_set_of_peaks))
        for i, p in enumerate(super_set_of_peaks):
            p_loc_chrom = 'chr{0}'.format(p['chr'])
            p_loc = (p['left'] + p['right']) // 2
            p_loc_left = p_loc - peak_window_half  # NOTE(review): computed but unused
            p_loc_rite = p_loc + peak_window_half  # NOTE(review): computed but unused

            #all_data = f.get(loc=None, c=p_loc_chrom, left=p_loc, rite=p_loc)
            all_data = f.mats[p_loc_chrom][p_loc-lambda_window:p_loc+lambda_window] # You can just reach in;
            peak_data = all_data[lambda_inner:lambda_inner+peak_window]

            # The above can fail, as peaks can come from dense data, and then be tested against a sparse flat
            if len(peak_data) == 0:
                p['peak_score'] = 0 # fill the entries in, with 0 due to missing data in the array.
                p['lam10'] = 0
                p['lam10std'] = 0
                continue

            # Background (lambda) is estimated from both flanks, excluding the peak itself.
            left_flank = all_data[0:lambda_inner]
            rite_flank = all_data[lambda_inner+peak_window:]
            len_lambda = len(left_flank) + len(rite_flank)
            sum_lambda = float(left_flank.sum()) + float(rite_flank.sum()) # bug if pstdev kept as numpy numbers
            p['lam10'] = sum_lambda / len_lambda # mean_lambda
            p['lam10std'] = max([0.001, left_flank.std(), rite_flank.std()]) # Bracket at 0.001
            p['peak_score'] = max(peak_data) # should this be the max?
            prog.update(i)

        # Global background model across all peaks for this flat.
        lam10 = [p['lam10'] for p in super_set_of_peaks]
        avg = mean(lam10)
        std = pstdev(lam10)
        config.log.info('Average background: %.3f' % avg)
        config.log.info('Average STDev: %.3f' % std)
        thresh = avg + (std * Z_threshold)
        config.log.info('Guessed threshold value of {1:.2f} (For a Z of {0:.1f})'.format(Z_threshold, thresh))

        # Plot the histogram:
        if filename:
            fig = self.draw.getfigure(**kargs)
            ax = fig.add_subplot(111)
            ax.hist(lam10, bins=50, range=[0,50], histtype='step', label='Background')
            ax.hist([p['peak_score'] for p in super_set_of_peaks], bins=50, range=[0,50], histtype='step', label='Peaks')
            ax.axvline(avg, ls=':', color='red')
            ax.axvline(avg+(std * Z_threshold), ls=':', color='green')
            ax.legend()
            self.draw.savefigure(fig, '{0}_{1}.png'.format(filename, sam_name))

        # redefine peaks:
        prog = progressbar(len(super_set_of_peaks))
        for i, p in enumerate(super_set_of_peaks):
            # First, filter on the global Z:
            if p['peak_score'] > thresh:
                # Then filter on the localz:
                z_score = (p['lam10'] + (p['lam10std']*Z_threshold))
                if p['peak_score'] > z_score:
                    p_add = {'loc': location(chr=p['chr'], left=p['left'], right=p['right'])}
                    p_add['lambda_mean'] = p['lam10']
                    p_add['lambda_std'] = p['lam10std']
                    p_add['peak_score'] = p['peak_score']
                    try:
                        p_add['Z-score'] = ((p['peak_score'] - p['lam10']) / p['lam10std'])
                    except ZeroDivisionError:
                        p_add['Z-score'] = 100
                    rets[f['name']].append(p_add)
            prog.update(i)

    # Wrap each raw result list into a genelist named after its flat.
    for f in rets:
        bed = genelist()
        bed.load_list(rets[f])
        bed.name = f
        rets[f] = bed

    config.log.info('New peak lengths:')
    for f in rets:
        config.log.info(' %s: %s peaks' % (f, len(rets[f])))

    return(rets)
def chip_seq_heatmap(self,
    list_of_peaks,
    list_of_trks,
    filename:str = None,
    norm_by_library_size = False,
    bins:int = 100,
    pileup_distance:int = 1000,
    cache_data = False,
    bracket = None,
    range_bracket = None,
    frames = True,
    row_labels = None,
    col_labels = None,
    read_extend:int = 200,
    imshow:bool = True,
    cmap = cm.plasma,
    log_pad = None,
    log = 2,
    per_column_bracket:bool = False,
    sort_by_sum_intensity:bool = False,
    sort_by_intensity:bool = False,
    size = None,
    respect_strand:bool = False,
    **kargs):
    """
    **Purpose**
        Draw heatmaps for the indicated list of peaks.

        peaks will be piled up on top of each other in proportional blocks (separated by a line).

        pileups will be plotted above, and each

    **Arguments**
        list_of_peaks (Required)
            A list of genelists of peaks from your ChIP-seq data to interrogate.
            The peaks will be stacked from bottom to top

        list_of_trks (Required)
            A list of trks to draw the sequence tag reads from to build the pileups.

        filename (Optional, default=None)
            If set to a string a heatmap & pileup will be saved to filename.

        norm_by_library_size (Optional, default=False)
            Normalize the read pileup data within each library to the size of the
            library to assist in cross-comparison of ChIP-seq libraries.

        pileup_distance (Optional, default=1000)
            distance around the particular location to draw in the pileup.

        read_extend (Optional, default=200)
            The size in base pairs to extend the read. If a strand is present it will expand
            from the 3' end of the read.

        row_labels (Optional, default=from the peak.name)
            row labels for the heatmaps;

        col_labels (Optional, default=from the trk['name'])
            column labels

        bins (Optional, default=100)
            number of bins to use for the pileup. Best to use conservative numbers (30-200) as
            large numbers of bins can consume huge amounts of memory.

        log (Optional, default=2)
            Use logarithms for the heatmap. Possible options are 2 and 10.

        cmap (Optional, default=matplotlib.cm.plasma)
            A colour map for the heatmap.

        sort_by_sum_intensity (Optional, default=False)
            False: Use the order that the peaks arrived in
            True: Sort by the sums of intensities across all columns. (Sorting is before bracket)
            Note, exclusive with sort_by_intensity

        sort_by_intensity (Optional, default=False)
            False: No sorting
            True: Sort by the sum of each row, but independently for each heatmap;
            Note, exclusive with sort_by_sum_intensity

        respect_strand (Optional, default=False)
            If available, respect the orientation of the strand from the genelist.
            This is useful if you are, say, using the TSS's and want to maintain the
            orientation with respect to the transcription direction.

        ######## Bracket system:

        There are a bunch of args here.

        per_column_bracket sets a bracket for each column (track) in the heatmaps. If this is True,
        you should use the range_bracket system.

        If per_column_bracket is False, then you can use the range_bracket system (recommended),
        but you can also set your own bracket with the bracket option.

        per_column_bracket (Optional, default=False)
            Have a bracket for each column, or (when False) a single bracket for all of the data

        range_bracket (Optional, default=[0.4, 0.9], exclusive with bracket)
            chip_seq_heatmap can have column-wise (track-wise) specific brackets. So, only range_bracket is valid

            Okay, hold your hats, this is complicated.

            range_bracket will bracket the range of values as a fraction between [min(data), max(data)]
            i.e. If range_bracket=0.5 (the default) then the data is bracketed as:
            [max(data)*range_bracket[0], max(data)*range_bracket[1]]. The practical upshot of this is it allows you to shift
            the colour bracketing on the heatmap around without having to spend a long time finding
            a suitable bracket value.

            Bracketing is performed AFTER log.

            Typical bracketing would be something like [0.4, 0.9]

            By default glbase attempts to guess the best range to draw based on the
            median and the stdev. It may not always succeed.

        cache_data (Optional, default=False)
            cache the pileup data into the file specified in cache_data. This speeds up reanalysis.

            Note that storage of data is AFTER normalisation, resolution, pileup_distance,
            bins, but before heatmap drawing.
            This allows you to store the slow part of chip_seq_heatmap()
            and so iterate through different heatmap drawing options without having to
            do the whole pileup again.

            note that if cache_data file does not exist then it will be created and
            pileup data generated. If the file does exist, data will be read from that
            file and used for heatmap drawing.

        frames (Optional, default=True)
            Draw black frames around the heatmaps and category maps.

        imshow (Optional, default=True)
            Embed the heatmap as an image inside a vector file. (Uses matplotlib imshow
            to draw the heatmap part of the figure. Allows very large matrices to
            be saved as a reasonably sized svg/pdf, with the heatmap part as a raster image
            and all other elements as vectors).

    **Returns**
        Returns None
    """
    assert not (range_bracket and bracket), "You can't use both bracket and range_bracket"
    assert False not in ['loc' in gl.keys() for gl in list_of_peaks], 'At least one of your peak data (list_of_peaks) does not contain a "loc" key'
    assert not (sort_by_sum_intensity and sort_by_intensity), 'sort_by_sum_intensity and sort_by_intensity cannot both be True'
    if 'normalize' in kargs: raise AssertionError('normalize has been deprecated, use norm_by_library_size')

    total_rows = 0  # NOTE(review): never used below, appears vestigial

    # Get the size of each library if we need to normalize the data.
    if norm_by_library_size:
        # get and store the read_counts for each library to reduce an sqlite hit.
        read_totals = [trk.get_total_num_reads()/float(1e6) for trk in list_of_trks]
        if True in [i <= 0 for i in read_totals]:
            raise AssertionError('norm_by_read_count=True, but at least one flat_track has no total number of reads')

    # I will need to go back through the chr_blocks data and add in the pileup data:
    bin_size = int((pileup_distance+pileup_distance) / bins)
    block_len = pileup_distance+pileup_distance # get the block size
    data = None

    # Populate the datastores:
    # matrix[track_index][peaklist_index][peak_index] -> binned pileup row
    matrix = {}
    pileup = {}  # NOTE(review): populated but never read below; looks vestigial
    for tindex, _ in enumerate(list_of_trks):
        matrix[tindex] = {} # Populate the final matrix
        pileup[tindex] = {} # Populate the final matrix
        for plidx, peaklist in enumerate(list_of_peaks):
            matrix[tindex][plidx] = {} # Populate the final matrix
            pileup[tindex][plidx] = {} # Populate the final matrix
            for pindex, peak in enumerate(peaklist):
                matrix[tindex][plidx][pindex] = None
                pileup[tindex][plidx][pindex] = None

    # Populate the order data so I can use the chromosome cache system;
    # porder[peaklist_index][chromosome] -> list of peak indices on that chromosome
    porder = {}
    for plidx, peaklist in enumerate(list_of_peaks):
        porder[plidx] = {} # make an index hitter so that order is preserved:
        for pindex, peak in enumerate(peaklist):
            p_loc_chrom = peak['loc']['chr']
            if p_loc_chrom not in porder[plidx]:
                porder[plidx][p_loc_chrom] = []
            porder[plidx][p_loc_chrom].append(pindex)

    # sort out cached_data
    if cache_data and os.path.isfile(cache_data): # reload previously cached data.
        oh = open(os.path.realpath(cache_data), "rb")
        matrix = pickle.load(oh)
        oh.close()
        config.log.info("chip_seq_heatmap: Reloaded previously cached pileup data: '%s'" % cache_data)
        # sanity check the matrix data against the current arguments
        assert isinstance(matrix, dict), '{} does not match the expected data, suggest you rebuild'.format(cache_data)
        assert isinstance(matrix[0], dict), '{} does not match the expected data, suggest you rebuild'.format(cache_data)
        assert isinstance(matrix[0][0], (numpy.ndarray, numpy.generic)), '{0} does not match the expected data, suggest you rebuild'.format(cache_data)
        assert len(matrix) == len(list_of_trks), '{} does not match the expected data, suggest you rebuild'.format(cache_data)
        assert len(matrix[0]) == len(list_of_peaks), '{} does not match the expected data, suggest you rebuild'.format(cache_data)
        for it, t in enumerate(list_of_trks):
            for ip, p in enumerate(list_of_peaks):
                assert matrix[it][ip].shape == (len(p), bins), '{} ({} {}) does not match the expected data, suggest you rebuild'.format(cache_data, t['name'], p.name)
    else:
        # No cached data, so we have to collect ourselves.
        expected_len = pileup_distance * 2  # NOTE(review): never used below
        config.log.info('chip_seq_heatmap: Collecting pileup data...')
        p = progressbar(len(list_of_trks))
        # New version that grabs all data and does the calcs in memory, uses more memory but ~2-3x faster
        for tindex, trk in enumerate(list_of_trks):
            for plidx, peaklist in enumerate(list_of_peaks):
                for chrom in porder[plidx]:
                    # The chr_blocks iterates across all chromosomes, so this only hits the db once per chromosome:
                    data = trk.get_array_chromosome(chrom, read_extend=read_extend) # This will use the fast cache version if available.
                    for pidx, peak in enumerate(porder[plidx][chrom]): # peak is the index to look into
                        left = peaklist.linearData[peak]['loc']['left']
                        rite = peaklist.linearData[peak]['loc']['right']
                        cpt = (left + rite) // 2
                        left = cpt - pileup_distance
                        rite = cpt + pileup_distance
                        # It's possible to ask for data beyond the edge of the actual data. truncate...
                        if rite > len(data):
                            rite = len(data)
                        if left > len(data):
                            left = len(data)
                        # NOTE(review): a negative 'left' (peak within pileup_distance of the
                        # chromosome start) is not clamped to 0, which would give a wrapped
                        # slice -- confirm this cannot occur, or clamp to 0.
                        dd = data[left:rite]
                        if len(dd) < block_len: # This should be a very rare case...
                            config.log.warning('Block miss (short)')
                            num_missing = block_len - len(dd)
                            ad = numpy.zeros(num_missing, dtype=numpy.float32)
                            dd = numpy.append(dd, ad)
                        elif len(dd) > block_len: # Should be never?
                            config.log.warning('Block miss (long)')
                            num_missing = block_len - len(dd)
                            dd = dd[0:block_len] # just grab the start. probably unreliable though?
                        if respect_strand:
                            if peaklist.linearData[peak]["strand"] in negative_strand_labels: # For the reverse strand all I have to do is flip the array.
                                dd = dd[::-1]
                        # Fill in the matrix table:
                        #pileup[tindex][plidx][pidx] += pil_data
                        matrix[tindex][plidx][peak] = [sum(dd[i:i+bin_size]) for i in range(0, len(dd), bin_size)]
            p.update(tindex)
        # convert to numpy arrays;
        for tindex, _ in enumerate(list_of_trks):
            for plidx, peaklist in enumerate(list_of_peaks):
                twoD_list = []
                for pindex, _ in enumerate(peaklist): # preserve original order;
                    twoD_list.append(matrix[tindex][plidx][pindex])
                matrix[tindex][plidx] = numpy.array(twoD_list)
        if cache_data: # store the generated data for later.
            oh = open(cache_data, "wb")
            pickle.dump(matrix, oh, -1)
            oh.close()
            config.log.info("chip_seq_heatmap: Saved pileup data to cache file: '{0}'".format(cache_data))

    colbar_label = "Tag density"
    if log:
        if not log_pad:
            log_pad = 0.1
        if not range_bracket: # suggest reasonable range;
            range_bracket = [0.6, 0.9]
        for tindex, _ in enumerate(list_of_trks):
            for plidx, peaklist in enumerate(list_of_peaks):
                if log == 2:
                    matrix[tindex][plidx] = numpy.log2(matrix[tindex][plidx]+log_pad)
                    colbar_label = "Log2(Tag density)"
                elif log == 10:
                    matrix[tindex][plidx] = numpy.log10(matrix[tindex][plidx]+log_pad)
                    colbar_label = "Log10(Tag density)"
                else:
                    raise AssertionError('log={} not found'.format(log))
    else:
        if not range_bracket: # suggest reasonable range;
            range_bracket = [0.02, 0.1]

    if norm_by_library_size:
        colbar_label = "Normalised {}".format(colbar_label)
        # Data is always saved unnormalised;
        for plidx, peaklist in enumerate(list_of_peaks):
            for tindex, _ in enumerate(list_of_trks):
                matrix[tindex][plidx] /= read_totals[tindex]

    if sort_by_sum_intensity:# i.e. sort for all columns and preserve row order
        for plidx, peaklist in enumerate(list_of_peaks):
            new_order = []
            heat_sums = None
            for tindex, _ in enumerate(list_of_trks):
                s = numpy.sum(matrix[tindex][plidx], axis=1)
                if heat_sums is None:
                    heat_sums = s
                else:
                    heat_sums += s
            d = [(i, v) for i, v in enumerate(heat_sums)]
            d = sorted(d, key=itemgetter(1))
            new_order = list([i[0] for i in d])
            # Apply the same row order across all tracks for this peaklist.
            for tindex, _ in enumerate(list_of_trks):
                matrix[tindex][plidx] = matrix[tindex][plidx][new_order,:]
    elif sort_by_intensity:# i.e. each heatmap is independent
        for plidx, peaklist in enumerate(list_of_peaks):
            new_order = []
            heat_sums = None
            for tindex, _ in enumerate(list_of_trks):
                heat_sums = numpy.sum(matrix[tindex][plidx], axis=1)
                d = [(i, v) for i, v in enumerate(heat_sums)]
                d = sorted(d, key=itemgetter(1))
                new_order = list([i[0] for i in d])
                matrix[tindex][plidx] = matrix[tindex][plidx][new_order,:]

    if not row_labels:
        row_labels = [p.name for p in list_of_peaks]
    if not col_labels:
        col_labels = [t['name'] for t in list_of_trks]

    brackets = None
    if per_column_bracket:
        bracket = None
        # Suggest brackets:
        t_stats = []  # NOTE(review): collected but never used below
        brackets = []
        for tindex, _ in enumerate(list_of_trks): # I can have track-wise brackets;
            for plidx, peaklist in enumerate(list_of_peaks):
                tab_max = max([tab.max() for tab in matrix[tindex][plidx]]) # need to get new tab_max for log'd values.
                tab_min = min([tab.min() for tab in matrix[tindex][plidx]])
                #tab_median = numpy.median([numpy.median(tab) for tab in list_of_tables])
                tab_mean = mean([numpy.average(tab) for tab in matrix[tindex][plidx]])
                tab_std = numpy.std(numpy.array([tab for tab in matrix[tindex][plidx]]))
                t_stats.append((tab_max, tab_min, tab_mean, tab_std))
                config.log.info('chip_seq_heatmap: trk={0} min={1:.2f}, max={2:.2f}, mean={3:.2f}, stdev={4:.2f}'.format(list_of_trks[tindex]['name'], tab_min, tab_max, tab_mean, tab_std))
                tab_range = tab_max - tab_min
                top = tab_min + tab_range*range_bracket[0]
                bot = tab_min + tab_range*range_bracket[1]
                brackets.append([top, bot])
            # NOTE(review): brackets is indexed by tindex here, which is only correct
            # when list_of_peaks has a single entry -- confirm intended usage.
            config.log.info("chip_seq_heatmap: trk={0}, suggested bracket=({1:.2f}, {2:.2f})".format(list_of_trks[tindex]['name'], brackets[tindex][0], brackets[tindex][1]))
    else:
        # Suggest brackets:
        if bracket:
            pass # Use the arg
        else: # Guess a bracket for all heatmaps;
            t_stats = []
            brackets = []
            for tindex, _ in enumerate(list_of_trks): # I can have track-wise brackets;
                for plidx, peaklist in enumerate(list_of_peaks):
                    tab_max = max([tab.max() for tab in matrix[tindex][plidx]]) # need to get new tab_max for log'd values.
                    tab_min = min([tab.min() for tab in matrix[tindex][plidx]])
                    #tab_median = numpy.median([numpy.median(tab) for tab in list_of_tables])
                    tab_mean = mean([numpy.average(tab) for tab in matrix[tindex][plidx]])
                    tab_std = numpy.std(numpy.array([tab for tab in matrix[tindex][plidx]]))
                    t_stats.append((tab_max, tab_min, tab_mean, tab_std))
                    config.log.info('chip_seq_heatmap: trk={0} min={1:.2f}, max={2:.2f}, mean={3:.2f}, stdev={4:.2f}'.format(list_of_trks[tindex]['name'], tab_min, tab_max, tab_mean, tab_std))
                    tab_range = tab_max -tab_min
                    top = tab_min + tab_range*range_bracket[0]
                    bot = tab_min + tab_range*range_bracket[1]
                    brackets.append([top, bot])
            # Collapse the per-track suggestions to one shared bracket.
            bracket = [max([b[0] for b in brackets]), max([b[1] for b in brackets])]
            brackets = None
            config.log.info("chip_seq_heatmap: suggested bracket=({:.2f}, {:.2f})".format(bracket[0], bracket[1]))

    if filename:
        real_filename = self.draw.grid_heatmap(
            data_dict_grid=matrix,
            filename=filename,
            row_labels=row_labels,
            col_labels=col_labels,
            colbar_label=colbar_label,
            imshow=imshow,
            size=size,
            colour_map=cmap,
            bracket=bracket,
            brackets=brackets,
            frames=frames,
            dpi=300,
            )
        config.log.info("chip_seq_heatmap: Saved overlap heatmap to '{0}'".format(real_filename))

    return None
def hic_correlate(self,
    list_of_hiccys:list,
    filename:str = None,
    optimal_ordering=True,
    bracket=[0.0, 1.0],
    aspect="square",
    **kargs):
    '''
    **Purpose**
        Measure the pairwise similarity of a set of hiccy objects by
        correlating their 2D contact matrices, and draw the result as
        a clustered heatmap.

        The Pearson R for each pair is the mean of the per-chromosome
        correlations of the flattened matrices.

    **Arguments**
        list_of_hiccys (required)
            The hiccy objects to compare. Row/column labels are taken
            from each hiccy's 'name' slot.

        filename (Required)
            filename to save the correlation heatmap to

    **Returns**
        A dictionary with the Pearson R matrix and the labels, in the
        same (reordered) order as drawn on the heatmap.
    '''
    assert list_of_hiccys, 'need a list_of_hiccys'
    assert isinstance(list_of_hiccys, list), 'list_of_hiccys must be a list'
    assert filename, 'Need a filename to save the correlation heatmap to'

    mode = 'Pearson R' # only 1 mode

    n_samples = len(list_of_hiccys)
    corr_mat = numpy.zeros((n_samples, n_samples))

    prog = progressbar(n_samples)
    for row, hic_a in enumerate(list_of_hiccys):
        corr_mat[row, row] = 1.0 # diagonal: self-correlation
        # only the upper triangle needs computing; mirror into the lower half
        for col in range(row + 1, n_samples):
            hic_b = list_of_hiccys[col]
            per_chrom_r = [
                pearsonr(
                    numpy.array(hic_a.mats[chrom]).flatten(),
                    numpy.array(hic_b.mats[chrom]).flatten())[0] # scipy.stats
                for chrom in hic_a.all_chrom_names] # hope they match!
            avg_r = numpy.mean(per_chrom_r)
            corr_mat[row, col] = avg_r
            corr_mat[col, row] = avg_r
        prog.update(row)

    labels = [h['name'] for h in list_of_hiccys]

    # explicit heatmap dimensions disable the square aspect
    square = not ("heat_hei" in kargs or "heat_wid" in kargs)

    results = self.draw.heatmap(filename=filename, data=corr_mat, square=square,
        bracket=bracket, row_names=labels, aspect=aspect, col_names=labels,
        colbar_label="Correlation (%s)" % mode, optimal_ordering=optimal_ordering,
        **kargs)

    config.log.info("hic_correlate: Saved '%s'" % results["real_filename"])

    return {"pearsonr": results["reordered_data"], "labels": results["reordered_cols"]}
| StarcoderdataPython |
3316364 | """
Visualization functions for different classifiers.
Contains plots for decision boundaries.
"""
import numpy as np
import scipy.stats as st
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
def plotc(parameters, ax=[], color='k', gridsize=(101, 101)):
    """
    Plot the decision boundary of a two-class linear classifier in a 2D
    scatterplot.

    INPUT   (1) array 'parameters': linear classifier parameters of shape
                (D features + 1, K classes); the first D rows are the
                weights and the last row the intercepts.
            (2) object 'ax': axes of a pyplot figure or subplot
                (def: empty, which creates a new figure)
            (3) str 'color': color of the contour in the plot (def: 'k')
            (4) tuple 'gridsize': number of points in the grid
                (def: (101, 101))
    OUTPUT  None
    """
    # Check for axes object; create a fresh figure if none was supplied.
    # (Bug fix: original tested the undefined name 'fig', raising NameError.)
    if not ax:
        _, ax = plt.subplots()

    # Get axes limits
    xl = ax.get_xlim()
    yl = ax.get_ylim()

    # Define a grid covering the visible plot area
    gx = np.linspace(xl[0], xl[1], gridsize[0])
    gy = np.linspace(yl[0], yl[1], gridsize[1])
    x, y = np.meshgrid(gx, gy)
    xy = np.vstack((x.ravel(), y.ravel())).T

    # Discriminant values of the grid: difference of the two class scores,
    # so the decision boundary is where this field crosses zero.
    z = np.dot(xy, parameters[:-1, :]) + parameters[-1, :]
    z = np.reshape(z[:, 0] - z[:, 1], gridsize)

    # Plot the zero-level contour, i.e. the decision boundary.
    # (Bug fix: 'levels' must be a sequence to select the 0 contour, and the
    # parameter of this function is named 'color', not 'colors'.)
    ax.contour(x, y, z, levels=[0], colors=color)
def plotlda(parameters, ax=[], colors='k', gridsize=(101, 101)):
    """
    Plot a linear discriminant analysis classifier in a 2D scatterplot.

    INPUT   (1) tuple 'parameters': consists of a list of class proportions
                (1 by K classes), an array of class means (K classes by
                D features), an array of class-covariance matrices (D features
                by D features by K classes)
            (2) object 'ax': axes of a pyplot figure or subject (def: empty)
            (3) str 'colors': colors of the contours in the plot (def: 'k')
            (4) tuple 'gridsize': number of points in the grid
                (def: (101, 101))
    OUTPUT  None
    """
    # Unpack parameters (pi, the class proportions, is currently unused)
    pi, mu, Si = parameters

    # Number of classes
    K = mu.shape[0]

    # Sum per-class covariance matrices to a single shared covariance.
    # (Bug fix: the condition must test the number of array dimensions;
    # len(Si) is the size of the first axis, which equals 3 whenever there
    # are 3 features, regardless of whether Si is stacked per class.)
    Si = np.asarray(Si)
    if Si.ndim == 3:
        Si = np.sum(Si, axis=2)

    # Check for figure object
    if not ax:
        fig, ax = plt.subplots()

    # Get axes limits
    xl = ax.get_xlim()
    yl = ax.get_ylim()

    # Define grid covering the visible plot area
    gx = np.linspace(xl[0], xl[1], gridsize[0])
    gy = np.linspace(yl[0], yl[1], gridsize[1])
    x, y = np.meshgrid(gx, gy)
    xy = np.stack((x, y), axis=2)

    # Evaluate each class-conditional Gaussian pdf on the grid
    z = np.zeros((gridsize[0], gridsize[1], K))
    for k in range(K):
        z[:, :, k] = st.multivariate_normal(mean=mu[k, :], cov=Si).pdf(xy)

    # Difference of Gaussians; the decision boundary is its zero level
    # (the factor 10 just spaces the drawn contour levels)
    dz = 10*(z[:, :, 1] - z[:, :, 0])

    # Plot grid
    ax.contour(x, y, dz, colors=colors)
| StarcoderdataPython |
108110 | """Test Service EHA client"""
# -*- coding: utf-8 -*-
import pytest
import allure
@pytest.fixture()
def maket3_test_5_con1(test_server_5_1, check_side_mea809, data_maket_mea809, ):
    """Build the EHA test-server maket for test_5 on server port 1.

    Delegates to the ``test_server_5_1`` fixture factory, passing the maket
    data, its first server port, and the test tag. ``check_side_mea809`` is
    requested only for its fixture side effects.
    """
    maket_data = data_maket_mea809
    port1 = maket_data["server_port1"]
    return test_server_5_1(maket_data, port1, "test_5")
@allure.step("Test connect_from EHA_port1")
def test_5_con1(maket3_test_5_con1):
    """Test Service connect client EHA to test server for port1.

    The fixture supplies a deferred; a callback asserts the connection
    status once it fires. The deferred is returned so the test runner
    waits for it to resolve.
    """
    d = maket3_test_5_con1

    def check_connect(status):
        # status is a (stat, desc) pair; stat is None on success.
        stat, desc = status
        with pytest.allure.step("Result test"):
            allure.attach("Show statistics data from test",
                          "\n{}\nInfo: {}".format(stat, desc))
            # Bug fix: compare with None by identity, not equality (PEP 8).
            assert stat is None, desc
        print("\ncheck_connect test status: {}\nInfo: {}".format(stat, desc))

    d.addCallback(check_connect)
    return d
| StarcoderdataPython |
3240627 | <reponame>braemt/attentive-multi-task-deep-reinforcement-learning<gh_stars>10-100
# Copyright 2017 the pycolab Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A scrolling maze to explore. Collect all of the coins!
Command-line usage: `scrolly_maze.py <level>`, where `<level>` is an optional
integer argument selecting Scrolly Maze levels 0, 1, or 2.
Keys: up, down, left, right - move. q - quit.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import sys
from gym_pycolab import ascii_art
from gym_pycolab import human_ui
from gym_pycolab.prefab_parts import drapes as prefab_drapes
from gym_pycolab.prefab_parts import sprites as prefab_sprites
# pylint: disable=line-too-long
MAZES_ART = [
    # Each maze in MAZES_ART must have exactly one of the patroller sprites
    # 'a', 'b', and 'c'. I guess if you really don't want them in your maze, you
    # can always put them down in an unreachable part of the map or something.
    #
    # Make sure that the Player will have no way to "escape" the maze.
    #
    # Legend:
    #     '#': impassable walls.            'a': patroller A.
    #     '@': collectable coins.           'b': patroller B.
    #     'P': player starting location.    'c': patroller C.
    #     ' ': boring old maze floor.       '+': initial board top-left corner.
    #
    # Don't forget to specify the initial board scrolling position with '+', and
    # take care that it won't cause the board to extend beyond the maze.
    # Remember also to update the MAZES_WHAT_LIES_BENEATH array whenever you add
    # a new maze.
    # NOTE(review): all rows of a maze appear to share one uniform width —
    # Scrolly.PatternInfo presumably requires a rectangular pattern; confirm
    # before editing the art.

    # Maze #0:
    ['#########################################################################################',
     '# # # # # # @ @ @ @ # @ @ @ #',
     '# # ##### ##### # # ##### # # ##### ############# # @ ######### #',
     '# @ # # # # # # # # # @ # @ @ @ #',
     '# ##### ##### ######### ################# ##### # # # #################',
     '# # # @ @ # # # # # # #',
     '# @ # # # @ ######### ##### # # # ######### ##### # ############# #',
     '# # # # @ # @ @ # # # # # # # # # #',
     '# # ############# ##### ######### # ##### ##### ##### # # #########',
     '# @ # @ @ @ # # # # @ # # # a # #',
     '# ##### ##### # @ # ##### # # ############# # ##################### #',
     '# # @ @ # # # # # # @ @ # # # @ @ @ @ # #',
     '# @ # ##### # @ # ##### ##### ######### # ##### ##### ######### #####',
     '# # # # @ # # # # @ @ # # # # @ #',
     '# # @ # # ######### ##### ######### ############################# ##### @ #',
     '# @ # # # # # # # # # # @ # #',
     '# # # # # # ################# # @ # ##### # ######### ##### # #',
     '# @ # # # # # # # # # # # @ # @ #',
     '######### ############# # ##### # # ##### # ######### # # ##### #',
     '# # # # # # # # @ # # # # @ # @ #',
     '# # ############# # ###+##### # # # ######### # # # # ##### @ #',
     '# # # # b # # # # # # # @ # #',
     '# ######### # ######### # # ##### # # ##### ##### # ##### # #',
     '# # # @ # # P # # # # # # @ # @ #',
     '# # # @ ##################################### # ##################### # # #',
     '# # # @ # @ # # # # # @ #',
     '# # ######### @ # # # # ################# ######### ######### #########',
     '# # # # @ # @ # # # # # # #',
     '# # ##### ############# ######### ##### ################# # # ##### #',
     '# # # # # # # # # # # #',
     '# ##### ############# ##### # ##### ##### ##### # ############# # #',
     '# # # # # # # # # # #',
     '##### # ######### ##### ######### ############# # ######### # #########',
     '# # # @ # # # # # # # #',
     '# ############# ##### # ##### # # ##### # ##### # # ##### # #',
     '# # @ # @ # # # # # # # # #',
     '##### # ######### ######### ######### ##################################### #',
     '# # # @ # @ # @ @ # # @ @ @ @ # @ # @ @ # #',
     '# ##### @ # ##### # ##### ############# ######### # # @ # # ##### #',
     '# # # @ @ # @ @ # # @ # @ # # @ # @ # #',
     '# # ##### ################# # # # ##### # # ################# #####',
     '# # # @ @ # @ # # # # @ # # # # #',
     '# ##### ######### # # # ##### ##### ######### # # ############# #',
     '# # @ # # # c #',
     '#########################################################################################'],

    # Maze #1
    ['##############################',
     '# #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# #',
     '######### a #########',
     '########## b ##########',
     '# #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# #',
     '+###### c #######',
     '# #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# @ @ @ @ @ @ #',
     '# P #',
     '##############################'],

    # Maze #2
    [' + ',
     ' ################################################################################### ',
     ' # @ @ @ @ @ @ @ @ @ @ P # ',
     ' # ########################################################################### # ',
     ' # @ # # # ',
     ' # # # # ',
     ' # @ # ###################################################### # ',
     ' # # # # ',
     ' # @ # # ###################################################### ',
     ' # # # # ',
     ' # @ # # # ',
     ' # # # ###################################################### ',
     ' # @ # # # ',
     ' # # ###################################################### # ',
     ' # @ # # # ',
     ' # # # # ',
     ' # @ # ############################## # ',
     ' # # ## # # ',
     ' # @ # # @@@@@ ######### # # ',
     ' # # # @@@@@@@@@@@ # # # # ',
     ' # @ ########### ##@@@@@@@@@@@@@@@@@## # # # ',
     ' # # @ @ @ # ##@@@@@@@@@@@@@@@@@@@## # # # ',
     ' # @ # a # ##@@@@@@@@@@@@@@@@@@@@@## # # # ',
     ' # # b # ##@@@@@@@@@@@@@@@@@@@@@@@## # # # ',
     ' # @ # c # ##@@@@@@@@@@@@@@@@@@@@@@@## # # # ',
     ' # ####### # ##@@@@@@@@@@@@@@@@@@@@@## # # # ',
     ' # @ @ @ # ##@@@@@@@@@@@@@@@@@@@## # # ',
     ' ############### ##################### ######### ',
     ' '],
]
# pylint: enable=line-too-long
MAZES_WHAT_LIES_BENEATH = [
    # What lies below '+' characters in MAZES_ART?
    # Unlike the what_lies_beneath argument to ascii_art_to_game, only single
    # characters are supported here for the time being.
    # Index i of this list corresponds to MAZES_ART[i]; keep them in sync.
    '#', # Maze #0
    '#', # Maze #1
    ' ', # Maze #2
]
# The finite game board itself: a starfield backdrop that the scrolling maze
# pattern is drawn over.  Its dimensions fix the visible window size.
STAR_ART = [' . . . ',
            ' . . . ',
            ' . . .',
            ' . . . . ',
            '. . . . .',
            ' . . .',
            ' . . ',
            ' . . . ',
            ' . . . ',
            ' . . . . ']
# These colours are only for humans to see in the CursesUi.
# NOTE(review): channel values look like the 0-999 scale curses' init_color
# expects (not 0-255) — confirm against the human_ui implementation.
COLOUR_FG = {' ': (0, 0, 0), # Inky blackness of SPAAAACE
             '.': (949, 929, 999), # These stars are full of lithium
             '@': (999, 862, 110), # Shimmering golden coins
             '#': (764, 0, 999), # Walls of the SPACE MAZE
             'P': (0, 999, 999), # This is you, the player
             'a': (999, 0, 780), # Patroller A
             'b': (145, 987, 341), # Patroller B
             'c': (987, 623, 145)} # Patroller C
COLOUR_BG = {'.': (0, 0, 0), # Around the stars, inky blackness etc.
             '@': (0, 0, 0)}
def make_game(level):
  """Builds and returns a Scrolly Maze game for the selected level."""
  # A helper object that derives the Scrolly kwargs and virtual positions
  # for the chosen maze pattern, relative to the STAR_ART game board.
  pattern_info = prefab_drapes.Scrolly.PatternInfo(
      MAZES_ART[level], STAR_ART,
      board_northwest_corner_mark='+',
      what_lies_beneath=MAZES_WHAT_LIES_BENEATH[level])

  # One player sprite plus one sprite per patroller character.
  sprites = {
      'P': ascii_art.Partial(PlayerSprite, pattern_info.virtual_position('P'))}
  for patroller in 'abc':
    sprites[patroller] = ascii_art.Partial(
        PatrollerSprite, pattern_info.virtual_position(patroller))

  # Scrolling drapes for the maze walls and the collectable coins.
  drapes = {
      '#': ascii_art.Partial(MazeDrape, **pattern_info.kwargs('#')),
      '@': ascii_art.Partial(CashDrape, **pattern_info.kwargs('@'))}

  # The base Backdrop class will do for a backdrop that just sits there.
  # In accordance with best practices, the one egocentric MazeWalker (the
  # player) is in a separate and later update group from all of the
  # pycolab entities that control non-traversable characters.
  return ascii_art.ascii_art_to_game(
      STAR_ART, what_lies_beneath=' ',
      sprites=sprites,
      drapes=drapes,
      update_schedule=[['#'], ['a', 'b', 'c', 'P'], ['@']],
      z_order='abc@#P')
class PlayerSprite(prefab_sprites.MazeWalker):
  """A `Sprite` for our player, the maze explorer.

  This egocentric `Sprite` only needs to map agent actions onto the
  `MazeWalker` motion helper methods, which already keep the player from
  walking on top of obstacles.
  """

  def __init__(self, corner, position, character, virtual_position):
    """Constructor: player is egocentric and can't walk through walls."""
    super(PlayerSprite, self).__init__(
        corner, position, character, egocentric_scroller=True, impassable='#')
    self._teleport(virtual_position)

  def update(self, actions, board, layers, backdrop, things, the_plot):
    del backdrop, things, layers  # Unused.
    # Dispatch each movement action to the matching MazeWalker helper.
    # Unrecognised actions (e.g. quit, or None) are simply ignored here.
    motions = {0: self._north,   # go upward?
               1: self._south,   # go downward?
               2: self._west,    # go leftward?
               3: self._east,    # go rightward?
               4: self._stay}    # do nothing?
    handler = motions.get(actions)
    if handler is not None:
      handler(board, the_plot)
class PatrollerSprite(prefab_sprites.MazeWalker):
  """Wanders back and forth horizontally, killing the player on contact."""

  def __init__(self, corner, position, character, virtual_position):
    """Constructor: changes virtual position to match the argument."""
    super(PatrollerSprite, self).__init__(corner, position, character, '#')
    self._teleport(virtual_position)
    # Initial heading is derived from the character code: odd-valued
    # characters start out moving east, even-valued ones west.
    self._moving_east = bool(ord(character) % 2)

  def update(self, actions, board, layers, backdrop, things, the_plot):
    del actions, layers, backdrop  # Unused.
    # Patrollers move at half speed: on every other game iteration they
    # just stand still.
    if the_plot.frame % 2:
      self._stay(board, the_plot)
      return

    # MazeWalker only guards against walls that are visible on the game
    # board, so in the general case we must consult the full wall pattern
    # ourselves before stepping. Reverse direction if a wall is ahead.
    row, col = things['#'].pattern_position_prescroll(
        self.virtual_position, the_plot)
    step = 1 if self._moving_east else -1
    if things['#'].whole_pattern[row, col + step]:
      self._moving_east = not self._moving_east

    # Make our move. If we're now in the same cell as the player, it's
    # instant game over!
    (self._east if self._moving_east else self._west)(board, the_plot)
    if self.virtual_position == things['P'].virtual_position:
      the_plot.terminate_episode()
class MazeDrape(prefab_drapes.Scrolly):
  """A scrolling `Drape` handling the maze scenery.

  This `Drape` only needs to map agent actions onto the `Scrolly` motion
  helper methods. Our job as programmers is to make certain that the actions
  we use have the same meaning between all `Sprite`s and `Drape`s in the
  same scrolling group (see `protocols/scrolling.py`).
  """

  def update(self, actions, board, layers, backdrop, things, the_plot):
    del backdrop, things, layers  # Unused.
    # Scroll the wall pattern in lockstep with the player's movement;
    # unrecognised actions are ignored.
    scrolls = {0: self._north,   # is the player going upward?
               1: self._south,   # is the player going downward?
               2: self._west,    # is the player going leftward?
               3: self._east,    # is the player going rightward?
               4: self._stay}    # is the player doing nothing?
    handler = scrolls.get(actions)
    if handler is not None:
      handler(the_plot)
class CashDrape(prefab_drapes.Scrolly):
  """A scrolling `Drape` handling all of the coins.

  This `Drape` maps agent actions onto `Scrolly` motion helpers, taking care
  to give each action the same meaning as the other scrolling entities. A
  little extra logic removes a coin from the scrolling pattern when the
  player touches it, credits reward, and handles game termination.
  """

  def update(self, actions, board, layers, backdrop, things, the_plot):
    # If the player has reached a coin, credit one reward and remove the coin
    # from the scrolling pattern. If the player has obtained all coins, quit!
    coin_cell = self.pattern_position_prescroll(
        things['P'].position, the_plot)
    if self.whole_pattern[coin_cell]:
      the_plot.log('Coin collected at {}!'.format(coin_cell))
      the_plot.add_reward(100)
      self.whole_pattern[coin_cell] = False
      if not self.whole_pattern.any():
        the_plot.terminate_episode()

    if actions == 5:  # does the player want to quit?
      the_plot.terminate_episode()
    else:
      # Otherwise scroll the coin pattern in lockstep with the player.
      scrolls = {0: self._north,   # is the player going upward?
                 1: self._south,   # is the player going downward?
                 2: self._west,    # is the player going leftward?
                 3: self._east,    # is the player going rightward?
                 4: self._stay}    # is the player doing nothing?
      handler = scrolls.get(actions)
      if handler is not None:
        handler(the_plot)
def main(argv=()):
  """Build a Scrolly Maze and hand it to a curses UI for a human to play."""
  # Level defaults to 0 when no command-line argument is given.
  level = int(argv[1]) if len(argv) > 1 else 0
  game = make_game(level)

  # Map keyboard input onto the action codes shared by all game entities.
  keymap = {curses.KEY_UP: 0, curses.KEY_DOWN: 1,
            curses.KEY_LEFT: 2, curses.KEY_RIGHT: 3,
            -1: 4,
            'q': 5, 'Q': 5}
  ui = human_ui.CursesUi(keys_to_actions=keymap, delay=100,
                         colour_fg=COLOUR_FG, colour_bg=COLOUR_BG)

  # Let the game begin!
  ui.play(game)


if __name__ == '__main__':
  main(sys.argv)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.