id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
71775 | <reponame>fsadannn/nauta-cli
# -*- coding: utf-8 -*-
# cx_Freeze build script producing two frozen executables:
#   nauta_gui.exe (windowed on Windows) and nauta_cli.exe (console).
import sys
from cx_Freeze import setup, Executable

# On Windows, 'Win32GUI' suppresses the console window for the GUI binary.
base = None
if sys.platform == 'win32':
    base = 'Win32GUI'
# The CLI binary keeps an attached console on Windows.
base2 = None
if sys.platform == 'win32':
    base2 = 'Console'

options = {
    'build_exe': {
        'includes': ['atexit', 'nauta'],
        # NOTE(review): cx_Freeze package names are case-sensitive; 'pyqt5.*'
        # may need to be 'PyQt5.*' — confirm against the build environment.
        'packages': ['parsel', 'lxml', 'requests', 'pyqt5.QtWidgets', 'pyqt5.QtCore', 'termcolor',
                     'threading', 'qtawesome', 'six', 'pipes', 'w3lib', 'cssselect', 'tinydb',
                     'ctypes', 'colorama'],
        'include_msvcr': True,  # bundle the MSVC runtime DLLs
        'optimize': 2,          # bytecode optimization level (like -OO)
        'excludes': ['tkinter']
    }
}
executables = [
    Executable('nauta_gui.py', base=base,
               targetName='nauta_gui.exe',),
    Executable('main.py', base=base2,
               targetName='nauta_cli.exe',)
]
setup(name='Nauta',
      version='2.0.0',
      description='gui and cli for control nauta connection and accounts',
      options=options,
      executables=executables
      )
| StarcoderdataPython |
6416425 | import pandas as pd
import anndata as ad
from bin.interpreting_sc.analyse_features import get_cell_weights
import scanpy as sc
from kme.tools.config import load_config
from bin.interpreting_sc.analyse_features import example_based, load_gene_names
def make_andata(config_path, markers_path):
    """Build an AnnData from per-cell weights and show a marker matrix plot.

    Cluster labels are converted to categorical strings, renamed to the
    human-readable cell-type names below, and used to group the plot rows.
    """
    df, labels, markers = get_cell_weights(
        config_path, markers_path, test=True)
    # labels.int().tolist(): assumes `labels` is a torch tensor — TODO confirm.
    labels_df = pd.Series([str(label) for label in labels.int().tolist()])
    labels_df = labels_df.astype('category')
    obs_meta = pd.DataFrame({
        'n_genes': df.shape[1],
        'labels': labels_df
    })
    adata = ad.AnnData(df, obs=obs_meta)
    # Map the numeric categories onto cluster names (order must match the
    # category codes produced above).
    label_names = ['CD8+', 'Megakaryocyte', 'CD4+',
                   'Naive CD4+, Myeloid', 'Naive CD8+', 'B-cell', 'NK, Regulatory, Act. CD8+']
    adata.rename_categories('labels', label_names)
    # standard_scale='var' normalizes each gene column to [0, 1] for display.
    mp = sc.pl.MatrixPlot(adata, markers, groupby='labels', categories_order=['B-cell', 'Megakaryocyte', 'CD4+', 'Naive CD4+, Myeloid', 'CD8+', 'Naive CD8+', 'NK, Regulatory, Act. CD8+'],
                          standard_scale='var', cmap='Reds')
    mp.show()
def plot_heatmap(config_path, train_loader, group_ind):
    """Plot a gene-expression heatmap for the medoid cells of one cluster group.

    Parameters
    ----------
    config_path : str
        Path to the experiment config consumed by ``load_config``.
    train_loader : DataLoader-like
        Loader whose dataset rows hold a per-cell expression vector with the
        label appended as the last element (``sample[:-1]`` is expression).
    group_ind : int
        Index of the medoid group ('Group<ind>') to visualize.
    """
    # This might take a while to run
    medoid_data = example_based(config_path, no_clusters=5, test=False)
    cells = medoid_data['Group' + str(group_ind)]
    config = load_config(config_path)
    path_picked_genes = config["dataset_params"]["dataset_args"]["path_picked_genes"]
    all_gene_names = load_gene_names(path_picked_genes)
    # Keep only the "important" genes: known markers plus high-variance genes.
    markers = ['CD3D', 'CD8A', 'CD8B', 'CCR10', 'TNFRSF18',
               'CD4', 'ID3', 'CD79A', 'PF4', 'NKG7',
               'S100A8', 'S100A9']
    high_var_genes = ['MALAT1', 'B2M', 'FTL', 'GNLY', 'CD74', 'ACTB', 'CCL5']
    markers = markers + high_var_genes
    markers_id = [all_gene_names.index(marker) for marker in markers]
    cells_small = []
    for cell in cells:
        cells_small.append([cell.squeeze().tolist()[i] for i in markers_id])
    # For each gene subtract the minimum and divide by its maximum value
    # observed on the training dataset: find vmin/vmax per gene.
    matrix = pd.DataFrame(0, index=range(
        len(train_loader.dataset)), columns=all_gene_names)
    for i, sample in enumerate(train_loader.dataset):
        matrix.loc[i, all_gene_names] = sample[:-1].tolist()
    vmax = matrix.max().iloc[markers_id]
    vmin = matrix.min().iloc[markers_id]
    df = pd.DataFrame(cells_small, columns=markers)
    # Bug fix: the original computed df.sub(vmin) and then discarded it by
    # dividing the raw df; chain the operations so the shift is kept.
    df_norm = df.sub(vmin).div(vmax)
    labels_df = pd.Series([str(label) for label in range(5)])
    labels_df = labels_df.astype('category')
    obs_meta = pd.DataFrame({
        'n_genes': df.shape[1],
        'labels': labels_df
    })
    adata = ad.AnnData(df_norm, obs=obs_meta)
    adata.var_names_make_unique()
    mp = sc.pl.MatrixPlot(adata, markers, groupby='labels', cmap='Reds')
    mp.show()
| StarcoderdataPython |
11203873 | # ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import os
import hashlib
import urllib.request
from tqdm import tqdm
from jittor_utils import lock
import gzip
import tarfile
import zipfile
def ensure_dir(dir_path):
    """Create *dir_path* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` so concurrent callers cannot race between the
    existence check and the creation; the original check-then-create could
    raise if another process created the directory in between. A regular
    file at *dir_path* still raises, exactly as before.
    """
    os.makedirs(dir_path, exist_ok=True)
def _progress():
    """Return a ``reporthook`` callable for ``urllib.request.urlretrieve``.

    The hook drives a single tqdm progress bar whose total is filled in
    lazily once the remote file size becomes known.
    """
    bar = tqdm(total=None)

    def _update(block_num, block_size, total_size):
        # urlretrieve reporthook signature:
        #   block_num:  number of blocks transferred so far
        #   block_size: size of one block in bytes
        #   total_size: total size of the remote file (falsy when unknown)
        if bar.total is None and total_size:
            bar.total = total_size
        bar.update(block_num * block_size - bar.n)

    return _update
@lock.lock_scope()
def download_url_to_local(url, filename, root_folder, md5):
    """Download *url* into *root_folder*/*filename*.

    Skips the download when a file with the expected md5 already exists;
    raises RuntimeError when the freshly downloaded file fails the check.
    """
    ensure_dir(root_folder)
    file_path = os.path.join(root_folder, filename)
    if check_file_exist(file_path, md5):
        return
    print('Downloading ' + url + ' to ' + file_path)
    urllib.request.urlretrieve(url, file_path, reporthook=_progress())
    if not check_file_exist(file_path, md5):
        raise RuntimeError("File downloads failed.")
def check_file_exist(file_path, md5):
    """Return True when *file_path* exists and (if *md5* is given) matches it."""
    if not os.path.isfile(file_path):
        return False
    return True if md5 is None else check_md5(file_path, md5)
def calculate_md5(file_path, chunk_size=1024 * 1024):
    """Compute the md5 hex digest of *file_path*, reading in *chunk_size* chunks."""
    digest = hashlib.md5()
    with open(file_path, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def check_md5(file_path, md5, **kwargs):
    """Return True if *file_path*'s md5 digest equals *md5*.

    Extra keyword arguments (e.g. ``chunk_size``) are forwarded to
    ``calculate_md5``.
    """
    return md5 == calculate_md5(file_path, **kwargs)
def check_integrity(fpath, md5=None):
    """Return True if *fpath* exists and matches *md5* (always True when
    *md5* is None).

    NOTE(review): duplicates ``check_file_exist`` except for the default
    ``md5=None``; consider consolidating the two helpers.
    """
    if not os.path.isfile(fpath):
        return False
    if md5 is None:
        return True
    return check_md5(fpath, md5)
def _is_tarxz(filename):
return filename.endswith(".tar.xz")
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz")
def _is_tgz(filename):
return filename.endswith(".tgz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path) or _is_tgz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_tarxz(from_path):
# .tar.xz archive only supported in Python 3.x
with tarfile.open(from_path, 'r:xz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
                                 md5=None, remove_finished=False):
    """Fetch *url* into *download_root* (verifying *md5* when given) and
    unpack the archive into *extract_root* (defaults to the download dir)."""
    download_root = os.path.expanduser(download_root)
    extract_root = download_root if extract_root is None else extract_root
    filename = filename or os.path.basename(url)

    download_url_to_local(url, filename, download_root, md5)

    archive = os.path.join(download_root, filename)
    print("Extracting {} to {}".format(archive, extract_root))
    extract_archive(archive, extract_root, remove_finished)
| StarcoderdataPython |
9784802 | from flask_restplus import Namespace, fields
class AuthorDto:
    """Flask-RESTPlus DTO container for the author resource."""
    # Namespace grouping all /author endpoints under one Swagger section.
    api = Namespace('author', description='Manage authors')
    # Marshalling model used to (de)serialize author payloads.
    author = api.model('author', {
        'id': fields.String(required=True, description='author id'),
        'name': fields.String(required=True, description='author name')
    })
| StarcoderdataPython |
8125287 | from fixture.session import SessionHelper
from steps.homepage_steps import HomepageActions
from steps.catalog_steps import CatalogActions
from steps.results_steps import ResultsPageActions
import webium.settings
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
import webium.settings
import logging
LOGGER = logging.getLogger(__name__)
class Application:
    """Top-level UI-test fixture: owns the WebDriver and page-action helpers."""

    def __init__(self, browser, base_url, config):
        # Set browser
        if browser == "firefox":
            self.driver = webdriver.Firefox()
        elif browser == "chrome":
            # Headless Chrome tuned for CI containers (no sandbox, no /dev/shm).
            chrome_options = webdriver.ChromeOptions()
            chrome_options.add_argument('--headless')
            chrome_options.add_argument("--disable-gpu")
            chrome_options.add_argument("--disable-application-cache")
            chrome_options.add_argument("--disable-dev-shm-usage")
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--window-size=1420,1080')
            self.driver = webdriver.Chrome("/usr/local/bin/chromedriver", options=chrome_options)
        elif browser == "ie":
            self.driver = webdriver.Ie()
        else:
            raise ValueError("Unrecognized browser %s" % browser)
        # Sets a sticky timeout to implicitly wait for an element to be found
        self.driver.implicitly_wait(60)
        webium.settings.wait_timeout = 30
        # Invokes the window manager-specific 'full screen' operation
        LOGGER.info("Expand browser to full screen")
        self.driver.maximize_window()
        # Delete all cookies in the scope of the session
        self.driver.delete_all_cookies()
        # Initialize page-object / step helpers
        LOGGER.info("Started execution test")
        self.session = SessionHelper(self)
        self.homepage = HomepageActions(self)
        self.searching = CatalogActions(self)
        self.results = ResultsPageActions(self)
        # NOTE(review): the `base_url` and `config` parameters are ignored and
        # the URL is hard-coded here — confirm whether the parameter should win.
        self.base_url = "https://www.shop.by/"

    def open_home_page(self):
        """Navigate the browser to the configured base URL."""
        LOGGER.info("Open url '%s'", self.base_url)
        driver = self.driver
        driver.get(self.base_url)

    # Stop the browser
    def destroy(self):
        """Quit the driver, closing every associated window."""
        LOGGER.info("Quits the driver and closes every associated window.")
        self.driver.quit()

    def is_valid(self):
        """Return True while the driver session is still alive."""
        try:
            self.current_url()
            LOGGER.info("Browser is valid")
            return True
        except WebDriverException:
            return False

    def current_url(self):
        """Return the URL currently loaded in the browser."""
        return self.driver.current_url
| StarcoderdataPython |
9643355 | #
# @lc app=leetcode.cn id=19 lang=python3
#
# [19] 删除链表的倒数第N个节点
#
# https://leetcode-cn.com/problems/remove-nth-node-from-end-of-list/description/
#
# algorithms
# Medium (32.36%)
# Total Accepted: 35.9K
# Total Submissions: 109.2K
# Testcase Example: '[1,2,3,4,5]\n2'
#
# 给定一个链表,删除链表的倒数第 n 个节点,并且返回链表的头结点。
#
# 示例:
#
# 给定一个链表: 1->2->3->4->5, 和 n = 2.
#
# 当删除了倒数第二个节点后,链表变为 1->2->3->5.
#
#
# 说明:
#
# 给定的 n 保证是有效的。
#
# 进阶:
#
# 你能尝试使用一趟扫描实现吗?
#
#
# Definition for singly-linked list.
class ListNode:
    """A singly-linked list node."""

    def __init__(self, x):
        self.val = x      # payload
        self.next = None  # next node, or None at the tail


class Solution:
    """LeetCode 19: remove the n-th node from the end of a linked list."""

    def get_length(self, head: ListNode):
        """Return the number of links in the list, i.e. len(list) - 1.

        Kept with its original (off-by-one) semantics for backward
        compatibility: an n-node list yields n - 1.
        """
        count = 0
        node = head
        while node.next is not None:
            count += 1
            node = node.next
        return count

    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
        """Remove the n-th node from the end; return the (possibly new) head.

        Bug fix: the original special-cased ``length <= 2`` by always
        dropping the head, which removed the wrong node for a two-element
        list with n == 1 ([1, 2], n=1 returned [2] instead of [1]).
        """
        length = self.get_length(head) + 1  # actual node count
        if length == n:
            # The head itself is the n-th from the end.
            return head.next
        # Walk to the predecessor of the node to delete.
        prev = head
        for _ in range(length - n - 1):
            prev = prev.next
        prev.next = prev.next.next
        return head
| StarcoderdataPython |
300377 | <gh_stars>1-10
from __future__ import division
import os.path
from .listdataset import ListDataset
import numpy as np
import random
import flow_transforms
import pdb
import glob
from tqdm import tqdm
from tqdm import trange
try:
import cv2
except ImportError as e:
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("default", category=ImportWarning)
warnings.warn("failed to load openCV, which is needed"
"for KITTI which uses 16bit PNG images", ImportWarning)
def index_items(sample_list, train_index, val_index):
    """Select two sublists of *sample_list* by the given index lists."""
    pick = sample_list.__getitem__
    return list(map(pick, train_index)), list(map(pick, val_index))

def make_dataset(path):
    """Discover BraTS-style nii files under *path* and build train/val dicts.

    Ground-truth segmentations are found via ``<path>*GG/seg/*nii.gz``; the
    matching t1/t1ce/t2/flair paths are derived by string substitution.  The
    cases are shuffled and split 85/15 inside a 75% subset (the remaining
    quarter is held out for final training elsewhere).
    """
    # we train and val seperately to tune the hyper-param and use all the data for the final training
    gt_nii_img = glob.glob(path + '*GG/seg/*nii.gz')

    def _derive(modality):
        # seg path -> sibling modality path:
        #   .../seg/x_seg.nii.gz -> .../<modality>/x_<modality>.nii.gz
        return [item.replace('/seg/', '/' + modality + '/')
                    .replace('_seg.', '_' + modality + '.')
                for item in gt_nii_img]

    t1_nii_img = _derive('t1')
    t1ce_nii_img = _derive('t1ce')
    t2_nii_img = _derive('t2')
    flair_nii_img = _derive('flair')

    total_index = list(range(len(gt_nii_img)))
    random.shuffle(total_index)
    train_num = int(len(total_index) * 0.75 * 0.85)
    val_num = int(len(total_index) * 0.75 * 0.15)
    train_index = total_index[:train_num]
    val_index = total_index[train_num:train_num + val_num]

    train_dict = {}
    val_dict = {}
    for key, files in (('t1', t1_nii_img), ('t1ce', t1ce_nii_img),
                       ('t2', t2_nii_img), ('flair', flair_nii_img),
                       ('gt', gt_nii_img)):
        train_dict[key], val_dict[key] = index_items(files, train_index, val_index)
    return train_dict, val_dict
def BSD_loader(data_dict):
    """Load the nii volumes listed in *data_dict* into stacked 2-D slices.

    Keeps only the axial slices whose segmentation contains labelled voxels,
    stacks the four modalities (t1, t1ce, t2, flair) as a trailing channel
    axis, and overlays label 3 ("normal brain") on non-tumour brain voxels.

    NOTE(review): this function calls ``nib.load`` but nibabel is never
    imported in this file — add ``import nibabel as nib`` or every call
    raises NameError.
    """
    # cv2.imread is faster than io.imread usually
    nonzero_index = []

    def pick_slice(seg_im):
        # Return indices of axial slices containing at least one labelled voxel.
        h, w, slice_num = seg_im.shape
        seg_im = np.reshape(seg_im, (-1, slice_num))
        sum_value = np.sum(seg_im, axis=0)
        nonzero_index = np.where(sum_value > 0)
        return nonzero_index[0]

    seg_list = data_dict['gt']
    t1_list = data_dict['t1']
    t1ce_list = data_dict['t1ce']
    t2_list = data_dict['t2']
    flair_list = data_dict['flair']
    nii_num = len(seg_list)
    im_list = []
    gt_list = []
    for i in trange(nii_num):
        seg_im = nib.load(seg_list[i]).get_fdata()
        nonzero_index = pick_slice(seg_im)
        seg_slice = seg_im[:, :, nonzero_index]
        # (h, w, s) -> (s, h, w): one row per kept slice.
        seg_slice = np.transpose(seg_slice, (2, 0, 1)).astype(np.uint8)
        t1_im = nib.load(t1_list[i]).get_fdata()
        t1_slice = t1_im[:, :, nonzero_index]
        t1_slice = np.transpose(t1_slice, (2, 0, 1))
        #t1_slice = (t1_slice / np.max(t1_slice) * 255).astype(np.uint8)
        t1ce_im = nib.load(t1ce_list[i]).get_fdata()
        t1ce_slice = t1ce_im[:, :, nonzero_index]
        t1ce_slice = np.transpose(t1ce_slice, (2, 0, 1))
        #t1ce_slice = (t1ce_slice / np.max(t1ce_slice) * 255).astype(np.uint8)
        t2_im = nib.load(t2_list[i]).get_fdata()
        t2_slice = t2_im[:, :, nonzero_index]
        t2_slice = np.transpose(t2_slice, (2, 0, 1))
        #t2_slice = (t2_slice / np.max(t2_slice) * 255).astype(np.uint8)
        flair_im = nib.load(flair_list[i]).get_fdata()
        flair_slice = flair_im[:, :, nonzero_index]
        flair_slice = np.transpose(flair_slice, (2, 0, 1))
        #flair_slice = (flair_slice / np.max(flair_slice) * 255).astype(np.uint8)
        # Stack modalities along a new channel axis, then scale the whole
        # volume jointly to uint8 (preserves relative modality intensities).
        img_data = np.concatenate([t1_slice[:, :, :, np.newaxis], t1ce_slice[:, :, :, np.newaxis], t2_slice[:, :, :, np.newaxis], flair_slice[:, :, :, np.newaxis]], axis=-1)
        img_data = (img_data / np.max(img_data) * 255).astype(np.uint8)
        im_list.append(img_data)
        gt_list.append(seg_slice[:, :, :, np.newaxis])
    im_data = np.concatenate(im_list, axis=0)
    gt_data = np.concatenate(gt_list, axis=0)
    # Label non-tumour brain voxels (t1 intensity > 0, no tumour label) as 3.
    brain = (im_data[:, :, :, :1] > 0) * 3
    mask = 1 - (gt_data > 0) * 1
    normal_brain = brain * mask
    gt_data = normal_brain.astype(np.uint8) + gt_data.astype(np.uint8)
    return im_data, gt_data
    #return np.transpose(im_data, (0,3,1,2)), np.transpose(gt_data, (0,3,1,2))
def BSD500(root, transform=None, target_transform=None, val_transform=None,
           co_transform=None, split=None):
    """Build (train, val) ListDatasets from the data tree under *root*.

    *val_transform* falls back to *transform* when not given; validation
    samples are additionally center-cropped to 240x240.  *split* is
    currently unused.
    """
    train_dict, val_dict = make_dataset(root)
    if val_transform ==None:
        val_transform = transform
    train_dataset = ListDataset(root, 'bsd500', train_dict, transform,
                                target_transform, co_transform,
                                loader=BSD_loader, datatype='train')
    val_dataset = ListDataset(root, 'bsd500', val_dict, val_transform,
                              target_transform, flow_transforms.CenterCrop((240, 240)),
                              loader=BSD_loader, datatype='val')
    return train_dataset, val_dataset
def mix_datasets(args):
    """Build (train, val) ListDatasets directly from *args* (mixed datasets)."""
    train_dataset = ListDataset(args, 'train')
    val_dataset = ListDataset(args, 'val')
    return train_dataset, val_dataset
| StarcoderdataPython |
9633697 | <filename>map_ops/walk.py<gh_stars>0
from typing import Any, Callable
__all__ = ["walk"]
def walk(
    d1: dict,
    d2: dict,
    initializer: Callable[[dict, dict], dict] = None,
    on_missing: Callable[[Any], Any] = None,
    on_match: Callable[[Any, Any], Any] = None,
    on_mismatch: Callable[[Any, Any], Any] = None,
    list_strategy: Callable[[Any, Any], Any] = None,
) -> dict:
    """Generalized pairwise traversal of two dicts.

    For every key of *d1*, one of the callbacks decides what lands in the
    result:

    * ``initializer(d1, d2)`` produces the starting result (default: *d1*
      itself, so by default the traversal writes into *d1*);
    * ``on_missing(v)`` handles keys absent from *d2* (default: keep ``v``;
      any falsy callback return is kept);
    * ``on_mismatch(v, w)`` handles same keys whose values differ in type
      (default: leave the result untouched);
    * nested dicts recurse, and the sub-result is stored only if truthy;
    * ``list_strategy(v, w)`` handles set/list/tuple values (default: keep
      ``v``; a ``None`` return leaves the result untouched but ``[]`` is kept);
    * ``on_match(v, w)`` handles the remaining same-type values (default:
      leave the result untouched).

    Any exception raised while processing a key is re-raised with that key
    prepended to ``e.args``.
    """
    result = d1 if not initializer else initializer(d1, d2)
    for key, value in d1.items():
        try:
            if key not in d2:
                # allow ANY falsy values from the callback
                result[key] = value if not on_missing else on_missing(value)
            elif type(value) != type(d2[key]):
                if on_mismatch:
                    result[key] = on_mismatch(value, d2[key])
            elif isinstance(value, dict):
                sub = walk(
                    d1=value,  # type: ignore
                    d2=d2[key],  # type: ignore
                    initializer=initializer,
                    on_missing=on_missing,
                    on_match=on_match,
                    on_mismatch=on_mismatch,
                    list_strategy=list_strategy,
                )
                if sub:
                    result[key] = sub
            elif isinstance(value, (set, list, tuple)):
                if not list_strategy:
                    result[key] = value
                else:
                    # allow ONLY the [] falsy value; None means "no change"
                    replacement = list_strategy(value, d2[key])
                    if replacement is not None:
                        result[key] = replacement
            elif on_match:
                result[key] = on_match(value, d2[key])
        except Exception as e:
            e.args = (key, e.args)
            raise e
    return result
| StarcoderdataPython |
1679109 | import sys
def to_utf8(value):
    """
    Converts value to string encoded into utf-8
    :param value:
    :return:
    """
    if sys.version_info[0] >= 3:
        # Python 3: str is already unicode; no byte-level round trip needed.
        return str(value)
    # Python 2 path: coerce to unicode, then encode as utf-8 bytes,
    # silently dropping undecodable/unencodable characters.
    if not isinstance(value, basestring):
        value = unicode(value)
    if type(value) == str:
        value = value.decode("utf-8", errors="ignore")
    return value.encode('utf-8', 'ignore')
| StarcoderdataPython |
4820947 | <gh_stars>1-10
"""
This is basically poached from
urbansim.scripts.cache_to_hdf5.py
But it makes it easier to load a few tables into a notebook without
having to copy/convert the entire cache into a h5 file.
"""
import glob
import os
import numpy as np
import pandas as pd
def opus_cache_to_df(dir_path):
    """
    Convert a directory of binary array data files to a Pandas DataFrame.

    The typical usage is to load in legacy opus cache data.  Each file in
    *dir_path* stores one column as a flat binary array; the extension
    encodes the dtype (e.g. ``.lf8`` -> float64, ``.iS16`` -> 16-byte
    strings).  Unrecognized files are reported and skipped.

    Parameters
    ----------
    dir_path : str
        Full path to the table directory.

    Returns:
    --------
    pandas.DataFrame
    """
    # Opus storage extension -> numpy dtype (replaces a long elif chain).
    ext_dtypes = {
        '.lf8': np.float64,
        '.lf4': np.float32,
        '.li2': np.int16,
        '.li4': np.int32,
        '.li8': np.int64,
        '.ib1': np.bool_,
    }
    table = {}
    for attrib in glob.glob(os.path.join(dir_path, '*')):
        attrib_name, attrib_ext = os.path.splitext(os.path.basename(attrib))
        if attrib_ext in ext_dtypes:
            table[attrib_name] = np.fromfile(attrib, ext_dtypes[attrib_ext])
        elif attrib_ext.startswith('.iS'):
            # Fixed-width byte strings: '.iS<width>' -> numpy dtype 'a<width>'.
            length_string = int(attrib_ext[3:])
            table[attrib_name] = np.fromfile(attrib, ('a' + str(length_string)))
        else:
            print('Array {} is not a recognized data type'.format(attrib))
    df = pd.DataFrame(table)
    return df
| StarcoderdataPython |
1717341 | <filename>src/maskers.py
#takes polygon coordinates and creates an image mask
import numpy as np
import mahotas
import Polygon, Polygon.IO, Polygon.Utils
from copy import deepcopy
def polymask(imgf, aoic, logger):  # aoic list like [[x1,y1,x2,y2,x3,y3...],[x1,y1,x2,y2,x3,y3...]] or [x1,y1,x2,y2,x3,y3...]
    """Build a binary polygon mask with the same shape as the image.

    *imgf* may be an image array, a file path, or a list of candidate file
    paths (the first readable RGB image wins).  *aoic* holds polygon vertex
    coordinates normalized to [0, 1]; the sentinel [0]*8 means "no mask"
    (everything unmasked).  Returns a uint8 HxWx3 array of 0/1, or False
    when no candidate file could be read.
    """
    logger.set('Producing polygonic image mask...')
    ql = []
    if isinstance(imgf, list):
        for i, imgfn in enumerate(imgf):
            try:
                img = mahotas.imread(imgfn)
                if len(img.shape) != 3:
                    # Deliberate NameError: a non-RGB image is rejected by
                    # jumping to the except branch below.
                    fail
                imgf = deepcopy(imgfn)
                break
            except:
                logger.set('Invalid image file: '+imgfn)
                if i == len(imgf)-1:
                    logger.set('Invalid image file(s). Polygonic image mask can not be created.')
                    return False
    if isinstance(imgf, str):
        img = mahotas.imread(imgf)
    else:
        img = deepcopy(imgf)
    if aoic != [0, 0, 0, 0, 0, 0, 0, 0]:
        # Normalize the polygon spec: dict of polygons -> list of polygons,
        # single flat polygon -> one-element list.
        if isinstance(aoic, dict):
            aoi = []
            for k in aoic:
                aoi.append(aoic[k])
            aoic = aoi
        if not isinstance(aoic[0], list):
            aoic = [aoic]
        logger.set('Number of polygons: ' + str(len(aoic)))
        for p in aoic:
            pl = []
            # Scale normalized (x, y) pairs to pixel coordinates.
            for i in range(0, len(p), 2):
                pl.append((round(p[i]*img.shape[1]), round(p[i+1]*img.shape[0])))
            ql.append(Polygon.Polygon(pl))
        # Rasterize: pixel-by-pixel point-in-polygon test (slow but simple).
        mask = np.zeros(img.shape, 'uint8')
        for i in range(mask.shape[0]):  # y axis
            for j in range(mask.shape[1]):  # x axis
                for q in ql:
                    if q.isInside(j, i):
                        mask[i][j] = [1, 1, 1]
    else:
        logger.set('No polygonic masking selected.')
        mask = np.ones(img.shape, 'uint8')
    logger.set('Number of unmasked pixels: ' + str(np.sum(mask.transpose(2, 0, 1)[0])))
    return mask
def thmask(img, th):
    """Per-pixel RGB threshold mask.

    A pixel is kept (1 in all three channels) when each channel lies inside
    its [lower, upper] band from *th* (indices 8..13: R, G, B bounds) and
    every channel lies inside the global band th[16]..th[17].  When *th*
    has 16 or fewer entries the global band [0.0, 255.0] is appended —
    note this mutates the caller's list, as the original did.
    Returns a uint8 array shaped like *img* with identical channels.
    """
    channels = img.transpose(2, 0, 1)
    r, g, b = channels[0], channels[1], channels[2]
    keep = ((r >= th[8]) & (r <= th[9]) &
            (g >= th[10]) & (g <= th[11]) &
            (b >= th[12]) & (b <= th[13]))
    if len(th) <= 16:
        th += [0.0, 255.0]
    keep = keep & ((r >= th[16]) & (g >= th[16]) & (b >= th[16]) &
                   (r <= th[17]) & (g <= th[17]) & (b <= th[17]))
    mask = np.zeros(channels.shape, 'uint8')
    mask[0] = keep
    mask[1] = keep
    mask[2] = keep
    return mask.transpose(1, 2, 0)
def exmask(img, th=255.0):  # burned pixels
    """Mask saturated ("burned") pixels: keep (1) only pixels whose three
    channels are all strictly below *th*.  Returns uint8, shaped like *img*,
    with the same plane repeated in every channel."""
    channels = img.transpose(2, 0, 1)
    under = (channels[0] < th) & (channels[1] < th) & (channels[2] < th)
    mask = np.zeros(channels.shape, 'uint8')
    # Broadcast the single keep-plane into all three channels at once.
    mask[:] = under
    return mask.transpose(1, 2, 0)
def scsmask(img, mask, logger, enabled=True):
    """Return (mask, threshold) excluding snow-detected pixels.

    When *enabled*, runs snow detection and zeroes the detected pixels in an
    all-ones mask; otherwise returns the all-ones mask with th == -1.

    NOTE(review): this imports ``salvatoriCoreSingle`` but then calls
    ``salvatoriSnowDetect``, which is never defined or imported — the
    enabled path raises NameError as written.  Confirm the intended
    function name against the ``snow`` module.
    """
    from snow import salvatoriCoreSingle
    maskout = np.ones(img.shape, 'uint8')
    if enabled:
        (mask2, th) = salvatoriSnowDetect(img, mask, logger, 0, 0, 1)
        # Invert the detection: True where NO snow was detected.
        mask2 = mask2 == 1
        mask2 = mask2 == False
        maskout = maskout.transpose(2, 0, 1)
        maskout[0] *= mask2
        maskout[1] *= mask2
        maskout[2] *= mask2
        maskout = maskout.transpose(1, 2, 0)
    else:
        th = -1
    return (maskout, th)
def invertMask(mask):
    """Logical inverse of a mask: zeros become 1, non-zeros become 0
    (dtype preserved)."""
    return np.where(mask == 0, 1, 0).astype(mask.dtype)
def findrefplate(*args):
    """Stub: reference-plate detection is not implemented; always False."""
    return False
| StarcoderdataPython |
1652377 | from Const import Const
class Token(Const):
    """
    A lexical token: a type code plus an optional attribute value.

    For simple syntax analysis only the type code matters; richer
    attributes (identifier name, constant value, data type) ride along in
    ``value`` and are used in later chapters.
    """

    def __init__(self, code, value):
        self.code = code    # token type code (one of the Const constants)
        self.value = value  # lexeme / attribute payload

    def __str__(self):
        """
        Render the token as "[type(value)]".

        The "(value)" part appears only for identifiers, numeric literals
        and unexpected tokens.  A newline token is rendered as the fixed
        string "[newline]" because a raw \n character might spoil the
        entire output format.
        """
        if self.code == Const.NEWLINE:
            return "[newline]"
        detail = ""
        if self.code in (Const.numericalLiteral, Const.ID, Const.UET):
            detail = "(" + self.value + ")"
        return "[" + self.code + detail + "]"
| StarcoderdataPython |
3348193 | <reponame>dmartinpro/microhomie
import settings
from homie.constants import FALSE, TRUE, BOOLEAN
from homie.device import HomieDevice
from homie.node import HomieNode
from homie.property import HomieNodeProperty
from machine import Pin
# Reversed values for the esp8266 board's onboard LED.  The mapping serves
# both directions: Homie payload ("true"/"false") -> GPIO level, and GPIO
# level -> Homie payload.  Values are inverted because the onboard LED is
# wired active-low.
ONOFF = {FALSE: 1, TRUE: 0, 1: FALSE, 0: TRUE}


class LED(HomieNode):
    """Homie node exposing the board's onboard LED as a settable boolean."""

    def __init__(self, name="Onboard LED", pin=2):
        super().__init__(id="led", name=name, type="LED")
        self.pin = pin
        # Drive the pin low initially (lights the LED on active-low boards).
        self.led = Pin(pin, Pin.OUT, value=0)
        self.power_property = HomieNodeProperty(
            id="power",
            name="LED Power",
            settable=True,
            datatype=BOOLEAN,
            default=TRUE,
        )
        self.add_property(self.power_property, self.on_power_msg)

    def on_power_msg(self, topic, payload, retained):
        """MQTT handler: set the GPIO from the payload, then publish the
        actual pin state back so the property mirrors reality."""
        self.led(ONOFF[payload])
        self.power_property.data = ONOFF[self.led()]
def main():
    """Configure the Homie device, attach the LED node, and loop forever."""
    # Homie device setup
    homie = HomieDevice(settings)
    # Add LED node to device
    homie.add_node(LED())
    # run forever
    homie.run_forever()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
11358248 | from datetime import date
import re
from invoke import task
from emmet import __version__
@task
def setver(c):
    """Bump ``emmet.__version__`` (and setup.py) to today's CalVer string.

    Calendar versioning (https://calver.org/), with a patch segment for
    release of multiple versions in a day (should be rare).
    """
    new_ver = date.today().isoformat().replace("-", ".")
    if __version__.startswith(new_ver):
        if __version__ == new_ver:
            # Second release today: start the patch segment at 1.
            new_ver += ".1"
        else:
            # Already has a patch segment: bump it.
            # Bug fix: split the *current* version (which has four parts),
            # not the three-part date string — the original unpacking of
            # new_ver.split('.') into four names raised ValueError.
            year, month, day, patch = __version__.split('.')
            patch = str(int(patch) + 1)
            # Bug fix: str.join takes a single iterable, not four arguments.
            new_ver = ".".join((year, month, day, patch))
    with open("emmet/__init__.py", "r") as f:
        lines = [re.sub('__version__ = .+',
                        '__version__ = "{}"'.format(new_ver),
                        l.rstrip()) for l in f]
    with open("emmet/__init__.py", "w") as f:
        f.write("\n".join(lines))
    with open("setup.py", "r") as f:
        lines = [re.sub('version=([^,]+),',
                        'version="{}",'.format(new_ver),
                        l.rstrip()) for l in f]
    with open("setup.py", "w") as f:
        f.write("\n".join(lines))
    print("Bumped version to {}".format(new_ver))
@task
def publish(c):
    """Build sdist + wheel and upload the artifacts to PyPI via twine."""
    # warn=True: the rm may fail harmlessly when dist/ is already empty.
    c.run("rm dist/*.*", warn=True)
    c.run("python setup.py sdist bdist_wheel")
    c.run("twine upload dist/*")
| StarcoderdataPython |
def get_checkout_inlines():
    """Lazy import: admin inlines contributed by this app (avoids import cycles)."""
    from .admin import DiscountInline
    return [DiscountInline]


def calculate_discounts(*args, **kwargs):
    """Lazy proxy for ``util.calculate_discounts``."""
    from .util import calculate_discounts
    return calculate_discounts(*args, **kwargs)


def save_discounts(*args, **kwargs):
    """Lazy proxy for ``util.save_discounts``."""
    from .util import save_discounts
    return save_discounts(*args, **kwargs)


def get_context(request):
    """Lazy proxy for ``util.vouchers_context`` (template context hook)."""
    from .util import vouchers_context
    return vouchers_context(request)
5019336 | <filename>api/jobs/batch.py
"""
Batch
"""
import bson
import copy
import datetime
from .. import config
from ..dao.containerstorage import AcquisitionStorage, AnalysisStorage
from .jobs import Job
from .queue import Queue
from ..web.errors import APINotFoundException, APIStorageException
from . import gears
log = config.log

# Maps a requested (target) batch state to the state the batch must
# currently be in for that transition to be legal.
BATCH_JOB_TRANSITIONS = {
    # To <------- #From
    'failed': 'running',
    'complete': 'running',
    'running': 'pending',
    'cancelled': 'running'
}
def get_all(query, projection=None):
    """
    Fetch batch objects from the database.

    Returns the raw pymongo cursor for *query* with optional *projection*.
    """
    return config.db.batch.find(query, projection)
def get(batch_id, projection=None, get_jobs=False):
    """
    Fetch a batch object by id; optionally inflate its job ids into Job objects.

    Raises APINotFoundException when no batch with *batch_id* exists.
    """
    if isinstance(batch_id, str):
        batch_id = bson.ObjectId(batch_id)
    batch_job = config.db.batch.find_one({'_id': batch_id}, projection)

    if batch_job is None:
        raise APINotFoundException('Batch job {} not found.'.format(batch_id))

    if get_jobs:
        # Replace the list of stored job ids with fully loaded Job objects.
        jobs = []
        for jid in batch_job.get('jobs', []):
            job = Job.get(jid)
            jobs.append(job)
        batch_job['jobs'] = jobs

    return batch_job
def find_matching_conts(gear, containers, container_type):
    """
    Give a gear and a list of containers, find files that:
      - have no solution to the gear's input schema (not matched)
      - have multiple solutions to the gear's input schema (ambiguous)
      - match the gear's input schema 1 to 1 (matched)

    Containers are placed in one of the three categories in order.
    A container with 2 possible files for one input and none for the other
    will be marked as 'not matched', not ambiguous.
    """
    matched_conts = []
    not_matched_conts = []
    ambiguous_conts = []

    for c in containers:
        files = c.get('files')
        if files:
            suggestions = gears.suggest_for_files(gear, files)

            # Determine if any of the inputs are ambiguous or not satisfied
            ambiguous = False  # Are any of the inputs ambiguous?
            not_matched = False
            # NOTE: itervalues/iteritems are Python 2 dict APIs (py2 module).
            for files in suggestions.itervalues():
                if len(files) > 1:
                    ambiguous = True
                elif len(files) == 0:
                    not_matched = True
                    break

            # Based on results, add to proper list
            if not_matched:
                not_matched_conts.append(c)
            elif ambiguous:
                ambiguous_conts.append(c)
            else:
                # Create input map of file refs (exactly one file per input).
                inputs = {}
                for input_name, files in suggestions.iteritems():
                    inputs[input_name] = {'type': container_type, 'id': str(c['_id']), 'name': files[0]}
                c['inputs'] = inputs
                matched_conts.append(c)
        else:
            # A container without files can never satisfy the schema.
            not_matched_conts.append(c)
    return {
        'matched': matched_conts,
        'not_matched': not_matched_conts,
        'ambiguous': ambiguous_conts
    }
def insert(batch_proposal):
    """
    Simple database insert given a batch proposal.

    Stamps identical created/modified timestamps before inserting and
    returns the inserted document id.
    """
    time_now = datetime.datetime.utcnow()
    batch_proposal['created'] = time_now
    batch_proposal['modified'] = time_now
    return config.db.batch.insert(batch_proposal)
def update(batch_id, payload):
    """
    Update a batch document, being mindful of state flow.

    When *payload* carries a 'state', the update only applies if the batch
    is currently in the state that legally precedes it (see
    BATCH_JOB_TRANSITIONS).  Raises when nothing was modified.
    """
    time_now = datetime.datetime.utcnow()
    bid = bson.ObjectId(batch_id)
    query = {'_id': bid}
    payload['modified'] = time_now
    if payload.get('state'):
        # Require that the batch job has the previous state
        query['state'] = BATCH_JOB_TRANSITIONS[payload.get('state')]
    # Bug fix: filter on the full query (including the required prior state)
    # instead of only the id — the original built `query` and then ignored
    # it, so illegal state transitions were silently accepted.
    result = config.db.batch.update_one(query, {'$set': payload})
    if result.modified_count != 1:
        raise Exception('Batch job not updated')
def run(batch_job):
    """
    Creates jobs from proposed inputs, returns jobs enqueued.

    For analysis gears each job is wrapped in a newly created analysis on
    the owning session; for other gears jobs are enqueued directly.  On
    success the batch transitions to 'running' with the spawned job ids.
    """
    proposal = batch_job.get('proposal')
    if not proposal:
        raise APIStorageException('The batch job is not formatted correctly.')
    proposed_inputs = proposal.get('inputs', [])
    proposed_destinations = proposal.get('destinations', [])

    gear_id = batch_job['gear_id']
    gear = gears.get_gear(gear_id)
    gear_name = gear['gear']['name']

    config_ = batch_job.get('config')
    origin = batch_job.get('origin')
    tags = proposal.get('tags', [])
    tags.append('batch')

    if gear.get('category') == 'analysis':
        analysis_base = proposal.get('analysis', {})
        if not analysis_base.get('label'):
            # Default label: "<gear name> <timestamp>".
            time_now = datetime.datetime.utcnow()
            analysis_base['label'] = {'label': '{} {}'.format(gear_name, time_now)}
        an_storage = AnalysisStorage()
        acq_storage = AcquisitionStorage()

    jobs = []
    job_ids = []

    # Template shared by every spawned job; deep-copied per job below.
    job_defaults = {
        'config': config_,
        'gear_id': gear_id,
        'tags': tags,
        'batch': str(batch_job.get('_id')),
        'inputs': {}
    }

    for inputs in proposed_inputs:
        job_map = copy.deepcopy(job_defaults)
        job_map['inputs'] = inputs
        if gear.get('category') == 'analysis':
            analysis = copy.deepcopy(analysis_base)
            # Create analysis on the session that owns the input acquisition.
            # NOTE: inputs.values()[0] is Python 2 (dict views not indexable in py3).
            acquisition_id = inputs.values()[0].get('id')
            session_id = acq_storage.get_container(acquisition_id, projection={'session': 1}).get('session')
            analysis['job'] = job_map
            result = an_storage.create_el(analysis, 'sessions', session_id, origin, None)
            analysis = an_storage.get_el(result.inserted_id)
            an_storage.inflate_job_info(analysis)
            job = analysis.get('job')
            job_id = bson.ObjectId(job.id_)
        else:
            job = Queue.enqueue_job(job_map, origin)
            job_id = job.id_
        jobs.append(job)
        job_ids.append(job_id)

    for dest in proposed_destinations:
        job_map = copy.deepcopy(job_defaults)
        job_map['destination'] = dest
        if gear.get('category') == 'analysis':
            analysis = copy.deepcopy(analysis_base)
            # Create analysis directly on the destination session.
            analysis['job'] = job_map
            result = an_storage.create_el(analysis, 'sessions', bson.ObjectId(dest['id']), origin, None)
            analysis = an_storage.get_el(result.inserted_id)
            an_storage.inflate_job_info(analysis)
            job = analysis.get('job')
            job_id = bson.ObjectId(job.id_)
        else:
            job = Queue.enqueue_job(job_map, origin)
            job_id = job.id_
        jobs.append(job)
        job_ids.append(job_id)

    update(batch_job['_id'], {'state': 'running', 'jobs': job_ids})
    return jobs
def cancel(batch_job):
    """
    Cancels all pending jobs, returns number of jobs cancelled.

    Jobs already running/complete are left untouched; the batch itself is
    moved to 'cancelled' regardless.
    """
    pending_jobs = config.db.jobs.find({'state': 'pending', '_id': {'$in': batch_job.get('jobs')}})
    cancelled_jobs = 0
    for j in pending_jobs:
        job = Job.load(j)
        try:
            Queue.mutate(job, {'state': 'cancelled'})
            cancelled_jobs += 1
        except Exception:  # pylint: disable=broad-except
            # if the cancellation fails, move on to next job
            continue

    update(batch_job['_id'], {'state': 'cancelled'})
    return cancelled_jobs
def check_state(batch_id):
    """
    Derive the batch state from the states of its jobs.

    Returns 'complete' when every job finished and at least one did not
    fail, 'failed' when all jobs failed, and None while jobs are still
    running or when the batch was cancelled.
    """
    batch = get(str(batch_id))
    if batch.get('state') == 'cancelled':
        return None
    job_ids = batch.get('jobs', [])
    unfinished = config.db.jobs.find(
        {'_id': {'$in': job_ids}, 'state': {'$nin': ['complete', 'failed', 'cancelled']}})
    non_failed = config.db.jobs.find(
        {'_id': {'$in': job_ids}, 'state': {'$ne': 'failed'}})
    if unfinished.count() != 0:
        return None
    return 'complete' if non_failed.count() > 0 else 'failed'
def get_stats():
    """
    Return the number of jobs by state.
    """
    # Placeholder: callers must currently expect NotImplementedError.
    raise NotImplementedError()
def resume():
    """
    Move cancelled jobs back to pending.
    """
    # Placeholder: callers must currently expect NotImplementedError.
    raise NotImplementedError()
def delete():
    """
    Remove:
      - the batch job
      - its spawned jobs
      - all the files its jobs produced.
    """
    # Placeholder: callers must currently expect NotImplementedError.
    raise NotImplementedError()
| StarcoderdataPython |
3209344 | <gh_stars>1-10
from src.alert.messenger import TelegramMessenger
from src.record.microphone import audio_files
def run(messenger: TelegramMessenger, timeout: int):
    """
    Record non-silent audio samples for `timeout` minutes.

    The samples are saved to data/live/ by the audio_files generator;
    draining the generator is what drives the recording.  Telegram
    alerts are sent when recording starts and when it finishes.
    """
    messenger.send_alert("👀 Starting to collect recording samples.", force_send=True)
    for _sample in audio_files(timeout, messenger):
        # the generator persists each sample itself; nothing to do here
        pass
    messenger.send_alert("✅ I've stopped recording samples.", force_send=True)
| StarcoderdataPython |
319063 | <gh_stars>1-10
# @author: <NAME>
# @description: Utilities for hotspotd
# @license: MIT
import logging
import logging.config
import pickle
import socket
import subprocess
import sys
from os import path
import six
from .config import LOG_CONFIG
# Apply the package-wide logging configuration and create this module's logger.
logging.config.dictConfig(LOG_CONFIG)
logger = logging.getLogger('hotspotd.utils')
# Directory this module is installed in; the bundled hostapd template and the
# pickled settings file (hotspotd.data) are looked up relative to it.
install_path = path.abspath(__file__)
install_dir = path.dirname(install_path)
def killall(process):
    """Terminate every running instance of *process* via pkill."""
    execute_shell('pkill {0}'.format(process))
def is_process_running(process):
    """Return the (status, output) of ``pgrep`` for *process*."""
    return execute_shell('pgrep {0}'.format(process))
def check_sysfile(filename):
    """Return True when *filename* exists in /usr/sbin or /sbin, else False.

    Used to verify that system daemons (hostapd, dnsmasq) are installed.
    """
    # any() replaces the verbose if/else returning literal True/False.
    return any(path.exists(prefix + filename)
               for prefix in ('/usr/sbin/', '/sbin/'))
def set_sysctl(setting, value):
    """Apply a kernel *setting*=*value* via sysctl; returns the command output."""
    return execute_shell('sysctl -w {0}={1}'.format(setting, value))[1]
def execute_shell(command, error=''):
    """Log *command* and run it synchronously; returns (success, output)."""
    logger.debug("CMD: {}".format(command))
    return execute(command, errorstring=error, wait=True, shellexec=True)
def execute(command='', errorstring='', wait=True, shellexec=False, ags=None):
    """Run *command* (split on whitespace) and capture its output.

    Returns a ``(success, stdout)`` tuple; *success* is True only when the
    process exits with status 0.  On any launch failure ``(False, b'')`` is
    returned -- the previous implementation fell through and returned None,
    which crashed every caller that unpacks ``status, output``.

    NOTE(review): *errorstring*, *shellexec* and *ags* are accepted for
    interface compatibility but are currently unused; ``command.split()``
    also breaks quoted arguments -- confirm whether shell execution was
    ever intended.
    """
    try:
        p = subprocess.Popen(command.split(),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        if wait and p.wait() == 0:
            return True, p.communicate()[0]
        else:
            return False, p.communicate()[0]
    except subprocess.CalledProcessError as err:
        logger.error(err)
    except Exception as err:
        logger.error(err)
    # Reached only after an exception: keep the tuple contract.
    return False, b''
def validate_ip(addr):
    """Return True when *addr* is a well-formed IPv4 address, else False."""
    try:
        socket.inet_pton(socket.AF_INET, addr)
    except socket.error as err:
        logger.error(err)
        return False  # Not legal
    return True  # legal
def select_interface(interfaces):
    """Interactively prompt the user to pick one entry from *interfaces*.

    Lists the interfaces with their indices, then loops until the user
    types a number that is a valid index; returns the chosen name.
    """
    for num, interface in enumerate(interfaces):
        print("{} {}".format(num, interface))
    while True:
        interface_num = six.moves.input("Enter number to select interface: ")
        try:
            # int() raises ValueError for non-numeric input; indexing raises
            # IndexError for out-of-range values.  (A dead no-op
            # `isinstance(int(...), six.integer_types)` statement was removed.)
            interface_num = int(interface_num)
            interfaces[interface_num]
        except ValueError:
            logger.error("Invalid entry: {}. Integer is expected".format(
                interface_num))
            continue
        except IndexError:
            logger.error("Invalid entry. Valid entries are {}".format(
                range(len(interfaces))))
            continue
        # NOTE(review): negative numbers index from the end and are accepted
        # here -- confirm whether that is intended.
        return interfaces[interface_num]
def wireless_interfaces():
    """Return the names of all 'IEEE 802.11' interfaces, or None when absent."""
    status, output = execute_shell('iwconfig')
    found = [line.split()[0].decode("utf-8")
             for line in output.splitlines()
             if "IEEE 802.11" in line.decode("utf-8")]
    return found or None
def other_interfaces(wlan=None):
    """Return the names of all non-wireless interfaces that are up, or None.

    The loopback device and, when given, the *wlan* interface used for
    broadcasting are excluded -- the latter also makes it possible to use
    one wireless module for the AP and another for Internet access.
    """
    status, output = execute_shell('ip -o -4 -6 link show up')
    names = [line.decode("utf-8").split(':')[1].strip()
             for line in output.splitlines()]
    # Remove loopback device
    if 'lo' in names:
        names.remove('lo')
    # Remove the broadcasting wlan interface, if any.
    if wlan and wlan in names:
        names.remove(wlan)
    return names or None
def configure():
    """Interactively collect hotspot settings and persist them.

    Detects the wireless and upstream interfaces, prompts for the AP's IP
    address, SSID and password, renders hostapd.conf from the bundled
    template and pickles the chosen settings to hotspotd.data.
    """
    logger.info('Initiating configuration..')
    wiface = wireless_interfaces()
    if wiface:
        if len(wiface) > 1:
            logger.info('Following wireless interfaces were detected, please select one.')  # noqa
            wlan = select_interface(wiface)
        else:
            logger.info("Wireless interface: {}".format(wiface[0]))
            wlan = wiface[0]
    else:
        # Single-line message: the old backslash continuation embedded a long
        # run of indentation spaces into the logged text.
        message = 'Wireless interface could not be found on your system. Please enable WIFI adapter.'
        # `nmcli radio wifi on`
        # On nmcli tool, version 0.9.8.8, the command is `nmcli nm wifi on`
        logger.error(message)
        sys.exit()
    iface = other_interfaces(wlan)
    if iface:
        if len(iface) > 1:
            logger.info("Found interface(s): {}. \nPlease select one.".format(
                ', '.join(iface)))
            ppp = select_interface(iface)
        else:
            # Bug fix: this used to log wiface[0] (the wireless interface)
            # instead of the interface actually being selected.
            logger.info("Interface: {}".format(iface[0]))
            ppp = iface[0]
    else:
        message = 'No network interface found to interface with LAN'
        logger.error(message)
        sys.exit()
    while True:
        ipaddress = six.moves.input('Enter an IP address for your AP [192.168.45.1]: ')  # noqa
        if ipaddress is None or ipaddress == '':
            ipaddress = '192.168.45.1'
        elif validate_ip(ipaddress) is False:
            continue
        break
    # TODO: Get netmask from user
    netmask = '255.255.255.0'
    # Configure SSID, password, etc.
    ssid = six.moves.input('Enter SSID [joe_ssid]: ')
    if ssid == '':
        ssid = 'joe_ssid'
    password = six.moves.input('Enter 10 digit password [1234567890]: ')
    if password == '':
        # Default matches the value advertised in the prompt above.
        password = '1234567890'
    data = {'wlan': wlan,
            'inet': ppp,
            'ipaddress': ipaddress,
            'netmask': netmask,
            'ssid': ssid,
            'password': password}
    # Render hostapd.conf by substituting the placeholders in the template.
    with open(path.join(install_dir, 'cfg/hostapd.conf')) as sample_hostapd:  # noqa
        with open(path.join(install_dir, 'hostapd.conf'), 'w') as configfile:  # noqa
            subs = {"wlan0": wlan,
                    "joe_ssid": ssid,
                    "1234567890": password}
            for line in sample_hostapd:
                for pattern in six.iterkeys(subs):
                    if pattern in line:
                        line = line.replace(pattern, subs[pattern])
                configfile.write(line)
    pickle.dump(data, open(path.join(install_dir, 'hotspotd.data'), 'wb'), -1)  # noqa
    logger.info("Following data was saved in file hotspotd.data")
    logger.debug("File: {}/hostapd.conf".format(install_dir))
    print(data)  # Don't want to go password in logs
    logger.info('Settings saved. Run "hotspotd start" to start the router.')
def pre_start():
    """Release the WIFI device from NetworkManager and unblock the radio.

    Must run before hostapd is started.  The nmcli syntax changed between
    releases, so the older command form is tried when the newer one fails.
    """
    # Recent nmcli (e.g. 1.8.x) syntax first ...
    status, output = execute_shell('nmcli radio wifi off')
    if status is False:
        # ... falling back to the nmcli 0.9.8.8 syntax.
        execute_shell('nmcli nm wifi off')
    # Clear any soft-block on the adapter (requires the rfkill tool) and give
    # the device a moment to settle before hostapd takes over.
    execute_shell('rfkill unblock wifi')
    execute_shell('sleep 1')
def check_dependencies():
    """Exit the process when hostapd or dnsmasq is not installed.

    Both daemons are required to run the access point.
    """
    # The original string used a backslash continuation, which embedded a long
    # run of indentation spaces into the logged message and read
    # "have install" -- fixed to a single clean sentence.
    message = 'executable not found. Make sure you have installed the package.'
    if check_sysfile('hostapd') is False:
        logger.error("hostapd {}".format(message))
        sys.exit()
    elif check_sysfile('dnsmasq') is False:
        logger.error("dnsmasq {}".format(message))
        sys.exit()
def load_data():
    """Return the pickled hotspot settings; exits when none were saved.

    Reads the ``hotspotd.data`` file written by :func:`configure`.
    """
    data_file = path.join(install_dir, 'hotspotd.data')
    if path.exists(data_file):
        # Context manager closes the handle -- the previous code leaked the
        # open file object returned by open().
        with open(data_file, 'rb') as fh:
            return pickle.load(fh)
    logger.debug("Reason: Could not load file: {}".format(data_file))
    logger.error("Looks like hotspotd was never configured.\nCheck status using `hotspotd status`")  # noqa
    sys.exit()
def start_router():
    """Bring up the access point.

    Loads the saved settings, configures the wireless interface, enables
    IPv4 forwarding and NAT between the wireless and upstream interfaces,
    then starts dnsmasq (DHCP) and hostapd (AP daemon).  The order of the
    shell commands below matters.
    """
    data = load_data()
    wlan = data['wlan']
    ppp = data['inet']
    ipaddress = data['ipaddress']
    netmask = data['netmask']
    logger.info('Starting hotspot')
    check_dependencies()
    pre_start()
    # TODO: `ifconfig` is deprecated in favor of `ip`. Try to use `ip`
    s = 'ifconfig ' + wlan + ' up ' + ipaddress + ' netmask ' + netmask
    logger.info('created interface: mon.' + wlan + ' on IP: ' + ipaddress)
    status, output = execute_shell(s)
    execute_shell('sleep 2')
    # Network prefix (e.g. '192.168.45') used to build the DHCP range below.
    i = ipaddress.rindex('.')
    ipparts = ipaddress[0:i]
    # stop dnsmasq if already running.
    killall('dnsmasq')
    # stop hostapd if already running.
    killall('hostapd')
    # enable forwarding in sysctl.
    logger.info('enabling forward in sysctl.')
    set_sysctl('net.ipv4.ip_forward', '1')
    # enable forwarding in iptables.
    logger.info('creating NAT using iptables: ' + wlan + '<->' + ppp)
    execute_shell('iptables -P FORWARD ACCEPT')
    # add iptables rules to create the NAT.
    execute_shell('iptables --table nat --delete-chain')
    execute_shell('iptables --table nat -F')
    execute_shell('iptables --table nat -X')
    execute_shell('iptables -t nat -A POSTROUTING -o ' + ppp + ' -j MASQUERADE')  # noqa
    execute_shell('iptables -A FORWARD -i ' + ppp + ' -o ' + wlan + ' -j ACCEPT -m state --state RELATED,ESTABLISHED')  # noqa
    execute_shell('iptables -A FORWARD -i ' + wlan + ' -o ' + ppp + ' -j ACCEPT')  # noqa
    # allow traffic to/from wlan
    execute_shell('iptables -A OUTPUT --out-interface ' + wlan + ' -j ACCEPT')
    execute_shell('iptables -A INPUT --in-interface ' + wlan + ' -j ACCEPT')
    # start dnsmasq
    logger.info('Running dnsmasq')
    s = 'dnsmasq --dhcp-authoritative --interface=' + wlan + ' --dhcp-range=' + ipparts + '.20,' + ipparts + '.100,' + netmask + ',4h'  # noqa
    execute_shell(s)
    # start hostapd
    logger.info('Running hostapd')
    s = 'hostapd -B ' + path.join(install_dir, 'hostapd.conf')
    execute_shell('sleep 2')
    execute_shell(s)
    logger.info('hotspot is running.')
def stop_router():
    """Tear down the access point and undo the NAT/forwarding changes
    made by :func:`start_router`."""
    data = load_data()
    wlan = data['wlan']
    ppp = data['inet']
    logger.info('Stopping hotspot')
    logger.debug('Bringing down interface: {}'.format(wlan))
    execute_shell('ifconfig mon.' + wlan + ' down')
    # TODO: Find some workaround. killing hostapd brings down the
    # wlan0 interface in ifconfig.
    # stop hostapd
    # if cli.is_process_running('hostapd')>0:
    # cli.writelog('stopping hostapd')
    # cli.execute_shell('pkill hostapd')
    # Stop dependent services
    logger.info('Stopping dnsmasq')
    killall('dnsmasq')
    logger.info('Stopping hostapd')
    killall('hostapd')
    # Delete forwarding in iptables.
    logger.info('Delete forward rules in iptables.')
    execute_shell('iptables -D FORWARD -i ' + ppp + ' -o ' + wlan + ' -j ACCEPT -m state --state RELATED,ESTABLISHED')  # noqa
    execute_shell('iptables -D FORWARD -i ' + wlan + ' -o ' + ppp + ' -j ACCEPT')  # noqa
    # delete iptables rules that were added for wlan traffic.
    execute_shell('iptables -D OUTPUT --out-interface ' + wlan + ' -j ACCEPT')
    execute_shell('iptables -D INPUT --in-interface ' + wlan + ' -j ACCEPT')
    execute_shell('iptables --table nat --delete-chain')
    execute_shell('iptables --table nat -F')
    execute_shell('iptables --table nat -X')
    # disable forwarding in sysctl.
    logger.info('disabling forward in sysctl.')
    set_sysctl('net.ipv4.ip_forward', '0')
    # cli.execute_shell('ifconfig ' + wlan + ' down' + IP + ' netmask ' + Netmask) # noqa
    # cli.execute_shell('ip addr flush ' + wlan)
    logger.info('hotspot has stopped.')
| StarcoderdataPython |
169339 | <filename>test/integration/test_command.py
import os.path
import re
from six import assertRegex
from . import *
class TestCommand(IntegrationTest):
    """Integration tests for the '07_commands' example build targets."""

    def __init__(self, *args, **kwargs):
        IntegrationTest.__init__(
            self, os.path.join(examples_dir, '07_commands'), *args, **kwargs
        )

    def _assertOutputLine(self, output, pattern):
        # Build output may indent the expected line, hence MULTILINE + \s*.
        assertRegex(self, output, re.compile(pattern, re.MULTILINE))

    def test_hello(self):
        self._assertOutputLine(self.build('hello'), r'^\s*hello$')

    def test_world(self):
        self._assertOutputLine(self.build('world'), r'^\s*world$')

    def test_script(self):
        self._assertOutputLine(self.build('script'), r'^\s*hello, world!$')
        self.assertExists(output_file('file'))

    def test_alias(self):
        output = self.build('hello-world')
        self._assertOutputLine(output, r'^\s*hello$')
        self._assertOutputLine(output, r'^\s*world$')
@skip_if_backend('msbuild')
class TestRunExecutable(IntegrationTest):
    """Integration tests for the 'run_executable' example."""

    def __init__(self, *args, **kwargs):
        IntegrationTest.__init__(self, 'run_executable', *args, **kwargs)

    def _assertHello(self, target, language):
        # Each target should print a greeting naming its language; the line
        # may be indented in the build output.
        assertRegex(self, self.build(target),
                    re.compile(r'^\s*hello from ' + language + r'!$',
                               re.MULTILINE))

    def test_env_run(self):
        self.assertExists(output_file('file.txt'))

    def test_cxx(self):
        self._assertHello('cxx', r'c\+\+')

    def test_java(self):
        self._assertHello('java', 'java')

    def test_java_classlist(self):
        self._assertHello('java-classlist', 'java')

    def test_python(self):
        self._assertHello('python', 'python')
| StarcoderdataPython |
1992414 | import pandas as pd
import datetime
today = datetime.date.today()
def load_today(db):
    """Papers with today's publication date, as a DataFrame."""
    rows = db.PaperDB.select().where(db.PaperDB.pubdate == today)
    return pd.DataFrame(list(rows.dicts()))
def load_this_week(db):
    """Papers published within the last 8 days (up to and including today)."""
    start = (today + datetime.timedelta(days=1)) - datetime.timedelta(days=8)
    rows = db.PaperDB.select().where(db.PaperDB.pubdate.between(start, today))
    return pd.DataFrame(list(rows.dicts()))
def load_this_month(db):
    """Papers published within the last 31 days (up to and including today)."""
    start = (today + datetime.timedelta(days=1)) - datetime.timedelta(days=31)
    rows = db.PaperDB.select().where(db.PaperDB.pubdate.between(start, today))
    return pd.DataFrame(list(rows.dicts()))
def load_year(db, year):
    """All papers whose publication year equals *year*, as a DataFrame."""
    rows = db.PaperDB.select().where(db.PaperDB.pubyear == year)
    return pd.DataFrame(list(rows.dicts()))
def load_between_years(db,y1,y2):
    # NOTE(review): unlike the other load_* helpers, this returns a plain
    # list of dicts rather than a DataFrame (despite the `df` name) --
    # confirm whether callers rely on that before changing it.
    q = db.PaperDB.select().where(db.PaperDB.pubyear.between(y1,y2))
    df = list(q.dicts())
    return df
def load_all(db):
    """Every paper in the database, as a DataFrame."""
    rows = db.PaperDB.select()
    return pd.DataFrame(list(rows.dicts()))
def check_first_last_affiliation(row, string_contains):
    """Return True when *string_contains* occurs in the first or the last
    entry of *row* (a sequence of affiliation strings, one per author).

    Only the first and last authors' affiliations are inspected.
    Raises IndexError for an empty *row*, as before.
    """
    # Single boolean expression replaces the if/else returning True/False.
    return string_contains in row[0] or string_contains in row[-1]
def filter_paper_contains(pm,string_contains,first_and_last_affiliation=True):
    """Select papers whose affiliations mention *string_contains* and whose
    journal is in the filter list, then score their similarities.

    When *first_and_last_affiliation* is True only the first and last
    authors' affiliations are matched; otherwise any occurrence counts.
    Returns an empty DataFrame when nothing matches.
    """
    print("Getting affiliates from %s..." % string_contains)
    if first_and_last_affiliation:
        m_papers = pm.papers_period['affiliations'].str.split(";").apply(check_first_last_affiliation,args=(string_contains,))
    else:
        m_papers = pm.papers_period['affiliations'].str.contains(string_contains)
    # NOTE(review): `jrl` is not imported anywhere in this module -- confirm
    # where jrl.filter_journals is supposed to come from.
    m_journal = pm.papers_period['journal_lower'].apply(lambda x: any([k.lower() == x for k in jrl.filter_journals]))
    m_combined = (m_papers & m_journal)
    if m_combined.any():
        papers = pm.papers_period[m_combined]
        vectors = pm.vectors_period.loc[papers.index]
        dfout = pm.calculate_similarities(papers,vectors)
    else:
        dfout = pd.DataFrame()
    return dfout
| StarcoderdataPython |
4900026 |
# Emotion labels the application reports.  'Disgust' is scored by the model
# (see emotions_values) but deliberately absent from this list.
detected_emotions = ['Angry', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
# Index of each emotion in the model's output vector -- presumably the
# FER2013 class ordering (the weights file below is fer2013); confirm.
emotions_values = {'Angry': 0, 'Disgust': 1, 'Fear': 2, 'Happy': 3,
                   'Sad': 4, 'Surprise': 5, 'Neutral': 6}
# Path to the pre-trained model weights, relative to the project root.
model_weights_path = 'model/weights/fer2013_weights.h5'
# When True, emit diagnostic/progress output.
verbose = True
| StarcoderdataPython |
255887 | import splunk.admin as admin
class ConfigApp(admin.MConfigHandler):
    """Splunk setup-page handler for the app's `pyden.conf` settings.

    Exposes the 'location', 'optimize' and 'proxy' settings of the
    `appsettings` stanza for listing and editing.
    """
    def setup(self):
        # Declare the optional arguments accepted by the edit action.
        if self.requestedAction == admin.ACTION_EDIT:
            for arg in ['location', 'optimize', 'proxy']:
                self.supportedArgs.addOptArg(arg)
    def handleList(self, confInfo):
        # Populate confInfo with the current `appsettings` stanza values.
        conf_dict = self.readConf("pyden")
        if conf_dict is not None:
            for stanza, settings in conf_dict.items():
                if stanza == "appsettings":
                    for key, val in settings.items():
                        # Normalise the boolean-ish flag to '1'/'0'.
                        if key in ['optimize']:
                            if int(val) == 1:
                                val = '1'
                            else:
                                val = '0'
                        # Optional text settings default to an empty string.
                        if key in ['location', 'proxy'] and val in [None, '']:
                            val = ''
                        confInfo[stanza].append(key, val)
    def handleEdit(self, confInfo):
        # Sanitise submitted values, then write them back to pyden.conf.
        if self.callerArgs.data['proxy'][0] in [None, '']:
            self.callerArgs.data['proxy'][0] = ''
        if int(self.callerArgs.data['optimize'][0]) == 1:
            self.callerArgs.data['optimize'][0] = '1'
        else:
            self.callerArgs.data['optimize'][0] = '0'
        if self.callerArgs.data['location'][0] in [None, '']:
            self.callerArgs.data['location'][0] = ''
        self.writeConf('pyden', 'appsettings', self.callerArgs.data)
# initialize the handler
admin.init(ConfigApp, admin.CONTEXT_NONE)
| StarcoderdataPython |
200236 | <gh_stars>0
from setuptools import dist, setup, Extension
# Bootstrap numpy before setup() runs: the C extension declared below needs
# numpy's headers (np.get_include()) at build time.
# TODO: consider declaring this via PEP 518 build requirements instead.
dist.Distribution().fetch_build_eggs(["numpy>=1.14.5"])
# numpy is importable from here on
import numpy as np
def readme():
    """Return the contents of README.rst, used as the long description."""
    with open("README.rst") as fh:
        return fh.read()
setup(
    name="egrm",
    version="0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Expected Genetic Relationship Matrix",
    long_description=readme(),
    long_description_content_type="text/markdown",
    url="https://github.com/Ephraim-usc/egrm.git",
    packages=["egrm"],
    python_requires=">=3",
    install_requires=[
        "numpy>=1.14.5",
        "pandas>=1.2.0",
        "tskit",
        "tqdm",
    ],
    # Command-line entry point installed onto the user's PATH.
    scripts=[
        "bin/trees2egrm",
    ],
    # C extension; needs the numpy headers, hence the bootstrap above.
    ext_modules=[
        Extension("matrix", ["src/matrix.c"], include_dirs=[np.get_include()]),
    ],
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: MIT License",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
    ],
    keywords="genetics genome SNP coalescence",
)
| StarcoderdataPython |
6560417 | '''
Test 1 preaccept, 1 sni, and 1 cert callback (with delay)
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
Test.Summary = '''
Test different combinations of TLS handshake hooks to ensure they are applied consistently.
'''
Test.SkipUnless(Condition.HasProgram("grep", "grep needs to be installed on system for this test to work"))
ts = Test.MakeATSProcess("ts", select_ports=False)
server = Test.MakeOriginServer("server")
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# desired response form the origin server
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.Variables.ssl_port = 4443
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'ssl_hook_test',
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
# enable ssl port
'proxy.config.http.server_ports': '{0}:ssl'.format(ts.Variables.ssl_port),
'proxy.config.ssl.client.verify.server': 0,
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map https://example.com:4443 http://127.0.0.1:{0}'.format(server.Variables.Port)
)
Test.prepare_plugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'ssl_hook_test.cc'), ts, '-cert=1 -sni=1 -preaccept=1')
tr = Test.AddTestRun("Test one sni, one preaccept, and one cert hook")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = 'curl -k -H \'host:example.com:{0}\' https://127.0.0.1:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "gold/preaccept-1.gold"
ts.Streams.stderr = "gold/ts-preaccept1-sni1-cert1.gold"
# Log markers emitted by the ssl_hook_test plugin, one per hook invocation.
snistring = "SNI callback 0"
preacceptstring = "Pre accept callback 0"
certstring = "Cert callback 0"
# The regex matches the whole stream and asserts the marker occurs exactly
# once (tempered dot before, negative lookahead after).
ts.Streams.All = Testers.ContainsExpression(
    "\A(?:(?!{0}).)*{0}(?!.*{0}).*\Z".format(snistring), "SNI message appears only once", reflags=re.S | re.M)
# the preaccept may get triggered twice because the test framework creates a TCP connection before handing off to traffic_server
ts.Streams.All += Testers.ContainsExpression("\A(?:(?!{0}).)*{0}.*({0})?(?!.*{0}).*\Z".format(
    preacceptstring), "Pre accept message appears only once or twice", reflags=re.S | re.M)
ts.Streams.All += Testers.ContainsExpression("\A(?:(?!{0}).)*{0}(?!.*{0}).*\Z".format(certstring),
                                             "Cert message appears only once", reflags=re.S | re.M)
tr.Processes.Default.TimeOut = 5
tr.TimeOut = 5
| StarcoderdataPython |
3413758 | #!/usr/bin/env python
'''
I was in the middle of implementing string-based binary long division,
then just tried it in the console. Feels like cheating, but the problem is
easy with the right tools, I guess.
'''


def digit_sum(n):
    """Return the sum of the decimal digits of the non-negative integer *n*."""
    return sum(int(digit) for digit in str(n))


if __name__ == '__main__':
    # Project Euler 16: sum of the digits of 2**1000.  (The original used the
    # Python-2-only `print` statement; print() works on both 2 and 3.)
    print(digit_sum(2 ** 1000))
| StarcoderdataPython |
8082942 | <filename>eepackages/utils.py
from ctypes import ArgumentError
from itertools import repeat
import multiprocessing
import os
from typing import Any, Callable, Dict, Optional
import requests
import shutil
from pathlib import Path
import math
import ee
import ee
from retry import retry
# /***
# * The script computes surface water mask using Canny Edge detector and Otsu thresholding
# * See the following paper for details: http://www.mdpi.com/2072-4292/8/5/386
# *
# * Author: <NAME> (<EMAIL>)
# * Contributors: <NAME> (<EMAIL>) - re-implemented otsu() using ee.Array
# *
# * Usage:
# *
# * var thresholding = require('users/gena/packages:thresholding')
# *
# * var th = thresholding.computeThresholdUsingOtsu(image, scale, bounds, cannyThreshold, cannySigma, minValue, ...)
# *
# */
# /***
# * Return the DN that maximizes interclass variance in B5 (in the region).
# */
def otsu(histogram):
    """Return the DN (bucket mean) that maximizes between-class variance.

    Server-side implementation of Otsu's thresholding using ee.Array
    operations; *histogram* is a dictionary with 'histogram' (bucket
    counts) and 'bucketMeans' keys, as produced by ee.Reducer.histogram.
    """
    histogram = ee.Dictionary(histogram)
    counts = ee.Array(histogram.get('histogram'))
    means = ee.Array(histogram.get('bucketMeans'))
    size = means.length().get([0])
    total = counts.reduce(ee.Reducer.sum(), [0]).get([0])
    # Weighted sum of all bucket means.  Renamed from `sum`, which shadowed
    # the Python builtin.
    total_sum = means.multiply(counts).reduce(ee.Reducer.sum(), [0]).get([0])
    mean = total_sum.divide(total)
    indices = ee.List.sequence(1, size)

    def bss_for_partition(i):
        # Between-class sum of squares when buckets [0, i) form class A and
        # the rest form class B.
        aCounts = counts.slice(0, 0, i)
        aCount = aCounts.reduce(ee.Reducer.sum(), [0]).get([0])
        aMeans = means.slice(0, 0, i)
        aMean = aMeans.multiply(aCounts).reduce(
            ee.Reducer.sum(), [0]).get([0]).divide(aCount)
        bCount = total.subtract(aCount)
        bMean = total_sum.subtract(aCount.multiply(aMean)).divide(bCount)
        return aCount.multiply(aMean.subtract(mean).pow(2)).add(bCount.multiply(bMean.subtract(mean).pow(2)))

    bss = indices.map(bss_for_partition)
    # Return the mean value corresponding to the maximum BSS.
    return means.sort(bss).get([-1])
# /***
# * Compute a threshold using Otsu method (bimodal)
# */
def computeThresholdUsingOtsu(image, scale, bounds, cannyThreshold, cannySigma, minValue, debug=False, minEdgeLength=None, minEdgeGradient=None, minEdgeValue=None):
    """Estimate a bimodal threshold for *image* using Canny edges + Otsu.

    Detects sharp transitions within *bounds*, optionally filters the edges
    by length, gradient and minimum value, then runs Otsu thresholding on
    image values sampled in a buffer around the surviving edges.  Falls
    back to *minValue* when no histogram could be computed, and never
    returns less than *minValue* when it is given.
    """
    # clip image edges
    mask = image.mask().gt(0).clip(bounds).focal_min(
        ee.Number(scale).multiply(3), 'circle', 'meters')
    # detect sharp changes
    edge = ee.Algorithms.CannyEdgeDetector(image, cannyThreshold, cannySigma)
    edge = edge.multiply(mask)
    if minEdgeLength:
        # keep only connected edge components of at least minEdgeLength pixels
        connected = edge.mask(edge).lt(
            cannyThreshold).connectedPixelCount(200, True)
        edgeLong = connected.gte(minEdgeLength)
        # if debug:
        # print('Edge length: ', ui.Chart.image.histogram(connected, bounds, scale, buckets))
        # Map.addLayer(edge.mask(edge), {palette:['ff0000']}, 'edges (short)', false);
        edge = edgeLong
    # buffer around NDWI edges
    edgeBuffer = edge.focal_max(ee.Number(scale), 'square', 'meters')
    if minEdgeValue:
        # drop edges whose neighborhood minimum is not above minEdgeValue
        edgeMin = image.reduceNeighborhood(
            ee.Reducer.min(), ee.Kernel.circle(ee.Number(scale), 'meters'))
        edgeBuffer = edgeBuffer.updateMask(edgeMin.gt(minEdgeValue))
        # if debug:
        # Map.addLayer(edge.updateMask(edgeBuffer), {palette:['ff0000']}, 'edge min', false);
    if minEdgeGradient:
        # drop edges below the minEdgeGradient-th percentile of gradient magnitude
        edgeGradient = image.gradient().abs().reduce(
            ee.Reducer.max()).updateMask(edgeBuffer.mask())
        edgeGradientTh = ee.Number(edgeGradient.reduceRegion(
            ee.Reducer.percentile([minEdgeGradient]), bounds, scale).values().get(0))
        # if debug:
        # print('Edge gradient threshold: ', edgeGradientTh)
        # Map.addLayer(edgeGradient.mask(edgeGradient), {palette:['ff0000']}, 'edge gradient', false);
        # print('Edge gradient: ', ui.Chart.image.histogram(edgeGradient, bounds, scale, buckets))
        edgeBuffer = edgeBuffer.updateMask(edgeGradient.gt(edgeGradientTh))
    edge = edge.updateMask(edgeBuffer)
    edgeBuffer = edge.focal_max(
        ee.Number(scale).multiply(1), 'square', 'meters')
    imageEdge = image.mask(edgeBuffer)
    # if debug:
    # Map.addLayer(imageEdge, {palette:['222200', 'ffff00']}, 'image edge buffer', false)
    # compute threshold using Otsu thresholding
    buckets = 100
    hist = ee.Dictionary(ee.Dictionary(imageEdge.reduceRegion(
        ee.Reducer.histogram(buckets), bounds, scale)).values().get(0))
    threshold = ee.Algorithms.If(hist.contains(
        'bucketMeans'), otsu(hist), minValue)
    threshold = ee.Number(threshold)
    if debug:
        # // experimental
        # // var jrc = ee.Image('JRC/GSW1_0/GlobalSurfaceWater').select('occurrence')
        # // var jrcTh = ee.Number(ee.Dictionary(jrc.updateMask(edge).reduceRegion(ee.Reducer.mode(), bounds, scale)).values().get(0))
        # // var water = jrc.gt(jrcTh)
        # // Map.addLayer(jrc, {palette: ['000000', 'ffff00']}, 'JRC')
        # // print('JRC occurrence (edge)', ui.Chart.image.histogram(jrc.updateMask(edge), bounds, scale, buckets))
        # Map.addLayer(edge.mask(edge), {palette:['ff0000']}, 'edges', true);
        print('Threshold: ', threshold)
        # print('Image values:', ui.Chart.image.histogram(image, bounds, scale, buckets));
        # print('Image values (edge): ', ui.Chart.image.histogram(imageEdge, bounds, scale, buckets));
        # Map.addLayer(mask.mask(mask), {palette:['000000']}, 'image mask', false);
    if minValue is not None:
        return threshold.max(minValue)
    else:
        return threshold
def focalMin(image: ee.Image, radius: float):
    """Morphological erosion of a binary *image* by *radius*.

    Implemented as a dilation of the inverted image via the fast distance
    transform, then inverted back.
    """
    inverted = image.Not()
    dilated_background = inverted.fastDistanceTransform(radius).sqrt().lte(radius)
    return dilated_background.Not()
def focalMax(image: ee.Image, radius: float):
    """Morphological dilation of a binary *image* by *radius*,
    via the fast distance transform."""
    distance = image.fastDistanceTransform(radius).sqrt()
    return distance.lte(radius)
def focalMaxWeight(image: ee.Image, radius: float):
    """Dilation with a linear falloff: 1 at the source pixels,
    decreasing to 0 at distance *radius*."""
    distance = image.fastDistanceTransform(radius).sqrt()
    capped = distance.where(distance.gte(radius), radius)
    return ee.Image(radius).subtract(capped).divide(radius)
@retry(tries=10, delay=5, backoff=10)
def _download_image(
    index: int,
    image_download_method: Callable,
    image_list: ee.List,
    name_prefix: str,
    out_dir: Optional[Path],
    download_kwargs: Optional[Dict[str, Any]]
) -> None:
    """
    Download one image from *image_list* to *out_dir*.

    Hidden helper for download_image_collection.  It must stay at module
    level (not nested in a function) so the multiprocessing module can
    pickle it.  Retries with backoff on any failure.
    """
    if not out_dir:
        out_dir = Path.cwd() / "output"
    if not download_kwargs:
        download_kwargs = {}
    img: ee.Image = ee.Image(image_list.get(index))
    url: str = image_download_method(img, download_kwargs)
    r: requests.Response = requests.get(url, stream=True)

    # File extension: derived from the requested format when one is given,
    # otherwise from the download method's default output.
    format_extensions: Dict[str, str] = {
        "GEO_TIFF": ".tif",
        "NPY": ".npy",
        "PNG": ".png",
        "JPG": ".jpg",
    }
    format: Optional[str] = download_kwargs.get("format")
    if format:
        # any other explicit format falls back to the zipped-GeoTIFF default
        extention: str = format_extensions.get(format, ".tif.zip")
    elif image_download_method == ee.Image.getDownloadURL:
        extention = ".tif.zip"
    elif image_download_method == ee.Image.getThumbURL:
        extention = ".png"
    else:
        raise RuntimeError(
            f"image download method {image_download_method} unknown.")

    # File id: image timestamp, then its index property, then the positional
    # index.  `is not None` so a 0 timestamp is not silently skipped (the
    # previous truthiness checks would have dropped it).
    img_props: Dict[str, Any] = img.getInfo()['properties']
    t0: Optional[int] = img_props.get("system:time_start")
    img_index: Optional[int] = img_props.get("system:index")
    if t0 is not None:
        file_id = t0
    elif img_index is not None:
        file_id = img_index
    else:
        file_id = index

    filename: Path = out_dir / f"{name_prefix}{file_id}{extention}"
    with open(filename, 'wb') as out_file:
        shutil.copyfileobj(r.raw, out_file)
    print("Done: ", index)
def _batch_download_ic(
    ic: ee.ImageCollection,
    img_download_method: Callable,
    name_prefix: str,
    out_dir: Optional[Path],
    pool_size: int,
    download_kwargs: Optional[Dict[str, Any]]
):
    """
    Batch-download every image in *ic* using a multiprocessing pool.

    Takes the ee.Image download method as a callable so the same machinery
    serves both getDownloadURL and getThumbURL.  Switches to the
    high-volume Earth Engine endpoint for the duration of the download and
    restores the default endpoint afterwards.
    """
    # Default to ./output (the same fallback _download_image uses): the
    # public wrappers pass out_dir=None by default, which previously crashed
    # on the out_dir.exists() call below.
    if not out_dir:
        out_dir = Path.cwd() / "output"
    if not out_dir.exists():
        os.mkdir(out_dir)
    ee.Initialize(opt_url='https://earthengine-highvolume.googleapis.com')
    num_images: int = ic.size().getInfo()
    image_list: ee.List = ic.toList(num_images)
    with multiprocessing.Pool(pool_size) as pool:
        pool.starmap(_download_image, zip(
            range(num_images),
            repeat(img_download_method),
            repeat(image_list),
            repeat(name_prefix),
            repeat(out_dir),
            repeat(download_kwargs)
        ))
    # Reset to default API url
    ee.Initialize()
def download_image_collection(
    ic: ee.ImageCollection,
    name_prefix: str = "ic_",
    out_dir: Optional[Path] = None,
    pool_size: int = 25,
    download_kwargs: Optional[Dict[str, Any]] = None
) -> None:
    """
    Download every image in *ic* via ee.Image.getDownloadURL.

    The per-image endpoint limits apply (request < 32M, grid dimension
    < 10000), see
    https://developers.google.com/earth-engine/apidocs/ee-image-getdownloadurl.

    args:
        ic (ee.ImageCollection): collection to download.
        name_prefix (str): prefix for the downloaded file names.
        out_dir (Optional(Path)): output directory.
        pool_size (int): number of parallel download workers.
        download_kwargs (Optional(Dict(str, Any))): forwarded to
            getDownloadURL.
    """
    _batch_download_ic(
        ic, ee.Image.getDownloadURL, name_prefix, out_dir, pool_size, download_kwargs)
def download_image_collection_thumb(
    ic: ee.ImageCollection,
    name_prefix: str = "ic_",
    out_dir: Optional[Path] = None,
    pool_size: int = 25,
    download_kwargs: Optional[Dict[str, Any]] = None
) -> None:
    """
    Download a thumbnail of every image in *ic* via ee.Image.getThumbURL.

    The per-image endpoint limits apply (request < 32M, grid dimension
    < 10000), see
    https://developers.google.com/earth-engine/apidocs/ee-image-getthumburl.

    args:
        ic (ee.ImageCollection): collection to download.
        name_prefix (str): prefix for the downloaded file names.
        out_dir (Optional(Path)): output directory.
        pool_size (int): number of parallel download workers.
        download_kwargs (Optional(Dict(str, Any))): forwarded to
            getThumbURL.
    """
    _batch_download_ic(
        ic, ee.Image.getThumbURL, name_prefix, out_dir, pool_size, download_kwargs)
def radians(img):
    """Convert an image of angles in degrees to radians (float output)."""
    return img.toFloat().multiply(math.pi).divide(180)
def hillshade(az, ze, slope, aspect):
    """Hillshade intensity for sun azimuth ``az`` and zenith ``ze`` (degrees).

    ``slope`` and ``aspect`` are expected in radians (see :func:`radians`).
    """
    azimuth = radians(ee.Image.constant(az))
    zenith = radians(ee.Image.constant(90).subtract(ee.Image.constant(ze)))
    # Standard hillshade formula:
    # cos(az - aspect) * sin(slope) * sin(zenith) + cos(zenith) * cos(slope)
    directional = azimuth.subtract(aspect).cos().multiply(slope.sin()).multiply(zenith.sin())
    return directional.add(zenith.cos().multiply(slope.cos()))
def hillshadeRGB(image, elevation, weight=1, height_multiplier=5, azimuth=0, zenith=45,
        contrast=0, brightness=0, saturation=1, castShadows=False, customTerrain=False):
    """Styles RGB image using hillshading, mixes RGB and hillshade using HSV<->RGB transform.

    The image is converted to HSV; its "value" channel is blended with a
    hillshade computed from ``elevation`` (weighted by ``weight``), then the
    result is converted back to RGB and adjusted for contrast/brightness.

    NOTE(review): ``customTerrain=True`` raises NotImplementedError before the
    hillshade is computed, but the second ``if customTerrain`` branch below is
    then unreachable — presumably a placeholder for a future implementation.
    """
    # Normalize the visualized 8-bit RGB to [0,1] and move to HSV space.
    hsv = image.visualize().unitScale(0, 255).rgbToHsv()
    # Vertically exaggerate the terrain before deriving slope/aspect.
    z = elevation.multiply(ee.Image.constant(height_multiplier))
    terrain = ee.Algorithms.Terrain(z)
    slope = radians(terrain.select(['slope'])).resample('bicubic')
    aspect = radians(terrain.select(['aspect'])).resample('bicubic')
    if customTerrain:
        raise NotImplementedError(
            'customTerrain argument is not implemented yet')
    hs = hillshade(azimuth, zenith, slope, aspect).resample('bicubic')
    if castShadows:
        hysteresis = True
        neighborhoodSize = 256
        # HillShadow returns 1 where lit; invert so 1 marks shadowed pixels.
        hillShadow = ee.Algorithms.HillShadow(elevation, azimuth,
            ee.Number(90).subtract(zenith), neighborhoodSize, hysteresis).float()
        hillShadow = ee.Image(1).float().subtract(hillShadow)
        # opening
        # hillShadow = hillShadow.multiply(hillShadow.focal_min(3).focal_max(6))
        # cleaning
        hillShadow = hillShadow.focal_mode(3)
        # smoothing
        hillShadow = hillShadow.convolve(ee.Kernel.gaussian(5, 3))
        # transparent
        hillShadow = hillShadow.multiply(0.7)
        hs = hs.subtract(hillShadow).rename('shadow')
    # Blend hillshade with the original value channel: weight * hs + (1 - weight) * value.
    intensity = hs.multiply(ee.Image.constant(weight)) \
        .add(hsv.select('value').multiply(ee.Image.constant(1)
                                          .subtract(weight)))
    sat = hsv.select('saturation').multiply(saturation)
    hue = hsv.select('hue')
    # Reassemble HSV, convert to RGB, then apply contrast/brightness adjustments.
    result = ee.Image.cat(hue, sat, intensity).hsvToRgb() \
        .multiply(ee.Image.constant(1).float().add(contrast)).add(ee.Image.constant(brightness).float())
    if customTerrain:
        mask = elevation.mask().focal_min(2)
        result = result.updateMask(mask)
    return result
def getIsolines(image, levels=None):
    """Build an ee.ImageCollection with one masked constant image per iso level.

    Each output image holds the level value, masked to pixels where the input
    crosses (or exactly equals) that level; the level is stored as a property.
    When ``levels`` is falsy, defaults to 0..1 in steps of 0.1.
    """
    def isoAt(image, level):
        # Pixels where the (median-smoothed) difference changes sign, i.e. the
        # surface crosses the level, plus pixels exactly at the level.
        crossing = image.subtract(level).focal_median(3).zeroCrossing()
        exact = image.eq(level)
        return ee.Image(level).float().mask(crossing.Or(exact)).set({'level': level})

    if not levels:
        levels = ee.List.sequence(0, 1, 0.1)
    levels = ee.List(levels)
    return ee.ImageCollection(levels.map(lambda l: isoAt(image, ee.Number(l))))
| StarcoderdataPython |
1810251 | <reponame>yuanchi2807/ray<filename>python/ray/data/impl/fast_repartition.py<gh_stars>1-10
import ray
from ray.data.block import BlockAccessor
from ray.data.impl.block_list import BlockList
from ray.data.impl.plan import ExecutionPlan
from ray.data.impl.progress_bar import ProgressBar
from ray.data.impl.remote_fn import cached_remote_fn
from ray.data.impl.shuffle import _shuffle_reduce
from ray.data.impl.stats import DatasetStats
def fast_repartition(blocks, num_blocks):
    """Repartition ``blocks`` into exactly ``num_blocks`` blocks.

    Strategy: wrap the block list in a Dataset, split it at (num_blocks - 1)
    equally spaced row indices, coalesce each split into one block via the
    shuffle-reduce task, and pad with empty blocks if fewer than
    ``num_blocks`` non-empty splits result.
    Returns a ``(BlockList, stats)`` tuple; the stats dict is currently empty.
    """
    from ray.data.dataset import Dataset

    wrapped_ds = Dataset(
        ExecutionPlan(blocks, DatasetStats(stages={}, parent=None)), 0, lazy=False
    )
    # Compute the (n-1) indices needed for an equal split of the data.
    count = wrapped_ds.count()
    # Remember the block format now; needed later to build matching empty blocks.
    dataset_format = wrapped_ds._dataset_format()
    indices = []
    cur_idx = 0
    for _ in range(num_blocks - 1):
        cur_idx += count / num_blocks
        indices.append(int(cur_idx))
    assert len(indices) < num_blocks, (indices, num_blocks)
    if indices:
        splits = wrapped_ds.split_at_indices(indices)
    else:
        splits = [wrapped_ds]
    # TODO(ekl) include stats for the split tasks. We may also want to
    # consider combining the split and coalesce tasks as an optimization.

    # Coalesce each split into a single block.
    reduce_task = cached_remote_fn(_shuffle_reduce).options(num_returns=2)
    reduce_bar = ProgressBar("Repartition", position=0, total=len(splits))
    reduce_out = [
        reduce_task.remote(*s.get_internal_block_refs())
        for s in splits
        if s.num_blocks() > 0
    ]
    # Early-release memory.
    del splits, blocks, wrapped_ds

    new_blocks, new_metadata = zip(*reduce_out)
    new_blocks, new_metadata = list(new_blocks), list(new_metadata)
    # Block until all reduce tasks have produced their metadata.
    new_metadata = reduce_bar.fetch_until_complete(new_metadata)
    reduce_bar.close()

    # Handle empty blocks.
    if len(new_blocks) < num_blocks:
        from ray.data.impl.arrow_block import ArrowBlockBuilder
        from ray.data.impl.pandas_block import PandasBlockBuilder
        from ray.data.impl.simple_block import SimpleBlockBuilder

        num_empties = num_blocks - len(new_blocks)
        # Build empty blocks in the same format as the source dataset.
        if dataset_format == "arrow":
            builder = ArrowBlockBuilder()
        elif dataset_format == "pandas":
            builder = PandasBlockBuilder()
        else:
            builder = SimpleBlockBuilder()
        empty_block = builder.build()
        empty_meta = BlockAccessor.for_block(empty_block).get_metadata(
            input_files=None, exec_stats=None
        )  # No stats for empty block.
        empty_blocks, empty_metadata = zip(
            *[(ray.put(empty_block), empty_meta) for _ in range(num_empties)]
        )
        new_blocks += empty_blocks
        new_metadata += empty_metadata

    return BlockList(new_blocks, new_metadata), {}
| StarcoderdataPython |
17347 | <reponame>coblo/smartlicense<filename>smartlicense/settings/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Django settings for smartlicense project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
from os.path import dirname, abspath, join
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
SCRATCH_DIR = join(BASE_DIR, '.scratch')
# NOTE(review): "SCRACTH" looks like a typo for "SCRATCH"; other modules may
# import this name, so confirm all usages before renaming.
SCRACTH_DB = join(SCRATCH_DIR, 'scratch.sqlite3')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ADMINS = [('admin', '<EMAIL>')]
# NOTE(review): '*' accepts any Host header -- acceptable for a demo only.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'martor',
    'suit',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_markup',
    'django_object_actions',
    'smartlicense.apps.SmartLicenseConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'smartlicense.urls'
# Uploaded media land in the scratch directory next to the sqlite db.
MEDIA_ROOT = SCRATCH_DIR
MEDIA_URL = '/media/'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [join(BASE_DIR, 'smartlicense', 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'smartlicense.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': SCRACTH_DB,
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Martor (markdown editor) feature switches; values are lowercase strings
# because the template tags compare against 'true'/'false'.
MARTOR_ENABLE_CONFIGS = {
    'imgur': 'false',  # to enable/disable imgur/custom uploader.
    'mention': 'false',  # to enable/disable mention
    'jquery': 'true',
    # to include/revoke jquery (require for admin default django)
}
# Custom project settings
# NOTE(review): hard-coded node credentials below are placeholders; real
# deployments should override them via smartlicense/settings/config.py.
NODE_IP = '127.0.0.1'
NODE_PORT = '9718'
NODE_USER = 'testuser'
NODE_PWD = '<PASSWORD>'
STREAM_SMART_LICENSE = 'smart-license'
STREAM_SMART_LICENSE_ATTESTATION = 'smart-license'
STREAM_ISCC = 'iscc'
# django-suit admin skin configuration (menu layout, list sizes).
SUIT_CONFIG = {
    'ADMIN_NAME': 'Smart License Demo',
    'CONFIRM_UNSAVED_CHANGES': False,
    'MENU_OPEN_FIRST_CHILD': True,
    'SEARCH_URL': 'admin:smartlicense_mediacontent_changelist',
    'LIST_PER_PAGE': 18,
    'MENU': (
        {'label': 'Smart Licenses', 'models': (
            {'model': 'smartlicense.mediacontent'},
            {'model': 'smartlicense.smartlicense'},
        )},
        {'label': 'Transactions', 'models': (
            {'model': 'smartlicense.attestation'},
            {'model': 'smartlicense.tokentransaction'},
        )},
        {'label': 'Configuration', 'models': (
            {'model': 'smartlicense.template'},
            {'model': 'smartlicense.rightsmodule'},
            {'model': 'smartlicense.activationmode'},
        )}
    )
}
# Make sure deployment overrides settings.
# Fixes vs. original: catch only ImportError (a broad ``except Exception``
# hid genuine errors inside config.py) and exit non-zero so callers see the
# missing configuration as a failure instead of a clean exit.
try:
    from smartlicense.settings.config import *
except ImportError:
    import sys
    print(
        'No custom configuration found. Create a smartlicense/settings/config.py')
    sys.exit(1)
| StarcoderdataPython |
class Json_Data():
    """Builds JSON-formatted strings describing search results."""

    # ACCEPTS A STRING
    @staticmethod
    def search_results_json(item, function, result):
        """Serialize a search result into a JSON array string.

        Fixes vs. original: declared as a real ``@staticmethod`` (the method
        never used ``self``, so calling it on an instance silently bound the
        instance to ``item``), and trailing-comma removal via index arithmetic
        replaced with a ``join``.

        args:
            item: for "find", a string of entries separated by ", \n";
                for "search", a single entry string.
            function: either "find" or "search".
            result: arbitrary value stored (stringified) under "result".

        Returns the assembled JSON string. For an unknown ``function`` the
        (original) behavior of returning the dangling prefix is preserved.
        """
        json_data = "[{\"function\": \"" + function + "\"}, {\"result\": \"" + str(result) + "\"}, "
        if function == "find":
            entries = ["{\"item\": \"" + part + "\"}" for part in item.split(", \n")]
            json_data += ", ".join(entries)
            json_data += " ]"
        elif function == "search":
            json_data += "{\"item\": \"" + item + "\"}]"
        return json_data
| StarcoderdataPython |
6492967 | <filename>lib/Engine.py
from re import search
from requests import get
from github import Github
from time import time,sleep
from pybase64 import b64decode
from bs4 import BeautifulSoup
from termcolor import colored
from lib.Functions import shannon_entropy
from lib.Globals import hexchar, base64char, search_regex
from lib.Globals import github_access_token, Headers, Color
class Engine:
    """Searches GitHub repositories, code and commits for secret-looking strings.

    Findings are flagged either by the regexes in ``search_regex`` or by
    Shannon-entropy thresholds on individual tokens (base64 / hex alphabets).

    Fixes vs. original:
    - ``search_orchestrator``: the commit branch read ``code_search.totalCount``
      and set the ``'code'`` flag, so commit search never ran.
    - ``search_commit``: called the nonexistent ``search_commit`` (PyGithub's
      method is ``search_commits``), regex-matched bs4 Tag objects instead of
      their text, misspelled ``line_searched`` (NameError), appended entropy
      hits to the undefined ``code_temp_list``, and had a misplaced paren that
      passed ``color='cyan'`` to ``str.format`` instead of ``colored``.
    """

    def __init__(self):
        # Authenticated PyGithub client shared by all search endpoints.
        self.conn = Github(github_access_token)
        self.query = []
        # Which search endpoints produced results for the current query.
        self.orchestra = {'repo': False, 'code': False, 'commit': False}

    def git_rate_limit(self):
        """Block until the GitHub API rate limit allows further requests."""
        T = time()
        left_to_try = self.conn.rate_limiting[0]
        if left_to_try <= 2:
            # Sleep until the documented reset time plus a small safety margin.
            death_sleep = int(int(self.conn.rate_limiting_resettime) - int(T) + int(3))
            if death_sleep < 2:
                death_sleep = 7
            sleep(death_sleep)
        else:
            return
        sleep(0.2)

    def return_query(self, input_wordlist: list, argv) -> list:
        """Build one GitHub dork query per wordlist entry.

        The scope prefix comes from argv: ``repo:`` for --repository, ``user:``
        for --user, or the (optionally truncated) domain for --domain.
        """
        print(f"{Color.information} Generating payloads")
        if argv.repository:
            dork_type = "repo:" + argv.repository
        elif argv.user:
            dork_type = "user:" + argv.user
        elif argv.domain:
            dork_type = argv.domain.split('.')[0] if not argv.full_domain else argv.domain
        else:
            assert False, "Error occured"
        for line in input_wordlist:
            print(f"{Color.information} Generating payload for: {colored(line, color='cyan')}")
            git_query = dork_type + " " + line + " " + "in:readme,description,name"
            self.query.append(git_query.lstrip(' '))
        return self.query

    def search_orchestrator(self, query: str) -> dict:
        """Probe the three search endpoints and record which are usable."""
        try:
            repo_search = self.conn.search_repositories(query)
            repo_count = repo_search.totalCount
            self.orchestra['repo'] = True
            self.git_rate_limit()
        except Exception as E:
            print("{} ERROR: {}".format(Color.bad, E))
        try:
            code_search = self.conn.search_code(query)
            code_count = code_search.totalCount
            self.orchestra['code'] = True
            self.git_rate_limit()
        except Exception as E:
            print("{} ERROR: {}".format(Color.bad, E))
        try:
            commit_search = self.conn.search_commits(query)
            # Fixed: track the commit search itself, not the code search.
            commit_count = commit_search.totalCount
            self.orchestra['commit'] = True
            self.git_rate_limit()
        except Exception as E:
            print("{} ERROR: {}".format(Color.bad, E))
        return self.orchestra

    def search_repo(self, query: str) -> list:
        """Scan every file of every repository matching *query* for secrets."""
        repo_temp_list = []
        if self.orchestra['repo']:
            print(f"{Color.information} Searching for data in Repositories!")
            repo_search = self.conn.search_repositories(query)
            self.git_rate_limit()
            for unit_repo in repo_search:
                repo = self.conn.get_repo(unit_repo.full_name)
                temp_x = "Fetching data from this repo: {}\n".format(colored(repo.full_name, color='cyan'))
                repo_temp_list.append(temp_x.lstrip(' '))
                print(Color.good + " " + temp_x.rstrip('\n'))
                # Iterative walk of the repository tree.
                repo_list = repo.get_contents("")
                while repo_list:
                    repo_file = repo_list.pop(0)
                    if repo_file.type == "dir":
                        repo_list.extend(repo.get_contents(repo_file.path))
                    else:
                        try:
                            repo_file_lines = b64decode(repo_file.content).decode('UTF-8').split('\n')
                        except Exception as E:
                            # Binary or oversized content; skip this file.
                            print(E, E.__class__)
                            continue
                        temp_x = "File: {}\n".format(colored(repo_file, color='cyan'))
                        repo_temp_list.append(temp_x.lstrip(' '))
                        line_searched = False
                        for repoline in repo_file_lines:
                            for regex in search_regex:
                                if search(regex, repoline):
                                    temp_x = f"{colored(repoline, color='red')} "
                                    temp_x += "<--- File from repo regex \n".rjust(150-len(temp_x))
                                    repo_temp_list.append(temp_x.lstrip(' '))
                                    line_searched = True
                            if line_searched:
                                # A regex already hit: skip the entropy pass for this line.
                                line_searched = False
                                continue
                            for repoword in repoline.split(' '):
                                temp_x = f"{colored(repoword, color='red')} "
                                temp_x += "<--- From Repo entropy! \n".rjust(150-len(temp_x))
                                if shannon_entropy(repoword, base64char) >= float(4.5):
                                    repo_temp_list.append(temp_x.lstrip(' '))
                                if shannon_entropy(repoword, hexchar) >= float(4.1):
                                    repo_temp_list.append(temp_x.lstrip(' '))
            self.orchestra['repo'] = False
        return repo_temp_list

    def search_code(self, query: str) -> list:
        """Scan code-search hits for secret-looking lines and tokens."""
        code_temp_list = []
        if self.orchestra['code']:
            print(f"{Color.information} Searching for data in Codes")
            code_search = self.conn.search_code(query)
            for unit_code in code_search:
                temp_x = "Name:{}, Repo:{}, URL: {}\n".format(colored(unit_code.name, color='cyan'), colored(unit_code.repository.full_name, color='cyan'), colored(unit_code.download_url, color='cyan'))
                print("{} Searching for code in {} from repository {}".format(Color.good, colored(unit_code.name, color='cyan'), colored(unit_code.repository.full_name, color='cyan')))
                code_temp_list.append(temp_x.lstrip(' '))
                self.git_rate_limit()
                code = b64decode(unit_code.content).decode('UTF-8').split('\n')
                line_searched = False
                for code_line in code:
                    for regex in search_regex:
                        if search(regex, code_line):
                            temp_x = f"{colored(code_line, color='red')} "
                            temp_x += " <--- File from code regex \n".rjust(150-len(temp_x))
                            code_temp_list.append(temp_x.lstrip(' '))
                            line_searched = True
                    if line_searched:
                        line_searched = False
                        continue
                    for code_word in code_line.split(' '):
                        temp_x = f"{colored(code_word, color='red')} "
                        temp_x += "<--- From code entropy! \n".rjust(150-len(temp_x))
                        if shannon_entropy(code_word, base64char) >= float(4.6):
                            code_temp_list.append(temp_x.lstrip(' '))
                        if shannon_entropy(code_word, hexchar) >= float(4):
                            code_temp_list.append(temp_x.lstrip(' '))
            self.orchestra['code'] = False
        return code_temp_list

    def search_commit(self, query: str) -> list:
        """Scan the HTML pages of commits matching *query* for secrets."""
        commit_temp_list = []
        if self.orchestra['commit']:
            print(f"{Color.information} Searching for data in Commits")
            commit_search = self.conn.search_commits(query)
            for unit_commit in commit_search:
                commit_url = unit_commit.html_url
                temp_x = "Repo:{} Commit:{}\n".format(colored("/".join(commit_url.split('/')[3:5]), color='cyan'), colored(commit_url.split('/')[6:][-1], color='cyan'))
                print(Color.good + " " + temp_x.rstrip('\n'))
                commit_temp_list.append(temp_x.lstrip(' '))
                self.git_rate_limit()
                commit_response = get(commit_url)
                commit_soup = BeautifulSoup(commit_response.content, 'html.parser')
                commit_data = commit_soup.find_all("span")
                line_searched = False
                for commit_span in commit_data:
                    # bs4 Tags are not strings; extract their text before matching.
                    commit_line = commit_span.get_text()
                    for regex in search_regex:
                        if search(regex, commit_line):
                            temp_x = f"{colored(commit_line, color='red')}"
                            temp_x += "<--- File from commit regex \n".rjust(148-len(temp_x))
                            commit_temp_list.append(temp_x.lstrip(' '))
                            line_searched = True
                    if line_searched:
                        line_searched = False
                        continue
                    for commit_word in commit_line.split(' '):
                        temp_x = f"{colored(commit_word, color='red')} "
                        temp_x += "<--- From commit entropy! \n".rjust(148-len(temp_x))
                        if shannon_entropy(commit_word, base64char) >= float(4.6):
                            commit_temp_list.append(temp_x.lstrip(' '))
                        if shannon_entropy(commit_word, hexchar) >= float(4.1):
                            commit_temp_list.append(temp_x.lstrip(' '))
            self.orchestra['commit'] = False
        return commit_temp_list
12824241 | # 装饰器作用:对已有函数进行额外功能扩展,本质上是一个闭包函数,也是一个函数嵌套
# When decorators run: the decoration itself happens at module load time.
# Decorator properties:
#   1. the decorated function's source code is not modified
#   2. the call site stays unchanged
#   3. extra behaviour is layered on top of the existing function
def decorator(func):
    """A closure taking exactly one function argument, i.e. a decorator."""
    def wrapper():
        # Both prints run each time the wrapped function is invoked.
        print("装饰器已经执行了")
        print("添加登录验证")
        func()
    return wrapper


@decorator  # equivalent to: comment = decorator(comment), so comment is now wrapper
def comment():
    print("发表评论")


# Desired behaviour of comment():
#   1. perform the login check
#   2. post the comment
#   3. keep the original call syntax
# Calling the decorated function exercises all three.
comment()
| StarcoderdataPython |
11605 | <reponame>BeltetonJosue96/Ejercicio3Python
class Ciclo:
    """Interactive console form for registering academic cycles (semester/year pairs)."""

    def __init__(self):
        # NOTE(review): these attributes are initialised to empty tuples but
        # never read again -- nuevoCiclo() works entirely with local variables.
        self.cicloNew = ()
        self.respu = ()
        self.a = ()
        self.b = ()
        self.c = ()

    def nuevoCiclo(self):
        """Prompt the user for (semester, year) pairs and echo them back.

        Loops while the user answers "S"/"s"; any other answer (including the
        "F" suggested by the prompt) stops input. Collected pairs are printed
        and discarded -- the method always returns None.
        """
        cicloNew = []
        print(" ")
        print("Formulario de ingreso de ciclos")
        print("-----------------------------------")
        respu = input("¿Quiere resgistrar un ciclo? (S/F): ")
        while respu == "S" or respu == "s":
            # NOTE(review): int(input()) raises ValueError on non-numeric
            # input; there is no validation of the 1/2 semester range either.
            print("Ingrese el numero de semestre (1 o 2): ")
            a = int(input())
            print("Ingrese año: ")
            b = int(input())
            cicloNew.append((a, b))
            respu = input("¿Quiere resgistrar otro ciclo? (S/F): ")
        print(" ")
        print("Datos guardados")
        print("-----------------------------------")
        # Echo every stored (semester, year) pair.
        for x in range(len(cicloNew)):
            print("[Numero de semestre: ", cicloNew[x][0], "] [año: ", cicloNew[x][1],"]")
        print(" ")
        print(" ")
        print(" ")
        return None
Ciclo().nuevoCiclo() | StarcoderdataPython |
348486 | <reponame>tirmisula/Hybrid-NN-Movie-Recommendation-System<gh_stars>0
import os
# Point PySpark workers at a specific Homebrew Python interpreter.
# NOTE(review): the second assignment below runs unconditionally and
# overwrites the first, so the try/except is effectively dead code.
try:
    os.environ["PYSPARK_PYTHON"]="/usr/local/Cellar/python3/3.6.3/bin//python3"
except:
    pass
os.environ["PYSPARK_PYTHON"]="/usr/local/Cellar/python3/3.7.3/bin/python3"
import pyspark
import random
import pandas as pd
import math
import spark_split
import numpy as np
from pyspark.mllib.recommendation import ALS
from pyspark.mllib.recommendation import MatrixFactorizationModel
def dcg_at_k(r, k=10, method=0):
    """Discounted cumulative gain of relevance scores ``r`` truncated at rank ``k``.

    method 0: first result gets full weight, the rest are log2-discounted.
    method 1: every result (including the first) is log2-discounted.
    Returns 0 for an empty input; raises ValueError for other methods.

    Fix vs. original: ``np.asfarray`` was removed in NumPy 2.0; use
    ``np.asarray(..., dtype=float)`` (identical semantics) instead.
    """
    r = np.asarray(r, dtype=float)[:k]
    if r.size:
        if method == 0:
            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
        elif method == 1:
            return np.sum(r / np.log2(np.arange(2, r.size + 2)))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0
def ndcg_at_k(r, k=10, method=0):
    """Normalized DCG at ``k``: DCG of ``r`` divided by the ideal-ordering DCG.

    Returns 0 when the ideal DCG is zero (all-zero or empty relevance).
    """
    ideal = dcg_at_k(sorted(r, reverse=True), k, method)
    if not ideal:
        return 0
    return dcg_at_k(r, k, method) / ideal
# Local Spark context for training/evaluation; the driver gets most of the
# memory because collect()/take() pull results back to it.
conf = pyspark.SparkConf().setAppName("App")
#############High memory cost IF dataset is large################
conf = (conf.setMaster('local[*]')
        .set('spark.executor.memory', '4G')
        .set('spark.driver.memory', '45G')
        .set('spark.driver.maxResultSize', '10G'))
########################################################
sc = pyspark.SparkContext(conf=conf)
def obsolete():
    """Load pre-split train/test CSVs into RDDs of (user, movie, rating).

    Dead code kept for reference (the name says it all): results are bound to
    locals only and never returned, so calling this has no lasting effect.
    """
    test_large_ratings_file="testset.csv"
    test_large_ratings_raw_data = sc.textFile(test_large_ratings_file)
    test_large_ratings_raw_data_header = test_large_ratings_raw_data.take(1)[0]
    # Drop the CSV header, then parse each row into typed tuples.
    test_large_ratings_data = test_large_ratings_raw_data.filter(lambda line: line!=test_large_ratings_raw_data_header)\
        .map(lambda line: line.split(",")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache()
    train_large_ratings_file="trainset.csv"
    train_large_ratings_raw_data = sc.textFile(train_large_ratings_file)
    train_large_ratings_raw_data_header = train_large_ratings_raw_data.take(1)[0]
    train_large_ratings_data = train_large_ratings_raw_data.filter(lambda line: line!=train_large_ratings_raw_data_header)\
        .map(lambda line: line.split(",")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache()
def dat2csv():
    """Convert the MovieLens-1M ``::``-separated .dat files to CSV.

    Reads ml-1m/ratings.dat and ml-1m/movies.dat, writes ml-1m/ratings.csv
    (UserID, MovieID, ratings -- timestamps dropped) and ml-1m/movies.csv.
    """
    rating_cols = ['UserID', 'MovieID', 'ratings', 'timestamps']
    ratings_df = pd.read_table('ml-1m/ratings.dat', sep='::',
                               header=None, names=rating_cols, engine='python')
    # Keep only the columns the recommender needs.
    ratings_df = ratings_df.filter(regex='UserID|MovieID|ratings')
    ratings_df.to_csv('ml-1m/ratings.csv', index=False, sep=',')

    movie_cols = ['movieId', 'title', 'genres']
    movies_df = pd.read_table('ml-1m/movies.dat', sep='::',
                              header=None, names=movie_cols, engine='python')
    movies_df.to_csv('ml-1m/movies.csv', index=False, sep=',')
# Load the full ratings CSV into an RDD of (userID, movieID, rating) tuples,
# dropping the header row; cached because it is reused by several functions.
large_ratings_file="ml-1m/ratings.csv"
large_ratings_raw_data = sc.textFile(large_ratings_file)
large_ratings_raw_data_header = large_ratings_raw_data.take(1)[0]
large_ratings_data = large_ratings_raw_data.filter(lambda line: line!=large_ratings_raw_data_header)\
    .map(lambda line: line.split(",")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache()
# Experiments kept for reference (grouping / DataFrame / partitioning attempts):
# large_ratings_data = large_ratings_data.map(lambda x: (x[0],(x[1],x[2])))
# ll = sorted(large_ratings_data.groupByKey().mapValues(list).take(10))
# training_RDD, test_RDD = train_large_ratings_data, test_large_ratings_data
# df = large_ratings_data.toDF(["userID", "movieID","rating"])
# df.write.partitionBy("userID")
# llpp = large_ratings_data.partitionBy(283228, lambda k: k[0])
def nonStratifiedSplit():
    """Randomly split the full ratings RDD 80/20 (not stratified per user).

    Suitable when the dataset is large enough that every user is likely
    represented on both sides of the split.
    """
    train_part, test_part = large_ratings_data.randomSplit([8, 2], seed=0)
    return train_part, test_part
def stratifiedSplit():
    """Split ratings 80/20 so each user's data appears proportionally in both sets.

    Builds a DataFrame view of the global ``large_ratings_data`` RDD, applies
    spark_split.spark_stratified_split, and returns (training_RDD, test_RDD)
    as RDDs of plain (UserID, MovieID, rating) 3-tuples.

    Fix vs. original: removed the dead ``if True:`` guard around the
    DataFrame-to-RDD conversion.
    """
    spark = pyspark.sql.SparkSession.builder.getOrCreate()
    data_DF = spark.createDataFrame(large_ratings_data).toDF('UserID', 'MovieID', 'ratings')
    DFL = spark_split.spark_stratified_split(
        data_DF,
        ratio=0.8,
        min_rating=1,
        filter_by="user",
        col_user='UserID',
        col_item='MovieID',
        col_rating='ratings',
        seed=42,
    )
    training_DF, test_DF = DFL[0], DFL[1]
    # Convert back to RDDs of plain 3-tuples for the ALS training code.
    training_RDD = training_DF.rdd.map(tuple).map(lambda x: (x[0], x[1], x[2]))
    test_RDD = test_DF.rdd.map(tuple).map(lambda x: (x[0], x[1], x[2]))
    return training_RDD, test_RDD
########################################################
# Materialize the global train/test split used by everything below.
training_RDD, test_RDD = stratifiedSplit()
########################################################
# Debug prints kept for reference:
# print('training_RDD\n')
# print(training_RDD.take(10))
# print('test_RDD\n')
# print(test_RDD.take(10))
# print('training_RDD_\n')
# print(training_RDD_.take(10))
# print('test_RDD_\n')
# print(test_RDD_.take(10))
def ALS_fit():
    '''
    Alternating Least Squares: train one model on the training split and one
    on the full dataset, saving each to disk for later loading.

    NOTE(review): the training-split model uses rank 10 while the full-set
    model uses rank 8, and the comment below mentions num_iter=75 although
    both calls use iterations=10 -- confirm which hyperparameters are intended.
    '''
    # num_factors=10 num_iter=75 reg=0.05 learn_rate=0.005
    complete_model = ALS.train(training_RDD, 10, seed=5,
                    iterations=10, lambda_=0.1)
    # # Save and load model
    complete_model.save(sc, "trainsetCollaborativeFilterSmall_")
    complete_model = ALS.train(large_ratings_data, 8, seed=5,
                    iterations=10, lambda_=0.1)
    # # Save and load model
    complete_model.save(sc, "fullsetCollaborativeFilterSmall_")
# Load the previously saved ALS models: the full-set model for serving new
# recommendations, the training-split model for evaluation against test_RDD.
fullset_model = MatrixFactorizationModel.load(sc, "fullsetCollaborativeFilterSmall_")
trainset_model = MatrixFactorizationModel.load(sc, "trainsetCollaborativeFilterSmall_")
def eval_rmse_mae():
    '''
    Print RMSE and MAE of the training-split model on the testing set.
    '''
    # Predict ratings for every (user, movie) pair in the test set.
    test_for_predict_RDD = test_RDD.map(lambda x: (x[0], x[1]))
    predictions = trainset_model.predictAll(test_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
    # Join ground-truth ratings with predictions keyed by (user, movie).
    rates_and_preds = test_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
    print('predictions\n')
    print(predictions.take(10))
    print('rates_and_preds\n')
    print(rates_and_preds.take(10))
    # r[1] is (actual, predicted) after the join.
    rmse_error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
    mae_error = rates_and_preds.map(lambda r: abs(r[1][0] - r[1][1])).mean()
    print ('For testing data the RMSE is %s' % (rmse_error))
    print ('For testing data the MAE is %s' % (mae_error))
# Load the movies CSV into (movieID, title, genres) tuples, dropping the header.
complete_movies_file="ml-1m/movies.csv"
complete_movies_raw_data = sc.textFile(complete_movies_file)
complete_movies_raw_data_header = complete_movies_raw_data.take(1)[0]
complete_movies_data = complete_movies_raw_data.filter(lambda line: line!=complete_movies_raw_data_header)\
    .map(lambda line: line.split(",")).map(lambda tokens: (int(tokens[0]),tokens[1],tokens[2])).cache()
# Lookup RDD of (movieID, title) used when rendering recommendations.
complete_movies_titles = complete_movies_data.map(lambda x: (int(x[0]),x[1]))
def get_counts_and_averages(ID_and_ratings_tuple):
    """Map (movieID, ratings) to (movieID, (rating_count, mean_rating)).

    ``ratings`` must be a sized iterable of numeric ratings.
    """
    movie_id = ID_and_ratings_tuple[0]
    ratings = ID_and_ratings_tuple[1]
    count = len(ratings)
    total = float(sum(ratings))
    return movie_id, (count, total / count)
# Group ratings per movie, then derive (count, mean) and a count-only RDD
# used to show how often each recommended movie was rated.
movie_ID_with_ratings_RDD = (large_ratings_data.map(lambda x: (x[1], x[2])).groupByKey())
movie_ID_with_avg_ratings_RDD = movie_ID_with_ratings_RDD.map(get_counts_and_averages)
movie_rating_counts_RDD = movie_ID_with_avg_ratings_RDD.map(lambda x: (x[0], x[1][0]))
def obsolete2():
    """Recommend unrated movies for a hard-coded user via predictAll.

    Dead code kept for reference (superseded by recmForUser).
    """
    new_user_ID = 1
    # new_user_ratings = large_ratings_data.take(16)
    new_user_ratings = large_ratings_data.filter(lambda x: x[0]==new_user_ID).collect()
    # NOTE(review): this is a lazy ``map`` object; using it with ``not in``
    # inside the Spark filter below consumes it after the first evaluation --
    # a list or set is presumably intended. Verify before relying on this.
    new_user_ratings_ids = map(lambda x: x[1], new_user_ratings)
    new_user_unrated_movies_RDD = (complete_movies_data.filter(lambda x: x[0] not in new_user_ratings_ids)\
        .map(lambda x: (new_user_ID, x[0])))
    print(new_user_unrated_movies_RDD.take(10))
    new_user_recommendations_RDD = fullset_model.predictAll(new_user_unrated_movies_RDD)
    print(new_user_recommendations_RDD.take(10))
    new_user_recommendations_rating_RDD = new_user_recommendations_RDD.map(lambda x: (x.product, x.rating))
    # Attach titles and rating counts keyed by movieID.
    new_user_recommendations_rating_title_and_count_RDD = \
        new_user_recommendations_rating_RDD.join(complete_movies_titles).join(movie_rating_counts_RDD)
    print(new_user_recommendations_rating_title_and_count_RDD.take(3))
    # Re-key by predicted rating so sortByKey yields the top recommendations.
    recm_RDD = new_user_recommendations_rating_title_and_count_RDD.map(lambda x: (x[1][0][0], (x[0], x[1][0][1], x[1][1])))
    print(recm_RDD.sortByKey(ascending=False).take(10))
    # (149988, ( (6.329273922211785, '<NAME>. (2009)') , 3) )
    # large_ratings_data.union
    # For PPT
    print('user=12,movies=100 rating: {} \n'.format(trainset_model.predict(12,100)))
def recmForUser(userID=1):
    '''
    Given a userID, print that user's top-10 recommended movies.
    Printed format: (rating, (movieID, movieTitle, times_rated)),
    sorted by predicted rating, highest first.
    '''
    # sc.parallelize(trainset_model.recommendProducts(12,10),10).map().join(complete_movies_data)
    # Wrap the model's top-10 list in an RDD so it can be joined with titles/counts.
    new_user_recommendations_rating_RDD = sc.parallelize(trainset_model.recommendProducts(user=userID,num=10))
    new_user_recommendations_rating_RDD = new_user_recommendations_rating_RDD.map(lambda x: (x.product, x.rating))
    new_user_recommendations_rating_title_and_count_RDD = \
        new_user_recommendations_rating_RDD.join(complete_movies_titles).join(movie_rating_counts_RDD)
    # Re-key by predicted rating so sortByKey yields best-first ordering.
    recm_RDD = new_user_recommendations_rating_title_and_count_RDD.map(lambda x: (x[1][0][0], (x[0], x[1][0][1], x[1][1])))
    print(recm_RDD.sortByKey(ascending=False).take(10))
# print(trainset_model.recommendProducts(user=1996,num=10))
# Accumulators mutated by itemRecmMeasure(): per-user precision/recall/F
# lists plus a running NDCG sum.
Precision = []
Recall = []
Fmeasure = []
ndcg = 0
# For fast test_RDD computing
# test_RDD = test_RDD.map(lambda x: (x[0], (x[1],x[2]))).groupByKey().cache()
def itemRecmMeasure():
    '''
    Evaluate top-10 recommendations for the first N users and print
    NDCG, Precision, Recall and F2 averages. Mutates the module-level
    Precision/Recall/Fmeasure lists and the ``ndcg`` accumulator.
    '''
    N = 100
    global ndcg
    # For fast test_RDD computing
    # Group each user's test ratings once so per-user lookups are cheap.
    test_RDD_ = test_RDD.map(lambda x: (x[0], (x[1],x[2]))).groupByKey().cache()
    for UserID in range(1,N,1) :
        print('process {}\n'.format(UserID))
        # Graded relevance per recommendation slot (2/1/0 by predicted rating).
        dcg = np.zeros(10)
        # For all users recommend, Compute precision recall F2 ...
        try:
            L10 = trainset_model.recommendProducts(user=UserID,num=10)
        except:
            # userID not in model, userID all in test
            continue
        userProductL = [x.product for x in L10]
        # count testing item in it
        # testing items for this user
        # testing_itemsL = test_RDD.filter(lambda x: x[0]==UserID).map(lambda x: x[1]).collect()
        testing_itemsL = list(map(lambda x: x[0],list(test_RDD_.filter(lambda x: x[0]==UserID).map(lambda x: x[1]).collect()[0])))
        # testing_itemsL = list(map(lambda x: x[0],list(test_RDD.lookup(UserID).map(lambda x: x[1]).collect()[0])))
        if len(testing_itemsL)==0:
            # userID all in train
            continue
        # testing_itemsL ratings threshold
        test_hit = set(userProductL).intersection(set(testing_itemsL))
        # for hit_movie in test_hit:
        i=0
        # Assign graded relevance to each recommended slot that hits the test set.
        for result in userProductL:
            if result in testing_itemsL:
                rate = trainset_model.predict(UserID,result)
                if rate >= 4:
                    dcg[i] = 2
                elif rate >=3:
                    dcg[i] = 1
                else:
                    dcg[i] = 0
            i += 1
        dcg_user = ndcg_at_k(dcg)
        print('dcg_user {} \n'.format(dcg_user))
        ndcg = ndcg + dcg_user
        # percentage testing_itemsL in userProductL
        recmedtestitem4userLen = len(set(userProductL).intersection(set(testing_itemsL)))
        Precision.append(recmedtestitem4userLen / 10)
        # print('Precision {} \n'.format(Precision))
        # calculate all testing items len for this user
        Recall.append(recmedtestitem4userLen / len(testing_itemsL))
        # compute F
        # Sentinel -1 marks users where F is undefined (both metrics zero);
        # these are filtered out of the final mean below.
        if Precision[-1] + Recall[-1] == 0:
            Fmeasure.append(-1)
            continue
        Fmeasure.append( (2 * Precision[-1] * Recall[-1]) / (Precision[-1] + Recall[-1]) )
    print('NDCG {} \n'.format(ndcg/N))
    print('Precision {} \n'.format(np.mean(Precision)))
    print('Recall {} \n'.format(np.mean(Recall)))
    print('F2 {} \n'.format(np.mean(list(filter(lambda x: x!=-1,Fmeasure))) ))
# Entry sequence: (re-)training is commented out; evaluate the saved models,
# print a sample user's recommendations, then run the ranking metrics.
# ALS_fit()
eval_rmse_mae()
recmForUser(userID=1996)
itemRecmMeasure() | StarcoderdataPython |
8193859 | <reponame>coproc/PolyPieces<gh_stars>0
#------------ CODE INIT ------------
# make sure imports from ../src work
import os, sys
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(FILE_DIR, os.pardir, 'src'))
# simulate console output of expressions
# ``_p_`` holds the "last expression value"; ``_p()`` prints and clears it,
# mimicking the interactive interpreter's echo of expression results.
_p_ = None
# print result of last expression
def _p():
    global _p_
    if _p_ is not None:
        print(_p_.__repr__())
        _p_ = None
# Demo script: polynomial arithmetic, then Irwin-Hall densities via repeated
# convolution of the uniform density. Bare ``_p()`` calls with no preceding
# ``_p_ = ...`` assignment are no-ops (residue of the console simulation).
from Polynomial import symbol
x = symbol()
poly = 3*x - 1
_p_= poly*poly
_p()
poly_3 = poly**3; print(poly_3)
_p_= poly_3(1)
_p()
from PolyPieces import PolyPieceFunc
# define density for uniform distribution over the interval [0,1]
uniformDensity = PolyPieceFunc((1, [0,1]))
# this is the Irwin-Hall distribution for n=1
print(uniformDensity)
_p()
_p()
# compute density of the sum of two uniformly distributed random variables (by convolution)
uniformDensitySum2 = uniformDensity.conv(uniformDensity)
# this is the Irwin-Hall distribution for n=2
print(uniformDensitySum2)
_p()
_p()
_p()
# compute density of the sum of two uniformly distributed random variables (by convolution)
uniformDensitySum2 = uniformDensity.conv(uniformDensity)
# and now the Irwin-Hall distributions for n=3,4
print(uniformDensitySum2.conv(uniformDensity))
_p()
_p()
_p()
_p()
print(uniformDensitySum2.conv(uniformDensity).conv(uniformDensity))
_p()
_p()
_p()
_p()
_p()
| StarcoderdataPython |
8070368 | <filename>train_procgen/test_select.py
"""
To run test: (default we do 50 batch rollouts)
$ python train_procgen/test_select.py --start_level 50 -id 0 --load_id 0 --use "randcrop"
$ python train_procgen/test_select.py --start_level 50 -id 0 --load_id 0 --use "cutout"
$
"""
import os
from os.path import join
import json
import numpy as np
import tensorflow as tf
from baselines.common.mpi_util import setup_mpi_gpus
from procgen import ProcgenEnv
from baselines.common.vec_env import (
VecExtractDictObs,
VecMonitor,
VecFrameStack,
VecNormalize
)
from baselines import logger
from mpi4py import MPI
import argparse
## random_ppo imports
import train_procgen
from train_procgen.random_ppo import safemean
from train_procgen.crop_ppo import RandCropCnnPolicy, sf01, constfn
from train_procgen.cutout_ppo import CutoutCnnPolicy
from train_procgen.cross_ppo import CrossCnnPolicy
from baselines.common.runners import AbstractEnvRunner
from collections import deque
class TestRunner(AbstractEnvRunner):
    """Rollout runner for evaluation: collects nsteps of experience per env
    and computes GAE(lambda) advantages. Unlike the training runners, no
    random-crop/cutout augmentation is applied to observations."""

    def __init__(self, *, env, model, nsteps, gamma, lam):
        super().__init__(env=env, model=model, nsteps=nsteps)
        # GAE hyperparameters: lam is the advantage-smoothing factor,
        # gamma the reward discount.
        self.lam = lam
        self.gamma = gamma
        ##self.obs = rand_crop(self.obs) NO CROPPING OR CUTOUT AT TEST TIME
    def run(self):
        """Run nsteps of the policy, returning flattened minibatch arrays
        (obs, returns, dones, actions, values, neglogpacs), the final policy
        states, and any completed-episode info dicts."""
        # Here, we init the lists that will contain the mb of experiences
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
        mb_states = self.states
        epinfos = []
        # For n in range number of steps
        for _ in range(self.nsteps):
            # Given observations, get action value and neglopacs
            # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init
            actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
            mb_obs.append(self.obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogpacs.append(neglogpacs)
            mb_dones.append(self.dones)
            # Take actions in env and look the results
            # Infos contains a ton of useful informations
            self.obs[:], rewards, self.dones, infos = self.env.step(actions)
            for info in infos:
                maybeepinfo = info.get('episode')
                if maybeepinfo: epinfos.append(maybeepinfo)
            mb_rewards.append(rewards)
        #batch of steps to batch of rollouts
        mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        mb_dones = np.asarray(mb_dones, dtype=np.bool)
        # Bootstrap value for the state after the last step.
        last_values = self.model.value(self.obs, self.states, self.dones)
        # discount/bootstrap off value fn
        mb_returns = np.zeros_like(mb_rewards)
        mb_advs = np.zeros_like(mb_rewards)
        lastgaelam = 0
        # GAE: walk backwards through time, zeroing the bootstrap across
        # episode boundaries via the nonterminal masks.
        for t in reversed(range(self.nsteps)):
            if t == self.nsteps - 1:
                nextnonterminal = 1.0 - self.dones
                nextvalues = last_values
            else:
                nextnonterminal = 1.0 - mb_dones[t+1]
                nextvalues = mb_values[t+1]
            delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
            mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
        mb_returns = mb_advs + mb_values
        # sf01 flattens the (nsteps, nenv, ...) axes into one batch axis.
        return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
            mb_states, epinfos)
def main():
    """Evaluate a saved PPO agent on held-out procgen levels.

    Parses CLI arguments, rebuilds the vectorized environment, restores the
    checkpoint selected by --use/--load_id, then runs `nrollouts` batches of
    test rollouts (no augmentation) and logs mean episode rewards.
    Returns the list of 10-episode running-mean rewards, one per rollout.
    """
    # Fixed evaluation hyper-parameters (mirrors the training configuration).
    num_envs = 64
    learning_rate = 5e-4
    ent_coef = .01
    gamma = .999
    lam = .95
    nsteps = 256
    nminibatches = 8
    ppo_epochs = 3
    clip_range = .2
    total_timesteps = 1_000_000 ## now this counts steps in testing runs
    use_vf_clipping = True

    ## From random_ppo.py
    max_grad_norm = 0.5
    vf_coef=0.5
    L2_WEIGHT = 10e-4
    FM_COEFF = 0.002
    REAL_THRES = 0.1

    parser = argparse.ArgumentParser(description='Process procgen testing arguments.')
    parser.add_argument('--env_name', type=str, default='fruitbot')
    parser.add_argument('--distribution_mode', type=str, default='easy', choices=["easy", "hard", "exploration", "memory", "extreme"])
    parser.add_argument('--num_levels', type=int, default=1000)
    ## default starting_level set to 50 to test on unseen levels!
    parser.add_argument('--start_level', type=int, default=50)
    parser.add_argument('--run_id', '-id', type=int, default=0)
    parser.add_argument('--load_id', type=int, default=0)
    parser.add_argument('--nrollouts', '-nroll', type=int, default=50)
    parser.add_argument('--use', type=str, default="randcrop")

    args = parser.parse_args()
    args.total_timesteps = total_timesteps
    # If a rollout count is given, derive the total step budget from it.
    if args.nrollouts:
        total_timesteps = int(args.nrollouts * num_envs * nsteps)
    run_ID = 'run_'+str(args.run_id).zfill(2)
    run_ID += '_load{}'.format(args.load_id)

    # Select the log dir, checkpoint path, Model/Runner classes and policy
    # according to the augmentation the agent was trained with.
    print(args.use)
    if args.use == "randcrop":
        LOG_DIR = 'log/randcrop/test'
        load_model = "log/randcrop/saved_randcrop_v{}.tar".format(args.load_id)
        from train_procgen.crop_ppo import Model, Runner
        policy = RandCropCnnPolicy
    if args.use == "cutout":
        LOG_DIR = 'log/cutout/test'
        load_model = "log/cutout/saved_cutout_v{}.tar".format(args.load_id)
        from train_procgen.cutout_ppo import Model, Runner
        policy = CutoutCnnPolicy
    if args.use == "cross":
        LOG_DIR = 'log/cross/test'
        load_model = "log/cross/saved_cross_v{}.tar".format(args.load_id)
        from train_procgen.cross_ppo import Model, Runner
        policy = CrossCnnPolicy
    if args.use == "randcuts":
        LOG_DIR = 'log/randcuts/test'
        load_model = "log/randcuts/saved_randcuts_v{}.tar".format(args.load_id)
        from train_procgen.randcuts_ppo import Model, Runner
        policy = CrossCnnPolicy

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    mpi_rank_weight = 0
    num_levels = args.num_levels

    log_comm = comm.Split(0, 0)
    format_strs = ['csv', 'stdout', 'log'] if log_comm.Get_rank() == 0 else []

    # Persist the run arguments next to the logs for reproducibility.
    logpath = join(LOG_DIR, run_ID)
    if not os.path.exists(logpath):
        os.system("mkdir -p %s" % logpath)

    fpath = join(logpath, 'args_{}.json'.format(run_ID))
    with open(fpath, 'w') as fh:
        json.dump(vars(args), fh, indent=4, sort_keys=True)
    print("\nSaved args at:\n\t{}\n".format(fpath))

    logger.configure(dir=logpath, format_strs=format_strs)

    logger.info("creating environment")
    venv = ProcgenEnv(num_envs=num_envs, env_name=args.env_name,
        num_levels=num_levels, start_level=args.start_level, distribution_mode=args.distribution_mode)
    venv = VecExtractDictObs(venv, "rgb")

    venv = VecMonitor(
        venv=venv, filename=None, keep_buf=100,
    )
    venv = VecNormalize(venv=venv, ob=False)

    logger.info("creating tf session")
    setup_mpi_gpus()
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True #pylint: disable=E1101
    sess = tf.compat.v1.Session(config=config)
    sess.__enter__()

    logger.info("Testing")
    ## Modified based on random_ppo.learn
    env = venv
    nenvs = env.num_envs
    ob_space = env.observation_space
    ac_space = env.action_space
    nbatch = nenvs * nsteps
    nbatch_train = nbatch // nminibatches
    nrollouts = total_timesteps // nbatch

    model = Model(sess=sess, policy=policy, ob_space=ob_space, ac_space=ac_space,
        nbatch_act=nenvs, nbatch_train=nbatch_train,
        nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
        max_grad_norm=max_grad_norm)
    model.load(load_model)
    # NOTE(review): message has a typo ("pramas") and passes load_model as an
    # extra %-format argument with no placeholder — confirm intended output.
    logger.info("Model pramas loaded from saved model: ", load_model)
    runner = TestRunner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)

    epinfobuf10 = deque(maxlen=10)
    epinfobuf100 = deque(maxlen=100)
    # tfirststart = time.time() ## Not doing timing yet
    # active_ep_buf = epinfobuf100

    mean_rewards = []
    datapoints = []
    # Main evaluation loop: one rollout batch per iteration, dump stats each time.
    for rollout in range(1, nrollouts+1):
        logger.info('collecting rollouts {}...'.format(rollout))
        obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run()
        epinfobuf10.extend(epinfos)
        epinfobuf100.extend(epinfos)

        rew_mean_10 = safemean([epinfo['r'] for epinfo in epinfobuf10])
        rew_mean_100 = safemean([epinfo['r'] for epinfo in epinfobuf100])
        ep_len_mean_10 = np.nanmean([epinfo['l'] for epinfo in epinfobuf10])
        ep_len_mean_100 = np.nanmean([epinfo['l'] for epinfo in epinfobuf100])

        logger.info('\n----', rollout)
        mean_rewards.append(rew_mean_10)
        logger.logkv('eprew10', rew_mean_10)
        logger.logkv('eprew100', rew_mean_100)
        logger.logkv('eplenmean10', ep_len_mean_10)
        logger.logkv('eplenmean100', ep_len_mean_100)
        logger.logkv("misc/total_timesteps", rollout*nbatch)

        logger.info('----\n')
        logger.dumpkvs()
    env.close()

    print("Rewards history: ", mean_rewards)
    return mean_rewards

if __name__ == '__main__':
    main()
| StarcoderdataPython |
5006186 | <reponame>rsmonteiro2021/execicios_python<gh_stars>1-10
# Read the first million digits of pi from a text file and report whether the
# user's birthday (mmddyy) appears in the digit string.
filename = 'pi_million_digits.txt'

with open(filename) as file_object:
    lines = file_object.readlines()

# Join all lines into one continuous digit string, stripping newlines/spaces.
pi_string_v2 = ''
for line in lines:
    pi_string_v2 += line.strip()

print(pi_string_v2[:52] + '...')
print(len(pi_string_v2))

birthday = input('Enter your birthday, in the form mmddyy: ')
if birthday in pi_string_v2:
    print('Your birthday appears in the first million digits of pi!')
else:
    # Fixed grammar in the user-facing message ("does not appears" -> "does not appear").
    print('Your birthday does not appear in the first million digits of pi!')
| StarcoderdataPython |
#!/usr/bin/env python
# CLI helper: report a sample-dataset transfer status back to a Galaxy-style
# server via common.update.
# Usage: key url sample_dataset_ids new_state [error msg]
from __future__ import print_function

import os
import sys

from common import update

try:
    # Build the update payload from positional CLI arguments.
    data = {}
    data['update_type'] = 'sample_dataset_transfer_status'
    data['sample_dataset_ids'] = sys.argv[3].split(',')
    data['new_status'] = sys.argv[4]
except IndexError:
    print('usage: %s key url sample_dataset_ids new_state [error msg]' % os.path.basename(sys.argv[0]))
    sys.exit(1)
try:
    # The error message argument is optional.
    data['error_msg'] = sys.argv[5]
except IndexError:
    data['error_msg'] = ''
print(data)
update(sys.argv[1], sys.argv[2], data, return_formatted=True)
| StarcoderdataPython |
11876 | <reponame>ZiegHailo/SMUVI
__author__ = 'zieghailo'
import matplotlib.pyplot as plt
# plt.ion()
def show():
    # Display the current matplotlib figure and toggle it to full screen.
    plt.show()
    plt.get_current_fig_manager().full_screen_toggle()
def plot_graph(graph):
    """Scatter-plot every node of *graph* as blue stars and redraw."""
    xs = [point.x for point in graph.points]
    ys = [point.y for point in graph.points]
    plt.plot(xs, ys, 'b*')
    plt.draw()
def plot_arrows(graph):
    """Draw a black segment from every node to each of its connections."""
    for node in graph.points:
        for neighbour in node.connections:
            # ax.arrow(...) would draw arrowheads; plain segments suffice here.
            plt.plot([node.x, neighbour.x], [node.y, neighbour.y], 'k')
    plt.draw()
def plot_visited(visited):
    """Highlight already-visited nodes as large red dots."""
    if visited:
        xs, ys = zip(*((p.x, p.y) for p in visited))
    else:
        xs, ys = [], []
    plt.plot(xs, ys, 'ro', ms=10)
    plt.draw()
def plot_connection(start, end):
    """Draw a thick green segment between two nodes."""
    xs = (start.x, end.x)
    ys = (start.y, end.y)
    plt.plot(xs, ys, 'g', linewidth=4)
def start_gui(graph):
    # Open an interactive figure: clicks add points to *graph* via PointBuilder.
    # Blocks until a button/key press (fig.waitforbuttonpress).
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    ax.set_title('click to build line segments')
    ax.axis('equal')
    line, = ax.plot([0, 100], [0, 100], 'b.')  # empty line
    pointbuilder = PointBuilder(line, ax, graph)
    fig.waitforbuttonpress(0)
class PointBuilder:
    # Matplotlib event handler: each mouse click inside the axes adds a point
    # to the graph, rebuilds it, and redraws points + connections.
    # NOTE: this segment is Python 2 code (print statements below).
    def __init__(self, points, ax, graph):
        self.points = points
        self.ax = ax
        self.graph = graph
        # Register for both mouse-button and key-press events.
        self.cid = points.figure.canvas.mpl_connect('button_press_event', self)
        self.kid = points.figure.canvas.mpl_connect('key_press_event', self)

    def __call__(self, event):
        # Invoked by matplotlib for every connected event.
        print 'click', event
        if event.inaxes!=self.points.axes: return
        self.graph.add_point(event.xdata, event.ydata)
        x = [p.x for p in self.graph.points]
        y = [p.y for p in self.graph.points]
        plt.cla()
        self.graph.build_graph()
        plot_arrows(self.graph)
        plot_graph(self.graph)
        # Pressing 'x' skips the blocking wait and lets the GUI loop exit.
        if event.key != 'x':
            plt.waitforbuttonpress(0)
if __name__ == "__main__":
    # Removed a stray dataset artifact that was fused onto this line and made
    # it a syntax error.
    # NOTE(review): start_gui requires a `graph` argument; calling it bare
    # raises TypeError — confirm intended entry point.
    start_gui()
3413596 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperator
from .common import DistributedOperatorImpl
from .common import register_distributed_operator
from .common import register_distributed_operator_impl
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
from paddle.fluid import core, unique_name
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.framework import Program, Parameter, Variable, program_guard
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from ..process import new_process_group
from ..utils import _get_comm_group
class DistributedEmbedding(DistributedOperator):
    """Distributed-operator container for the lookup_table_v2 (embedding) op."""

    def __init__(self, name):
        super(DistributedEmbedding, self).__init__()
        self._name = name


# Register the container so the framework can look up embedding impls by op type.
register_distributed_operator("lookup_table_v2",
                              DistributedEmbedding("embedding"))
# RowParallel
class DistributedEmbeddingImpl(DistributedOperatorImpl):
    """Row-parallel embedding implementation: the embedding table W is sharded
    along its row (vocabulary) dimension across the model-parallel group; a
    c_embedding + c_allreduce_sum pair reconstructs the full lookup result."""

    def __init__(self, name):
        super(DistributedEmbeddingImpl, self).__init__()
        self._name = name
        self._forward_implemented = True
        self._backward_implemented = False

    def is_process_mesh_compatible(self, op_dist_attr):
        """ No restriction for now. """
        return True

    def is_input_compatible(self, op_dist_attr):
        # W must be sharded on its row (dim -2) and replicated on its column
        # (dim -1); Ids may only be sharded on the batch dimension.
        op_desc = op_dist_attr.get_owner_op().desc
        ids_name = op_desc.input('Ids')[0]
        w_name = op_desc.input('W')[0]
        ids_dims_mapping = op_dist_attr.get_input_dims_mapping(ids_name)
        w_dims_mapping = op_dist_attr.get_input_dims_mapping(w_name)
        if is_dim_replicate(w_dims_mapping[-2]) or is_dim_shard(w_dims_mapping[
                -1]):
            return False
        # Other dimensions must be replicate except the batch dimension
        for mapping in ids_dims_mapping[1:]:
            if is_dim_shard(mapping):
                return False
        return True

    def is_output_compatible(self, op_dist_attr):
        op_desc = op_dist_attr.get_owner_op().desc
        out_name = op_desc.output('Out')[0]
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        # Other dimensions must be replicate except the batch dimension
        for mapping in out_dims_mapping[1:]:
            if is_dim_shard(mapping):
                return False
        return True

    def update_dims_mapping(self, op_dist_attr):
        # Propagate compatible dims mappings between Ids/W and Out; returns
        # True if any mapping was changed.
        changed = False
        op_desc = op_dist_attr.get_owner_op().desc
        ids_name = op_desc.input('Ids')[0]
        w_name = op_desc.input('W')[0]
        out_name = op_desc.output('Out')[0]
        ids_dims_mapping = op_dist_attr.get_input_dims_mapping(ids_name)
        w_dims_mapping = op_dist_attr.get_input_dims_mapping(w_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        for i in range(len(ids_dims_mapping)):
            dim_changed = compute_compatible_and_update_dim_mapping(
                [ids_dims_mapping, out_dims_mapping], [i, i])
            if dim_changed:
                changed = True

        dim_changed = compute_compatible_and_update_dim_mapping(
            [w_dims_mapping, out_dims_mapping], [-1, -1])
        if dim_changed:
            changed = True

        return changed

    def forward(self, serial_op):
        # Returns a handler that lowers the serial embedding op into
        # c_embedding (local shard lookup) followed by c_allreduce_sum.
        def static_handle(dst_block,
                          src_op,
                          op_dist_attr,
                          input_name_mapping,
                          output_name_mapping,
                          rank_id=0):
            assert len(
                input_name_mapping
            ) == 2, "row_parallel_embedding take 2 inputs variable but got {}".format(
                input_name_mapping)
            assert len(
                output_name_mapping
            ) == 1, "row_parallel_embedding take 2 inputs variable but got {}".format(
                output_name_mapping)
            assert len(
                input_name_mapping['Ids']
            ) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format(
                input_name_mapping['Ids'])
            assert len(
                input_name_mapping['W']
            ) == 1, "row_parallel_embedding input W take 1 variable but got {}".format(
                input_name_mapping['W'])
            assert len(
                output_name_mapping['Out']
            ) == 1, "row_parallel_embedding input Out take 1 variable but got {}".format(
                input_name_mapping['Out'])

            Ids_var = dst_block.var(input_name_mapping['Ids'][0])
            Weight_var = dst_block.var(input_name_mapping['W'][0])
            Out_var = dst_block.var(output_name_mapping['Out'][0])

            # got dist attribute info
            embedding_row_dim_mapping = op_dist_attr.get_input_dims_mapping(
                Weight_var.name)[0]
            process_mesh_shape = op_dist_attr.get_process_mesh().topology
            process_mesh_group = op_dist_attr.get_process_mesh().process_group

            # caculate embedding offset
            # TODO generalize here, using cartisian product to allow any dimensional mesh shape
            mesh_shape = len(process_mesh_shape)
            assert mesh_shape <= 2, "row_parallel_embedding only support 1 or 2 dimensional process mesh, but got {}".format(
                process_mesh_shape)
            num_partition = process_mesh_shape[embedding_row_dim_mapping]
            # TODO generalize here, support any mesh group
            if mesh_shape == 1:
                relative_idx = process_mesh_group.index(rank_id)
            else:
                relative_idx = rank_id % num_partition

            # Each rank's shard starts at relative_idx * rows-per-shard.
            per_part_size = Weight_var.shape[0]
            relative_idx = relative_idx * per_part_size

            # TODO caculate ring id
            model_parallel_axis, process_mesh = op_dist_attr.get_owner_context(
            )._get_model_parallel_info()
            group_ranks = _get_comm_group(process_mesh.process_group,
                                          process_mesh.topology,
                                          model_parallel_axis, rank_id)
            group = new_process_group(group_ranks)

            # append op
            check_variable_and_dtype(Ids_var, 'input', ['int32', 'int64'],
                                     'c_embedding')

            intermediate_var_0 = dst_block.create_var(
                name=unique_name.generate_with_ignorable_key(".".join(
                    ["c_embedding", 'tmp'])),
                dtype=Weight_var.dtype,
                shape=Out_var.shape,
                type=core.VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=Out_var.stop_gradient)

            check_variable_and_dtype(
                Out_var, 'tensor',
                ['float16', 'float32', 'float64', 'int32', 'int64'],
                'c_allreduce_sum')

            dst_block.append_op(
                type='c_embedding',
                inputs={'Ids': [Ids_var],
                        'W': [Weight_var]},
                outputs={'Out': [intermediate_var_0]},
                attrs={"start_index": relative_idx})

            # use_model_parallel
            dst_block.append_op(
                type='c_allreduce_sum',
                inputs={'X': [intermediate_var_0]},
                outputs={'Out': [Out_var]},
                attrs={
                    'ring_id': group.id,
                    'use_calc_stream': True,
                    'use_model_parallel': True,
                })

        if in_dygraph_mode():
            raise NotImplementedError(
                "Dist op for [{}] with idx [{}] is NOT implemented yet.".format(
                    "matmul", 0))
        else:
            return static_handle


register_distributed_operator_impl("lookup_table_v2",
                                   DistributedEmbeddingImpl("row_parallel"))
| StarcoderdataPython |
64063 | <reponame>pandeykiran80/ref
import numpy as np
import toolbox
import pylab
from scipy.signal import butter, lfilter, convolve2d
from scipy.interpolate import RectBivariateSpline as RBS
from scipy.interpolate import interp2d
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
import matplotlib.patches as patches
import numpy.ma as ma
import sys
import warnings
warnings.filterwarnings("ignore")
class DraggablePoint:
    """Makes a matplotlib patch draggable with the RIGHT mouse button,
    using background blitting so only the moving artist is redrawn.
    (Standard matplotlib draggable-artist pattern.)"""
    lock = None  #only one can be animated at a time
    def __init__(self, point):
        self.point = point
        self.press = None
        self.background = None

    def connect(self):
        'connect to all the events we need'
        self.cidpress = self.point.figure.canvas.mpl_connect('button_press_event', self.on_press)
        self.cidrelease = self.point.figure.canvas.mpl_connect('button_release_event', self.on_release)
        self.cidmotion = self.point.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)

    def on_press(self, event):
        # Only react to right-clicks (button == 3) on this artist, and only
        # when no other DraggablePoint is currently being dragged.
        if event.button == 3:
            if event.inaxes != self.point.axes: return
            if DraggablePoint.lock is not None: return
            contains, attrd = self.point.contains(event)
            if not contains: return
            self.press = (self.point.center), event.xdata, event.ydata
            DraggablePoint.lock = self

            # draw everything but the selected rectangle and store the pixel buffer
            canvas = self.point.figure.canvas
            axes = self.point.axes
            self.point.set_animated(True)
            canvas.draw()
            self.background = canvas.copy_from_bbox(self.point.axes.bbox)

            # now redraw just the rectangle
            axes.draw_artist(self.point)

            # and blit just the redrawn area
            canvas.blit(axes.bbox)

    def on_motion(self, event):
        # Move the artist by the cursor delta, restoring the cached background
        # and blitting only the artist's bounding box for speed.
        if DraggablePoint.lock is not self:
            return
        if event.inaxes != self.point.axes: return
        self.point.center, xpress, ypress = self.press
        dx = event.xdata - xpress
        dy = event.ydata - ypress
        self.point.center = (self.point.center[0]+dx, self.point.center[1]+dy)

        canvas = self.point.figure.canvas
        axes = self.point.axes
        # restore the background region
        canvas.restore_region(self.background)

        # redraw just the current rectangle
        axes.draw_artist(self.point)

        # blit just the redrawn area
        canvas.blit(axes.bbox)

    def on_release(self, event):
        'on release we reset the press data'
        if DraggablePoint.lock is not self:
            return

        self.press = None
        DraggablePoint.lock = None

        # turn off the rect animation property and reset the background
        self.point.set_animated(False)
        self.background = None

        # redraw the full figure
        self.point.figure.canvas.draw()

    def disconnect(self):
        'disconnect all the stored connection ids'
        self.point.figure.canvas.mpl_disconnect(self.cidpress)
        self.point.figure.canvas.mpl_disconnect(self.cidrelease)
        self.point.figure.canvas.mpl_disconnect(self.cidmotion)
def initialise(file, memmap=False, scan=False):
    # Load a Seismic-Unix dataset and build the parameter dictionary used by
    # the processing functions below.
    # :param file: path to a SU file
    # :param memmap: if True, memory-map the file instead of reading it in
    # :param scan: if True, print a header scan via toolbox.scan
    # :returns: (dataset, kwargs) where kwargs holds ns, dt, times, cdp, etc.
    #intialise empty parameter dictionary
    #kwargs stands for keyword arguments
    kwargs = {}
    #load file
    if memmap == True:
        # read ns from the first trace header so the SU dtype can be built
        ns = np.fromfile(file, dtype=toolbox.su_header_dtype, count=1)['ns']
        sutype = toolbox.typeSU(ns)
        dataset = np.memmap(file, dtype=sutype)
    else:
        dataset = toolbox.read(file)

    #allocate stuff
    #~
    ns = kwargs['ns'] = dataset['ns'][0]
    dt = kwargs['dt'] = dataset['dt'][0]/1e6
    #also add the time vector - it's useful later
    kwargs['times'] = np.arange(0, dt*ns, dt)
    # normalise amplitudes and number traces sequentially
    dataset['trace'] /= np.amax(dataset['trace'])
    dataset['tracr'] = np.arange(dataset.size)

    kwargs['primary'] = 'cdp'
    kwargs['secondary'] = 'offset'
    kwargs['cdp'] = np.sort(np.unique(dataset['cdp']))
    kwargs['step'] = 1
    if scan:
        toolbox.scan(dataset)
    return dataset, kwargs
def tar(data, **kwargs):
    """True-amplitude recovery: scale every trace sample by exp(gamma * t).

    :param data: structured array with a 'trace' field (modified in place)
    :keyword gamma: exponential gain coefficient
    :keyword times: per-sample time vector
    :returns: the corrected dataset
    """
    correction = np.exp(kwargs['gamma'] * kwargs['times'])
    data['trace'] *= correction
    return data
def apply_statics(data, **kwargs):
    """Apply per-trace static time shifts.

    Each trace is rolled by its 'tstat' header (milliseconds) converted to
    whole samples; samples that would wrap around the array are zeroed first.

    :param data: structured array with 'trace' and 'tstat' fields (modified in place)
    :keyword dt: sample interval in seconds
    :returns: the shifted dataset
    """
    samples_per_ms = kwargs['dt'] * 1000
    for trace in data:
        # Bug fix: the original wrote trace['tstat']/(dt*1000).astype(np.int),
        # which cast the DENOMINATOR (not the quotient) and used np.int, which
        # has been removed from NumPy; the resulting float shift also breaks
        # np.roll. Convert the quotient itself to a plain int.
        shift = int(trace['tstat'] / samples_per_ms)
        if shift > 0:
            # zero the tail samples that would wrap to the front
            trace['trace'][-shift:] = 0
        if shift < 0:
            # zero the head samples that would wrap to the end
            trace['trace'][:-shift] = 0
        trace['trace'] = np.roll(trace['trace'], shift)
    return data
def build_vels(vels, **kwargs):
    # Interpolate sparse velocity picks onto a full (cdp, time) grid.
    # :param vels: dict mapping cdp -> list of (velocity, time) picks
    # :keyword cdp: sorted array of CDP numbers
    # :keyword times: sample time vector
    # :returns: 2-D velocity field of shape (ncdp, ntimes)
    # NOTE(review): pylab.griddata was removed from modern matplotlib —
    # confirm the pinned matplotlib version, or port to scipy.interpolate.griddata.
    from scipy import interpolate
    cdps = np.array(kwargs['cdp'])
    times = np.array(kwargs['times'])
    keys = vels.keys()
    x = []
    t = []
    values = []
    # flatten the picks into parallel coordinate/value lists
    for i in vels.items():
        cdp = i[0]
        picks= i[1]
        for pick in picks:
            x.append(cdp)
            t.append(pick[1])
            values.append(pick[0])

    grid_x, grid_y = np.meshgrid(cdps, times)
    # Pin the four grid corners so the interpolation covers the whole panel.
    #top left
    x.append(min(cdps))
    t.append(min(times))
    values.append(min(values))
    #top right
    t.append(min(times))
    x.append(max(cdps))
    values.append(min(values))
    #bottom left
    x.append(min(cdps))
    t.append(max(times))
    values.append(max(values))
    #bottom right
    t.append(max(times))
    x.append(max(cdps))
    values.append(max(values))

    zi = pylab.griddata(x, t, values, grid_x, grid_y, interp='linear')
    return zi.T
def _nmo_calc(tx, vels, offset):
'''calculates the zero offset time'''
t0 = np.sqrt(tx*tx - (offset*offset)/(vels*vels))
return t0
def old_nmo(dataset, **kwargs):
    # Trace-by-trace normal moveout correction (superseded by the vectorised
    # nmo() below, kept for reference).  NOTE: Python 2 print statements.
    if 'smute' not in kwargs.keys(): kwargs['smute'] = 10000.
    ns = kwargs['ns']
    dt = kwargs['dt']
    tx = kwargs['times']

    # velocity field is indexed by cdp relative to the first cdp
    minCdp = np.amin(dataset['cdp'])
    counter = 0
    ntraces = dataset.size
    print "moving out %d traces" %ntraces
    result = dataset.copy()
    result['trace'] *= 0

    for i in range(dataset.size):
        trace = dataset[i]
        # progress report every 1000 traces
        counter += 1
        if counter > 1000:
            ntraces -= counter
            counter = 0
            print ntraces
        aoffset = np.abs(trace['offset'].astype(np.float))
        cdp = trace['cdp']
        vel = kwargs['vels'][cdp - minCdp]
        #calculate time shift for each sample in trac
        t0 = _nmo_calc(tx, vel, aoffset)
        t0 = np.nan_to_num(t0)
        #calculate stretch between each sample
        stretch = 100.0*(np.pad(np.diff(t0),(0,1), 'reflect')-dt)/dt
        # mute samples whose NMO stretch exceeds the threshold
        mute = kwargs['smute']
        filter = [(stretch >0.0) & ( stretch < mute)]

        #interpolate
        result[i]['trace'] = np.interp(tx, t0, trace['trace']) * filter

    return result
def nmo(dataset, **kwargs):
    # Vectorised normal moveout correction over the whole dataset at once.
    # Traces are laid end-to-end on a single time axis (via per-trace time
    # offsets) so one np.interp call handles every trace.  Samples with NMO
    # stretch above kwargs['smute'] (percent) are muted.
    dataset.sort(order='cdp')
    cdps = np.unique(dataset['cdp'])
    minCdp = cdps[0]
    times = kwargs['times']
    dt = kwargs['dt']
    ns = kwargs['ns']
    nt = dataset.shape[0]
    traces = np.arange(nt)

    # one velocity function per trace, selected by its cdp
    cdp_columns = dataset['cdp'] - minCdp
    vels = np.zeros_like(dataset['trace'])
    for i in range(cdp_columns.size):
        vels[i] = kwargs['vels'][cdp_columns[i]]

    tx = np.ones(dataset['trace'].shape) * times
    offset = dataset['offset'][:, None]
    t0 = _nmo_calc(tx, vels, offset)
    t0 = np.nan_to_num(t0)

    # shift each trace onto its own segment of a global time axis so the
    # flattened interpolation below cannot mix samples between traces
    shifts  = np.ones(dataset['trace'].shape) * (ns * dt *  traces[:, None])
    tx += shifts
    t0 += shifts

    result = np.interp(tx.ravel(), t0.ravel(), dataset['trace'].flatten())
    dataset['trace'] = result.reshape(nt, ns)

    #calculate stretch between each sample
    stretch = 100.0*(np.abs(t0 - np.roll(t0, 1, axis=-1))/dt)
    stretch = np.nan_to_num(stretch)
    mute = kwargs['smute'] * 1.0
    filter = [(stretch >0.0) & ( stretch < mute)][0]
    dataset['trace'] *= filter
    return dataset
def axis_nmo(dataset, **kwargs):
    # Placeholder — not implemented.
    pass
def _stack_gather(gather):
'''stacks a single gather into a trace.
uses header of first trace. normalises
by the number of nonzero samples'''
pilot = gather[np.argmin(gather['offset'])]
norm = gather['trace'].copy()
norm = np.nan_to_num(norm)
norm = norm **0
norm = np.sum(norm, axis=-2)
pilot['trace'] = np.sum(gather['trace'], axis=-2)/norm
return pilot
def stack(dataset, **kwargs):
    """Stack every CDP gather of *dataset* into a single trace per CDP."""
    cdps = np.unique(dataset['cdp'])
    stacked = np.zeros(cdps.size, dtype=np.result_type(dataset))
    for row, cdp in enumerate(cdps):
        stacked[row] = _stack_gather(dataset[dataset['cdp'] == cdp])
    return stacked
def semb(workspace,**kwargs):
    # Interactive semblance (velocity analysis) panel for one CDP gather.
    # For each trial velocity the gather is NMO-corrected with a constant
    # velocity function and a smoothed semblance trace is computed; the
    # result is shown as an image and left-clicks drop draggable pick markers
    # whose final positions are printed on exit.
    # NOTE: Python 2 print statements below.
    # NOTE(review): nmo() defined above takes (dataset, **kwargs); the call
    # nmo(panel, None, **kwargs) passes an extra positional argument —
    # confirm which nmo signature this was written against.
    print ''
    def onclick(e):
        # left click: print the pick and add a draggable ellipse marker
        if e.button == 1:
            print "(%.1f, %.3f)," %(e.xdata, e.ydata),
            w = np.abs(np.diff(ax.get_xlim())[0])/50.
            h = np.abs(np.diff(ax.get_ylim())[0])/50.
            circ= patches.Ellipse((e.xdata, e.ydata), width=w, height=h,  fc='k')
            ax.add_patch(circ)
            dr = DraggablePoint(circ)
            dr.connect()
            drs.append(dr)
            fig.canvas.draw()

    vels = kwargs['velocities']
    nvels = vels.size
    ns = kwargs['ns']
    result = np.zeros((nvels,ns),'f')
    loc = np.mean(workspace['cdp'])
    for v in range(nvels):
        panel = workspace.copy()
        # constant-velocity NMO for this trial velocity
        kwargs['vels'] = np.ones(kwargs['ns'], 'f') * vels[v]
        panel = nmo(panel, None, **kwargs)
        # semblance: (sum a)^2 / (n * sum a^2), smoothed along time
        norm = panel['trace'].copy()
        norm[np.nonzero(norm)] = 1
        n = np.sum(norm, axis=0)
        a = np.sum(panel['trace'], axis=0)**2
        b = n * np.sum(panel['trace']**2, axis=0)
        window = kwargs['smoother']*1.0
        kernel = np.ones(window)/window
        a = np.convolve(a, kernel, mode='same')
        b = np.convolve(b, kernel, mode='same')
        result[v:] = np.sqrt(a/b)

    pylab.imshow(result.T, aspect='auto', extent=(min(vels), max(vels),kwargs['ns']*kwargs['dt'],0.), cmap='jet')
    pylab.xlabel('velocity')
    pylab.ylabel('time')
    pylab.title("cdp = %d" %np.unique(loc))
    pylab.colorbar()
    print "vels[%d]=" %loc,
    fig = pylab.gcf()
    ax = fig.gca()
    fig.canvas.mpl_connect('button_press_event', onclick)
    drs = []
    pylab.show()
    # dump the (possibly dragged) pick coordinates once the window closes
    print ''
    print "vels[%d]=" %loc,
    for dr in drs:
        print "(%.1f, %.3f)," %dr.point.center,
def _lmo_calc(aoffset, velocity):
t0 = -1.0*aoffset/velocity
return t0
def lmo(dataset, **kwargs):
    """Apply linear moveout correction, one roll per unique offset.

    :param dataset: structured array with 'offset' and 'trace' fields
                    (modified in place)
    :keyword lmo: linear-moveout velocity
    :returns: the corrected dataset
    """
    for offset in np.unique(dataset['offset']):
        aoffset = np.abs(offset)
        # Bug fix: the original converted with .astype(np.int); np.int has
        # been removed from NumPy, and np.roll needs a plain integer shift.
        # The *1000 converts the LMO time shift to samples (assumes 1 ms dt —
        # TODO confirm against the rest of the pipeline).
        shift = int(_lmo_calc(aoffset, kwargs['lmo']) * 1000)
        inds = dataset['offset'] == offset
        dataset['trace'][inds] = np.roll(dataset['trace'][inds], shift, axis=-1)
    return dataset
def trace_mix(dataset, **kwargs):
ns = kwargs['ns']
window = np.ones(kwargs['mix'], 'f')/kwargs['mix']
for i in range(ns):
dataset['trace'][:,i] = np.convolve(dataset['trace'][:,i], window, mode='same')
return dataset
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter; returns (b, a) coefficients.

    Cutoffs are given in Hz and normalised internally by the Nyquist frequency.
    """
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, band, btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Band-pass *data* with a causal Butterworth filter (single forward pass)."""
    coeffs = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(coeffs[0], coeffs[1], data)
def bandpass(dataset, **kwargs):
    # Band-pass every trace.  The filter is applied to the time-reversed
    # traces, then the result is reversed and filtered again — a
    # forward/backward application (filtfilt-style) built from two lfilter
    # passes.
    # Sample rate and desired cutoff frequencies (in Hz).
    fs = 1./kwargs['dt']
    lowcut = kwargs['lowcut']
    highcut = kwargs['highcut']
    dataset['trace'] = butter_bandpass_filter(np.fliplr(dataset['trace']), lowcut, highcut, fs, order=3)
    dataset['trace'] = butter_bandpass_filter(np.fliplr(dataset['trace']), lowcut, highcut, fs, order=3)
    return dataset
def fk_view(dataset, **kwargs):
    # Display the 2-D amplitude spectrum (f-k panel) of the dataset.
    # The two halves of the wavenumber axis are flipped so negative
    # wavenumbers appear on the left of the image.
    mid= dataset.size/2
    f = np.abs(np.fft.rfft2(dataset['trace']))
    freq = np.fft.rfftfreq(kwargs['ns'], d=kwargs['dt'])
    k = np.fft.rfftfreq(dataset.size, d=kwargs['dx'])
    kmax = k[-1]
    f[:mid] = f[:mid][::-1]
    f[mid:] = f[mid:][::-1]
    pylab.figure()
    pylab.imshow(f.T, aspect='auto', extent=[-1*kmax, kmax, freq[-1], freq[0]])
    pylab.colorbar()
def fk_design(dataset, **kwargs):
    # Build (and display) an f-k fan filter mask: pass where the apparent
    # velocity f/k exceeds kwargs['fkVelocity'], reject below, with the
    # pass/reject edge smoothed by a boxcar of length kwargs['fkSmooth'].
    # Returns the mask in FFT (unshifted) ordering for use by fk_filter.
    mid= dataset.size/2
    f = np.abs(np.fft.rfft2(dataset['trace']))
    freq = np.fft.rfftfreq(kwargs['ns'], d=kwargs['dt'])
    k = np.fft.rfftfreq(dataset.size, d=kwargs['dx'])
    k = k[:-1]
    kmax = k[-1]
    # mirror the wavenumber axis so both halves get the same |k|
    k_axis = np.hstack([k, k[::-1]])[:, None]

    # apparent velocity at every (k, f) cell: m = f / k
    column, row = np.indices(f.shape)
    row = row.astype(np.float)
    column = column.astype(np.float)
    column.fill(1.0)
    row.fill(1.0)
    row *= freq
    column *= k_axis
    m = row/column
    m[:mid] = m[:mid][::-1]
    m[mid:] = m[mid:][::-1]

    # binarise: 1 = pass (velocity above threshold), 0 = reject
    mask = m > kwargs['fkVelocity']
    m[mask] = 1
    m[~mask] = 0

    # taper the mask edge along time-frequency with a boxcar smoother
    window = kwargs['fkSmooth']
    vec= np.ones(window)/(window *1.0)
    smoothed_m = np.apply_along_axis(lambda m: np.convolve(m, vec, mode='valid'), axis=-1, arr=m)
    valid = smoothed_m.shape[-1]
    m[:, :valid] = smoothed_m

    pylab.figure()
    pylab.imshow(m.T, aspect='auto', extent=[-1*kmax, kmax, freq[-1], freq[0]])
    pylab.colorbar()
    # undo the display-order flip before returning the mask in FFT ordering
    z = m.copy()
    z[:mid] = z[:mid][::-1]
    z[mid:] = z[mid:][::-1]
    return z
def fk_filter(dataset, **kwargs):
    # Apply the f-k mask from fk_design shot by shot (grouped on 'fldr').
    # Shots with fewer traces than the filter are zero-padded before the FFT.
    for s in np.unique(dataset['fldr']):
        shot = dataset['trace'][dataset['fldr'] == s]
        filter = kwargs['fkFilter']
        nt = shot.shape[0]
        delta = abs(nt - filter.shape[0])
        if delta > 0:
            shot = np.vstack([shot, np.zeros_like(shot[:delta])])
        f = np.fft.rfft2(shot)
        # multiply in the f-k domain, invert, and drop the padding
        result = np.fft.irfft2(f*filter)[:nt]
        dataset['trace'] [dataset['fldr'] == s]= 0.0
        dataset['trace'] [dataset['fldr'] == s]= result
    return dataset
def trim(dataset, **kwargs):
    # Trim statics: cross-correlate each trace against its CDP's model
    # (pilot) trace inside a time gate, and store the best-fit lag (bounded
    # by kwargs['maxshift']) in the 'tstat' header.
    # NOTE(review): centre = ns/2 — the later centre.astype(np.int16) only
    # works if ns is a NumPy scalar (Python ints/floats have no astype);
    # confirm ns's type upstream.
    dataset['tstat'] = 0
    model = kwargs['model']
    cdps = np.unique(model['cdp'])
    # correlation gate in samples
    start, end = (kwargs['gate'] /kwargs['dt']).astype(np.int)
    centre = kwargs['ns']/2
    m = kwargs['maxshift']
    for cdp in cdps:
        gather = dataset[dataset['cdp'] == cdp].copy()
        # zero everything outside the gate in both gather and pilot
        gather['trace'][:,:start] = 0
        gather['trace'][:,end:] = 0
        pilot = model['trace'][model['cdp'] == cdp].ravel()
        pilot[:start] = 0
        pilot[end:] = 0
        result = np.apply_along_axis(lambda m: np.correlate(m, pilot, mode='same'), axis=-1, arr=gather['trace'])
        # restrict the allowed lag to +/- maxshift around zero lag (centre)
        result[:,:centre-m] = 0
        result[:,centre+m+1:] = 0
        peaks = np.argmax(np.abs(result), axis=-1)
        dataset['tstat'][dataset['cdp'] == cdp] = peaks
    # convert peak index to a signed shift relative to zero lag
    dataset['tstat'] -= centre.astype(np.int16)
    dataset['tstat'] *= -1
    return dataset
def xwigb(panel, key='offset'):
    '''
    looks like suxwigb: wiggle-trace display of a gather.
    Each trace is unit-normalised and offset horizontally by its trace index.
    '''
    axis = np.arange(panel['ns'][0])*panel['dt'][0]*1e-6
    traces = panel['trace']
    # normalise each trace to unit energy so wiggles have comparable size
    traces /= np.sqrt((traces ** 2).sum(1))[:,np.newaxis]
    x, y = np.meshgrid(range(traces.shape[0]), range(traces.shape[1]))
    # shift trace i horizontally by i so wiggles sit side by side
    traces += x.T
    fig = pylab.figure()
    for trace in traces:
        pylab.plot(trace, axis,'k')
    pylab.gca().invert_yaxis()
    pylab.ylabel('Time(s)')
    pylab.title('Trace')
    pylab.gca().xaxis.tick_top()
    pylab.show()
def ximage(data, agc=0):
    '''
    looks like suximage: grey-scale image display of a trace panel.
    If agc is truthy, traces are divided by a running-absolute-amplitude
    envelope (automatic gain control) before display.
    fix this to use the SU
    headers for plotting
    '''
    if agc:
        amp_func = agc_func(data=data,window=100)
        data /= amp_func
    fig = pylab.figure()
    pylab.imshow(data.T, aspect='auto', vmin=-1, vmax=1, cmap='gist_yarg') #,
                         #extent=(min(panel['offset']), max(panel['offset']), panel['ns'][0]*(panel['dt'][0]*1e-6), 0))
    pylab.xlabel('Offset')
    pylab.ylabel('Time(s)')
    pylab.show()
def agc_func(data, window):
    # Running-mean of absolute amplitude along each trace (AGC envelope).
    # NOTE: Python 2 print statement below — looks like a debug leftover.
    vec = np.ones(window)/(window/2.)
    func = np.apply_along_axis(lambda m: np.convolve(np.abs(m), vec, mode='same'), axis=1, arr=data)
    print func
    return func
| StarcoderdataPython |
5126435 | # Generated by Django 2.1.5 on 2019-01-07 07:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a nullable `archivo` foreign key
    # on the `Url` model, pointing at main.ArchivoUrl (CASCADE on delete).

    dependencies = [
        ('main', '0002_archivourl'),
    ]

    operations = [
        migrations.AddField(
            model_name='url',
            name='archivo',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.ArchivoUrl'),
        ),
    ]
| StarcoderdataPython |
87806 | <reponame>calebschmidt/superfluid<filename>tests.py
# Placeholder for now...
| StarcoderdataPython |
4829205 | # Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
class MyHiglightStyle(Style):
    """
    Pygments style port of the default trac highlighter design.

    Only the ``styles`` token map matters to Pygments; commented-out entries
    fall back to the token's inherited/default style.

    NOTE(review): the class name looks like a typo for ``MyHighlightStyle``,
    but renaming would break any importer, so it is left as-is.
    """
    default_style = ''
    styles = {
        Comment:                'italic #999988',
        # Comment.Preproc:        'bold noitalic #999999',
        # Comment.Special:        'bold #999999',

        Operator:               'bold',

        String:                 '#B81',
        String.Escape:          '#900',
        # String.Regex:           '#808000',

        Number:                 '#590 bold',

        Keyword:                'bold',
        # Keyword.Type:           '#445588',

        Name.Builtin:           '#840',
        Name.Function:          'bold #840',
        Name.Class:             'bold #900',
        Name.Exception:         'bold #A00',
        Name.Decorator:         '#840',
        Name.Namespace:         '#900',
        # Name.Variable:          '#088',
        # Name.Constant:          '#088',
        Name.Tag:               '#840',
        # Name.Tag:               '#000080',
        # Name.Attribute:         '#008080',
        # Name.Entity:            '#800080',

        # Generic.Heading:        '#999999',
        # Generic.Subheading:     '#aaaaaa',
        # Generic.Deleted:        'bg:#ffdddd #000000',
        # Generic.Inserted:       'bg:#ddffdd #000000',
        Generic.Error:          '#aa0000',
        Generic.Emph:           'italic',
        Generic.Strong:         'bold',
        Generic.Prompt:         '#555555',
        Generic.Output:         '#888888',
        Generic.Traceback:      '#aa0000',

        Error:                  'bg:#e3d2d2 #a61717'
    }
| StarcoderdataPython |
1756301 | <reponame>ZaydH/ams230
import math
import os
import sys
from typing import List, Callable
import numpy as np
class TrustRegion:
    """Trust-region minimizer for f(x) = log10(1 + x^T Q x).

    Q is a randomly generated symmetric positive-definite matrix, so the
    unique minimizer is x = 0 with f(0) = 0.  The step ``p`` at each
    iteration is produced by a pluggable solver assigned to
    ``self.calculate_p`` (Cauchy point or dogleg; see module functions).
    Follows Algorithm 4.1 of Nocedal & Wright.
    """

    UNIFORM_MIN = 10        # lower bound for the random eigenvalues of Q
    UNIFORM_MAX = 1000      # upper bound for the random eigenvalues of Q
    DELTA_0 = 1             # initial trust-region radius
    DELTA_MAX = 10000       # cap on the trust-region radius
    EPSILON = 10 ** -8      # tolerance for "step is on the boundary" test
    TOLERANCE = 10 ** -12   # convergence threshold on f(x)

    def __init__(self, n: int):
        """
        :param n: Dimension of positive definite, symmetric matrix Q
        """
        self._n = n
        self.use_gradient = False
        self._Q = TrustRegion.build_symmetric_pos_definite(n)
        self._x = None  # type: np.ndarray
        # Initialize to a random vector for {U(0,1)}^n
        # NOTE(review): _x0 is never read elsewhere in this class; _x is
        # (re)initialized by _initialize_x() inside run() instead.
        self._x0 = np.random.uniform(0, 1, (n, 1))
        self._k = None                  # iteration counter
        self.calculate_p = None  # type: Callable
        self._delta = None              # current trust-region radius
        self._eta = 0.1                 # step-acceptance threshold on rho
        self._B = None                  # Hessian (approximation)
        self._p = None  # type: np.ndarray
        self._rho = None                # actual/predicted reduction ratio

    @staticmethod
    def build_symmetric_pos_definite(n: int) -> np.ndarray:
        """
        Constructs a symmetric, positive definite matrix with eigenvalues distributed uniformly
        between UNIFORM_MIN and UNIFORM_MAX.
        :param n: Dimension of the matrix.
        :return: Symmetric positive definite matrix with uniform eigenvalues.
        """
        # Random orthonormal basis from QR, then Q = q diag(eig) q^T.
        q, _ = np.linalg.qr(np.random.rand(n, n))
        eig = []
        while len(eig) < n:
            eig.append(np.random.uniform(TrustRegion.UNIFORM_MIN, TrustRegion.UNIFORM_MAX))
        d = np.diag(eig)
        return q @ d @ q.T

    def run(self) -> List[float]:
        """Iterate the trust-region loop until f(x) < TOLERANCE.

        :return: list of f(x) values, one per iteration (printed every 100).
        """
        self._delta = TrustRegion.DELTA_0
        self._k = 0
        self._initialize_x()
        err = []
        while True:
            err.append(self.f(self._x))
            if self._k % 100 == 0:
                print("%d,%.15f" % (self._k, err[-1]))
            if err[-1] < TrustRegion.TOLERANCE:
                if self._k % 100 != 0:
                    print("%d,%.15f" % (self._k, err[-1]))
                return err
            self._calculate_B()
            self._p = self.calculate_p(self._B, self.g(), self._delta)
            self._calculate_rho()
            # Update delta (optionally): shrink on poor agreement, expand when
            # the model fits well AND the step hit the trust-region boundary.
            if self._rho < 0.25:
                self._delta = 0.25 * self._delta
            else:
                if (self._rho > 0.75
                        and abs(np.linalg.norm(self._p) - self._delta) < TrustRegion.EPSILON):
                    self._delta = min(2 * self._delta, TrustRegion.DELTA_MAX)
                else:
                    pass
            # Update x (optionally): accept the step only if rho > eta.
            if self._rho > self._eta:
                self._x = self._x + self._p
            else:
                pass
            self._k += 1

    def f(self, x: np.ndarray) -> float:
        """
        Value of the function
        f(x) = \\log(1 + x Q x^{T})
        (implemented with base-10 log, see below)
        :param x: Location x used to calculate the cost
        :return: Value of function f
        """
        quad_prod = x.T @ self._Q @ x
        # NOTE(review): uses log10, while the docstring and g() suggest the
        # natural log (g() is the exact gradient of ln(1 + x^T Q x) only).
        # TODO confirm which base was intended.
        return math.log10(1 + quad_prod)

    def g(self) -> np.ndarray:
        """
        Calculates the gradient of $f$ (for the natural-log form of f)
        :return: Vector for the gradient of $f$
        """
        return 2 * (self._Q @ self._x) / (1 + self._x.T @ self._Q @ self._x)

    def _initialize_x(self):
        """
        Initialize x to a random variable (uniform on [0, 1)^n, 1-D shape).
        """
        self._x = np.random.rand(self._n)

    def m_k(self, p: np.ndarray) -> float:
        """
        Calculates m_k using the specified value of \\p p.
        :param p: Descent direction.
        :return: Value of m_k given \\p p and the other state variables
        """
        # Quadratic model around the current x; note it uses Q, not self._B.
        return self.f(self._x) + self.g().T @ p + 0.5 * p.T @ self._Q @ p

    # noinspection PyPep8Naming
    def _calculate_B(self):
        """
        Calculates the approximation of the Hessian B_k.

        With use_gradient=True, tries the exact Hessian of ln(1 + x^T Q x)
        and uses it only if positive definite; otherwise falls back to Q.
        """
        if self.use_gradient:
            denom = (1 + self._x.T @ self._Q @ self._x)
            q_x = (2 * self._Q @ self._x)
            # NOTE(review): with 1-D x, q_x @ q_x.T is a scalar dot product,
            # not the outer product the formula suggests — TODO confirm.
            H = 2 * self._Q / denom - q_x @ q_x.T / (denom ** 2)
            if np.all(np.linalg.eigvals(H) > 0):
                self._B = H
                return
        self._B = self._Q

    def _calculate_rho(self):
        """
        Calculates $\\rho$ based on equation (4.4) in Nocedal and Wright. It then updates the
        $\\rho$ parameter of the object.
        """
        # rho = actual reduction / predicted reduction of the model m_k.
        rho = self.f(self._x) - self.f(self._x + self._p)
        rho /= self.m_k(np.zeros(self._n)) - self.m_k(self._p)
        self._rho = rho
# noinspection PyPep8Naming
def calculate_cauchy_points(B: np.ndarray, g: np.ndarray, delta: float) -> np.ndarray:
    """Cauchy-point step for the trust-region subproblem.

    Minimizes the quadratic model along the steepest-descent direction,
    clipped to the trust-region radius (Nocedal & Wright, eq. 4.11/4.12).

    :param B: Approximation of the Hessian that is PD
    :param g: Gradient at x_k
    :param delta: Trust region radius
    :return: step vector p
    """
    g_norm = np.linalg.norm(g)
    curvature = g.T @ B @ g
    # Negative curvature along -g: go straight to the boundary (tau = 1);
    # otherwise stop at the 1-D minimizer if it lies inside the region.
    tau = 1 if curvature <= 0 else min(1, g_norm ** 3 / (delta * curvature))
    return -1 * tau * delta / g_norm * g
# noinspection PyPep8Naming
def calculate_dog_leg(B: np.ndarray, g: np.ndarray, delta: float) -> np.ndarray:
    """Dogleg step for the trust-region subproblem (Nocedal & Wright, ch. 4).

    Bug fixed: the original scanned tau in [1, 2] and accepted the first point
    with ``||p_c||^2 - delta^2 < TOLERANCE``.  Since ``||p_u|| < delta`` holds
    on that path, the inequality is already true at tau = 1, so the function
    always returned ``p_u`` instead of the boundary intersection (and could
    fall off the loop returning None).  The boundary crossing is now computed
    exactly from the quadratic ``||p_u + s*(p_b - p_u)|| = delta``.

    :param B: Approximation of the Hessian that is PD
    :param g: Gradient at x_k
    :param delta: Trust region radius
    :return: step vector p
    """
    p_b = -1 * np.linalg.inv(B) @ g                                # full Newton step
    p_u = -1 * (np.linalg.norm(g) ** 2) / (g.T @ B @ g) * g        # unconstrained Cauchy step

    # Newton step fits inside the region: take it.
    if np.linalg.norm(p_b) <= delta:
        return p_b
    # Even the steepest-descent minimizer leaves the region: return the
    # steepest-descent direction scaled to the boundary.
    if np.linalg.norm(p_u) >= delta:
        return -1 * delta / np.linalg.norm(g) * g

    # Dogleg segment p_u -> p_b crosses the boundary exactly once.
    # Solve ||p_u + s*d||^2 = delta^2 for s in (0, 1], taking the + root.
    d = p_b - p_u
    a = float(d @ d)
    b = 2.0 * float(p_u @ d)
    c = float(p_u @ p_u) - delta ** 2          # c < 0 here, so the root is real
    s = (-b + math.sqrt(b * b - 4.0 * a * c)) / (2.0 * a)
    return p_u + s * d
if __name__ == "__main__":
    # Usage: python <script> <n>   where n is the problem dimension.
    tr = TrustRegion(int(sys.argv[1]))
    # Run both step solvers, each with and without the exact-Hessian option.
    algs = [("cauchy", calculate_cauchy_points), ("dog_leg", calculate_dog_leg)]
    for (name, alg) in algs:
        for use_grad in [False, True]:
            print("\n\nStarting Algorithm: %s with use_grad=%r" % (name, use_grad))
            tr.calculate_p = alg
            tr.use_gradient = use_grad
            f_err = tr.run()
            # Dump the per-iteration error curve to data/<name><suffix>.csv.
            # NOTE(review): assumes the "data" directory already exists.
            grad_str = "_with_grad" if use_grad else "_no_grad"
            with open("data" + os.sep + name + grad_str + ".csv", "w") as fout:
                fout.write("x,f(x)")
                for i in range(len(f_err)):
                    if f_err[i] == 0:
                        continue
                    fout.write("\n%d,%.15f" % (i, math.log(f_err[i])))
| StarcoderdataPython |
11321456 | <filename>icls/datasets/__init__.py
from .imagenet import ImagenetDataset
__all__ = ["ImagenetDataset"]
| StarcoderdataPython |
6406380 | <gh_stars>100-1000
from typing import Any, Sequence
from multimethod import multimethod
from visions.relations import IdentityRelation, TypeRelation
from visions.types.file import File
from visions.types.type import VisionsBaseType
class Image(VisionsBaseType):
    """**Image** implementation of :class:`visions.types.type.VisionsBaseType`.
    (i.e. series with all image files)

    Examples:
        >>> from pathlib import Path
        >>> import visions
        >>> x = [Path('/home/user/file.png'), Path('/home/user/test2.jpg')]
        >>> x in visions.Image
        True
    """

    @staticmethod
    def get_relations() -> Sequence[TypeRelation]:
        # Image sits under File in the type graph via an identity relation
        # (every Image value is already a valid File value).
        relations = [IdentityRelation(File)]
        return relations

    @staticmethod
    @multimethod
    def contains_op(item: Any, state: dict) -> bool:
        # Base multimethod stub: concrete implementations are registered per
        # backend elsewhere (presumably via @Image.contains_op.register) —
        # TODO confirm against the visions backend modules.
        pass
| StarcoderdataPython |
5079043 | <reponame>Bfaschat/EduuRobot
import config
import urllib
bot = config.bot
def prints(msg):
    """Handle the ``/print <url>``/``!print <url>`` Telegram command.

    Replies to the message with a 1280px-wide screenshot of the given URL,
    fetched from the thumbnail.ws API; on failure, replies with a
    (Portuguese) error message instead.  Returns True when the command
    matched, otherwise implicitly returns None.
    """
    if msg.get('text'):
        if msg['text'].startswith('/print ') or msg['text'].startswith('!print '):
            try:
                # msg['text'][7:] strips the 7-char command prefix, leaving the URL.
                bot.sendPhoto(msg['chat']['id'], f"https://api.thumbnail.ws/api/{config.keys['screenshots']}/thumbnail/get?url={urllib.parse.quote_plus(msg['text'][7:])}&width=1280",
                              reply_to_message_id=msg['message_id'])
            except Exception as e:
                # NOTE(review): assumes the exception exposes `.description`
                # (telepot/telegram API errors do); a plain Exception would
                # raise AttributeError here — TODO confirm.
                bot.sendMessage(msg['chat']['id'], f'Ocorreu um erro ao enviar a print, favor tente mais tarde.\nDescrição do erro: {e.description}',
                                reply_to_message_id=msg['message_id'])
            return True
| StarcoderdataPython |
1722405 | """
【Python生成器】生成器小案例 2019/10/07 14:51
"""
# from collections import Iterable,Iterator
#TODO: 生成器中的return语句会触发StopIterator异常:
# def my_gen(start):
# while start < 10:
# yield start
# start += 1
# return 'hello world!'
# ret = my_gen(1)
# print(next(ret))
# try:
# next(ret)
# except Exception as error:
# print('error msg:')
# print(error)
# TODO: 斐波那契数列
# TODO: 第一轮
# 0 1 1 2 3 5 8 13 21 34
# a b
# c
# TODO: 第二轮
# 0 1 1 2 3 5 8 13 21 34
# a b
# c
#TODO: 普通while循环实现
def basis_my_gen(count):
    """Print the first ``count`` Fibonacci numbers (1 1 2 3 5 ...) on one line."""
    step = 1
    a, b = 0, 1
    while step <= count:
        print(b, end=" ")
        # advance the pair: a takes b's old value, b becomes the next term
        a, b = b, a + b
        step += 1


basis_my_gen(10)
print("")
print('=' * 30)
#TODO: 生成器版本实现斐波那契数列
def fib(count):
    """Generator yielding the first ``count`` Fibonacci numbers (1, 1, 2, 3, ...)."""
    step = 1
    a, b = 0, 1
    while step <= count:
        yield b
        a, b = b, a + b
        step += 1


ret = fib(10)
for value in ret:
    print(value, end=" ")
print("")
print('=' * 30)
print('多任务切换示例:')
def qq_music(duration):
    """Generator 'task' simulating music playback.

    Yields once per simulated minute, from minute 0 through ``duration``
    inclusive, printing a progress line before each yield.
    """
    minute = 0
    while minute <= duration:
        print('QQ音乐播放第%d分钟...'%(minute, ))
        yield None
        minute += 1
def youku_movie(duration):
    """Generator 'task' simulating movie playback.

    Yields once per simulated minute, from minute 0 through ``duration``
    inclusive, printing a progress line before each yield.
    """
    minute = 0
    while minute <= duration:
        print('优酷电影播放第%d分钟...'%(minute, ))
        yield None
        minute += 1
def main():
    """Round-robin the two generator 'tasks' until both are exhausted.

    Advancing an exhausted generator raises StopIteration, which marks that
    task as finished; the loop exits once every task has finished.
    """
    tasks = [qq_music(15), youku_movie(60)]
    finished = [False, False]
    while True:
        for idx, task in enumerate(tasks):
            try:
                next(task)
            except Exception:
                finished[idx] = True
        if all(finished):
            print('多任务切换执行完毕....')
            break
if __name__ == '__main__':
    # Run the cooperative-multitasking demo only when executed as a script.
    main()
3576714 | #!/usr/bin/python
# -*- coding:utf-8 _*-
# # FileName : book_urls
# # Author : <NAME> <<EMAIL>>
# # Created : 2018/2/7
# # Copyright : 2018-2020
# # Description :
import os
def getallfiles(path):
    """Recursively collect every file under ``path``.

    :param path: root directory to walk.
    :return: iterator (a ``zip`` object, matching the original contract) of
             ``(file_name, full_path)`` tuples.
    """
    names = []
    full_paths = []
    for dirpath, _dirnames, filenames in os.walk(path):
        for fname in filenames:
            names.append(fname)
            full_paths.append(os.path.join(dirpath, fname))
    return zip(names, full_paths)
def get_FileSize(filePath):
    """Return the size of ``filePath`` in mebibytes, rounded to 2 decimals."""
    size_bytes = os.path.getsize(filePath)
    size_mib = size_bytes / float(1024 * 1024)
    return round(size_mib, 2)
if __name__ == '__main__':
    # Ad-hoc driver: list every file under the hard-coded (Windows-only)
    # books directory together with its size in MiB.
    path = "F:\\书籍\\"
    allfile = getallfiles(path)
    for file in allfile:
        print(file[0],file[1],get_FileSize(file[1]))
| StarcoderdataPython |
6560019 | """
Shell Sort
Approach: Divide and Conquer
Complexity:
Best case -> O(nlogn)
Worst case -> O(n2)
"""
def swap(a, b, c):
    # NOTE(review): this function has no observable effect — it only rebinds
    # its local parameter names (Python passes object references by
    # assignment), so the caller's variables are untouched. A real rotation
    # would need a mutable container, e.g. swap(arr, i, j, k). Left as-is
    # because changing the signature would break any caller.
    t = a  # a
    a = b  # b
    b = c  # c
    c = t
def sort_quick_partition(input_arr, i, j, p):
    # Single partition step: rotate arr[i], arr[j], arr[p] when arr[i] exceeds
    # the pivot arr[p], otherwise advance i.
    # NOTE(review): the index rebindings (j -= 1, p -= 1, i += 1) only change
    # this function's locals — ints are immutable, so the caller never sees
    # the updated indices. Only the element rotation on input_arr is
    # externally visible, which is why the driver loop in sort_quick cannot
    # make progress with this helper.
    if input_arr[i] > input_arr[p]:
        temp = input_arr[i]
        input_arr[i] = input_arr[j]
        input_arr[j] = input_arr[p]
        input_arr[p] = temp
        j -= 1
        p -= 1
    else:
        i += 1
def sort_quick(input_arr):
    """Sort ``input_arr`` in place with quicksort (Lomuto partition scheme).

    Fixes the original implementation, which never terminated: its
    ``is_sorted`` flag was never set, the ``i == j`` branch merely named
    ``sort_quick_partition`` without calling it, and the partition helper
    could not hand updated indices back to the loop (ints are immutable).
    The input/result banners are kept; the per-pass debug trace of the broken
    loop is dropped.
    """
    print("""""""""""""""""""""""""")
    print("input " + str(input_arr))
    print("""""""""""""""""""""""""")

    def _quicksort(lo, hi):
        # Recursively sort input_arr[lo..hi] (inclusive bounds).
        if lo >= hi:
            return
        pivot = input_arr[hi]
        store = lo
        for k in range(lo, hi):
            if input_arr[k] <= pivot:
                input_arr[store], input_arr[k] = input_arr[k], input_arr[store]
                store += 1
        # Move the pivot into its final position between the two halves.
        input_arr[store], input_arr[hi] = input_arr[hi], input_arr[store]
        _quicksort(lo, store - 1)
        _quicksort(store + 1, hi)

    _quicksort(0, len(input_arr) - 1)

    print("""""""""""""""""""""""""")
    print("result " + str(input_arr))
    print("""""""""""""""""""""""""")
if __name__ == '__main__':
    # Demo run on a fixed sample list (sorted in place).
    arr = [21, 4, 1, 3, 9, 20, 25, 6, 21, 14]
    sort_quick(arr)
| StarcoderdataPython |
11271526 | '''
3-Crie um programa que leia dois números e mostre a soma entre eles.
'''
# Read two integers from the user and print their sum (exercise 3).
n1 = int(input("Primeiro numero: "))
n2 = int(input("Segundo numero: "))
soma = n1 + n2
print("A soma entre {} e {} = {}".format(n1, n2, soma))
| StarcoderdataPython |
a = 1
b = 2
print(b)
# Fixed: the original printed the undefined name ``A`` (NameError at
# runtime); the lowercase variable ``a`` defined above was clearly intended.
print(a)
| StarcoderdataPython |
6580563 | <filename>SIR_models.py
import os
import numpy as np
import pandas as pd
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from datetime import timedelta, datetime
import datetime as dt
yellow = (240/255, 203/255, 105/255)
grey = (153/255, 153/255, 153/255)
faded_grey = (153/255, 153/255, 153/255, .25)
red = (220/255, 83/255, 86/255)
class SIR(object):
    def __init__(self,
                 country='Brazil',
                 nth=100,
                 daysPredict=150,
                 alpha=[.5, .5],
                 parameter_bounds={},
                 constraints_bounds={},
                 force_parameters={},
                 # betaBounds=(0.00000001, 2.0),
                 # gammaBounds=(0.00000001, 2.0),
                 # S0pbounds=(10000, 10e6),
                 # R0bounds=None,
                 hospitalization_rate=0.05,
                 adjust_recovered=True,
                 cut_sample_date=None,
                 dir = os.path.join(
                     ".",
                     "COVID-19",
                     "csse_covid_19_data",
                     "csse_covid_19_time_series",
                 )
                 ):
        """Configure the SIR model and load all data for ``country``.

        :param country: country name as used in the CSSE CSVs.
        :param nth: minimum cumulative case count that starts the sample.
        :param daysPredict: length of the forecast horizon in days.
        :param alpha: weights for the infectious/removed terms of the loss.
        :param parameter_bounds: {name: (lo, hi)} optimizer bounds.
        :param constraints_bounds: {'R0': (lo, hi)} constraint bounds.
        :param force_parameters: {name: value} parameters held fixed.
        :param cut_sample_date: datetime or trailing-days int to truncate the sample.
        :param dir: directory containing the CSSE time-series CSVs.

        NOTE(review): mutable default arguments (alpha, parameter_bounds,
        constraints_bounds, force_parameters) are shared across instances —
        any in-place mutation (set_default_bounds mutates these dicts) leaks
        between models constructed with the defaults. TODO confirm intended.
        """
        # Snapshot of the constructor arguments, used by create_new_object()
        # to clone this model with tweaked settings.
        self.all_attributes = locals()
        del self.all_attributes['self']
        self.constraints_bounds = constraints_bounds
        self.country = country
        self.nth = nth  # minimum number of cases to start modelling
        self.daysPredict = daysPredict
        self.alpha = alpha
        self.parameter_bounds = parameter_bounds
        self.force_parameters = force_parameters
        self.cut_sample_date = cut_sample_date
        self.dir = dir
        if not os.path.exists("Exports"): os.mkdir("Exports")
        initial_guesses = {
            'beta': .2,
            'gamma': .07,
            'S0p': .05
        }
        # Subclasses may declare a class-level `initial_guesses` dict; merge
        # it over the defaults. On the base class this branch is never taken.
        if hasattr(self, 'initial_guesses'):
            self.initial_guesses = {**initial_guesses, **self.initial_guesses}
        else:
            self.initial_guesses = {
                'beta': .2,
                'gamma': .07,
                'S0p': .05
            }
        self.hospitalization_rate = hospitalization_rate
        self.adjust_recovered = adjust_recovered
        self.load_data()
        self.end_data = self.confirmed.index.max()
        self.quarantine_loc = float(self.confirmed.index.get_loc(self.quarantine_date))
        self.model_type = 'SIR'
    def load_CSSE(self,):
        """Load the JHU CSSE global time series for ``self.country``.

        Reads the confirmed/deaths/recovered CSVs from ``self.dir``, drops
        the Province/State, Lat and Long columns, transposes so dates become
        the index, and stores the country columns as ``self.confirmed``,
        ``self.fatal`` and ``self.recovered`` (cumulative counts).
        """
        confirmed = pd.read_csv(
            os.path.join(
                self.dir,
                "time_series_covid19_confirmed_global.csv"
            )
        )
        # Drop Province/State, Lat, Long; keep one column per country, dates as rows.
        confirmed = confirmed.drop(confirmed.columns[[0, 2, 3]], axis=1).set_index('Country/Region').T
        confirmed.index = pd.to_datetime(confirmed.index)
        self.confirmed = confirmed[self.country]

        deaths = pd.read_csv(
            os.path.join(
                self.dir,
                "time_series_covid19_deaths_global.csv"
            )
        )
        deaths = deaths.drop(deaths.columns[[0, 2, 3]], axis=1).set_index('Country/Region').T
        deaths.index = pd.to_datetime(deaths.index)
        self.fatal = deaths[self.country]

        recovered = pd.read_csv(
            os.path.join(
                self.dir,
                "time_series_covid19_recovered_global.csv"
            )
        )
        recovered = recovered.drop(recovered.columns[[0, 2, 3]], axis=1).set_index('Country/Region').T
        recovered.index = pd.to_datetime(recovered.index)
        self.recovered = recovered[self.country]
    def load_population(self, dir="Population.xlsx"):
        """
        This function loads the country's population from an excel spreadsheet that should have a list of countries
        on the first column and the population on the second. The sheet headers should be ´Country´ and ´Population´.
        The function saves the population to ´self.country_population´
        :param dir: path do file
        :return: None
        """
        df = pd.read_excel(dir).set_index('Country')
        # .loc[country] returns the row; [0] takes its first (Population) value.
        self.country_population = df.loc[self.country][0]
    def load_quarantine_date(self, dir="Quarantine_dates.xlsx"):
        """
        Loads the country's quarantine start date from an excel spreadsheet with
        countries on the first column and the quarantine date on the second
        (headers ´Country´ and ´Quarantine´). Saves it to ´self.quarantine_date´;
        when the country is absent, falls back to the last sample date.
        :param dir: path to file
        :return: None
        """
        df = pd.read_excel(dir).set_index('Country')
        if self.country in df.index:
            self.quarantine_date = df.loc[self.country][0]
            # Positional location of the quarantine date within the sample index.
            self.quarantine_loc = float(self.confirmed.index.get_loc(self.quarantine_date))
        else:
            # No recorded quarantine: treat the end of the sample as the date.
            self.quarantine_date = self.confirmed.index[-1]
    def load_data(self):
        """Load and preprocess all series used by the model.

        Pipeline: read the CSSE series and the population, optionally smooth
        the recovered curve, truncate everything to start at the date the
        ``nth`` case was reached, flag quarantine days, derive the "true"
        infectious/removed series, and build the optimizer inputs.
        """
        self.load_CSSE()
        self.load_population()

        # Adjust recovered curve (fills non-increasing stretches by interpolation)
        if self.adjust_recovered:
            self.recovered = self.smoothCurve(self.recovered)

        # find date in which nth case is reached
        nth_index = self.confirmed[self.confirmed >= self.nth].index[0]

        self.load_quarantine_date()
        # Boolean series marking every sample date on/after the quarantine start.
        quarantine_index = pd.Series(False, index=self.confirmed.index)
        quarantine_index[quarantine_index.index >= self.quarantine_date] = True
        self.quarantine_index = quarantine_index.loc[nth_index:]

        self.confirmed = self.confirmed.loc[nth_index:]
        self.fatal = self.fatal.loc[nth_index:]
        self.recovered = self.recovered.loc[nth_index:]

        self.initialize_parameters()

        # True data series: R = removed (recovered + fatal), I = active cases.
        self.R_actual = self.fatal + self.recovered
        self.I_actual = self.confirmed - self.R_actual

        self.build_optimization_inputs()
    def cut_sample(self):
        """Truncate the actual series at ``self.cut_sample_date``.

        The attribute may be a datetime (used directly) or an int, interpreted
        as "drop the last N days". No-op when it is None/falsy.
        """
        cutDate = self.cut_sample_date
        if cutDate:
            if not isinstance(cutDate, dt.datetime):
                # Integer: cut this many days off the end of the sample.
                cutDate = self.I_actual.index[-1] + dt.timedelta(days=-cutDate)
                # cutDate = self.F_actual.index[-1] + dt.timedelta(days=-days)
            self.I_actual = self.I_actual.loc[:cutDate].copy()
            self.R_actual = self.R_actual.loc[:cutDate].copy()
            # self.F_actual = self.F_actual.loc[:self.cut_sample_date]
def set_default_bounds(self):
"""
Sets the default values for unprovided bounds
:return:
"""
if 'R0' not in self.constraints_bounds.keys():
self.constraints_bounds['R0'] = (0, 20)
if 'beta' not in self.parameter_bounds.keys():
self.parameter_bounds['beta'] = (.01, .5)
if 'gamma' not in self.parameter_bounds.keys():
self.parameter_bounds['gamma'] = (.01, .2)
if 'S0p' not in self.parameter_bounds.keys():
self.parameter_bounds['S0p'] = (.01, .12)
    def build_optimization_inputs(self):
        """
        Since we allow parameters to be forced, we need a function to create the optimization inputs.
        Also, checks bounds that weren't provided and set them to default values.

        Builds, in matching order: ``variable_parameters_list`` (free parameter
        names), ``optimization_initial_values`` and ``optimization_bounds``,
        then seeds ``model_params`` and registers the R0 constraints.
        :return:
        """
        self.set_default_bounds()
        # optimization takes two arrays, one of initial values and one of bounds (same order)
        self.variable_parameters_list = []
        self.optimization_initial_values = []
        self.optimization_bounds = []
        for param in self.parameter_bounds.keys():
            if param not in self.force_parameters.keys():
                self.variable_parameters_list.append(param)
                # Substring matching lets subclass parameters like 'beta_q'
                # inherit the base guess of the matching family.
                if 'beta' in param:
                    self.optimization_initial_values.append(self.initial_guesses['beta'])
                elif 'gamma' in param:
                    self.optimization_initial_values.append(self.initial_guesses['gamma'])
                elif 'omega' in param:
                    self.optimization_initial_values.append(self.initial_guesses['gamma'])
                elif 'S0p' in param:
                    self.optimization_initial_values.append(self.initial_guesses['S0p'])
                else:
                    self.optimization_initial_values.append(self.initial_guesses[param])
                self.optimization_bounds.append(self.parameter_bounds[param])
        self.model_params = self.wrap_parameters(self.optimization_initial_values)
        # constraints: SLSQP 'ineq' constraints require fun(x) >= 0.
        self.constraints = [
            {'type': 'ineq', 'fun': self.const_lowerBoundR0},
            {'type': 'ineq', 'fun': self.const_upperBoundR0},
        ]
        self.add_constraints()
    def add_constraints(self):
        """Hook for subclasses to append extra entries to ``self.constraints``.

        The base model registers only the R0 bound constraints; subclasses
        (e.g. SIRFH, see ``create_new_object``) override this to add more.
        """
        pass
def wrap_parameters(self, point):
"""
Gets a list-like array and transform it to a parameter dictionary
:param point: list-like array
:return: dictionary containing the parameters names as keys
"""
dic = {}
for i in range(0, len(self.variable_parameters_list)):
param = self.variable_parameters_list[i]
dic[param] = point[i]
return {**dic, **self.force_parameters}
def calculateS0(self, S0p):
# return self.country_population * S0p - self.I_0 - self.H_0 - self.R_0 - self.F_0
return self.country_population * S0p - self.I_0 - self.R_0
def smoothCurve(self, df):
df[df.diff() <= 0] = np.nan
# df.loc[dt.datetime(2020, 4, 9)] = np.nan
df.interpolate('linear', inplace=True)
return df
def initialize_parameters(self):
self.R_0 = self.recovered[0] + self.fatal[0]
self.I_0 = (self.confirmed.iloc[0] - self.R_0)
def extend_index(self, index, new_size):
new_values = pd.date_range(start=index[-1], periods=new_size)
new_index = index.join(new_values, how='outer')
return new_index
def calculate_r0(self):
"""
Using the ´self.params´ dictionary, calculates R0
:return: R0
"""
gamma = self.calculate_gamma()
return self.params['beta'] / gamma
def calculate_gamma(self):
"""
Using the ´self.params´ dictionary, calculates gamma.
:return: R0
"""
if hasattr(self, 'model_params'):
gamma = self.model_params['gamma']
else:
gamma = .07
return gamma
def calculate_rmse(self, actual, forecast, cutDate, verbose=False):
# Separate only the according values on the Dfs
mse_F_actual = actual.loc[cutDate:].copy().diff()
mse_F_forecast = forecast.loc[cutDate:].copy().diff()
# get the size of it
T = mse_F_actual.shape[0]
mse = (((mse_F_forecast - mse_F_actual) ** 2).sum() / T) ** .5
if verbose:
print("MSE: {mse}".format(mse=mse))
return mse
    def estimate(self, verbose=True, options=None, loss_func=None):
        """Fit the free parameters with SLSQP under the R0 constraints.

        :param verbose: print the fitted parameters after optimization.
        :param options: scipy ``minimize`` options dict (passed through).
        :param loss_func: alternative objective (defaults to ``self.loss``).
        Stores the optimizer result in ``self.optimizer``, the merged
        parameter dict in ``self.params`` and the implied ``self.R0``.
        """
        if not loss_func:
            loss_func = self.loss
        optimal = minimize(
            loss_func,
            self.optimization_initial_values,
            args=(),
            method='SLSQP',
            bounds=self.optimization_bounds,
            constraints=self.constraints,
            options=options,
        )
        self.optimizer = optimal
        params = self.wrap_parameters(optimal.x)
        self.params = {**self.force_parameters, **params}
        self.R0 = self.calculate_r0()
        if verbose:
            # NOTE(review): print_parameters is not defined in this chunk —
            # presumably provided elsewhere or by a subclass; TODO confirm.
            self.print_parameters()
def model(self, t, y):
S = y[0]
I = y[1]
R = y[2]
S0_model = self.calculateS0(self.model_params['S0p'])
ret = [-self.model_params['beta'] * S * I / S0_model, # S
self.model_params['beta']* S * I / S0_model - self.model_params['gamma'] * I, # I
self.model_params['gamma'] * I] # R
return ret
    def loss(self, point):
        """
        RMSE between actual confirmed cases and the estimated infectious people with given beta and gamma.

        Integrates the SIR ODE over the sample length and returns the
        alpha-weighted sum of the infectious-series RMSE (l1) and the
        removed-series RMSE (l2).
        """
        size = self.I_actual.shape[0]
        self.model_params = self.wrap_parameters(point)
        S0 = self.calculateS0(self.model_params['S0p'])
        # solution = solve_ivp(SIR, [0, size], [S_0, self.I_0, self.R_0], t_eval=np.arange(0, size, 1), vectorized=True)
        solution = solve_ivp(self.model, [0, size], [S0, self.I_0, self.R_0], t_eval=np.arange(0, size, 1), vectorized=True)
        # Put more emphasis on recovered people
        alpha = self.alpha
        l1 = np.sqrt(np.mean((solution.y[1] - self.I_actual) ** 2))
        l2 = np.sqrt(np.mean((solution.y[2] - self.R_actual) ** 2))
        return alpha[0] * l1 + alpha[1] * l2
    def loss_outOfSample(self, point):
        """
        Alternative loss function to be use with the out-of-sample RMSE estimation method.
        This takes *exclusively* the S0p parameter, predicts out of sample and returns the RMSE.
        :param point: parameters array
        :return: RMSE

        NOTE(review): relies on ``self.F_actual`` and the ``F`` column of the
        prediction frame, neither of which the base SIR class defines —
        presumably meant for fatal-tracking subclasses (SIRFH); TODO confirm.
        """
        params = self.wrap_parameters(point)
        self.model_params = params
        self.params = params
        # Score only the last `outOfSample_days` days of the sample.
        cutDate = self.F_actual.index[-1] + dt.timedelta(days=-self.outOfSample_days)
        self.predict()
        forecast = self.df.copy()
        # Calculate MSE (actually the RMSE of the daily changes)
        self.mse = self.calculate_rmse(self.F_actual, forecast.F, cutDate)
        return self.mse
    def predict(self,):
        """
        Predict how the number of people in each compartment can be changed through time toward the future.
        The model is formulated with the given beta and gamma.

        Integrates the fitted model over the sample plus ``daysPredict`` days
        and stores the result in ``self.df`` with columns I_Actual/R_Actual
        (NaN beyond the sample) and the simulated S/I/R paths.
        """
        predict_range = self.daysPredict
        # print(self.confirmed.index)
        new_index = self.extend_index(self.confirmed.index, predict_range)
        size = len(new_index)
        self.model_params = self.params
        self.quarantine_loc = float(self.confirmed.index.get_loc(self.quarantine_date))
        S0 = self.calculateS0(self.model_params['S0p'])
        prediction = solve_ivp(self.model, [0, size], [S0, self.I_0, self.R_0],
                               t_eval=np.arange(0, size, 1))
        df = pd.DataFrame({
            'I_Actual': self.I_actual.reindex(new_index),
            'R_Actual': self.R_actual.reindex(new_index),
            'S': prediction.y[0],
            'I': prediction.y[1],
            'R': prediction.y[2]
        }, index=new_index)
        self.df = df
    def train(self, options=None, loss_func=None, verbose=True):
        """
        Run the optimization to estimate parameters fitting real cases,
        then produce the forecast frame (``self.df``) via ``predict``.

        When every parameter is forced there is nothing to optimize: the
        forced values are copied straight into ``params``/``model_params``.
        """
        if self.variable_parameters_list:
            self.estimate(options=options, loss_func=loss_func, verbose=verbose)
        else:
            self.params = self.force_parameters.copy()
            self.model_params = self.params.copy()
        self.predict()
    def rolling_estimation(self):
        """
        Re-estimates the model for an increasing-only (expanding) window for every data point.
        Temporarily truncates the actual series per date, refits, and records
        the fitted parameters.
        :return: pandas dataframe containing the historical parameters

        NOTE(review): the actual series are left truncated to the final loop
        date rather than restored afterwards — TODO confirm intended.
        """
        params_list = []
        # Keep full copies so each loop iteration can re-truncate from scratch.
        I_actual = self.I_actual.copy()
        R_actual = self.R_actual.copy()
        F_actual = self.F_actual.copy()
        for date in self.confirmed.index:
            self.I_actual = I_actual.loc[:date]
            self.R_actual = R_actual.loc[:date]
            self.F_actual = F_actual.loc[:date]
            self.estimate(verbose=False)
            params_list.append(self.params)
        self.rolling_parameters = pd.DataFrame(params_list)
        self.rolling_parameters.index = self.I_actual.index
        return self.rolling_parameters
    def outOfSample_forecast(self, cutDate=None, plot=True, days=14, k=1):
        """Refit on data up to ``cutDate`` and score the forecast beyond it.

        :param cutDate: sample cut; defaults to ``days`` before the last date.
        :param plot: also draw the out-of-sample plot.
        :param days: forecast window length when ``cutDate`` is None.
        :param k: passed through to ``outOfSample_plot``.
        :return: RMSE of the daily fatal changes over the held-out window.
        """
        if not cutDate:
            cutDate = self.F_actual.index[-1] + dt.timedelta(days=-days)
        # Save the full series, truncate, refit, then restore.
        I_actual = self.I_actual.copy()
        R_actual = self.R_actual.copy()
        F_actual = self.F_actual.copy()
        self.I_actual = I_actual.loc[:cutDate]
        self.R_actual = R_actual.loc[:cutDate]
        self.F_actual = F_actual.loc[:cutDate]
        self.estimate(verbose=False)
        self.predict()
        self.forecast = self.df.copy()
        self.I_actual = I_actual.copy()
        self.R_actual = R_actual.copy()
        self.F_actual = F_actual.copy()
        # Calculate MSE (RMSE of daily changes) against the restored full series.
        self.mse = self.calculate_rmse(self.F_actual, self.forecast.F, cutDate, verbose=True)
        if plot:
            self.outOfSample_plot(cutDate, days=days, k=k)
        return self.mse
    def plot_forecast(self, ax, diff, window, scenarios, cutDate=None, verbose=False):
        """Plot in/out-of-sample fatal forecasts with an S0p scenario band.

        Fits one model on data up to ``cutDate`` for the optimal parameters,
        then re-runs it once per value in ``scenarios`` (forcing ``S0p``).
        ``scenarios`` must hold exactly three values ordered low/central/high:
        the middle one is drawn as the forecast, the outer two as the band.
        Also exports the plotted series to an Excel file under ``Exports``.

        :param ax: matplotlib axis to draw into.
        :param diff: plot daily changes instead of cumulative levels.
        :param window: out-of-sample horizon in days (also used for cutDate).
        :param scenarios: three S0p values [lower, central, upper].
        """
        if not cutDate:
            cutDate = self.F_actual.index[-1] + dt.timedelta(days=-window)
        scenarios_forecast = []
        # Estimate a new scenario for each `S0p` in `scenarios`
        # estimate optimal parameters
        new_args = self.all_attributes.copy()
        new_args['cut_sample_date'] = cutDate
        estimator = self.create_new_object(self.model_type, new_args)
        estimator.train(verbose=verbose)
        optimal_parameters = estimator.params.copy()
        for scenario in scenarios:
            new_args = self.all_attributes.copy()
            new_args['cut_sample_date'] = cutDate
            new_args['force_parameters'] = optimal_parameters.copy()
            # (commented-out experiment kept for history: pinning delta/lambda
            # via degenerate bounds instead of forcing them, because forcing
            # every parameter crashed the optimizer)
            new_args['force_parameters']['S0p'] = scenario
            estimator = self.create_new_object(self.model_type, new_args)
            estimator.train(verbose=verbose)
            scenario_forecast = estimator.df.copy().F
            # if we are looking for past out of sample forecasts, we don't need the entire projection window.
            # If we are looking for future forecasts, we can leave the entire window.
            if window > 0:
                # NOTE(review): reindex returns a new object; the result is
                # discarded here, so this line has no effect — TODO confirm.
                scenario_forecast.reindex(self.F_actual.index)
            scenarios_forecast.append(scenario_forecast)

        ########## PLOT ################
        actual = self.F_actual.copy()
        if diff:
            actual = actual.diff()
            for i in range(len(scenarios_forecast)):
                scenarios_forecast[i] = scenarios_forecast[i].diff()
        forecast_outOfSample = scenarios_forecast[1].loc[cutDate:(cutDate + dt.timedelta(days=window))].copy()
        forecast_inSample = scenarios_forecast[1].loc[:cutDate].copy()
        lowerBound = scenarios_forecast[0].loc[cutDate:(cutDate + dt.timedelta(days=window))].copy()
        upperBound = scenarios_forecast[2].loc[cutDate:(cutDate + dt.timedelta(days=window))].copy()
        # plot true data
        actual.plot(color=yellow, marker='o', ax=ax, label='True data')
        # plot forecast
        forecast_outOfSample.plot(color=grey, marker='o', ax=ax, label='Out-of-sample forecast')
        forecast_inSample.plot(color=grey, ax=ax, label='In-sample forecast')
        # plot forecast scenarios (margins)
        ax.fill_between(forecast_outOfSample.index, lowerBound, upperBound,
                        facecolor=faded_grey)
        ax.legend()
        # Export the plotted series for external inspection.
        df = pd.concat([actual, forecast_outOfSample, forecast_inSample, lowerBound, upperBound], axis=1)
        df.columns = ['Actual', 'Forecast_outOfSample', 'Forecast_inSample', 'lowerBound', 'upperBound']
        df.to_excel(os.path.join(
            ".",
            "Exports",
            f"{self.country}_forecast_{window}days_diff_{diff}.xlsx",
        ))
    def outOfSample_forecast_scenarios(self, cutDate=None, days=[7, 14], scenarios=[.005, .01, .015], verbose=False, figsize=(15,10)):
        """Grid of level/daily forecast plots, one row per horizon in ``days``.

        When ``scenarios == 'estimate'``, first fits the model to get a
        central S0p and builds the three scenarios around it (+/- 0.005, or
        +/- 20% for very small estimates); otherwise uses the given list.
        Saves the figure to ``Exports/<country>_outOfSample_forecast.png``.
        :return: True on completion.
        """
        method = 'Standard Scenarios'
        if scenarios == 'estimate':
            method = 'Estimated Scenarios'
            new_args = self.all_attributes.copy()
            new_args['cut_sample_date'] = cutDate
            estimator = self.create_new_object(self.model_type, new_args)
            estimator.train(verbose=verbose)
            S0p_estimate = estimator.params.copy()['S0p']
            if S0p_estimate >= 0.006:
                scenarios = [S0p_estimate-0.005, S0p_estimate, S0p_estimate + 0.005]
            else:
                # Small estimates: an absolute 0.005 band would cross zero.
                scenarios = [S0p_estimate * .8, S0p_estimate, S0p_estimate * 1.2]
            print(scenarios)
        n_subplots = len(days)
        fig, axes = plt.subplots(nrows=n_subplots, ncols=2, figsize=figsize)
        for i in range(n_subplots):
            window = days[i]
            # Left column: cumulative levels; right column: daily changes.
            axes[i, 0].set_title('Fatalities forecast - {window} days ahead'.format(window=window))
            axes[i, 1].set_title('Daily fatalities forecast - {window} days ahead'.format(window=window))
            self.plot_forecast(ax=axes[i, 0], diff=False, window=window, scenarios=scenarios, cutDate=cutDate, verbose=verbose)
            self.plot_forecast(ax=axes[i, 1], diff=True, window=window, scenarios=scenarios, cutDate=cutDate, verbose=verbose)
        plt.tight_layout()
        fig.suptitle('{model} - {country} - Out-of-sample forecasts\nMethod: {method}'.format(model=self.model_type,
                                                                                              country=self.country, method=method),
                     fontsize=16, y=1.05)
        plt.savefig(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_outOfSample_forecast.png"
            ),
            bbox_inches='tight'
        )
        return True
    def outOfSample_forecast_S0(self, cutDate=None, plot=True, days=14, k=1):
        """Out-of-sample scoring using the two-stage ``train_S0`` procedure.

        Clones this model with the sample cut at ``cutDate``, runs
        ``train_S0`` on the clone, and scores its fatal forecast against the
        full sample.
        :return: RMSE of the daily fatal changes over the held-out window.
        """
        if not cutDate:
            cutDate = self.F_actual.index[-1] + dt.timedelta(days=-days)
        new_args = self.all_attributes.copy()
        new_args['cut_sample_date'] = cutDate
        estimator = self.create_new_object(self.model_type, new_args)
        # NOTE(review): requires self.S0_initial_guess, which only train_S0
        # sets — calling this before train_S0 raises AttributeError; confirm.
        estimator.train_S0(days=days, S0_initial_guess=self.S0_initial_guess,)
        self.forecast = estimator.df.copy()
        self.forecast = self.forecast.reindex(self.F_actual.index)
        # Calculate MSE (RMSE of daily changes)
        self.mse = self.calculate_rmse(self.F_actual, self.forecast.F, cutDate, verbose=True)
        if plot:
            self.outOfSample_plot(cutDate, days=days, k=k)
        return self.mse
    def create_new_object(self, name='SIR', data= None):
        """
        Auxiliary method to create new model instances from within the code. Because of inheritance,
        we need to be able to know which model we are instantiating
        :param name: `string` the same as a class name
        :param data: `**kwargs`
        :return: `SIR`-like object (implicitly None for unknown names)

        NOTE(review): the name lookup is deliberately lazy (plain ifs, not a
        dispatch dict) so that referencing SIRFH/SIRFH_Sigmoid — defined
        later in the module — only happens when actually requested.
        """
        if name == 'SIR':
            return SIR(**data)
        if name == 'SIRFH':
            return SIRFH(**data)
        if name == 'SIRFH_Sigmoid':
            return SIRFH_Sigmoid(**data)
    def train_S0(self, options=None, days=7, S0_initial_guess=.01):
        """Three-stage fit that estimates S0p by out-of-sample RMSE.

        Step #1 - Train with initial S0 guess (usually around 5%).
          (Problem: if the initial guess is too bad, the other parameters may
          not behave well; a possible alternative is a joint RMSE optimization
          with tighter bounds — see ``train_S0_joint``.)
        Step #2 - Lock those parameters and minimize the out-of-sample RMSE
          over the last ``days`` days with respect to S0p alone.
        Step #3 - Re-train the full model with the chosen S0p for the final
          projections; copy its results onto this instance.
        """
        self.S0_initial_guess = S0_initial_guess
        # Step #1 - Train with initial S0 guess (usually around 5%)
        new_args = self.all_attributes.copy()
        new_args['force_parameters']['S0p'] = S0_initial_guess
        other_params_estimator = self.create_new_object(self.model_type, new_args)
        # Step #2 - Lock parameters and minimize out of sample RMSE with respect to S0
        other_params_estimator.train(verbose=False)
        new_params = other_params_estimator.params
        del new_params['S0p']
        new_args = self.all_attributes.copy()
        new_args['force_parameters'] = new_params
        s0_estimator = self.create_new_object(self.model_type, new_args)
        s0_estimator.outOfSample_days = days
        s0_estimator.train(loss_func=s0_estimator.loss_outOfSample, verbose=False, options=options)
        # Step #3 - estimate final model with final S0 estimate
        new_args = self.all_attributes.copy()
        new_args['force_parameters']['S0p'] = s0_estimator.params['S0p']
        final_estimator = self.create_new_object(self.model_type, new_args)
        final_estimator.train(options=options)
        self.df = final_estimator.df
        self.params = final_estimator.params
        self.model_params = final_estimator.params
    def train_S0_joint(self, options=None, days=7, ):
        """Joint alternative to ``train_S0``: fit *all* free parameters at
        once by minimizing the out-of-sample RMSE over the last ``days`` days.

        (Rationale, per the original notes: the staged approach can misbehave
        when the initial S0 guess is poor, so here a single optimization with
        the out-of-sample loss replaces the lock-then-refit steps. A large
        commented-out draft of the staged version was removed for clarity.)
        """
        self.outOfSample_days = days
        self.train(loss_func=self.loss_outOfSample)
############## CONSTRAINT METHODS ################
def const_lowerBoundR0(self, point):
"constraint has to be R0 > bounds(0) value, thus (R0 - bound) > 0"
# self.const_lowerBoundR0_S0opt.__code__.co_varnames
# print(**kwargs)
# print(locals())
params = self.wrap_parameters(point)
lowerBound = self.constraints_bounds['R0'][0]
gamma = self.calculate_gamma()
return (params['beta']/gamma) - lowerBound
def const_upperBoundR0(self, point):
# print(locals())
# print(**kwargs)
# self.const_upperBoundR0_S0opt.__code__.co_varnames
params = self.wrap_parameters(point)
upperBound = self.constraints_bounds['R0'][1]
gamma = self.calculate_gamma()
return upperBound - (params['beta']/gamma)
############## VISUALIZATION METHODS ################
def I_fit_plot(self):
line_styles = {
'I_Actual': '--',
'R_Actual': '--',
}
self.df[['I_Actual', 'I']].loc[:self.end_data].plot(style=line_styles)
def R_fit_plot(self):
line_styles = {
'I_Actual': '--',
'R_Actual': '--',
}
self.df[['R_Actual', 'R']].loc[:self.end_data].plot(style=line_styles)
def main_plot(self):
fig, ax = plt.subplots(figsize=(15, 10))
ax.set_title(self.country)
line_styles ={
'I_Actual': '--',
'R_Actual': '--',
}
# color = {
# 'I_Actual': '#FF0000',
# # 'R_Actual': '--',
# }
self.df.plot(ax=ax, style=line_styles, )
def rollingPlot(self, export=False, parameters_list=None):
if parameters_list:
rolling_parameters = self.rolling_parameters[parameters_list]
else:
rolling_parameters = self.rolling_parameters
axes = rolling_parameters.plot()
fig = axes.get_figure()
axes.axvline(x=self.quarantine_date, color='red', linestyle='--', label='Quarentine')
if export:
rolling_parameters.to_excel('export_rolling.xlsx')
def outOfSample_plot(self, cutDate=None, days=14, diff=False, k=1):
if not cutDate:
cutDate = self.F_actual.index[-1] + dt.timedelta(days=-days)
actual = self.F_actual.loc[:].copy()
forecast_outOfSample = self.forecast.loc[cutDate:(cutDate + dt.timedelta(days=days))].F.copy()
forecast_inSample = self.forecast.loc[:cutDate].F.copy()
std = actual.diff().std()
if diff:
actual = actual.diff()
forecast_outOfSample = forecast_outOfSample.diff()
forecast_inSample = forecast_inSample.diff()
# plot true data
axes = actual.plot(color=yellow, marker='o')
fig = axes.get_figure()
# plot forecast
forecast_outOfSample.plot(color=grey, marker='o')
forecast_inSample.plot(color=grey)
# pd.DataFrame([forecast_outOfSample, forecast_inSample])
# plot forecast scenarios (margins
axes.fill_between(forecast_outOfSample.index, forecast_outOfSample - k * std, forecast_outOfSample + k * std, facecolor=faded_grey)
# return forecast_outOfSample
def print_parameters(self):
for param in self.params.keys():
print("{param}: {value}".format(param=param, value=self.params[param]))
print("S0: {value}".format(value=self.calculateS0(self.params['S0p'])))
print('R0:{R0}'.format(R0=self.R0))
class SIRFH(SIR):
    """
    This SIR extension split the infected compartiment into non-hospital and hospital cases and the recovered group
    into recovered and fatal
    $$\frac{dS}{dt} = - \frac{\beta IS}{N}$$
    $$\frac{dIN}{dt} = (1 - \rho) \times \frac{\beta IS}{N} - \gamma_{IN} IN$$
    $$\frac{dIH}{dt} = \rho \times \frac{\beta IS}{N} - (1-\delta) \times \gamma_{IH} IH - \delta \times \omega_{IH} IH$$
    $$\frac{dR}{dt} = \gamma_{IN} IN + (1-\delta) \times \gamma_{IH} IH $$
    $$\frac{dF}{dt} = \delta \times \omega_{IH} IH$$
    """
    def __init__(self,
                 alpha=(0.025, 0.005, .97),
                 hospitalization_rate=.05,
                 **kwargs):
        # alpha weights the (I, R, F) components of the fitting loss;
        # rho is the share of infections that are hospitalized.
        self.rho = hospitalization_rate
        initial_guesses = {
            'delta': .05,
        }
        # Merge with any guesses a subclass may have set before calling super().__init__.
        if hasattr(self, 'initial_guesses'):
            self.initial_guesses = {**initial_guesses, **self.initial_guesses}
        else:
            self.initial_guesses = {
                'delta': .05,
            }
        new_args = {**kwargs, **{'alpha': alpha, 'hospitalization_rate': hospitalization_rate}}
        super().__init__(**new_args)
        self.model_type = 'SIRFH'
    def set_default_bounds(self):
        """
        Sets the default values for unprovided bounds
        :return:
        """
        super().set_default_bounds()
        # Rate bounds are expressed as 1 / (duration in days).
        if 'gamma_i' not in self.parameter_bounds.keys():
            self.parameter_bounds['gamma_i'] = (1/(3*7), 1/(1*7))
        if 'gamma_h' not in self.parameter_bounds.keys():
            self.parameter_bounds['gamma_h'] = (1/(7*7), 1/(2*7))
        if 'omega' not in self.parameter_bounds.keys():
            self.parameter_bounds['omega'] = (1/(20), 1/(5))
        if 'delta' not in self.parameter_bounds.keys():
            # 79/165 is presumably an empirical fatality share among hospitalized
            # patients — TODO confirm the source of this constant.
            self.parameter_bounds['delta'] = (0, 79/165)
        # gamma is a derived quantity in SIRFH (see calculate_gamma), not a free parameter.
        del self.parameter_bounds['gamma']
    def model(self, t, y):
        """ODE right-hand side for the state vector [S, I_n, H_r, H_f, R, F]."""
        S = y[0]
        I_n = y[1]
        H_r = y[2]
        H_f = y[3]
        R = y[4]
        F = y[5]
        # Total infectious pool drives new infections regardless of compartment.
        I = I_n + H_r + H_f
        S0_model = self.calculateS0(self.model_params['S0p'])
        beta = self.beta(t)
        ret = [
            # S - Susceptible
            -beta * I * S / S0_model,
            # I_n
            (1 - self.rho) * beta * I * S / S0_model # (1-rho) BIS/N
            - self.model_params['gamma_i'] * I_n, # Gamma_I x I_n
            # H_r
            self.rho * (1 - self.model_params['delta']) * beta * I * S / S0_model # rho * (1-delta) BIS/N
            - self.model_params['gamma_h'] * H_r,
            # H_f
            self.rho * self.model_params['delta'] * beta * I * S / S0_model # rho * (delta) BIS/N
            - self.model_params['omega'] * H_f,
            # R
            self.model_params['gamma_i'] * I_n # gamma_I * In
            + self.model_params['gamma_h'] * H_r, # gamma_H * Hr
            # F
            self.model_params['omega'] * H_f,
        ]
        return ret
    def cut_sample(self):
        """Truncate the actual series at ``cut_sample_date`` (a datetime, or an int of days back from the last point)."""
        cutDate = self.cut_sample_date
        if cutDate:
            if not isinstance(cutDate, dt.datetime):
                # An int is interpreted as "this many days before the last observation".
                cutDate = self.I_actual.index[-1] + dt.timedelta(days=-cutDate)
            self.I_actual = self.I_actual.loc[:cutDate].copy()
            self.R_actual = self.R_actual.loc[:cutDate].copy()
            self.F_actual = self.F_actual.loc[:cutDate].copy()
    def load_data(self):
        """
        New function to use our prop data
        """
        super().load_data()
        #True data series
        self.R_actual = self.recovered
        self.F_actual = self.fatal
        self.I_actual = self.confirmed - self.R_actual - self.F_actual # obs this is total I
        self.cut_sample()
    def beta(self, t):
        """Constant transmission rate; subclasses override for time-varying beta."""
        return self.model_params['beta']
    def calculate_gamma(self):
        """
        Using the ´self.params´ dictionary, calculates gamma with its adaptation to the SIRFH model.

        Weighted average of the exit rates of the three infectious compartments.
        :return: gamma
        """
        if hasattr(self, 'model_params'):
            gamma = (1 - self.rho) * self.model_params['gamma_i'] + self.rho * ((1 - self.model_params['delta']) * self.model_params['gamma_h']
                                                                                + self.model_params['delta'] * self.model_params['omega'])
        else:
            # Fallback used before any parameters have been fitted.
            gamma = .07
        return gamma
    def initialize_parameters(self):
        """Set initial compartment levels from the first observations."""
        self.R_0 = self.recovered[0]
        self.F_0 = self.fatal[0]
        self.I_0 = self.confirmed.iloc[0] - self.R_0 - self.F_0
        # Split initial infections using the hospitalization rate and the 79/165
        # fatality-share constant (see set_default_bounds note).
        self.I_n_0 = self.I_0 * (1 - self.rho)
        self.H_r_0 = self.rho * (1 - 79/165) * self.I_0 #TODO Might be a strong assumption, check sensitivity
        self.H_f_0 = self.rho * (79/165) * self.I_0
        self.H_0 = self.H_r_0 + self.H_f_0
    def calculateS0(self, S0p):
        """Convert the susceptible population share ``S0p`` into an absolute initial susceptible count."""
        return self.country_population * S0p - self.I_0 - self.H_0 - self.R_0 - self.F_0
    def loss(self, point):
        """
        RMSE between actual confirmed cases and the estimated infectious people with given beta and gamma.

        Computed on daily changes (first differences) of I, R and F, combined with the alpha weights.
        """
        size = self.I_actual.shape[0]
        self.model_params = self.wrap_parameters(point)
        S0 = self.calculateS0(self.model_params['S0p'])
        # solution = solve_ivp(SIR, [0, size], [S_0, self.I_0, self.R_0], t_eval=np.arange(0, size, 1), vectorized=True)
        solution = solve_ivp(self.model, [0, size], [S0, self.I_n_0, self.H_r_0, self.H_f_0, self.R_0, self.F_0],
                             t_eval=np.arange(0, size, 1), vectorized=True)
        y = solution.y
        S = y[0]
        I_n = y[1]
        H_r = y[2]
        H_f = y[3]
        R = y[4]
        F = y[5]
        I = I_n + H_r + H_f
        alphas = self.alpha
        # l1 = ((I - self.I_actual) / self.I_actual) ** 2
        l1 = (np.diff(I, prepend=np.nan) - self.I_actual.diff()) ** 2
        l1.replace([np.inf, -np.inf, np.nan], 0, inplace=True)
        l1 = np.sqrt(np.mean(l1))
        # l2 = ((R - self.R_actual) / self.R_actual) ** 2
        l2 = (np.diff(R, prepend=np.nan) - self.R_actual.diff()) ** 2
        l2.replace([np.inf, -np.inf, np.nan], 0, inplace=True)
        l2 = np.sqrt(np.mean(l2))
        # l3 = ((F - self.F_actual) / self.F_actual) ** 2
        l3 = (np.diff(F, prepend=np.nan) - self.F_actual.diff()) ** 2
        l3.replace([np.inf, -np.inf, np.nan], 0, inplace=True)
        l3 = np.sqrt(np.mean(l3))
        loss = alphas[0] * l1 + alphas[1] * l2 + alphas[2] * l3
        # loss = l3
        return loss
    def loss_level(self, point):
        """
        RMSE between actual confirmed cases and the estimated infectious people with given beta and gamma.

        Level-based (relative) variant of ``loss``: errors are computed on series levels, not daily changes.
        """
        size = self.I_actual.shape[0]
        self.model_params = self.wrap_parameters(point)
        S0 = self.calculateS0(self.model_params['S0p'])
        # solution = solve_ivp(SIR, [0, size], [S_0, self.I_0, self.R_0], t_eval=np.arange(0, size, 1), vectorized=True)
        solution = solve_ivp(self.model, [0, size], [S0, self.I_n_0, self.H_r_0, self.H_f_0, self.R_0, self.F_0],
                             t_eval=np.arange(0, size, 1), vectorized=True)
        y = solution.y
        S = y[0]
        I_n = y[1]
        H_r = y[2]
        H_f = y[3]
        R = y[4]
        F = y[5]
        I = I_n + H_r + H_f
        alphas = self.alpha
        l1 = ((I - self.I_actual) / self.I_actual) ** 2
        l1.replace([np.inf, -np.inf, np.nan], 0, inplace=True)
        l1 = np.sqrt(np.mean(l1))
        l2 = ((R - self.R_actual) / self.R_actual) ** 2
        l2.replace([np.inf, -np.inf, np.nan], 0, inplace=True)
        l2 = np.sqrt(np.mean(l2))
        l3 = ((F - self.F_actual) / self.F_actual) ** 2
        l3.replace([np.inf, -np.inf, np.nan], 0, inplace=True)
        l3 = np.sqrt(np.mean(l3))
        loss = alphas[0] * l1 + alphas[1] * l2 + alphas[2] * l3
        return loss
    def predict(self,):
        """
        Predict how the number of people in each compartment can be changed through time toward the future.
        The model is formulated with the given beta and gamma.

        Stores the full projection (actuals reindexed over the extended horizon plus
        every model compartment) in ``self.df``.
        """
        predict_range = self.daysPredict
        # print(self.confirmed.index)
        new_index = self.extend_index(self.confirmed.index, predict_range)
        size = len(new_index)
        self.quarantine_loc = float(self.confirmed.index.get_loc(self.quarantine_date))
        S0 = self.calculateS0(self.params['S0p'])
        prediction = solve_ivp(self.model, [0, size], [S0, self.I_n_0, self.H_r_0, self.H_f_0, self.R_0, self.F_0],
                               t_eval=np.arange(0, size, 1), vectorized=True)
        y = prediction.y
        S = y[0]
        I_n = y[1]
        H_r = y[2]
        H_f = y[3]
        R = y[4]
        F = y[5]
        H = H_r + H_f
        I = I_n + H
        df = pd.DataFrame({
            'I_Actual': self.I_actual.reindex(new_index),
            'R_Actual': self.R_actual.reindex(new_index),
            'F_Actual': self.F_actual.reindex(new_index),
            'S': S,
            'I': I,
            'I_n': I_n,
            'H_r': H_r,
            'H_f': H_f,
            'H': H,
            'R': R,
            'F': F,
        }, index=new_index)
        self.df = df
    def rollingHosp(self):
        """Re-estimate on an expanding window and record peak hospital demand for each cut.

        Note: temporarily overwrites the ``*_actual`` series while looping.
        """
        hospList = []
        gammasList = []
        I_actual = self.I_actual.copy()
        R_actual = self.R_actual.copy()
        F_actual = self.F_actual.copy()
        for date in self.confirmed.index:
            date1 = date + dt.timedelta(days=1)
            self.I_actual = I_actual.loc[:date1]
            self.R_actual = R_actual.loc[:date1]
            self.F_actual = F_actual.loc[:date1]
            self.estimate(verbose=False)
            self.predict() #TODO CHECK IF THIS IS OK AND NO INDENXING IS NECESSARY
            hospList.append(self.df['H'].max())
        self.rollingHospList = pd.DataFrame({'H_max': hospList,})
        self.rollingHospList.index = self.I_actual.index
        return self.rollingHospList
    def rolling_peak(self, figsize=(15, 8)):
        """
        This function estimates the fatalities peak with an ever-increasing data window.

        For each cut date a fresh estimator is trained; the realized and model-implied
        fatality-peak dates are collected, plotted with a +/- 5 day band around the true
        peak, and exported to Excel/PNG under ./Exports.
        :return: DataFrame of rolling peak dates
        """
        params_list = []
        # for cutDate in self.confirmed.index:
        for cutDate in self.I_actual.index:
            new_args = self.all_attributes.copy()
            new_args['cut_sample_date'] = cutDate
            estimator = self.create_new_object(self.model_type, new_args)
            estimator.train(verbose=False)
            # optimal_parameters = estimator.params.copy()
            # Date of the maximum daily increase in actual fatalities so far.
            current_peak = estimator.df.index[estimator.df.diff().F_Actual == estimator.df.F_Actual.diff().max()]
            if current_peak.shape[0] > 0:
                current_peak = current_peak[0]
            else:
                current_peak = np.nan
            # Date of the maximum daily increase in the model's fatality forecast.
            estimated_peak = estimator.df.index[estimator.df.diff().F == estimator.df.diff().F.max()]
            if estimated_peak.shape[0] > 0:
                estimated_peak = estimated_peak[0]
            else:
                estimated_peak = np.nan
            params_list.append({
                'Current peak': current_peak,
                'Estimated peak': estimated_peak,
            })
        self.rolling_peak_df = pd.DataFrame(params_list, index=self.I_actual.index)
        self.rolling_peak_df.fillna(method='bfill', inplace=True)
        self.rolling_peak_df.fillna(method='ffill', inplace=True)
        self.rolling_peak_df = self.rolling_peak_df.iloc[1:]
        fig, ax = plt.subplots(figsize=figsize)
        # Cut data on the peak date
        peak_date = pd.Series([self.rolling_peak_df['Current peak'].iloc[-1],
                               self.rolling_peak_df['Estimated peak'].iloc[-1]])
        peak_date = peak_date.max()
        self.rolling_peak_df = self.rolling_peak_df.loc[:peak_date]
        # NOTE(review): pandas Series.plot takes ``ax=``; ``axes=ax`` looks like a typo — verify
        # these lines actually draw on the intended axes.
        self.rolling_peak_df['Estimated peak'].plot(axes=ax, color=yellow, marker='o', label='Estimated')
        self.rolling_peak_df['Current peak'].plot(axes=ax, color=grey, marker='o', label='Current')
        self.rolling_peak_df['Peak max'] = self.rolling_peak_df['Current peak'].max()
        ax.axhline(y=self.rolling_peak_df['Current peak'].max(), color='black', linestyle='--', label='True peak')
        ax.legend()
        # +/- 5 day tolerance band around the realized peak.
        self.rolling_peak_df['UB'] = self.rolling_peak_df['Current peak'].max() + dt.timedelta(days=5)
        self.rolling_peak_df['LB'] = self.rolling_peak_df['Current peak'].max() + dt.timedelta(days=-5)
        ax.fill_between(self.rolling_peak_df['Current peak'].index,
                        self.rolling_peak_df['LB'],
                        self.rolling_peak_df['UB'],
                        facecolor=faded_grey)
        plt.tight_layout()
        fig.suptitle('Fatality peak forecast - {country}'.format(model=self.model_type, country=self.country,),
                     fontsize=16, y=1.05)
        plt.savefig(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_rolling_peak.png"
            ),
            bbox_inches='tight'
        )
        export_df = self.rolling_peak_df[['Current peak', 'Estimated peak', 'LB', 'UB', 'Peak max']].copy()
        export_df.to_excel(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_rolling_peak_dates.xlsx"
            )
        )
        # Second export: dates converted to day counts since the first observation.
        export_df = export_df - self.F_actual.index[0]
        export_df = export_df / np.timedelta64(1, 'D')
        export_df.index = (export_df.index - self.F_actual.index[0]) / np.timedelta64(1, 'D')
        export_df.to_excel(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_rolling_peak.xlsx"
            )
        )
        return self.rolling_peak_df
    def rolling_n_fatal(self, nfatal=[50, 100], figsize=(15, 8)):
        """
        This function creates the rolling estimate of the date in which the model indicates less than `n` daily fatalities

        For each cut date, finds the first post-peak date where forecast daily deaths fall
        to each threshold in ``nfatal``, then plots and saves the result under ./Exports.
        :return: DataFrame of rolling threshold dates
        """
        params_list = []
        for cutDate in self.I_actual.index:
            # for cutDate in self.confirmed.index:
            new_args = self.all_attributes.copy()
            new_args['cut_sample_date'] = cutDate
            estimator = self.create_new_object(self.model_type, new_args)
            estimator.train(verbose=False)
            # get the date with less than n daily fatalities after the peak
            # find peak
            peak = estimator.df.index[estimator.df.diff().F == estimator.df.F.diff().max()]
            dic = {}
            if peak.shape[0] > 0:
                peak_i = peak[0]
                analysis_period = estimator.df.diff().F.loc[peak_i:]
                current_estimate = analysis_period.index[analysis_period <= nfatal[0]]
                current_estimate = current_estimate[0]
            else:
                current_estimate = np.nan
            dic["n={n}".format(n=nfatal[0])] = current_estimate
            if peak.shape[0] > 0:
                peak_i = peak[0]
                analysis_period = estimator.df.diff().F.loc[peak_i:]
                current_estimate = analysis_period.index[analysis_period <= nfatal[1]]
                current_estimate = current_estimate[0]
            else:
                current_estimate = np.nan
            dic["n={n}".format(n=nfatal[1])] = current_estimate
            params_list.append(dic)
        self.rolling_peak_df = pd.DataFrame(params_list, index=self.I_actual.index)
        self.rolling_peak_df.fillna(method='bfill', inplace=True)
        self.rolling_peak_df.fillna(method='ffill', inplace=True)
        self.rolling_peak_df = self.rolling_peak_df.iloc[1:]
        fig, ax = plt.subplots(figsize=figsize)
        # NOTE(review): same ``axes=ax`` vs ``ax=ax`` concern as in rolling_peak — verify.
        self.rolling_peak_df["n={n}".format(n=nfatal[0])].plot(axes=ax, color=yellow, marker='o', label="n={n}".format(n=nfatal[0]))
        self.rolling_peak_df["n={n}".format(n=nfatal[1])].plot(axes=ax, color=grey, marker='o', label="n={n}".format(n=nfatal[1]))
        # ax.axhline(y=self.rolling_peak_df['Current peak'].max(), color='black', linestyle='--', label='True peak')
        ax.legend()
        plt.tight_layout()
        fig.suptitle('Daily Deaths Forecast - {country}'.format(model=self.model_type, country=self.country,),
                     fontsize=16, y=1.05)
        plt.savefig(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_rolling_n_fatal.png"
            )
            , bbox_inches='tight'
        )
        return self.rolling_peak_df
    ############## VISUALIZATION METHODS ################
    def main_plot(self):
        """Plot every column of the result frame on one figure titled with the country name."""
        fig, ax = plt.subplots(figsize=(15, 10))
        ax.set_title(self.country)
        line_styles ={
            'I_Actual': '--',
            'R_Actual': '--',
            'F_Actual': '--',
        }
        # color = {
        #     'I_Actual': '#FF0000',
        #     # 'R_Actual': '--',
        # }
        self.df.plot(ax=ax, style=line_styles, )
    def I_plot(self):
        """Plot actual vs. fitted infected series, split into non-hospital and hospital parts."""
        line_styles = {
            'I_Actual': '--',
            'R_Actual': '--',
            'F_Actual': '--',
        }
        self.df[['I_Actual', 'I', 'I_n', 'H']].loc[:self.end_data].plot(style=line_styles)
    def H_F_plot(self):
        """Plot hospital demand and fatalities against actual fatalities, in-sample."""
        line_styles = {
            'I_Actual': '--',
            'R_Actual': '--',
            'F_Actual': '--',
        }
        self.df[['H', 'F', 'F_Actual',]].loc[:self.end_data].plot(style=line_styles)
    def F_fit_plot(self):
        """Plot fitted vs. actual fatalities, in-sample."""
        line_styles = {
            'I_Actual': '--',
            'R_Actual': '--',
            'F_Actual': '--',
        }
        self.df[['F_Actual', 'F']].loc[:self.end_data].plot(style=line_styles)
    def actuals_plot(self):
        """Plot the three observed series (I, R, F) over the in-sample period."""
        line_styles = {
            'I_Actual': '--',
            'R_Actual': '--',
            'F_Actual': '--',
        }
        self.df[['I_Actual', 'R_Actual', 'F_Actual']].loc[:self.end_data].plot(style=line_styles)
    def rollingHospPlot(self, export=False):
        """Plot the rolling peak-hospital-demand series produced by ``rollingHosp``."""
        axes = self.rollingHospList.plot()
        fig = axes.get_figure()
        if export:
            self.rollingHospList.to_excel('export_HospPlot.xlsx')
        axes.axvline(x=self.quarantine_date, color='red', linestyle='--', label='Quarentine')
        # NOTE(review): 50000 is a hard-coded hospital-capacity level — confirm it applies
        # to the country being plotted.
        axes.axhline(y=50000, color='black', linestyle='--', label='Hospital Capacity')
    def print_parameters(self):
        """Print base parameters plus the hospital-model durations expressed in days."""
        super().print_parameters()
        print("gamma_i: {value} days".format(value= 1 / self.params['gamma_i']))
        print("gamma_h: {value} days".format(value=1 / self.params['gamma_h']))
        print("omega: {value} days".format(value=1 / self.params['omega']))
    def plot_main_forecasts(self, figsize=(15, 5),):
        """Plot daily and total fatality fits side by side and export both to ./Exports."""
        fig, axes = plt.subplots(nrows=1, ncols=2, figsize=figsize)
        axes[0].set_title('Daily Fatalities fit')
        axes[1].set_title('Total Fatalities fit')
        ax = axes[1]
        self.df['F_Actual'].plot(ax=ax, color=grey, label='Fatalities')
        self.df['F'].plot(ax=ax, color=yellow, label='Forecast fatalities')
        self.df[['F_Actual', 'F']].to_excel(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_HF_level.xlsx"
            )
        )
        ax.legend()
        ax = axes[0]
        self.df['F'].diff().plot(ax=ax, color=grey, label='Forecast fatalities')
        self.df['F_Actual'].diff().plot(ax=ax, color=yellow, label='Fatalities')
        df = self.df.copy()
        df['F'] = df['F'].diff()
        df['F_Actual'] = df['F_Actual'].diff()
        df[['F', 'F_Actual']].to_excel(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_HF_diff.xlsx"
            )
        )
        ax.legend()
        plt.tight_layout()
        fig.suptitle('{model} - {country} - Forecasts'.format(model=self.model_type, country=self.country),
                     fontsize=16, y=1.05)
        plt.savefig(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_main_forecast.png"
            ),
            bbox_inches='tight'
        )
    def plot_main_forecasts_hospital(self, figsize=(15, 5), hospital_line=False):
        """Like ``plot_main_forecasts`` but the right panel adds hospital demand (and optionally a capacity line)."""
        fig, axes = plt.subplots(nrows=1, ncols=2, figsize=figsize)
        axes[0].set_title('Fatalities fit')
        axes[1].set_title('Hospital Demand')
        ax = axes[1]
        self.df['H'].plot(ax=ax, color=red, label='Hospital Demand')
        self.df['F'].plot(ax=ax, color=grey, label='Fatalities')
        self.df['F_Actual'].plot(ax=ax, color=yellow, label='True Fatalities')
        self.df[['H', 'F', 'F_Actual']].to_excel(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_HF_level.xlsx"
            ),
        )
        if hospital_line:
            # NOTE(review): 61800 is a hard-coded capacity level — confirm its origin.
            ax.axhline(y=61800, color='black', linestyle='--', label='Hospital Capacity')
        ax.legend()
        ax = axes[0]
        self.df['F'].diff().plot(ax=ax, color=grey, label='Forecast fatalities')
        self.df['F_Actual'].diff().plot(ax=ax, color=yellow, label='Fatalities')
        df = self.df.copy()
        df['F'] = df['F'].diff()
        df['F_Actual'] = df['F_Actual'].diff()
        df[['F', 'F_Actual']].to_excel(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_HF_diff.xlsx"
            ),
        )
        ax.legend()
        plt.tight_layout()
        fig.suptitle('{model} - {country} - Forecasts'.format(model=self.model_type, country=self.country),
                     fontsize=16, y=1.05)
        plt.savefig(
            os.path.join(
                ".",
                "Exports",
                f"{self.country}_main_forecast.png"
            ),
            bbox_inches='tight'
        )
class SIRFH_Sigmoid(SIRFH):
    """
    This SIR extension split the infected compartiment into non-hospital and hospital cases and the recovered group
    into recovered and fatal
    $$\frac{dS}{dt} = - \frac{\beta IS}{N}$$
    $$\frac{dIN}{dt} = (1 - \rho) \times \frac{\beta IS}{N} - \gamma_{IN} IN$$
    $$\frac{dIH}{dt} = \rho \times \frac{\beta IS}{N} - (1-\delta) \times \gamma_{IH} IH - \delta \times \omega_{IH} IH$$
    $$\frac{dR}{dt} = \gamma_{IN} IN + (1-\delta) \times \gamma_{IH} IH $$
    $$\frac{dF}{dt} = \delta \times \omega_{IH} IH$$

    Variant of SIRFH where beta(t) follows a sigmoid transition from beta1 to beta2.
    """
    def __init__(self,
                 **kwargs):
        self.initial_guesses = {
            'lambda': 1.0,
        }
        super().__init__(**kwargs)
        self.model_type = 'SIRFH_Sigmoid'
        # Center of the sigmoid: one week after the quarantine location.
        # NOTE(review): relies on ``self.quarantine_loc`` having been set by the
        # parent initialization chain — confirm.
        self.sig_normal_t = self.quarantine_loc + 7
    def set_default_bounds(self):
        """
        Sets the default values for unprovided bounds. All model parameters should be on this function.
        Pay attention to the fact that it is inheriting bounds from its parent classes
        :return:
        """
        super().set_default_bounds()
        if 'lambda' not in self.parameter_bounds.keys():
            self.parameter_bounds['lambda'] = (1/4, 4)
        if 'beta1' not in self.parameter_bounds.keys():
            self.parameter_bounds['beta1'] = (.05, .5)
        if 'beta2' not in self.parameter_bounds.keys():
            self.parameter_bounds['beta2'] = (.05, .5)
        # The single constant beta is replaced by the (beta1, beta2) pair.
        del self.parameter_bounds['beta']
    def add_constraints(self):
        """Register the beta1 >= beta2 inequality constraint with the optimizer."""
        self.constraints.append({'type': 'ineq', 'fun': self.const_betas},)
    def beta(self, t):
        """Sigmoid transmission rate: beta1 before the transition, beta2 after, slope set by lambda."""
        # Normalize t
        t -= self.sig_normal_t
        return (self.model_params['beta1'] -
                self.model_params['beta2']) / (1 + np.exp(t / self.model_params['lambda'])) + self.model_params['beta2']
    def calculate_r0(self):
        """
        Using the ´self.params´ dictionary, calculates R0
        :return: R0 (dict with the pre- and post-transition values)
        """
        gamma = self.calculate_gamma()
        return {"R0_initial": self.params['beta1'] / gamma, "R0_final": self.params['beta2'] / gamma, }
    def outOfSample_loss(self, S0p):
        """Loss used when optimizing S0 alone: lock S0p via its bounds and return the out-of-sample forecast error."""
        self.S0pbounds = (S0p, S0p)
        return self.outOfSample_forecast()
    def outOfSample_train(self, cutDate=None, days=7):
        # NOTE(review): this method appears to be dead legacy code. It references
        # several names that are never defined in this class (``newObj`` is built
        # but unused; ``self.S_0`` in the print below; the ``bkp_*`` backups in the
        # "Release locks" section are never assigned), so calling it would raise
        # NameError/AttributeError. Prefer ``train_S0``/``train_S0_joint``.
        if not cutDate:
            cutDate = self.F_actual.index[-1] + dt.timedelta(days=-days)
        # Lock and backup S0
        S0_initital_guess = 0.05
        bkp_S0_bounds = self.S0pbounds
        self.S0pbounds = (S0_initital_guess, S0_initital_guess)
        self.estimate(verbose=False)
        #release lock
        self.S0pbounds = bkp_S0_bounds
        # Create new object with the locked parameters
        newObj = SIRFH_Sigmoid(
            country=self.country,
            N=self.country_population,
            nth=self.nth,
            quarantineDate=self.quarantine_date,
            hospRate=self.hospitalization_rate,
            alphas=self.alpha,
            adjust_recovered=self.adjust_recovered,
            beta1_bounds=(self.beta1, self.beta1),
            beta2_bounds=(self.beta2, self.beta2),
            delta_bounds=(self.delta, self.delta),
            gamma_i_bounds=(self.gamma_I, self.gamma_I),
            gamma_h_bounds=(self.gamma_H, self.gamma_H),
            omega_bounds=(self.omega, self.omega),
            lambda_bounds=(self.lambda_bounds, self.lambda_bounds),
            S0pbounds=self.S0pbounds,
        )
        # minimize out of sample forecast RMSE
        optimal = minimize(
            self.outOfSample_loss,
            np.array([
                0.05
            ]),
            args=(),
            method='SLSQP',
            bounds=[
                self.S0pbounds
            ],
        )
        self.optimizer = optimal
        S_0p, = optimal.x
        S_0 = self.country_population * S_0p - self.I_0 - self.H_0 - self.R_0 - self.F_0
        print("S0p new estimate")
        # NOTE(review): ``self.S_0`` is never assigned — the computed local is ``S_0``.
        print("S0: {S0}".format(S0=self.S_0))
        # Release locks
        # NOTE(review): none of these ``bkp_*`` names were ever assigned above, and
        # ``beta2_bounds`` is restored from ``bkp_gamma_i_bounds`` (copy/paste slip).
        self.delta_bounds = bkp_delta_bounds
        self.beta1_bounds = bkp_betaBounds
        self.beta2_bounds = bkp_gamma_i_bounds
        self.gamma_i_bounds = bkp_gamma_i_bounds
        self.gamma_h_bounds = bkp_gamma_h_bounds
        self.omega_bounds = bkp_omega_bounds
        self.lambda_bounds = bkp_lambda_bounds
    ############## CONSTRAINT METHODS ################
    def const_betas(self, point):
        """
        Initial `beta` should be higher than final `beta`
        :param point:
        :return:
        """
        params = self.wrap_parameters(point)
        return params['beta1'] - params['beta2']
    def const_lowerBoundR0(self, point):
        "constraint has to be R0 > bounds(0) value, thus (R0 - bound) > 0"
        params = self.wrap_parameters(point)
        lowerBound = self.constraints_bounds['R0'][0]
        gamma = self.calculate_gamma()
        R0_1 = params['beta1'] / gamma
        R0_2 = params['beta2'] / gamma
        # The smaller of the two implied R0 values must clear the lower bound.
        return min(R0_1, R0_2) - lowerBound
    def const_upperBoundR0(self, point):
        """Upper-bound counterpart: the larger implied R0 must stay below the bound."""
        params = self.wrap_parameters(point)
        upperBound = self.constraints_bounds['R0'][1]
        gamma = self.calculate_gamma()
        R0_1 = params['beta1'] / gamma
        R0_2 = params['beta2'] / gamma
        return upperBound - max(R0_1, R0_2)
if __name__ == '__main__':
    # Demo run: fit the hospital-split SIRFH model on South Korea data,
    # weighting the loss almost entirely on fatalities.
    hospRate = 0.05
    deltaUpperBound = 79 / 165
    cut_sample_date = dt.datetime(2020, 5, 14)
    t1 = SIRFH(country='Korea, South',
               # quarantineDate = dt.datetime(2020,3,24), #italy lockdown was on the 9th
               hospitalization_rate=hospRate,
               alpha=[.00, 0.00, .998],
               # Loose restrictions
               # S0pbounds=(10e6 / 200e6, 10e6 / 200e6),
               # delta_bounds=(0, deltaUpperBound),
               # betaBounds=(0.20, 1.5),
               # gammaBounds=(0.01, .2),
               # gamma_i_bounds=(1/(20), 1/(1)),
               # gamma_h_bounds=(1/(8*7), 1/(2*7)),
               # omega_bounds=(1/(4*7), 1/(3)),
               # Tight restrictions
               # S0pbounds=(10e6 / N, 10e6 / N),
               force_parameters={
                   # 'S0p': .05,
                   # 'delta': 79/165,
                   # 'beta1': 0.31118164052008357,
                   # 'beta2': .2,
                   # 'gamma_i': 0.19999999999999982,
                   # 'gamma_h': 0.023809523809525043,
                   # 'omega': 0.14199161301361687,
                   # 'lambda': 0.5,
               },
               parameter_bounds={
                   'S0p': (.0001, .02),
                   # 'delta': (0, deltaUpperBound),
                   # 'beta1': (0.20, 1.5),
                   # 'beta2': (0.20, 1.5),
                   'gamma_i': (1 / (14), 1 / (4)),
                   'gamma_h': (1 / (6.5 * 7), 1 / (2.5 * 7)),
                   'omega': (1 / (21), 1 / (5)),
                   # 'lambda': (.5,2)
               },
               constraints_bounds={
                   'R0': (1, 6),
               },
               cut_sample_date = cut_sample_date,
               )
    # t1.train_S0()
    t1.train()
    # Fix: removed stray dataset-residue token that was fused onto this statement
    # and would have raised a NameError.
    roll = t1.rolling_peak()
8174881 | # Generated by Django 3.1 on 2021-02-28 10:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen ``CommunityActivityMetadata.objective`` to a 2048-character CharField."""
    dependencies = [
        ('sample', '0018_auto_20210228_1023'),
    ]
    operations = [
        migrations.AlterField(
            model_name='communityactivitymetadata',
            name='objective',
            field=models.CharField(max_length=2048),
        ),
    ]
| StarcoderdataPython |
6436038 | import os
import tempfile
from typing import List
import pytest
from bs4 import BeautifulSoup
from json_schema_for_humans.generate import generate_from_file_object, generate_from_schema
from tests.test_utils import _get_test_case_path
def _generate_case(
    case_name: str, find_deprecated: bool = False, find_default: bool = False, link_to_reused_ref: bool = True
) -> BeautifulSoup:
    """Get the BeautifulSoup object for a test case"""
    rendered = generate_from_schema(
        _get_test_case_path(case_name),
        None,
        False,
        find_deprecated,
        find_default,
        True,
        link_to_reused_ref=link_to_reused_ref,
    )
    return BeautifulSoup(rendered, "html.parser")
def _assert_soup_results_text(soup: BeautifulSoup, class_name: str, texts: List[str]) -> None:
    """Assert that all the HTML elements with the provided class found in the schema has the supplied text

    There must be exactly as many elements as the length of the supplied values and they must be in the same order
    """
    found = soup.find_all(class_=class_name)
    assert len(found) == len(texts)
    for element, expected in zip(found, texts):
        assert element.text.strip() == expected
def _assert_property_names(soup: BeautifulSoup, property_names: List[str]) -> None:
    """Assert the rendered schema contains exactly these property names, in order."""
    _assert_soup_results_text(soup, "property-name", property_names)
def _assert_title(soup: BeautifulSoup, title: str) -> None:
    """Assert the result file contains the provided title (both in <head> and as the page heading)"""
    assert soup.head.title.string == title
    assert soup.body.h1.string == title
def _assert_descriptions(soup: BeautifulSoup, descriptions: List[str]) -> None:
    """Assert the result file contains exactly the provided descriptions in the same order"""
    _assert_soup_results_text(soup, "description", descriptions)
def _assert_types(soup: BeautifulSoup, type_names: List[str]) -> None:
    """Assert the rendered value types are exactly these, shown as "Type: <name>", in order."""
    _assert_soup_results_text(soup, "value-type", [f"Type: {type_name}" for type_name in type_names])
def _assert_const(soup: BeautifulSoup, const_values: List[str]) -> None:
    """Assert the rendered const values are exactly these, shown as Specific value: "<v>", in order."""
    _assert_soup_results_text(soup, "const-value", [f'Specific value: "{const_value}"' for const_value in const_values])
def _assert_numeric_restrictions(soup: BeautifulSoup, restrictions: List[str]) -> None:
    """Assert the rendered numeric restrictions are exactly these, in order."""
    _assert_soup_results_text(soup, "numeric-restriction", restrictions)
def _assert_one_of_options(soup: BeautifulSoup, nb_options: int) -> None:
    """Assert the oneOf tabs are labelled "Option 1" .. "Option <nb_options>", in order."""
    # Idiom fix: redundant str() inside the f-string removed; output is unchanged.
    _assert_soup_results_text(soup, "oneOf-option", [f"Option {i + 1}" for i in range(nb_options)])
def _assert_default_values(soup: BeautifulSoup, default_values: List[str]) -> None:
    """Assert the rendered default values are exactly these, shown as "Default: <v>", in order."""
    _assert_soup_results_text(soup, "default-value", [f"Default: {default_value}" for default_value in default_values])
def _assert_badges(soup: BeautifulSoup, badge_class_name: str, expected_values: List[bool]) -> None:
    """Assert that the badge with the given class name is either present or not for all properties"""
    cards = soup.find_all(class_="property-name-button")
    assert len(cards) == len(expected_values)
    badge_class = f"{badge_class_name}-property"
    for card, expected in zip(cards, expected_values):
        has_badge = card.find(class_=badge_class) is not None
        assert has_badge == expected
def _assert_deprecated(soup: BeautifulSoup, is_deprecated_properties: List[bool]) -> None:
    """Assert the per-property presence of the "deprecated" badge, in order."""
    _assert_badges(soup, "deprecated", is_deprecated_properties)
def _assert_required(soup: BeautifulSoup, is_required_properties: List[bool]) -> None:
    """Assert the per-property presence of the "required" badge, in order."""
    _assert_badges(soup, "required", is_required_properties)
def _assert_basic_case(soup: BeautifulSoup) -> None:
    """Assert the rendered result of the basic test case"""
    _assert_property_names(soup, ["firstName", "lastName", "age"])
    _assert_descriptions(
        soup,
        [
            "The person's first name.",
            "The person's last name.",
            "Age in years which must be equal to or greater than zero.",
        ],
    )
    _assert_title(soup, "Person")
    _assert_numeric_restrictions(soup, ["Value must be greater or equal to 0"])
    _assert_required(soup, [False] * 3)
def test_basic() -> None:
    """Test rendering a basic schema with title"""
    soup = _generate_case("basic")
    _assert_basic_case(soup)
def test_multiple_types() -> None:
    """Test rendering a schema with type being an array."""
    soup = _generate_case("multiple_types")
    # Union types are rendered joined with "or"; null is listed last.
    _assert_types(soup, ["object", "string", "string or null", "integer or number", "integer, string, number or null"])
def test_geo() -> None:
    """Test rendering a schema with numerical values that have restrictions"""
    soup = _generate_case("geo")
    _assert_property_names(soup, ["latitude", "longitude"])
    _assert_types(soup, ["object", "number", "number"])
    _assert_numeric_restrictions(
        soup,
        [
            "Value must be greater or equal to -90 and lesser or equal to 90",
            "Value must be greater or equal to -180 and lesser or equal to 180",
        ],
    )
    _assert_required(soup, [True] * 2)
def test_references() -> None:
    """Test rendering a schema with references"""
    soup = _generate_case("references")
    # Names cover anchors with/without a leading slash, nested references,
    # same-file and cross-file references, and a multi-level hierarchy.
    _assert_property_names(
        soup,
        [
            "a_gift",
            "anchor_with_slash",
            "propertyA",
            "anchor_no_slash",
            "anchor_nested_reference",
            "same_file_anchor_with_slash",
            "same_file_anchor_no_slash",
            "same_file_nested_reference",
            "other_file_anchor",
            "with_wrap",
            "other_file_dot_anchor",
            "other_file_dot_dot_anchor",
            "other_file_only",
            "not_a_string",
            "multi_hierarchy_reference",
            "propertyA",
        ],
    )
    _assert_descriptions(
        soup,
        [
            "Testing $ref",
            "A gift, or is it?",
            "Description for object_def/items/propertyA",
            "Description for array_def",
            "Description for string_def",
            "The delivery is a gift, no prices displayed",
            "The delivery is a gift, no prices displayed",
            "The delivery is a gift, no prices displayed",
            "Test schema with a not",
            "Contents of propertyA in final.json",
        ],
    )
def test_top_level_array() -> None:
    """Test rendering a schema with an array instead of an object at the top level"""
    soup = _generate_case("top_level_array")
    _assert_title(soup, "Array at top level")
    _assert_descriptions(soup, ["Sometimes there are no properties", "A string"])
def test_top_level_combining() -> None:
    """Test rendering a schema with a combining property at the top level"""
    soup = _generate_case("top_level_combining")
    _assert_title(soup, "Combining at top level")
    _assert_descriptions(soup, ["For the combine"])
    _assert_types(soup, ["object"] * 4)
def test_array() -> None:
    """Test rendering a schema with arrays of elements having their own schema"""
    soup = _generate_case("array")
    _assert_property_names(soup, ["fruits", "vegetables", "veggieName", "veggieLike"])
    _assert_descriptions(
        soup,
        [
            "A representation of a person, company, organization, or place",
            "The name of the vegetable.",
            "Do I like this vegetable?",
        ],
    )
    _assert_types(soup, ["object", "array of string", "string", "array", "object", "string", "boolean"])
    _assert_required(soup, [False, False, True, True])
def test_array_advanced() -> None:
    """Test rendering a schema that uses minItems, maxItems, and uniqueItems for arrays."""
    # Consistency fix: every sibling test declares an explicit `-> None` return.
    soup = _generate_case("array_advanced")
    _assert_descriptions(soup, ["A little food fun", "5 to 8 fruits that you like"])
    _assert_property_names(soup, ["fruits", "vegetables"])
    _assert_const(soup, ["eggplant"])
    _assert_required(soup, [False] * 2)
def test_with_definitions() -> None:
    """Test rendering a schema that uses the $ref keyword to refer to a definition attribute elsewhere in the schema"""
    # Consistency fix: added the `-> None` annotation used by the sibling tests.
    soup = _generate_case("with_definitions")
    _assert_property_names(
        soup, ["billing_address", "street_address", "city", "state", "shipping_address"],
    )
    _assert_types(soup, ["object", "object", "string", "string", "string"])
    _assert_required(soup, [False, True, True, True, False])
def test_with_multiple_descriptions() -> None:
    """Test rendering a schema that uses multiple descriptions including with the $ref keyword"""
    # Consistency fix: added the `-> None` annotation used by the sibling tests.
    soup = _generate_case("with_descriptions")
    _assert_descriptions(
        soup,
        [
            "Exact address",
            "Exact address",
            "Delivery info depending on the delivery type",
            "The delivery is a gift, no prices displayed",
        ],
    )
def test_combining_one_of() -> None:
    """Test rendering of oneOf schema attribute in tabs"""
    # Consistency fix: added the `-> None` annotation used by the sibling tests.
    soup = _generate_case("combining_oneOf")
    _assert_one_of_options(soup, 4)
    _assert_types(soup, ["object"] * 5)
    _assert_required(soup, [True])
def test_combining_not() -> None:
    """Test rendering of the not schema attribute"""
    # Consistency fix: added the `-> None` annotation used by the sibling tests.
    soup = _generate_case("combining_not")
    definitions = soup.find_all(class_="property-definition-div")
    assert len(definitions) == 1
    assert definitions[0].text.lstrip().startswith("Must not be:")
def test_with_default() -> None:
    """Test that default values declared in the schema are rendered."""
    doc = _generate_case("with_default")
    _assert_default_values(doc, ['"Linux"', '["white", "blue"]', "2"])
def test_deprecated_in_description() -> None:
    """Test detecting deprecation markers embedded in property descriptions."""
    doc = _generate_case("deprecated", find_deprecated=True)
    _assert_property_names(doc, ["deprecated1", "deprecated2", "not_deprecated"])
    _assert_deprecated(doc, [True, True, False])
def test_deprecated_not_in_description() -> None:
    """Test that no deprecated badge appears when description scanning is disabled."""
    doc = _generate_case("deprecated", find_deprecated=False)
    _assert_deprecated(doc, [False, False, False])
def test_with_special_chars() -> None:
    """Test that non-ASCII property names get sanitized HTML anchor targets."""
    doc = _generate_case("with_special_chars", find_deprecated=False)
    _assert_property_names(doc, ["prénom", "nomDeFamille", "âge", "0 de quoi d'autre"])
    expand_buttons = doc.find_all("button", attrs={"aria-controls": True})
    sanitized_anchors = ["#pr_nom", "#nomDeFamille", "#a_ge", "#a0_de_quoi_d_autre"]
    for index, anchor in enumerate(sanitized_anchors):
        assert expand_buttons[index].attrs["data-target"] == anchor
def test_description_with_ref() -> None:
    """A description written next to a $ref must win over the description of
    the referenced object.
    """
    doc = _generate_case("description_with_ref")
    _assert_descriptions(doc, ["We should see this", "inner description", "We should see this too"])
def test_description_from_ref() -> None:
    """Test that a property holding only a $ref, with no description of its own,
    inherits the description of the referenced definition.
    """
    # Docstring fix: the old text was copy-pasted from test_description_with_ref
    # and described the opposite behavior.
    soup = _generate_case("description_from_ref")
    _assert_descriptions(soup, ["a filled string"] * 2)
def test_description_with_ref_link_to_reused_ref() -> None:
    """Same as test_description_with_ref, but with reference reuse disabled."""
    doc = _generate_case("description_with_ref", link_to_reused_ref=False)
    _assert_descriptions(doc, ["We should see this", "inner description", "We should see this too"])
def test_with_examples() -> None:
    """Test rendering of the ``examples`` keyword."""
    soup = _generate_case("with_examples")
    # A property with several examples gets the plural label, others the singular.
    examples_label = soup.find_all("div", class_=["badge", "badge-secondary"])
    examples_label_text = [ex.text for ex in examples_label]
    assert examples_label_text == ["Examples:", "Example:", "Example:", "Example:"]
    examples_content = soup.find_all("div", class_="examples")
    examples_content_text = [ex.findChildren()[0].text for ex in examples_content]
    # Example values are rendered as JSON, hence the quoted string literals below.
    assert examples_content_text == [
        '"Guido"',
        '"BDFL"',
        '"<NAME>"',
        "64",
        """{
    "birthplace": "Haarlem, Netherlands",
    "favorite_emoji": "🐍",
    "motto": "Beautiful is better than ugly.\\\\nExplicit is better than implicit.\\\\nSimple is better than complex.\\\\nComplex is better than complicated.\\\\nFlat is better than nested.\\\\nSparse is better than dense.\\\\nReadability counts.\\\\nSpecial cases aren't special enough to break the rules.\\\\nAlthough practicality beats purity.\\\\nErrors should never pass silently.\\\\nUnless explicitly silenced.\\\\nIn the face of ambiguity, refuse the temptation to guess.\\\\nThere should be one-- and preferably only one --obvious way to do it.\\\\nAlthough that way may not be obvious at first unless you're Dutch.\\\\nNow is better than never.\\\\nAlthough never is often better than *right* now.\\\\nIf the implementation is hard to explain, it's a bad idea.\\\\nIf the implementation is easy to explain, it may be a good idea.\\\\nNamespaces are one honking great idea -- let's do more of those!"
}""",
    ]
@pytest.mark.parametrize("link_to_reused_ref", [True, False])
def test_recursive(link_to_reused_ref: bool) -> None:
    """Test a schema whose definition refers back to itself."""
    doc = _generate_case("recursive", link_to_reused_ref=link_to_reused_ref)
    _assert_descriptions(doc, ["A human being", "The children they had", "A human being"])
    back_link = doc.find("a", href="#person")
    assert back_link
    assert back_link.text == "Same definition as person"
@pytest.mark.parametrize("link_to_reused_ref", [True, False])
def test_recursive_array(link_to_reused_ref: bool) -> None:
    """Test a schema whose recursive definition points at array items."""
    doc = _generate_case("recursive_array", link_to_reused_ref=link_to_reused_ref)
    _assert_descriptions(doc, ["A list of people", "A human being", "The children they had", "A human being"])
    back_link = doc.find("a", href="#person_items")
    assert back_link
    assert back_link.text == "Same definition as person_items"
def test_pattern_properties() -> None:
    """Test rendering of the patternProperties keyword."""
    doc = _generate_case("pattern_properties")
    badge_texts = [badge.text for badge in doc.find_all("span", class_=["badge-info"])]
    assert badge_texts == ["Pattern Property"]
    pattern_texts = [span.findChildren()[0].text for span in doc.find_all("span", class_="pattern-value")]
    assert pattern_texts == ["$[a-c][0-9]^"]
    _assert_property_names(doc, ["firstName", "lastName", "paperSize", "rating", "review"])
    expected_descriptions = [
        "The person's first name.",
        "The person's last name.",
        "Review of a paper size.",
        "Numerical rating for paper size.",
        "Narrative review of the paper size.",
    ]
    _assert_descriptions(doc, expected_descriptions)
def test_pattern_properties_html_id() -> None:
    """Test the HTML IDs generated for patterns under patternProperties."""
    doc = _generate_case("pattern_properties_html_id")
    badge_texts = [badge.text for badge in doc.find_all("span", class_=["badge-info"])]
    assert badge_texts == ["Pattern Property"] * 4
    pattern_texts = [span.findChildren()[0].text for span in doc.find_all("span", class_="pattern-value")]
    assert pattern_texts == [".$", ".*", "..", "^."]
    _assert_property_names(doc, ["not_a_pattern", "Title 4", "Title 1", "Title 2", "Title 3"])
    _assert_descriptions(
        doc, ["Description 4", "Description 1", "Description 2", "Description 3"],
    )
    anchor_ids = [div.attrs["id"] for div in doc.find_all("div", class_="property-definition-div")]
    assert anchor_ids == ["not_a_pattern", "not_a_pattern_pattern1", "pattern1", "pattern2", "pattern3"]
def test_conditional_subschema() -> None:
    """Test rendering of conditional (if/then/else) sub-schemas."""
    doc = _generate_case("conditional_subschema")
    expected_types = ["object", "object", "const", "object", "object", "object", "object", "string", "enum (of string)"]
    _assert_types(doc, expected_types)
def test_html_in_patterns() -> None:
    """Test that HTML-looking characters inside patterns are shown verbatim."""
    doc = _generate_case("html_in_patterns")
    rendered_patterns = [block.text for block in doc.find_all("code")]
    assert rendered_patterns == [
        "^(<<variable:([-+/*0-9A-Za-z_]+)>>|<<auto>>)$",
        "$[a-c][0-9]^<a>",
    ]
    _assert_property_names(doc, ["$[a-c][0-9]^<a>"])
def test_yaml() -> None:
    """Test loading the schema from a YAML file. The schema is the same as the case "with_definitions"."""
    with tempfile.NamedTemporaryFile(mode="w+") as temp_file:
        # Fix: the original used f"yaml.yaml" — an f-string with no placeholders.
        schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "cases", "yaml.yaml"))
        with open(schema_path) as schema_fp:
            generate_from_file_object(schema_fp, temp_file, True, False, False, True)
        temp_file.seek(0)
        soup = BeautifulSoup(temp_file.read(), "html.parser")
    # Order of properties is only preserved in Python 3.7+
    _assert_property_names(
        soup, ["billing_address", "street_address", "city", "state", "shipping_address"],
    )
    _assert_types(soup, ["object", "object", "string", "string", "string"])
    _assert_required(soup, [False, True, True, True, False])
| StarcoderdataPython |
1700125 | <reponame>maxiwoj/PostTruthDetector<filename>tests/test_sample.py<gh_stars>1-10
import unittest
from post_truth_detector import RestApiException, site_unreliability, \
fact_unreliability, count, google_search, sentiment_analysis
from post_truth_detector.learn.relativeness_learn import map_state
class TestRemotes(unittest.TestCase):
    """Integration tests against remote services (require network access)."""

    def test_site_unreliability(self):
        """Malformed URLs raise RestApiException; a known satire site scores ~1."""
        with self.assertRaises(RestApiException):
            site_unreliability("wwww.sdfdfg.pl")
        with self.assertRaises(RestApiException):
            site_unreliability("http:/www.wp.pl")
        # Fix: was a bare `assert` — use the TestCase API for a proper failure message.
        self.assertEqual(round(site_unreliability("www.aszdziennik.pl")), 1,
                         "Aszdziennik site unreliability")

    def test_fact_checker(self):
        """A known-false claim scores ~1, a known-true claim ~0."""
        self.assertEqual(round(fact_unreliability("Pope has a new baby")), 1,
                         "Bad fact check")
        self.assertEqual(round(fact_unreliability("Trump elected as the new "
                                                  "president")), 0,
                         "Real fact check")

    def test_google_connection(self):
        """The custom-search endpoint returns a non-empty result set."""
        # SECURITY: credentials belong in environment variables or a secrets
        # store, not in source control.
        my_api_key = "<KEY>"
        my_cse_id = "005984777386616324044:zvaeoj2ttvu"
        # Fix: `self.assert_` is a long-deprecated alias of assertTrue.
        self.assertTrue(google_search("Something to search for", my_api_key,
                                      my_cse_id, num=3),
                        "test result not empty")
class TestCount(unittest.TestCase):
    """Unit tests for word counting and the relatedness label mapping."""

    def test_count(self):
        title_and_article = ["ala ma kota",
                             "ala jest bardzo fajna, ale nie ma kota"]
        self.assertEqual(count(title_and_article), 1,
                         "Check number of words from title in article")

    def test_mapState(self):
        self.assertEqual(map_state("unrelated"), 0, "test unreleted map to 0")
        self.assertEqual(map_state("related"), 1, "test related map to 1")
class TestSentiments(unittest.TestCase):
    """Unit tests for the sentiment/subjectivity analysis helper."""

    def test_sentimentals(self):
        subjective = sentiment_analysis("I love peanuts")
        objective = sentiment_analysis("Ala has a cat")
        self.assertGreater(subjective.subjectivity, 0)
        self.assertEqual(objective.subjectivity, 0)
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
| StarcoderdataPython |
8153533 | """ Approach using NLTK and a predefined grammar based on ReVerb System (Fader et al. 2011)
* run POS tagger and entity chunker over each sentence
* for every verb chunk, find the nearest noun chunk to the left and the right of the verb
"""
import de_core_news_sm
import nltk
from nltk.tokenize import sent_tokenize
from network_core.ogm.node_objects import Me, Contact
class RelationshipExtractor:
    """Extract (entity, relation, entity) triples from German text.

    Follows the ReVerb approach (Fader et al. 2011): POS-tag each sentence,
    chunk it with a regexp grammar, then pair every relation chunk (REL)
    with the nearest noun phrase (NP) on each side of it.
    """

    RELATIONSHIPS = ['vater', 'mutter', 'sohn', 'tochter', 'bruder', 'schwester', 'enkel', 'enkelin', 'nichte',
                     'neffe', 'onkel', 'tante']
    ME = ['ich', 'meine', 'mein']

    def __init__(self):
        self.nlp = de_core_news_sm.load()
        # Grammar over spaCy POS tags: extracts noun phrases (NP) and
        # relationship chunks (REL).
        self.grammar = r"""NP: {<DET>?<ADJ>*<NOUN>?<PROPN|PRON>*}
                       V: {<VERB>}
                       W: {<NOUN|ADJ|ADV|PRON|DET>}
                       P: {<ADP|PART|PUNCT>}
                       C: {<CONJ>}
                       REL: {<V><W>*<P>|<V><P>|<V>|<C>}
                       """

    def pos_tagging(self, utterance):
        """Return one ``[(token, POS), ...]`` list per sentence of *utterance*."""
        pos_tagged_sentences = []
        for sentence in sent_tokenize(utterance):
            doc = self.nlp(sentence)
            pos_tagged_sentences.append([(token.text, token.pos_) for token in doc])
        return pos_tagged_sentences

    def extract_chunk_trees(self, utterance):
        """Chunk every sentence of *utterance* with the NP/REL grammar."""
        cp = nltk.RegexpParser(self.grammar)
        return [cp.parse(sentence) for sentence in self.pos_tagging(utterance)]

    def find_nearest_noun_chunk(self, rel_tree_position, sent_tree):
        """
        Find the nearest noun chunk left and right of the relationship tree.

        :param rel_tree_position: position of the relationship tree in the sentence
        :param sent_tree: nltk tree of the current sentence
        :return: ``(left_np, right_np)``; either element may be ``None`` when
                 no NP exists on that side of the REL chunk
        """
        i = rel_tree_position
        left_np = None
        right_np = None
        # Nearest NP to the left of REL.
        for j in range(i - 1, -1, -1):
            if isinstance(sent_tree[j], nltk.tree.Tree) and sent_tree[j].label() == 'NP':
                left_np = sent_tree[j]
                break
        # Nearest NP to the right of REL.
        for j in range(i + 1, len(sent_tree)):
            if isinstance(sent_tree[j], nltk.tree.Tree) and sent_tree[j].label() == 'NP':
                right_np = sent_tree[j]
                break
        return left_np, right_np

    def find_relations_tree_in_utterance(self, utterance):
        """Return ``[left_np, rel, right_np]`` tree triples for every REL chunk."""
        relations = []
        for sent_tree in self.extract_chunk_trees(utterance):
            for i, sub_tree in enumerate(sent_tree):
                if isinstance(sub_tree, nltk.tree.Tree) and sub_tree.label() == 'REL':
                    left_np, right_np = self.find_nearest_noun_chunk(i, sent_tree)
                    relations.append([left_np, sub_tree, right_np])
        return relations

    def extract_relation_tuples(self, utterance):
        """Return ``(entity1_words, relation_words, entity2_words)`` tuples."""
        relation_tuples = []
        for left_np, rel_tree, right_np in self.find_relations_tree_in_utterance(utterance):
            # BUG FIX: a REL chunk at a sentence edge may have no NP on one
            # side; the original code crashed with AttributeError on
            # ``None.leaves()`` here.
            if left_np is None or right_np is None:
                continue
            # Prefer proper nouns; fall back to common nouns.
            ne1 = [w for w, t in left_np.leaves() if t == 'PROPN']
            if not ne1:
                ne1 = [w for w, t in left_np.leaves() if t == 'NOUN']
            ne2 = [w for w, t in right_np.leaves() if t == 'PROPN']
            if not ne2:
                ne2 = [w for w, t in right_np.leaves() if t == 'NOUN']
            # Prefer verbs for the relation; fall back to conjunctions.
            rel = [w for w, t in rel_tree.leaves() if t == 'VERB']
            if not rel:
                rel = [w for w, t in rel_tree.leaves() if t == 'CONJ']
            if ne1 and ne2 and rel:
                relation_tuples.append((ne1, rel, ne2))
        return relation_tuples

    def print_relationships(self, utterance):
        """Print every extracted relation tuple, one per line."""
        for relation in self.extract_relation_tuples(utterance):
            print(relation)
| StarcoderdataPython |
4981100 | # Description
# 中文
# English
# Give a string s, count the number of non-empty (contiguous) substrings that have the same number of 0's and 1's, and all the 0's and all the 1's in these substrings are grouped consecutively.
# Substrings that occur multiple times are counted the number of times they occur.
# s.length will be between 1 and 50,000.
# s will only consist of "0" or "1" characters.
# Have you met this question in a real interview?
# Example
# Example 1:
# Input: "00110011"
# Output: 6
# Explanation: There are 6 substrings that have equal number of consecutive 1's and 0's: "0011", "01", "1100", "10", "0011", and "01".
# Notice that some of these substrings repeat and are counted the number of times they occur.
# Also, "00110011" is not a valid substring because all the 0's (and 1's) are not grouped together.
# Example 2:
# Input: "10101"
# Output: 4
# Explanation: There are 4 substrings: "10", "01", "10", "01" that have equal number of consecutive 1's and 0's.
class Solution:
    """LintCode: count binary substrings with equal, grouped runs of 0s and 1s."""

    def countBinarySubstrings(self, s):
        """
        @param s: a string
        @return: the number of substrings with equal, consecutive 0s and 1s
        """
        # Every valid substring straddles a boundary between two runs of
        # identical characters, and each boundary contributes
        # min(left run length, right run length) substrings.
        total = 0
        prev_run = 0
        cur_run = 1
        for j in range(1, len(s)):
            if s[j] == s[j - 1]:
                cur_run += 1
            else:
                total += min(prev_run, cur_run)
                prev_run, cur_run = cur_run, 1
        # Account for the final boundary between the last two runs.
        return total + min(prev_run, cur_run)
# Keep only rows with a species label; .copy() avoids pandas
# SettingWithCopyWarning on later mutations.
# NOTE(review): assumes `survey_data_unique` is a pandas DataFrame defined upstream.
survey_data = survey_data_unique.dropna(subset=['species']).copy()
18714 | <filename>networkx/algorithms/tests/test_cuts.py
"""Unit tests for the :mod:`networkx.algorithms.cuts` module."""
import networkx as nx
class TestCutSize:
    """Unit tests for the :func:`~networkx.cut_size` function."""

    def test_symmetric(self):
        """The size of a cut does not depend on the order of the two sets."""
        graph = nx.barbell_graph(3, 0)
        left, right = {0, 1, 4}, {2, 3, 5}
        assert nx.cut_size(graph, left, right) == 4
        assert nx.cut_size(graph, right, left) == 4

    def test_single_edge(self):
        """A cut separating the two bells crosses exactly one edge."""
        graph = nx.barbell_graph(3, 0)
        left, right = {0, 1, 2}, {3, 4, 5}
        assert nx.cut_size(graph, left, right) == 1
        assert nx.cut_size(graph, right, left) == 1

    def test_directed(self):
        """Each directed edge is counted once in the cut."""
        graph = nx.barbell_graph(3, 0).to_directed()
        left, right = {0, 1, 2}, {3, 4, 5}
        assert nx.cut_size(graph, left, right) == 2
        assert nx.cut_size(graph, right, left) == 2

    def test_directed_symmetric(self):
        """A cut in a directed graph is symmetric."""
        graph = nx.barbell_graph(3, 0).to_directed()
        left, right = {0, 1, 4}, {2, 3, 5}
        assert nx.cut_size(graph, left, right) == 8
        assert nx.cut_size(graph, right, left) == 8

    def test_multigraph(self):
        """Parallel edges are each counted in the cut."""
        graph = nx.MultiGraph(["ab", "ab"])
        assert nx.cut_size(graph, {"a"}, {"b"}) == 2
class TestVolume:
    """Unit tests for the :func:`~networkx.volume` function."""

    def test_graph(self):
        assert nx.volume(nx.cycle_graph(4), {0, 1}) == 4

    def test_digraph(self):
        cycle = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0)])
        assert nx.volume(cycle, {0, 1}) == 2

    def test_multigraph(self):
        doubled = nx.MultiGraph(list(nx.cycle_graph(4).edges()) * 2)
        assert nx.volume(doubled, {0, 1}) == 8

    def test_multidigraph(self):
        cycle_edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
        doubled = nx.MultiDiGraph(cycle_edges * 2)
        assert nx.volume(doubled, {0, 1}) == 4
class TestNormalizedCutSize:
    """Unit tests for the :func:`~networkx.normalized_cut_size`
    function.
    """

    def test_graph(self):
        graph = nx.path_graph(4)
        inner = {1, 2}
        outer = set(graph) - inner
        # The cut looks like this: o-{-o--o-}-o
        assert nx.normalized_cut_size(graph, inner, outer) == 2 * ((1 / 4) + (1 / 2))

    def test_directed(self):
        graph = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
        inner = {1, 2}
        outer = set(graph) - inner
        # The cut looks like this: o-{->o-->o-}->o
        assert nx.normalized_cut_size(graph, inner, outer) == 2 * ((1 / 2) + (1 / 1))
class TestConductance:
    """Unit tests for the :func:`~networkx.conductance` function."""

    def test_graph(self):
        graph = nx.barbell_graph(5, 0)
        # Singleton sets holding the two "bridge" nodes: there is one cut
        # edge, and each set has volume five.
        assert nx.conductance(graph, {4}, {5}) == 1 / 5
class TestEdgeExpansion:
    """Unit tests for the :func:`~networkx.edge_expansion` function."""

    def test_graph(self):
        graph = nx.barbell_graph(5, 0)
        left = set(range(5))
        right = set(graph) - left
        assert nx.edge_expansion(graph, left, right) == 1 / 5
class TestNodeExpansion:
    """Unit tests for the :func:`~networkx.node_expansion` function."""

    def test_graph(self):
        graph = nx.path_graph(8)
        # The neighborhood of {3, 4, 5} has cardinality five, the set itself
        # cardinality three.
        assert nx.node_expansion(graph, {3, 4, 5}) == 5 / 3
class TestBoundaryExpansion:
    """Unit tests for the :func:`~networkx.boundary_expansion` function."""

    def test_graph(self):
        G = nx.complete_graph(10)
        S = set(range(4))
        expansion = nx.boundary_expansion(G, S)
        # The node boundary of S has cardinality six, and S has
        # cardinality four.
        expected = 6 / 4
        assert expected == expansion
class TestMixingExpansion:
    """Unit tests for the :func:`~networkx.mixing_expansion` function."""

    def test_graph(self):
        graph = nx.barbell_graph(5, 0)
        left = set(range(5))
        right = set(graph) - left
        # One cut edge; the graph's edge total is two five-node cliques
        # (5 * 4 edges) plus the single bridge.
        assert nx.mixing_expansion(graph, left, right) == 1 / (2 * (5 * 4 + 1))
| StarcoderdataPython |
190576 | <gh_stars>0
X = 'spam'
Y = 'eggs'
# Tuple packing/unpacking swaps the two names without a temporary variable.
X, Y = Y, X
print((X, Y))  # -> ('eggs', 'spam')
1739091 | # -*- coding: UTF-8 -*-
"""
Author:wistn
since:2020-10-05
LastEditors:Do not edit
LastEditTime:2021-03-04
Description:
"""
from .org_noear_siteder_models_PicModel import PicModel
from .org_noear_siteder_viewModels_ViewModelBase import ViewModelBase
from .org_noear_siteder_dao_engine_DdSource import DdSource
from .mytool import TextUtils
from .noear_snacks_ONode import ONode
class ProductSdViewModel(ViewModelBase):
    """View model for a product page assembled from site-definition JSON."""

    def __init__(self, url):
        super().__init__()
        self.pictures = []   # PicModel gallery entries
        self.logo = None
        self.name = None
        self.shop = None
        self.intro = None
        self.buyUrl = None
        self.bookUrl = url

    # @Override
    def loadByConfig(self, config):
        pass

    # @Override
    def loadByJson(self, config, *jsons):
        """Load one or more JSON documents.

        Mirrors the Java varargs ``String... jsons``: callers may pass
        several JSON strings, or a single list of them.
        """
        # Fix: `jsons == None` could never be true for a *args tuple;
        # truthiness also covers the empty case.
        if not jsons:
            return
        if len(jsons) == 1 and isinstance(jsons[0], list):
            jsons = jsons[0]
        for json in jsons:
            self.loadByJsonData(config, json)

    def loadByJsonData(self, config, json):
        """Merge a single JSON document into this view model."""
        data = ONode.tryLoad(json)
        if DdSource.isBook(config):
            # Fill the scalar fields only once (first document wins).
            if TextUtils.isEmpty(self.shop):
                self.logo = data.get("logo").getString()
                self.name = data.get("name").getString()
                self.shop = data.get("shop").getString()
                self.intro = data.get("intro").getString()
                self.buyUrl = data.get("buyUrl").getString()
            for node in data.get("pictures").asArray():
                self.pictures.append(PicModel(self.bookUrl, node.getString()))

    # --------------
    def clear(self):
        """Drop all collected pictures."""
        self.pictures = []

    def total(self):
        """Number of pictures collected so far."""
        return len(self.pictures)

    def get(self, index):
        """Picture at *index*."""
        return self.pictures[index]
| StarcoderdataPython |
1753181 | <reponame>insilichem/gpathfinder
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############
# GPathFinder: Identification of ligand pathways by a multi-objective
# genetic algorithm
#
# https://github.com/insilichem/gpathfinder
#
# Copyright 2019 <NAME>, <NAME>,
# <NAME>, <NAME>,
# <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############
"""
GPathFinder generates approximate ligand binding/unbinding pathways
to/from the binding site of a receptor (typically a protein).
This module must be used in conjunction with at least the following
three genes:
- A GPathFinder standard ``molecule`` gene for the ligand molecule.
- A GPathFinder standard ``molecule`` gene for the protein molecule.
- A GPathFinder ``path_torsion`` gene applied on the ligand to allow its
flexibility.
Optionally, the following genes can be used as well:
- A GPathFinder ``path_rotamers`` gene on the protein to allow conformational
changes in its side-chain.
- A GPathFinder``path_normal modes`` gene on the protein to allow the
exploration of its global folding through normal modes analysis.
To assess the quality of the pathways generated by this pool of genes,
the following objective must be used in the evaluation stage:
- At least one ``path scoring`` objective to evaluate how good are the
pathways generated by the genetic algorithm in terms of quality of the
sampling (avoid steric clashes, good vina score, good smoothness).
"""
# Python
from __future__ import print_function
import random
import logging
import numpy as np
from math import sqrt
import copy
from zipfile import ZipFile, ZIP_STORED
import os
import pprint
# Chimera
import chimera
import Matrix as M
from Measure import inertia
from Molecule import atom_positions
from chimera import Xform as X
from FitMap.search import random_rotation
# GPATH
from gpath.genes import GeneProvider
from gpath import parse
from gpath.exceptions import LigandDirectoryAndTorsion, ProteinDirectoryAndNM
# 3x4 transform matrix: identity rotation, zero translation.
IDENTITY = ((1.0, 0.0, 0.0, 0.0),
            (0.0, 1.0, 0.0, 0.0),
            (0.0, 0.0, 1.0, 0.0))

# Origin point in Chimera coordinates.
ZERO = chimera.Point(0.0, 0.0, 0.0)
pp = pprint.PrettyPrinter(4)
logger = logging.getLogger(__name__)
def enable(**kwargs):
    """Gene factory: validate the raw keyword arguments and build a Pathway."""
    validated = Pathway.validate(kwargs)
    return Pathway(**validated)
class Pathway(GeneProvider):
"""
Pathway class
Parameters
----------
ligand : str
Name of the GPathFinder ``molecule`` gene containing the ligand
molecule.
protein : str
Name of the GPathFinder ``molecule`` gene containing the receptor
molecule.
torsion_gene : str, optional
Name of the GPathFinder ``path_torsion`` gene to allow flexibility of
the ligand.
rotamers_gene : str, optional
Name of the GPathFinder ``path_rotamers`` gene to allow conformational
changes on the protein side-chain.
radius_rotamers: float, optional, defaults to 3.0
Maximum distance from any point of the ligand in every frame
that is searched for possible rotamers of the protein side-chain.
nm_gene : str, optional
Name of the GPathFinder ``path_normalmodes`` gene to allow the
exploration of the protein global folding through normal modes analysis.
origin : 3-item list or tuple of float, optional
Coordinates to a specific geometric center at which the origin
of the ligand will be set, if they are different from the actual
position of the ligand in the .mol2 file.
inertia_axes : max of 6-item list or tuple of int, optional
Starting point of the ligand when generating binding pathways
will be set at one of the indicated ends of the inertia axes of
the protein.
destination : 3-item list or tuple of float, optional
It indicates the coordinates to the geometric binding site or the
expected end point of the pathway.
If not set by the user, GPathFinder will assume an unbinding scenario,
calculating automatically the distance from the ligand to outside
the protein.
max_step_separation : float, optional
Maximum distance in Angstroms from one point of the ligand in
the pathway to the next one. If not set by the user, GPathFinder
calculates the value from the size of the ligand.
min_step_increment : float, optional
Minimum distance increment in Angstroms from the ligand's origin
that has to be the ligand in one frame of the pathway with
respect of the ligand's distance from the origin of the previous
frame of the pathway. If not set by the user, GPathFinder calculates
the value as 1/5 by max_step_separation.
frontier_margin : float, optional, defaults to 0.0
Safety distance from the frontier of the protein to consider that
the ligand is outside. Specially useful when using large ligands
that can be stucked at the frontier.
mut_pos_pb : float, optional, defaults to 0.10
When a mutation occurs, this value is the probability of such
mutation to be of the type `positions`, that is, the mutation
changes the actual trajectory of the pathway. Warning: parameter
for advanced users, usually the default value is correct for the
vast majority of the systems.
Attributes
----------
allele : dict
allele['positions'] = list of translation matrices of the ligand
along the frames of the pathway, as
explained in Notes.
allele['rotations'] = list of rotation materices of the ligand
along the frames of the pathway, as
explained in Notes.
allele['torsions'] = list of torsion alleles applied to the
ligand along the frames of the pathway (see
allele of GPathFinder ``path_torsion`` gene).
allele['torsion_anchor'] = [str, int] ligand gene name and
serial number of the anchor atom used
in the torsion gene.
allele['rotamers'] = list of data of the rotamers applied on the
protein along the frames of the pathway,
as explained in Notes.
allele['normal_modes'] = list of normal modes alleles applied to
the protein along the frames of the
pathway (see allele of GPathFinder
``path_normalmodes`` gene).
scores : list of dict
Scores reported for every frame of the pathway. Each key of the
dict correspond to one score (e.g. clashes).
Notes
-----
** Priority and configuration of the origin point of the ligand **
The origin point/s of the ligand can be provided by three different
manners:
1. The ends of the inertia axes of the protein provided with the
``inertia_axes``.
2. The coordinates set in ``origin`` parameter.
3. The actual location of the ligand in its .mol2 file.
The priority in case of conflict will be the order listed before.
That is, if ``ìnertia_axes`` parameter is provided, this will
override a possible ``origin`` parameter and the actual ubication in
the .mol2 file.
** Details of the allele structure **
- ``positions`` of the ligand along the pathway will be defined by
translation matrices with the following shape:
(
(1, 0, 0, Tx),
(0, 1, 0, Ty),
(0, 0, 1, Tz)
)
- ``rotations`` of the ligand along the pathway will be defined by
rotation matrices with the following shape:
(
(R1, R2, R3, 0),
(R4, R5, R6, 0),
(R7, R8, R9, 0)
)
- ``rotamers`` applied to the ligand at one frame of the pathway
will be defined as 3 lists, each one containing:
1. Residue numbers over which the rotamers are applied.
2. Actual rotamer allele (see allele of GPathFinder ``path_rotamers`` gene)
3. Chi angles corresponding to each rotamer.
"""
_validate = {
parse.Required('ligand'): parse.Molecule_name,
parse.Required('protein'): parse.Molecule_name,
'torsion_gene': parse.Molecule_name,
'rotamers_gene': parse.Molecule_name,
'radius_rotamers': parse.All(parse.Coerce(float), parse.Range(min=0)),
'nm_gene': parse.Molecule_name,
'origin': parse.Coordinates,
'inertia_axes': [parse.All(parse.Coerce(int),
parse.Range(min=0, max=5))],
'destination': parse.Coordinates,
'max_step_separation': parse.Coerce(float),
'min_step_increment': parse.Coerce(float),
'frontier_margin': parse.Coerce(float),
'mut_pos_pb': parse.Coerce(float),
}
    def __init__(self, ligand, protein, torsion_gene=None, rotamers_gene=None,
                 radius_rotamers=3.0, nm_gene=None, origin=None,
                 inertia_axes=None, destination=None, max_step_separation=None,
                 min_step_increment=None, frontier_margin=0.0, mut_pos_pb=0.1, **kwargs):
        """Store the gene configuration; the heavy setup happens in __ready__."""
        GeneProvider.__init__(self, **kwargs)
        # Names of the companion genes (ligand/protein molecules, optional
        # torsion, rotamers and normal-modes genes) plus numeric parameters,
        # straight from the validated input.
        self.ligand = ligand
        self.protein = protein
        self.torsion_gene = torsion_gene
        self.rotamers_gene = rotamers_gene
        self.radius_rotamers = radius_rotamers
        self.nm_gene = nm_gene
        self.destination = destination
        # Falsy values are normalized to None so later checks are uniform.
        self.origin = origin if origin else None
        self.inertia_axes = inertia_axes if inertia_axes else None
        self.max_separation = max_step_separation
        self.min_increment = min_step_increment
        self.frontier_margin = frontier_margin
        self.mut_pos_pb = mut_pos_pb
        self.act_rotamers = []
        self.which_evaluations = []
    def __ready__(self):
        """
        Deferred initialization: validate gene combinations, compute the
        protein inertia ellipsoid, choose an origin, derive step sizes,
        build the mutation operation pool and generate the initial
        random pathway.
        """
        # Check if ligand conformers and torsion are used simultaneously
        if len(self.ligand_g.catalog) > 1 and self.torsion_gene:
            raise LigandDirectoryAndTorsion('If you set the Ligand molecule to be a directory, you can not use a path_torsion gene simultaneously')
        # Check if protein conformers and NM are used simultaneously
        if len(self.protein_g.catalog) > 1 and self.nm_gene:
            raise ProteinDirectoryAndNM('If you set the Protein molecule to be a directory, you can not use a path_normalmodes gene simultaneously')
        # Calculate inertia axes of the protein for further use
        self.p_axes, self.p_d2, self.p_center = inertia.atoms_inertia(self.protein_mol.atoms)
        # Ellipsoid semi-axes enlarged by the user-defined frontier margin.
        self.p_elen = [a + self.frontier_margin for a in inertia.inertia_ellipsoid_size(self.p_d2)]
        self.p_axes = np.array(self.p_axes)
        if self.inertia_axes: #Prepare origin from inertia axes
            # Two candidate points per axis (both ellipsoid ends);
            # self.inertia_axes selects which candidate indices are eligible.
            origin_points = []
            for axis, length in zip(self.p_axes, self.p_elen):
                origin_points.append((axis*length)+self.p_center)
                origin_points.append((-axis*length)+self.p_center)
            points = []
            for i, point in enumerate(origin_points):
                if i in self.inertia_axes:
                    points.append(point)
            self.origin = random.choice(points)
        elif not self.origin:
            #Origin is the geometric center of the ligand if not
            #provided explicitly
            self.origin = self.ligand_center
        #Calculate min_increment and max_separation if not set by user
        if not self.max_separation:
            # Default step length: smallest ligand inertia semi-axis, >= 1.0.
            axes, d2, center = inertia.atoms_inertia(self.ligand_mol.atoms)
            elen = [a for a in inertia.inertia_ellipsoid_size(d2)]
            self.max_separation = min(elen) if min(elen) > 1.0 else 1.0
        if self.min_increment is None:
            self.min_increment = 1.0 * self.max_separation / 5.0
        #Operations permitted to improve the path in mutate
        self._op_roulette = ['rotation']
        if self.torsion_gene:
            self._op_roulette.append('torsion')
        if self.rotamers_gene:
            self._op_roulette.append('rotamers')
        if self.nm_gene:
            self._op_roulette.append('nm')
        if len(self.protein_g.catalog) > 1:
            self._op_roulette.append('protein') #Conformers for the protein
        if len(self.ligand_g.catalog) > 1:
            self._op_roulette.append('ligand') #Conformers for the ligand
        #Making the initial allele
        self.allele = {}
        # First frame: identity rotation, ligand placed at the origin.
        self.allele['positions'] = [((1.0, 0.0, 0.0, self.origin[0]),
                                    (0.0, 1.0, 0.0, self.origin[1]),
                                    (0.0, 0.0, 1.0, self.origin[2]))]
        self.allele['rotations'] = [IDENTITY]
        self.allele['min_increment'] = self.min_increment
        self.allele['max_separation'] = self.max_separation
        if self.torsion_gene:
            self.allele['torsions'] = [[]]
            self.allele['torsion_anchor'] = list(self.torsion_g._anchor)
        if self.rotamers_gene:
            # Per frame: [residue names, rotamer allele, all chi angles].
            self.allele['rotamers'] = [ [[],[],[]] ]
        if self.nm_gene:
            self.allele['normal_modes'] = [None]
        self.allele['protein'] = [0] #Conformer for the protein
        self.allele['ligand'] = [0] #Conformer for the ligand
        self.allele['coord_residues'] = [[]] #To store possible coord residues when evaluating metal_sites
        self.allele['mate_torsions'] = 0 #To register if mate is being useful
        self.allele['mate_positions'] = 0 #To register if mate is being useful
        self.allele['mate_rotations'] = 0 #To register if mate is being useful
        self.allele['mutate_positions'] = 0 #To register if mutate is being useful
        self.allele['mutate_torsions'] = 0 #To register if mutate is being useful
        self.allele['mutate_rotations'] = 0 #To register if mutate is being useful
        self.allele['mutate_rotamers'] = 0 #To register if mutate is being useful
        self.allele['mutate_nm'] = 0 #To register if mutate is being useful
        self.allele['mutate_protein'] = 0 #To register if mutate is being useful
        self.allele['mutate_ligand'] = 0 #To register if mutate is being useful
        self.scores = [{}] #To store scores at the evaluation stage
        #Initial pathway
        last_point = [x[3] for x in self.allele['positions'][-1]]
        if self.destination:
            not_arrived = distance(self.destination, last_point) > (self.max_separation)
        else:
            # Without an explicit destination keep extending while the
            # ligand is still inside the protein inertia ellipsoid.
            y = np.array([a-b for a,b in zip(last_point, self.p_center)])
            y = np.absolute(self.p_axes.dot(y))
            not_arrived = is_inside_ellipsoid(self.p_elen, y)
        while not_arrived:
            new_point = random_extension_point(last_point, self.max_separation,
                                        self.origin, min_increment=self.min_increment,
                                        destination=self.destination)
            if new_point is not None:
                self.allele['positions'].append(new_point)
                self.allele['rotations'].append(random_rotation())
                if self.torsion_gene:
                    self.torsion_g.gp_mutate(1.0)
                    self.allele['torsions'].append(copy.deepcopy(self.torsion_g.allele))
                if self.nm_gene:
                    # NOTE(review): randint's upper bound is inclusive; if
                    # n_samples counts the samples this can pick an index one
                    # past the end -- confirm.
                    self.allele['normal_modes'].append(random.randint(0,
                                            self.parent.genes[self.nm_gene].n_samples))
                self.allele['protein'].append(random.randint(0, len(self.protein_g.catalog)-1))
                self.allele['ligand'].append(random.randint(0, len(self.ligand_g.catalog)-1))
                if self.rotamers_gene:
                    self.allele['rotamers'].append([[],[],[]])
                self.allele['coord_residues'].append([])
                self.scores.append({})
                last_point = [x[3] for x in self.allele['positions'][-1]]
                if self.destination:
                    not_arrived = distance(self.destination, last_point) > (self.max_separation)
                else:
                    y = np.array([a-b for a,b in zip(last_point, self.p_center)])
                    y = np.absolute(self.p_axes.dot(y))
                    not_arrived = is_inside_ellipsoid(self.p_elen, y)
def express(self):
"""
Expression is controlled in gp_express to avoid unnecessary
calls and supervise what frames are expressed.
"""
pass
def unexpress(self):
"""
Unexpression is controlled in gp_unexpress to avoid unnecessary
calls and supervise what frames are expressed.
"""
pass
    def gp_express(self, i, with_rotamers=True, smoothness=False):
        """
        For the demanded frame ``i``:
            1. Apply translation, rotation and torsions to the ligand.
            2. Apply, if present, rotamers and normal modes to the
            protein.
        Optional parameter ``with_rotamers`` controls if rotamers have
        to be expressed or not (useful when a rotamer actualization is
        pending).
        Optional parameter ``smoothness`` is used to express only the
        ligand transformations in order to calculate RMSD between frames.
        If True, only torsions and rotations of the ligand are applied.
        """
        # Express the conformers chosen for this frame (ligand, then protein).
        ligand_sample = self.allele['ligand'][i]
        self.ligand_g.allele = self.ligand_g.catalog[ligand_sample]
        self.ligand_g._need_express = True
        self.ligand_g.express()
        protein_sample = self.allele['protein'][i]
        self.protein_g.allele = self.protein_g.catalog[protein_sample]
        self.protein_g._need_express = True
        self.protein_g.express()
        # Compose the ligand transform: move the ligand center to the
        # coordinate origin, rotate, then translate to the frame position
        # (identity translation when only smoothness/RMSD is required).
        or_x, or_y, or_z = self.ligand_center
        to_zero = ((1.0, 0.0, 0.0, -or_x),
                   (0.0, 1.0, 0.0, -or_y),
                   (0.0, 0.0, 1.0, -or_z))
        rotations = self.allele['rotations']
        if smoothness:
            matrices = (IDENTITY,) + (rotations[i],) + (to_zero,)
        else:
            positions = self.allele['positions']
            matrices = (positions[i],) + (rotations[i],) + (to_zero,)
        matrices = M.multiply_matrices(*matrices)
        self.ligand_mol.openState.xform = M.chimera_xform(matrices)
        if self.torsion_gene:
            self.torsion_g.allele = copy.deepcopy(self.allele['torsions'][i])
            self.torsion_g._need_express = True
            self.torsion_g.express()
        if self.nm_gene and not smoothness:
            sample_number = self.allele['normal_modes'][i]
            if sample_number:
                self.nm_g.allele = self.nm_g.NORMAL_MODES_SAMPLES[sample_number]
                self.nm_g._need_express = True
                self.nm_g.express()
        if self.rotamers_gene and with_rotamers and not smoothness:
            if i in self.act_rotamers:
                #Actualize rotamers
                self.actualize_rotamers(i)
            if self.allele['rotamers'][i][1]:
                # Rebuild the rotamers gene for the residues stored in this
                # frame ('molname/pos' strings) and restore their alleles.
                residues = []
                for rot in self.allele['rotamers'][i][0]:
                    a, b = rot.split('/')
                    residues.append([a, int(b)])
                self.rotamers_g._residues = residues
                self.rotamers_g.__ready__()
                self.rotamers_g.allele = self.allele['rotamers'][i][1]
                for (molname, pos), residue in self.rotamers_g.residues.items():
                    self.rotamers_g.allele[residues.index([molname, pos])] = self.allele['rotamers'][i][1][residues.index([molname, pos])]
                self.rotamers_g._need_express = True
                self.rotamers_g.express()
    def gp_unexpress(self, i, smoothness=False):
        """
        For the demanded frame ``i``:
            1. Undo translation, rotation and torsions made to the
            ligand.
            2. Undo, if present, rotamers and normal modes changes made
            to the protein.
        Optional parameter ``smoothness`` is used to unexpress only the
        ligand transformations when used to calculate RMSD of the ligand
        between frames. If True, only torsions and rotations of the
        ligand are unapplied.
        """
        # Reset the ligand to the identity transform.
        self.ligand_mol.openState.xform = X()
        if self.torsion_gene:
            self.torsion_g.unexpress()
            self.torsion_g._need_express = False
        if self.rotamers_gene and not smoothness:
            # Only unexpress rotamers when frame i actually applied some.
            if self.allele['rotamers'][i][0]:
                self.rotamers_g.unexpress()
                self.rotamers_g._need_express = False
        if self.nm_gene and not smoothness:
            sample_number = self.allele['normal_modes'][i]
            if sample_number:
                self.nm_g.unexpress()
                self.nm_g._need_express = False
        # Restore base conformers for both molecules.
        self.protein_g.unexpress()
        self.protein_g._need_express = False
        self.ligand_g.unexpress()
        self.ligand_g._need_express = False
    def mate(self, mate):
        """
        Randomly choose a crossover operation between [torsions,
        rotations] of the ligand and recombine one frame of each of the
        two individuals' alleles.
        """
        #frame for self is the selected by score probability
        # NOTE(review): random.choice over dict.keys() only works under
        # Python 2 (Chimera); Python 3 would need list(...) -- confirm
        # the runtime.
        k = random.choice(self.scores[0].keys())
        scores_list = [sc[k] for sc in self.scores]
        m = min(scores_list)
        if m < 0:
            # Shift scores so they are non-negative before normalizing.
            scores_list = [sc - m for sc in scores_list]
        scores_list = [sc + 1.0 for sc in scores_list] #to avoid divisions by 0 when calculating probs
        # Frame 0 is the fixed origin and is never selected.
        probs = [float(sc)/sum(scores_list[1:]) for sc in scores_list[1:]]
        i = np.random.choice(range(1, len(self.allele['positions'])), p=probs)
        #frame for mate is the nearest to the selected for self
        self_pos = [x[3] for x in self.allele['positions'][i]]
        dists = []
        for frame in range(1, len(mate.allele['positions'])):
            mate_pos = [x[3] for x in mate.allele['positions'][frame]]
            dists.append(distance(self_pos, mate_pos))
        j = dists.index(min(dists)) + 1 #dists doesn't contain distance for the frame 0
        if self.torsion_gene:
            op = random.choice([self.mate_torsions, self.mate_rotations])
        else:
            op = self.mate_rotations
        op(mate, i, j)
def mate_rotations(self, mate, i, j):
"""
For the frame ``i`` of self individual and ``j`` of the mate
individual, make a interpolation of the rotations of the two
ligands and assign the resulting matrix to both.
"""
self.allele['mate_rotations'] = self.allele['mate_rotations'] + 1 #For register if mate is being useful
mate.allele['mate_rotations'] = mate.allele['mate_rotations'] + 1 #For register if mate is being useful
interp = interpolate_rotations(self.allele['rotations'][i],
mate.allele['rotations'][j])
self.allele['rotations'][i] = copy.deepcopy(interp)
mate.allele['rotations'][j] = copy.deepcopy(interp)
#Delete previous calculated scores
self.scores[i] = {}
mate.scores[j] = {}
#Force rotamers actualization
self.act_rotamers.append(i)
mate.act_rotamers.append(j)
def mate_torsions(self, mate, i, j):
"""
For the frame ``i`` of self individual and ``j`` of the mate
individual, make a torsion gp_mate (see ``path_torsion`` gene).
"""
self.allele['mate_torsions'] = self.allele['mate_torsions'] + 1 #For register if mate is being useful
mate.allele['mate_torsions'] = mate.allele['mate_torsions'] + 1 #For register if mate is being useful
self.torsion_g.allele = copy.deepcopy(self.allele['torsions'][i])
mate.torsion_g.allele = copy.deepcopy(mate.allele['torsions'][j])
self.torsion_g.gp_mate(mate.torsion_g)
self.allele['torsions'][i] = copy.deepcopy(self.torsion_g.allele)
mate.allele['torsions'][j] = copy.deepcopy(mate.torsion_g.allele)
#Delete previous calculated scores
self.scores[i] = {}
mate.scores[j] = {}
#Force rotamers actualization
self.act_rotamers.append(i)
mate.act_rotamers.append(j)
    def mutate(self, indpb):
        """
        There are two possible mutations:
            1. Cut the pathway and remake all the frames from the
            selected cutting frame (with probability mut_pos_pb).
            2. Change the [rotation, torsions, rotamers, normal modes]
            of one frame.
        The frame affected by the mutation is selected randomly, with
        the probabilities of each frame weighted by their scores (more
        probability to be mutated if the frame has a worse score).
        The operation is chosen uniformly from the pool built in
        __ready__.
        """
        # NOTE(review): the ``indpb`` argument is ignored; ``self.indpb``
        # (presumably provided by GeneProvider) is used instead -- confirm
        # this is intended.
        if random.random() < self.indpb:
            # Pick a frame weighted by its (shifted) score for a randomly
            # chosen objective k.
            # NOTE(review): random.choice over dict.keys() requires
            # Python 2 (Chimera); Python 3 would need list(...).
            k = random.choice(self.scores[0].keys())
            scores_list = [sc[k] for sc in self.scores]
            m = min(scores_list)
            if m < 0:
                scores_list = [sc - m for sc in scores_list]
            scores_list = [sc + 1.0 for sc in scores_list] #to avoid divisions by 0 when calculating probs
            probs = [float(sc)/sum(scores_list[1:]) for sc in scores_list[1:]]
            i = np.random.choice(range(1, len(self.allele['positions'])), p=probs)
            n = random.random()
            if n < self.mut_pos_pb: #Modify the positions of the pathway
                i = np.random.choice(range(1, len(self.allele['positions']))) # No prob biass in this case
                self.allele['mutate_positions'] = self.allele['mutate_positions'] + 1 #For register if mutate is being useful
                #Delete frames from i
                self.allele['positions'] = self.allele['positions'][:i]
                self.allele['rotations'] = self.allele['rotations'][:i]
                if self.torsion_gene:
                    self.allele['torsions'] = self.allele['torsions'][:i]
                if self.rotamers_gene:
                    self.allele['rotamers'] = self.allele['rotamers'][:i]
                if self.nm_gene:
                    self.allele['normal_modes'] = self.allele['normal_modes'][:i]
                self.allele['protein'] = self.allele['protein'][:i]
                self.allele['ligand'] = self.allele['ligand'][:i]
                self.allele['coord_residues'] = self.allele['coord_residues'][:i]
                self.scores = self.scores[:i]
                #Generate new frames
                # Same extension loop as in __ready__: extend until the last
                # point leaves the protein ellipsoid or nears the destination.
                last_point = [x[3] for x in self.allele['positions'][-1]]
                if self.destination:
                    not_arrived = distance(self.destination, last_point) > (self.max_separation)
                else:
                    y = np.array([a-b for a,b in zip(last_point, self.p_center)])
                    y = np.absolute(self.p_axes.dot(y))
                    not_arrived = is_inside_ellipsoid(self.p_elen, y)
                while not_arrived:
                    new_point = random_extension_point(last_point, self.max_separation,
                                                self.origin, min_increment=self.min_increment,
                                                destination=self.destination)
                    if new_point is not None:
                        self.allele['positions'].append(new_point)
                        self.allele['rotations'].append(random_rotation())
                        if self.torsion_gene:
                            self.torsion_g.gp_mutate(1.0)
                            self.allele['torsions'].append(copy.deepcopy(self.torsion_g.allele))
                        if self.nm_gene:
                            # NOTE(review): randint's upper bound is inclusive
                            # -- confirm n_samples is a valid sample index.
                            self.allele['normal_modes'].append(random.randint(0,
                                                    self.parent.genes[self.nm_gene].n_samples))
                        self.allele['protein'].append(random.randint(0, len(self.protein_g.catalog)-1))
                        self.allele['ligand'].append(random.randint(0, len(self.ligand_g.catalog)-1))
                        if self.rotamers_gene:
                            self.allele['rotamers'].append([[],[],[]])
                        self.allele['coord_residues'].append([])
                        self.scores.append({})
                        last_point = [x[3] for x in self.allele['positions'][-1]]
                        if self.destination:
                            not_arrived = distance(self.destination, last_point) > (self.max_separation)
                        else:
                            y = np.array([a-b for a,b in zip(last_point, self.p_center)])
                            y = np.absolute(self.p_axes.dot(y))
                            not_arrived = is_inside_ellipsoid(self.p_elen, y)
            else:
                #Try to modify rotation/torsion/rotamers/nm/sample protein
                operation = random.choice(self._op_roulette)
                if operation == 'rotation':
                    self.allele['mutate_rotations'] = self.allele['mutate_rotations'] + 1 #For register if mutate is being useful
                    self.allele['rotations'][i] = random_rotation()
                    self.scores[i] = {}
                    self.act_rotamers.append(i)
                elif operation == 'torsion':
                    self.allele['mutate_torsions'] = self.allele['mutate_torsions'] + 1 #For register if mutate is being useful
                    self.torsion_g.gp_mutate(1.0)
                    self.allele['torsions'][i] = copy.deepcopy(self.torsion_g.allele)
                    self.scores[i] = {}
                    self.act_rotamers.append(i)
                elif operation == 'rotamers':
                    self.allele['mutate_rotamers'] = self.allele['mutate_rotamers'] + 1 #For register if mutate is being useful
                    self.scores[i] = {} #put the score to 0 and allow path_scoring to recalculate rotamers
                    #Search which rotamers are inside the search radius
                    residues = set()
                    atoms = [a for a in surrounding_atoms(self.ligand_mol, self.protein_mol, self.radius_rotamers)]
                    for atom in atoms:
                        residues.add(atom.residue.id.position)
                    residues_list = ['{}/{}'.format(self.protein, r) for r in residues]
                    residues = []
                    for rot in residues_list:
                        a, b = rot.split('/')
                        residues.append([a, int(b)])
                    #Create the new rotamers allele and express it to store in path allele
                    self.rotamers_g._residues = residues
                    self.rotamers_g.__ready__()
                    self.rotamers_g._need_express = True
                    self.rotamers_g.express()
                    #Store the new rotamer data in pathway gene
                    self.allele['rotamers'][i][0] = []
                    self.allele['rotamers'][i][1] = []
                    self.allele['rotamers'][i][2] = []
                    for ((molname, pos), residue), a in \
                            zip(self.rotamers_g.residues.items(),
                                self.rotamers_g.allele):
                        res_name = str(molname)+'/'+str(pos)
                        self.allele['rotamers'][i][0].append(res_name)
                        self.allele['rotamers'][i][1].append(a)
                        self.allele['rotamers'][i][2].append(self.rotamers_g.all_chis(residue))
                elif operation == 'nm':
                    self.allele['mutate_nm'] = self.allele['mutate_nm'] + 1 #For register if mutate is being useful
                    self.allele['normal_modes'][i] = random.randint(0,
                                            self.parent.genes[self.nm_gene].n_samples)
                    self.scores[i] = {}
                    self.act_rotamers.append(i)
                elif operation == 'protein':
                    self.allele['mutate_protein'] = self.allele['mutate_protein'] + 1 #For register if mutate is being useful
                    self.allele['protein'][i] = random.randint(0, len(self.protein_g.catalog)-1)
                    self.scores[i] = {}
                    self.act_rotamers.append(i)
                elif operation == 'ligand':
                    self.allele['mutate_ligand'] = self.allele['mutate_ligand'] + 1 #For register if mutate is being useful
                    self.allele['ligand'][i] = random.randint(0, len(self.ligand_g.catalog)-1)
                    self.scores[i] = {}
                    self.act_rotamers.append(i)
    def actualize_rotamers(self, i):
        """
        Actualize the rotamers list and allele when the position/
        orientation/torsions of the ligand in frame ``i`` have changed
        (with the new ligand expression the surrounding rotamers may
        differ, so the stored information must be refreshed). The old
        rotamer allele is kept for residues present both in the previous
        and in the new surroundings of the ligand.
        """
        #Search which rotamers are inside the search radius
        residues = set()
        atoms = [a for a in surrounding_atoms(self.ligand_mol, self.protein_mol, self.radius_rotamers)]
        for atom in atoms:
            residues.add(atom.residue.id.position)
        residues_list = ['{}/{}'.format(self.protein, r) for r in residues]
        residues = []
        for rot in residues_list:
            a, b = rot.split('/')
            residues.append([a, int(b)])
        #Create the new rotamers allele and express i
        self.rotamers_g._residues = residues
        self.rotamers_g.__ready__()
        self.rotamers_g._need_express = True
        self.rotamers_g.express()
        #Store the new rotamer data in pathway gene (only new residues)
        # Snapshot the previous per-frame rotamer data before resetting.
        old_res = [[],[],[]]
        old_res[0] = list(self.allele['rotamers'][i][0])
        old_res[1] = list(self.allele['rotamers'][i][1])
        old_res[2] = list(self.allele['rotamers'][i][2])
        self.allele['rotamers'][i][0] = []
        self.allele['rotamers'][i][1] = []
        self.allele['rotamers'][i][2] = []
        new_res = [str(molname)+'/'+str(pos) for ((molname, pos), residue), a in \
                   zip(self.rotamers_g.residues.items(),
                       self.rotamers_g.allele)]
        #Incorporate old residues present in the new region as is
        for j, res_name in enumerate(old_res[0]):
            if res_name in new_res:
                self.allele['rotamers'][i][0].append(res_name)
                self.allele['rotamers'][i][1].append(old_res[1][j])
                self.allele['rotamers'][i][2].append(old_res[2][j])
        #Incorporate new residues of the new region
        for ((molname, pos), residue), a in \
                zip(self.rotamers_g.residues.items(),
                    self.rotamers_g.allele):
            res_name = str(molname)+'/'+str(pos)
            if res_name not in self.allele['rotamers'][i][0]:
                self.allele['rotamers'][i][0].append(res_name)
                self.allele['rotamers'][i][1].append(a)
                self.allele['rotamers'][i][2].append(self.rotamers_g.all_chis(residue))
        self.rotamers_g.unexpress()
        self.rotamers_g._need_express = False
        self.act_rotamers.remove(i) #Rotamers are actualized
def write(self, path, name, *args, **kwargs):
"""
Each individual output is a .zip file that contains:
1. A .txt file with the allele data (allele.txt).
2. A .txt file with the scoring data (scores.txt).
3. One .pdb file for each frame of the calculated pathway
(named frame_xxx.pdb, where xxx is the number of frame).
"""
fullname = os.path.join(path, '{}_{}.zip'.format(name, self.name))
fullname = self.create_output_zip(path, fullname)
return fullname
    def create_output_zip(self, path, fullname):
        """
        Build the output .zip at ``fullname``: allele.txt, scores.txt,
        trajectory.pdb and one frame_xxx.pdb per pathway frame (frames
        formatted for Chimera's MD movie importer). Returns ``fullname``.
        """
        #Txt file with the allele
        allelename = os.path.join(path, 'allele.txt')
        with open(allelename, 'w') as f:
            f.write(pp.pformat(self.allele))
        #Txt file with the scores
        scoresname = os.path.join(path, 'scores.txt')
        with open(scoresname, 'w') as f:
            f.write(pp.pformat(self.scores))
        #Pdb file with the trajectory of the pathway
        # One H pseudo-atom per frame position, chained together by bonds.
        trajname = os.path.join(path, 'trajectory.pdb')
        points = self.allele['positions']
        mol = chimera.Molecule()
        r = mol.newResidue("path", " ", 1, " ")
        atoms = []
        atom = mol.newAtom("0", chimera.Element("H"))
        atoms.append(atom)
        point = [x[3] for x in points[0]]
        atoms[-1].setCoord(chimera.Point(*point))
        r.addAtom(atoms[-1])
        for i in range(1, len(points)):
            point = [x[3] for x in points[i]]
            atom = mol.newAtom(str(i), chimera.Element("H"))
            atoms.append(atom)
            atoms[-1].setCoord(chimera.Point(*point))
            r.addAtom(atoms[-1])
            mol.newBond(atoms[i-1], atoms[i])
        chimera.pdbWrite([mol], chimera.Xform(), trajname)
        with ZipFile(fullname, 'w', ZIP_STORED) as z:
            z.write(allelename, os.path.basename(allelename))
            os.remove(allelename)
            z.write(scoresname, os.path.basename(scoresname))
            os.remove(scoresname)
            z.write(trajname, os.path.basename(trajname))
            os.remove(trajname)
            #Pdb files with the actual frames of the pathway
            for i in range(0, len(self.allele['positions'])):
                self.gp_express(i)
                framename = os.path.join(path, "frame_{:03d}.pdb".format(i))
                chimera.pdbWrite([self.protein_mol, self.ligand_mol],
                                 chimera.Xform(), framename)
                # Prepare pdb files with format required by Chimera MD movie import
                # Keep only ATOM/HETATM records from the raw pdb output.
                with open(framename) as frame_file:
                    pdb_file_lines = []
                    line = frame_file.readline()
                    while line:
                        if line[:4] == "ATOM" or line[:6] == "HETATM":
                            pdb_file_lines.append(line)
                        line = frame_file.readline()
                os.remove(framename)
                with open(framename, 'w') as f:
                    # Renumber atom and residue ids so both sequences are
                    # strictly increasing across the concatenated molecules.
                    prev_atom = 0
                    prev_res = 0
                    last_change_res = 0
                    for line in pdb_file_lines:
                        current_atom, current_res = int(line[6:11]), int(line[22:26])
                        if current_atom > prev_atom:
                            prev_atom = current_atom
                        else:
                            current_atom = prev_atom + 1
                            prev_atom = current_atom
                        if current_res > prev_res:
                            prev_res = current_res
                            last_change_res = current_res
                        elif current_res != last_change_res:
                            last_change_res = current_res
                            prev_res += 1
                            current_res = prev_res
                        else:
                            current_res = prev_res
                        line = line[:6] + '{:5d}'.format(current_atom) + line[11:22] + '{:4d}'.format(current_res) + line[26:]
                        f.write("%s" % line)
                z.write(framename, os.path.basename(framename))
                os.remove(framename)
                self.gp_unexpress(i)
        return fullname
#####
@property
def ligand_mol(self):
return self.parent.find_molecule(self.ligand).compound.mol
@property
def protein_mol(self):
return self.parent.find_molecule(self.protein).compound.mol
@property
def ligand_g(self):
return self.parent.find_molecule(self.ligand)
@property
def protein_g(self):
return self.parent.find_molecule(self.protein)
@property
def torsion_g(self):
if self.torsion_gene:
return self.parent.genes[self.torsion_gene]
else:
return None
@property
def rotamers_g(self):
if self.rotamers_gene:
return self.parent.genes[self.rotamers_gene]
else:
return None
@property
def nm_g(self):
if self.nm_gene:
return self.parent.genes[self.nm_gene]
else:
return None
@property
#Geometric center of the ligand
def ligand_center(self):
coordinates = atom_positions(self.ligand_mol.atoms)
c = np.average(coordinates, axis=0)
return c
#############
# Some useful functions
def random_extension_point(center, r, origin, min_increment=0, destination=None):
    """
    Sample a random point that extends the pathway by one step.

    A candidate is accepted when it lies within distance ``r`` of
    ``center`` and either (a) moves at least ``min_increment`` farther
    from ``origin`` when ``destination`` is not a list, or (b) moves at
    least ``min_increment`` closer to the ``destination`` point when one
    is given as a list of coordinates.

    Parameters
    ----------
    center : 3-tuple of float
        Coordinates of the previous point (origin of the search sphere).
    r : float
        Radius of the search sphere (maximum distance of the new point
        from ``center``).
    origin : 3-tuple of float
        Coordinates of the origin point of the pathway.
    min_increment : float, optional, defaults to 0
        Minimum gain in distance from the origin (or towards the
        destination) relative to ``center``.
    destination : list of float, optional
        Coordinates of the destination point of the pathway.

    Returns
    -------
    A 3x4 row-major transform whose translation column holds the accepted
    coordinates, or None when no valid point is found in 100 trials.

    Notes
    -----
    ``r`` (max_step_separation) together with ``min_increment`` controls
    the spacing between frames and how far the path may deviate from a
    straight line (a larger gap between them allows more deviation).
    """
    for _ in range(0, 100):
        offset = [random.uniform(-r, r) for m in center]
        candidate = [delta + base for delta, base in zip(offset, center)]
        if distance(candidate, center) > r:
            continue
        if isinstance(destination, list):
            accepted = (distance(candidate, destination) +
                        min_increment) < distance(center, destination)
        else:
            accepted = distance(candidate, origin) >= (distance(center, origin) +
                                                       min_increment)
        if accepted:
            return ((1.0, 0.0, 0.0, candidate[0]),
                    (0.0, 1.0, 0.0, candidate[1]),
                    (0.0, 0.0, 1.0, candidate[2]))
    return None
def distance(a, b):
    """Euclidean distance between the 3-D points ``a`` and ``b``."""
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    dz = b[2] - a[2]
    return sqrt(dx * dx + dy * dy + dz * dz)
def interpolate_rotations(a, b):
    """Halfway interpolation between the two rotation matrices ``a`` and ``b``."""
    half = M.interpolate_xforms(M.chimera_xform(a), ZERO,
                                M.chimera_xform(b), 0.5)
    return M.xform_matrix(half)
def is_inside_ellipsoid(elen, point):
    """
    True when ``point`` lies strictly inside the axis-aligned ellipsoid
    whose semi-axis lengths are ``elen``.
    """
    return ((point[0]/elen[0])**2 +
            (point[1]/elen[1])**2 +
            (point[2]/elen[2])**2) < 1.0
def surrounding_atoms(ligand, protein, radius_rotamers):
    """
    Atoms within ``radius_rotamers`` of any ligand atom, excluding the
    ligand atoms themselves.
    """
    sel = chimera.selection.ItemizedSelection()
    sel.add(list(ligand.atoms))
    sel.merge(chimera.selection.REPLACE,
              chimera.specifier.zone(sel, 'atom', None,
                                     radius_rotamers,
                                     [ligand, protein]))
    ligand_atoms = ligand.atoms
    return [atom for atom in sel.atoms() if atom not in ligand_atoms]
| StarcoderdataPython |
# test file for PFCandidate validation
# performs a matching with the genParticles collection.
# creates a root file with histograms filled with PFCandidate data,
# present in the Candidate, and in the PFCandidate classes, for matched
# PFCandidates. Matching histograms (delta pt etc) are also available.
import FWCore.ParameterSet.Config as cms
# CMSSW process definition for the PFCandidate validation job.
process = cms.Process("TEST")
# Load DQM core services and the standard DQM environment.
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
# Input sample: fast-sim QCD with flat pt spectrum (15-3000 GeV).
process.load("RecoParticleFlow.Configuration.DBS_Samples.RelValQCD_FlatPt_15_3000_Fast_cfi")
# Process at most 5000 events.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(5000)
)
# PFCandidate validation sequence (genParticles matching + histograms).
process.load("Validation.RecoParticleFlow.pfCandidateManager_cff")
# DQM output naming.
process.dqmSaver.convention = 'Offline'
process.dqmSaver.workflow = '/A/B/C'
process.dqmEnv.subSystemFolder = 'ParticleFlow'
# Path: run the validation, then the DQM environment and saver.
process.p =cms.Path(
    process.pfCandidateManagerSequence +
    process.dqmEnv +
    process.dqmSaver
)
process.schedule = cms.Schedule(process.p)
# Reduce framework report chatter to every 50th event.
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 50
| StarcoderdataPython |
#! /usr/bin/env python3
""" command line utility to check virustotal for reports re a file or sha256 hash """
import argparse
import os
import sys
import pprint
import virustotal
def create_parse():
    """Build and return the CLI argument parser (one or more filenames)."""
    parser = argparse.ArgumentParser(
        description="virustotal file report retriever")
    parser.add_argument("filename", nargs="+",
                        help="file(s) to check for results")
    return parser
def start():
    """Entry point: parse arguments, read the API key, query each file."""
    args = create_parse().parse_args()
    # The VirusTotal API key must come from the environment.
    apikey = os.environ.get("VTAPI")
    if apikey is None:
        print("Must set VTAPI key enviroment variable.")
        sys.exit(1)
    for target in args.filename:
        pprint.pprint(virustotal.scan(target, apikey))
# Script entry point.
if __name__ == "__main__":
    start()
| StarcoderdataPython |
#coding=utf8
import config
from base64 import urlsafe_b64encode
class Service:
    """
    QBox Resource Storage (Key-Value) Service.

    Each account can create multiple tables; a table holds key-value
    pairs where the key is an arbitrary string and the value is a file.
    """
    def __init__(self, conn, tblName=''):
        # conn: authenticated connection object; tblName: target table name.
        self.Conn = conn
        self.TableName = tblName
    def PutAuth(self, expires=None):
        """
        Upload authorization: generate a short-lived URL allowing
        anonymous uploads, optionally valid for ``expires`` seconds.
        """
        if expires:
            url = config.IO_HOST + '/put-auth/'+str(expires)+"/"
        else:
            url = config.IO_HOST + '/put-auth/'
        return self.Conn.Call(url)
    def PutAuthWithCb(self, expire, cbUrl):
        """
        Upload authorization with callback: like PutAuth, but the service
        notifies ``cbUrl`` after the upload completes.
        """
        url = config.IO_HOST + '/put-auth/' + str(expire) + '/callback/' + urlsafe_b64encode(cbUrl)
        return self.Conn.Call(url)
    def Get(self, key, attName):
        """
        Download authorization: generate a short-lived anonymous download
        URL for ``key``, served with attachment name ``attName``.
        """
        entryURI = self.TableName + ':' + key
        url = config.RS_HOST + '/get/' + urlsafe_b64encode(entryURI) + '/attName/' + urlsafe_b64encode(attName)
        return self.Conn.Call(url)
    def GetIfNotModified(self, key, attName, base):
        """
        Like Get, but only if the server-side file has not been modified
        since version ``base`` (used for resumable downloads).
        """
        entryURI = self.TableName + ':' + key
        url = config.RS_HOST + '/get/' + urlsafe_b64encode(entryURI) + '/attName/' + urlsafe_b64encode(
            attName) + '/base/' + base
        return self.Conn.Call(url)
    def Stat(self, key):
        """
        Fetch the attributes (entry metadata) of resource ``key``.
        """
        entryURI = self.TableName + ':' + key
        url = config.RS_HOST + '/stat/' + urlsafe_b64encode(entryURI)
        return self.Conn.Call(url)
    def Publish(self, domain):
        """
        Publish this table's contents as static resources; each entry
        becomes reachable at http://domain/key.
        """
        url = config.RS_HOST + '/publish/' + urlsafe_b64encode(domain) + '/from/' + self.TableName
        return self.Conn.CallNoRet(url)
    def Unpublish(self, domain):
        """
        Cancel a previous Publish for ``domain``.
        """
        url = config.RS_HOST + '/unpublish/' + urlsafe_b64encode(domain)
        return self.Conn.CallNoRet(url)
    def Delete(self, key):
        """
        Delete resource ``key`` from the table.
        """
        entryURI = self.TableName + ':' + key
        url = config.RS_HOST + '/delete/' + urlsafe_b64encode(entryURI)
        return self.Conn.CallNoRet(url)
    def Drop(self):
        """
        Drop the entire table (use with caution!).
        """
        url = config.RS_HOST + '/drop/' + self.TableName
        return self.Conn.CallNoRet(url)
| StarcoderdataPython |
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] <NAME>, <NAME>, <NAME>, <NAME>
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class BasicBlock(nn.Module):
    """3x3-3x3 residual block (SE disabled); identity or projection shortcut."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, reduction=16):
        super(BasicBlock, self).__init__()
        # Two 3x3 convolutions; the first may downsample via `stride`.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when spatial size or channel width changes.
        if stride == 1 and in_planes == self.expansion * planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 bottleneck residual block with 4x channel expansion (SE disabled)."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, reduction=16):
        super(Bottleneck, self).__init__()
        # Reduce, transform (possibly downsampling), then expand channels.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        out_planes = self.expansion * planes
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        # Projection shortcut only when spatial size or channel width changes.
        if stride == 1 and in_planes == out_planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return F.relu(y + residual)
class SELayer(nn.Module):
    """Squeeze-and-Excitation: channel-wise reweighting via a two-layer MLP."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        # Squeeze each channel to a scalar, then excite through a
        # bottleneck MLP ending in a sigmoid gate.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        weights = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * weights.expand_as(x)
class ResNet(nn.Module):
    """ResNet trunk producing fixed-size embeddings.

    The input is treated as a single-channel 2-D map of shape
    (batch, feat_dim, time); after the four residual stages, mean+std
    statistics pooling over the last (time) axis feeds a linear layer that
    yields a (batch, embed_dim) embedding.
    """
    def __init__(self, block, num_blocks, m_channels=32, feat_dim=40, embed_dim=128, squeeze_excitation=False):
        """block: the BasicBlock or Bottleneck class; num_blocks: blocks per stage."""
        super(ResNet, self).__init__()
        self.in_planes = m_channels
        self.feat_dim = feat_dim
        self.embed_dim = embed_dim
        self.squeeze_excitation = squeeze_excitation
        if block is BasicBlock:
            self.conv1 = nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(m_channels)
            self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=1)
            self.layer2 = self._make_layer(block, m_channels*2, num_blocks[1], stride=2)
            # Track how each stride-2 stage shrinks the frequency axis so the
            # embedding layer's input size can be computed exactly.
            current_freq_dim = int((feat_dim - 1) / 2) + 1
            self.layer3 = self._make_layer(block, m_channels*4, num_blocks[2], stride=2)
            current_freq_dim = int((current_freq_dim - 1) / 2) + 1
            self.layer4 = self._make_layer(block, m_channels*8, num_blocks[3], stride=2)
            current_freq_dim = int((current_freq_dim - 1) / 2) + 1
            # Factor 2: statistics pooling concatenates mean and std.
            self.embedding = nn.Linear(m_channels * 8 * 2 * current_freq_dim, embed_dim)
        elif block is Bottleneck:
            self.conv1 = nn.Conv2d(1, m_channels, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(m_channels)
            self.layer1 = self._make_layer(block, m_channels, num_blocks[0], stride=1)
            self.layer2 = self._make_layer(block, m_channels*2, num_blocks[1], stride=2)
            self.layer3 = self._make_layer(block, m_channels*4, num_blocks[2], stride=2)
            self.layer4 = self._make_layer(block, m_channels*8, num_blocks[3], stride=2)
            self.embedding = nn.Linear(int(feat_dim/8) * m_channels * 16 * block.expansion, embed_dim)
        else:
            # Fixed: the original message printed type(block), which is always
            # <class 'type'> for a class argument, instead of the class itself.
            raise ValueError(f'Unexpected block class {block!r}.')
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack *num_blocks* blocks; only the first block may be strided."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        # Fixed: use out-of-place unsqueeze so the caller's tensor is not
        # mutated (the original used the in-place unsqueeze_).
        x = x.unsqueeze(1)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Statistics pooling over the time axis: per-channel mean and std
        # (the epsilon guards sqrt against negative floating-point round-off).
        pooling_mean = torch.mean(out, dim=-1)
        meansq = torch.mean(out * out, dim=-1)
        pooling_std = torch.sqrt(meansq - pooling_mean ** 2 + 1e-10)
        out = torch.cat((torch.flatten(pooling_mean, start_dim=1),
                         torch.flatten(pooling_std, start_dim=1)), 1)
        return self.embedding(out)
def ResNet101(feat_dim, embed_dim, squeeze_excitation=False):
    # ResNet-101 configuration: Bottleneck blocks with stage depths [3, 4, 23, 3].
    return ResNet(Bottleneck, [3, 4, 23, 3], feat_dim=feat_dim, embed_dim=embed_dim, squeeze_excitation=squeeze_excitation)
| StarcoderdataPython |
4932303 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@email: <EMAIL>
@time: 8/19/21 5:32 PM
"""
import time
import numpy as np
import transforms3d as t3d
import open3d as o3
# from helpers import find_correspondences, get_teaser_solver, Rt2T
from helpers import find_correspondences, Rt2T
from vis import draw_registration_result, draw_correspondence
# A registration attempt counts as a failure when its deviation from ground
# truth exceeds either bound (translation in scene units, orientation in
# degrees -- see rigid_error()).
TRANSLATION_FAILURE_TOLERANCE = 3.0
ORIENTATION_FAILURE_TOLERANCE = 1.0
# statistic_detail = dataset().data_info
# statistic_detail['fail'] = True
# statistic_detail['tf'] = None
# # statistic_detail['']
def format_statistic(statistic):
    """Normalise the accumulated totals in *statistic* into averages (in place).

    Times are averaged over all cases; errors only over successful cases
    (NaN when every case failed).
    """
    case_count = statistic['#case']
    statistic['time_global'] /= case_count
    statistic['time_local'] /= case_count
    success_count = case_count - statistic['#failure']
    if success_count == 0:
        statistic['error_t'] = np.nan
        statistic['error_o'] = np.nan
    else:
        statistic['error_t'] /= success_count
        statistic['error_o'] /= success_count
    # Sanity check: the failure counter must agree with the failure logs.
    assert statistic['#failure'] == len(statistic['index_failure']) == len(statistic['tf_failure']), \
        '{} {} {}'.format(statistic['#failure'], len(statistic['index_failure']), len(statistic['tf_failure']))
def rigid_error(t1, t2):
    """Return (orientation_error_deg, translation_error) between two rigid transforms.

    Both arguments are 4x4 homogeneous matrices; the orientation error is the
    norm of the Euler angles (degrees) of the relative rotation, the
    translation error is the norm of the component-wise difference.
    """
    rot_a, trans_a = t1[:3, :3], t1[:3, 3]
    rot_b, trans_b = t2[:3, :3], t2[:3, 3]
    relative_rot = np.matmul(rot_b, rot_a.T)
    euler_deg = np.rad2deg(np.asarray(t3d.euler.mat2euler(relative_rot)))
    translation_diff = np.abs(trans_b - trans_a)
    return np.linalg.norm(euler_deg), np.linalg.norm(translation_diff)
def record(reg, i, statistic, voxel_size_reg, correspondence_set, tf_gt, tf_final, time_global, time_local):
    """Accumulate one registration attempt into *statistic* (in place).

    reg: method name (used only for logging); i: case index;
    tf_gt / tf_final: ground-truth and estimated 4x4 transforms;
    time_global / time_local: coarse / refinement runtimes in seconds.
    Returns True when the case is classified as a failure.
    """
    # def record(reg, i, statistic, voxel_size_reg, correspondence_set, tf_gt, tf_final, time_global, time_local, pc_src_global, pc_tar_global, pc_src_local, pc_tar_local):
    failure = False
    correspondence_set = np.asarray(correspondence_set)
    error_o, error_t = rigid_error(tf_gt, tf_final)
    statistic['#case'] += 1
    statistic['time_global'] += time_global
    statistic['time_local'] += time_local
    # # if differ from gt to much, count it as failure, not going to error statics
    if error_o > ORIENTATION_FAILURE_TOLERANCE or error_t > TRANSLATION_FAILURE_TOLERANCE:
        # Failure: keep full diagnostic context (pose, transform,
        # correspondences, voxel size) so the case can be replayed later.
        # Failed cases are NOT added to the success-only error accumulators.
        failure = True
        statistic['#failure'] += 1
        statistic['index_failure'].append(i)
        # statistic['#points'].append((
        #     len(pc_src_global.points),
        #     len(pc_tar_global.points),
        #     len(pc_src_local.points),
        #     len(pc_tar_local.points),
        # ))
        statistic['correspondence_set_failure'].append(correspondence_set.tolist())
        statistic['pose_failure'].append(tf_gt.tolist())
        statistic['tf_failure'].append(tf_final.tolist())
        statistic['error_t_failure'].append(error_t)
        statistic['error_o_failure'].append(error_o)
        statistic['voxel_size_reg_failure'].append(voxel_size_reg)
    else:
        # Success: accumulate raw error sums; format_statistic() later divides
        # by the number of successful cases.
        statistic['error_t'] += error_t
        statistic['error_o'] += error_o
    # output -- progress log every 50 cases with running averages so far.
    if i % 50 == 0:
        print('iter', i, '\n Method', reg,
              '\n Time average',
              statistic['time_global'] / statistic['#case'] + statistic['time_local'] / statistic['#case'])
        if statistic['#case'] - statistic['#failure'] == 0:
            print(' No successful case')
        else:
            print(
                ' Translation rms', statistic['error_t'] / (statistic['#case'] - statistic['#failure']),
                '\n Orientation rms', statistic['error_o'] / (statistic['#case'] - statistic['#failure']),
                '\n Failure percent', statistic['#failure'] / statistic['#case'])
    return failure
def output(statistic):
    """Print a human-readable summary of an (already formatted) statistic dict."""
    total_time = statistic['time_global'] + statistic['time_local']
    failure_rate = statistic['#failure'] / statistic['#case']
    print('ransac_icp')
    print(f"Translation rms {statistic['error_t']}")
    print(f"Orientation rms {statistic['error_o']}")
    print(f"Time average {total_time}")
    print(f"Failure percent {failure_rate}")
def icp(pc_src, pc_tgt, voxel_sizes_local, transformation=np.eye(4)):
    """Chain point-to-plane ICP over several down-sampling resolutions.

    Parameters
    ----------
    pc_src, pc_tgt : open3d point clouds (source is aligned onto target).
    voxel_sizes_local : float or sequence of floats; each value defines one
        ICP stage and doubles as that stage's correspondence distance
        threshold.
    transformation : 4x4 initial alignment, typically from a global method.

    Returns
    -------
    (result, data_process_seconds, registration_seconds), where *result* is
    the open3d registration result of the final stage.
    """
    # Accept a bare scalar as a single-stage schedule.
    if not isinstance(voxel_sizes_local, (tuple, list, np.ndarray)):
        voxel_sizes_local = [voxel_sizes_local]
    # NOTE(review): stages run from the smallest voxel size to the largest
    # (fine-to-coarse); confirm this ordering is intentional.
    voxel_sizes_local = sorted(voxel_sizes_local)
    # Pre-compute the down-sampled clouds and their normals, timing data
    # preparation separately from registration.  (The original kept a
    # "last_down" variable that was never updated, so each stage always
    # down-samples the full-resolution input -- behaviour preserved here.)
    time_0 = time.time()
    pcs_src_local, pcs_tgt_local = [], []
    for voxel_size_local in voxel_sizes_local:
        pc_src_local = pc_src.voxel_down_sample(voxel_size_local)
        pc_tgt_local = pc_tgt.voxel_down_sample(voxel_size_local)
        radius_normal_local = voxel_size_local * 2
        pc_src_local.estimate_normals(o3.geometry.KDTreeSearchParamHybrid(radius=radius_normal_local, max_nn=30))
        pc_tgt_local.estimate_normals(o3.geometry.KDTreeSearchParamHybrid(radius=radius_normal_local, max_nn=30))
        pcs_src_local.append(pc_src_local)
        pcs_tgt_local.append(pc_tgt_local)
    time_data_process = time.time() - time_0
    # Run ICP per stage, feeding each stage's transform into the next.
    time_0 = time.time()
    tf_last = transformation
    result_local = None
    for voxel_size_local, pc_src_local, pc_tgt_local in zip(voxel_sizes_local, pcs_src_local, pcs_tgt_local):
        result_local = o3.pipelines.registration.registration_icp(
            source=pc_src_local, target=pc_tgt_local,
            max_correspondence_distance=voxel_size_local,
            init=tf_last,
            estimation_method=o3.pipelines.registration.TransformationEstimationPointToPlane(),
        )
        tf_last = result_local.transformation
    time_icp_reg = time.time() - time_0
    return result_local, time_data_process, time_icp_reg
def compute_fpfh(pc_src, pc_tgt, voxel_size):
    """Down-sample both clouds, estimate normals, and compute FPFH features.

    Returns (src_down, tgt_down, src_fpfh, tgt_fpfh).  Normal radius is
    2x the voxel size, feature radius 5x, matching common open3d practice.
    """
    normal_radius = voxel_size * 2
    feature_radius = voxel_size * 5
    downsampled = []
    for cloud in (pc_src, pc_tgt):
        down = cloud.voxel_down_sample(voxel_size)
        down.estimate_normals(o3.geometry.KDTreeSearchParamHybrid(radius=normal_radius, max_nn=30))
        downsampled.append(down)
    features = [
        o3.pipelines.registration.compute_fpfh_feature(
            down, o3.geometry.KDTreeSearchParamHybrid(radius=feature_radius, max_nn=100))
        for down in downsampled
    ]
    return downsampled[0], downsampled[1], features[0], features[1]
def ransac_icp_helper(pc_src, pc_tgt, voxel_size_global, voxel_sizes_local):
    """RANSAC global registration over FPFH matches, refined by multi-scale ICP.

    Returns (global_result, local_result, global_seconds, local_seconds).
    """
    # --- global stage: FPFH features + RANSAC over mutual feature matches ---
    tic = time.time()
    pc_src_global, pc_tar_global, pc_src_fpfh, pc_tgt_fpfh = compute_fpfh(pc_src, pc_tgt, voxel_size_global)
    distance_threshold = voxel_size_global
    time_ransac_data = time.time() - tic
    tic = time.time()
    result_global = o3.pipelines.registration.registration_ransac_based_on_feature_matching(
        source=pc_src_global, target=pc_tar_global,
        source_feature=pc_src_fpfh, target_feature=pc_tgt_fpfh,
        mutual_filter=True, max_correspondence_distance=distance_threshold,
        estimation_method=o3.pipelines.registration.TransformationEstimationPointToPoint(False),
        ransac_n=3,
        checkers=[o3.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
                  o3.pipelines.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold),
                  o3.pipelines.registration.CorrespondenceCheckerBasedOnNormal(0.52359878)],
        criteria=o3.pipelines.registration.RANSACConvergenceCriteria(10000000, 0.999)
    )
    time_ransac_reg = time.time() - tic
    # --- local stage: ICP refinement seeded with the RANSAC transform ---
    result_local, time_icp_data, time_icp_reg = icp(pc_src, pc_tgt, voxel_sizes_local, result_global.transformation)
    print('data process', time_ransac_data + time_icp_data)
    print('reg', time_ransac_reg + time_icp_reg)
    print()
    return result_global, result_local, time_ransac_data + time_ransac_reg, time_icp_data + time_icp_reg
def ransac_icp(dataloader, voxel_size_global, voxel_size_local, statistic=None, show_flag=False):
    """Evaluate RANSAC+ICP registration over every case in *dataloader*.

    Each case's model/artificial cloud pair is loaded from disk, registered
    with ransac_icp_helper(), optionally recorded into *statistic*, and
    optionally visualised.
    """
    for i in range(len(dataloader)):
        source = dataloader.get(i)
        pc_src = o3.io.read_point_cloud(source['pc_model'])
        pc_tgt = o3.io.read_point_cloud(source['pc_artificial'])
        pose_gt = np.asarray(source['pose'])
        # Fixed: the raw `source` dict was previously passed where the loaded
        # source point cloud was expected.
        result_global, result_local, time_global, time_local = ransac_icp_helper(
            pc_src=pc_src, pc_tgt=pc_tgt,
            voxel_size_global=voxel_size_global,
            voxel_sizes_local=voxel_size_local)
        # Fixed: 'trandformation' typo raised AttributeError at runtime.
        tf_global, tf_final = result_global.transformation, result_local.transformation
        # record statistics and log progress
        if statistic:
            record('ransac_icp', i, statistic, voxel_size_global, result_global.correspondence_set,
                   pose_gt, tf_final, time_global, time_local)
        # visualisation
        if show_flag:
            show_per_reg_iter(method='ransac_icp', source=source, pc_src_global=pc_src,
                              pc_tar_global=pc_tgt, correspondence_set_global=result_global.correspondence_set,
                              pc_src_local=pc_src, pc_tar_local=pc_tgt,
                              time_global=time_global, time_local=time_local,
                              tf_global=tf_global, tf_final=tf_final)
    # Fixed: format_statistic was called unconditionally and crashed when
    # statistic is None (the default).
    if statistic:
        format_statistic(statistic)
    return
def fgr_icp(dataloader, voxel_size_global, voxel_size_local, statistic, show_flag=False):
    """Evaluate Fast Global Registration + point-to-plane ICP over *dataloader*.

    Results are accumulated into *statistic* via record() and averaged by
    format_statistic() at the end.
    """
    for i in range(len(dataloader)):
        source = dataloader[i]
        pose_gt = source['pose']
        pc_src, pc_tar = source['pc_model'], source['pc_artificial']
        # --- preprocessing: down-sampling, normal estimation, FPFH features ---
        time_0 = time.time()
        pc_src_global = pc_src.voxel_down_sample(voxel_size_global)
        pc_tar_global = pc_tar.voxel_down_sample(voxel_size_global)
        pc_src_local = pc_src.voxel_down_sample(voxel_size_local)
        pc_tar_local = pc_tar.voxel_down_sample(voxel_size_local)
        radius_normal = voxel_size_global * 2
        pc_src_global.estimate_normals(o3.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
        pc_tar_global.estimate_normals(o3.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
        radius_normal = voxel_size_local * 2
        pc_src_local.estimate_normals(o3.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
        pc_tar_local.estimate_normals(o3.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
        radius_feature = voxel_size_global * 5
        pc_src_fpfh = o3.pipelines.registration.compute_fpfh_feature(
            pc_src_global, o3.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
        # Fixed: the target features were previously computed from the SOURCE
        # cloud, which made feature matching meaningless.
        pc_tar_fpfh = o3.pipelines.registration.compute_fpfh_feature(
            pc_tar_global, o3.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
        # --- global registration (FGR) ---
        distance_threshold = voxel_size_global * 1.5
        result_global = o3.pipelines.registration.registration_fast_based_on_feature_matching(
            source=pc_src_global, target=pc_tar_global,
            source_feature=pc_src_fpfh, target_feature=pc_tar_fpfh,
            option=o3.pipelines.registration.FastGlobalRegistrationOption(
                maximum_correspondence_distance=distance_threshold))
        time_global = time.time() - time_0
        # --- local refinement (point-to-plane ICP) ---
        time_0 = time.time()
        distance_threshold = voxel_size_local * 15
        result_local = o3.pipelines.registration.registration_icp(
            source=pc_src_local, target=pc_tar_local,
            max_correspondence_distance=distance_threshold,
            init=result_global.transformation,
            estimation_method=o3.pipelines.registration.TransformationEstimationPointToPlane())
        time_local = time.time() - time_0
        tf_final = result_local.transformation
        # Fixed: record() was called without the correspondence_set argument,
        # shifting every following positional argument by one.
        record('fgr_icp', i, statistic, voxel_size_global, result_global.correspondence_set,
               pose_gt, tf_final, time_global, time_local)
        if show_flag:
            # Fixed: both draw calls previously rendered the source twice
            # instead of source vs. target.
            draw_registration_result(source=pc_src_global, target=pc_tar_global)
            draw_registration_result(source=pc_src_global, target=pc_tar_global, transformation=tf_final)
    format_statistic(statistic)
def fpfh_teaser_icp_helper(pc_src, pc_tgt, voxel_size_global, voxel_sizes_local):
    """TEASER++ global registration on mutual FPFH matches, refined by ICP.

    Returns (tf_global, icp_result, global_seconds, local_seconds).
    """
    # Fixed: get_teaser_solver was dropped from the module-level helpers
    # import (see the commented import at the top of the file), so this
    # function raised NameError; import it locally instead.
    from helpers import get_teaser_solver
    # --- global stage: FPFH features ---
    time_0 = time.time()
    pc_src_global, pc_tar_global, pc_src_fpfh, pc_tgt_fpfh = compute_fpfh(pc_src, pc_tgt, voxel_size_global)
    time_teaser_data = time.time() - time_0
    # Build putative correspondences by mutual nearest-neighbour search in
    # FPFH feature space.
    time_0 = time.time()
    array_src_global = np.asarray(pc_src_global.points).T
    array_tar_global = np.asarray(pc_tar_global.points).T
    feature_src_fpfh = np.array(pc_src_fpfh.data).T
    feature_tar_fpfh = np.array(pc_tgt_fpfh.data).T
    src_corrs_mask, tar_corrs_mask = find_correspondences(feature_src_fpfh, feature_tar_fpfh, mutual_filter=True)
    array_src_global = array_src_global[:, src_corrs_mask]  # 3 x num_corrs
    array_tar_global = array_tar_global[:, tar_corrs_mask]  # 3 x num_corrs
    # Currently unused by callers; kept for parity with the RANSAC pipeline.
    correspondence_set = np.hstack([np.expand_dims(src_corrs_mask, axis=-1), np.expand_dims(tar_corrs_mask, axis=-1)])
    # Robust global registration with TEASER++.
    NOISE_BOUND = voxel_size_global * 0.1
    teaser_solver = get_teaser_solver(NOISE_BOUND)
    teaser_solver.solve(array_src_global, array_tar_global)
    solution = teaser_solver.getSolution()
    tf_global = Rt2T(solution.rotation, solution.translation)
    time_teaser_reg = time.time() - time_0
    # --- local stage: ICP refinement seeded with the TEASER transform ---
    result_local, time_icp_data, time_icp_reg = icp(pc_src, pc_tgt, voxel_sizes_local, tf_global)
    return tf_global, result_local, time_teaser_data + time_teaser_reg, time_icp_data + time_icp_reg
def fpfh_teaser_icp(dataloader, voxel_size_global, voxel_sizes_local, statistic, show_flag=False):
    """Evaluate FPFH + TEASER++ global registration with ICP refinement.

    Iterates over every case in *dataloader*, loading the model/artificial
    cloud pair from disk and registering via fpfh_teaser_icp_helper().
    """
    # VOXEL_SIZE_GLOBAL = 7
    # VOXEL_SIZE_LOCAL = 3
    # read source and target pc
    for i in range(len(dataloader)):
        # orientation_gt, translation_gt = np.asarray(t3d.euler.mat2euler(pose_gt[:3, :3])), pose_gt[:3, 3]
        source = dataloader.get(i)
        pc_src, pc_tgt = o3.io.read_point_cloud(source['pc_model']), o3.io.read_point_cloud(source['pc_artificial'])
        pose_gt = np.asarray(source['pose'])
        # source = dataloader[i]
        # pc_src, pc_tar = source['pc_model'], source['pc_artificial']
        # pose_gt = source['pose']
        tf_teaser, result_local, time_global, time_local = fpfh_teaser_icp_helper(pc_src, pc_tgt, voxel_size_global, voxel_sizes_local)
        tf_global, tf_final = tf_teaser, result_local.transformation
        # NOTE(review): the record() call below is commented out, so
        # statistic['#case'] stays 0 and format_statistic() will divide by
        # zero; re-enable recording (or guard the call) before relying on
        # the accumulated statistics.
        # record('ransac_icp', i, statistic, voxel_size_global, correspondence_set, pose_gt, tf_final, time_global,
        #        time_local)
        # vis
        # if show_flag:
        #     show_per_reg_iter(method='fpfh_teaser_icp', source=source, pc_src_global=pc_src,
        #                       pc_tar_global=pc_tgt, correspondence_set_global=correspondence_set,
        #                       pc_src_local=pc_src, pc_tar_local=pc_tgt,
        #                       time_global=time_global, time_local=time_local,
        #                       tf_global=tf_global, tf_final=tf_final)
    format_statistic(statistic)
    return
def show_per_reg_iter(method, source, pc_src_global, pc_tar_global, correspondence_set_global, pc_src_local, pc_tar_local, time_global, time_local, tf_global, tf_final):
    """Log one registration case and open the open3d visualisation windows.

    Shows the initial alignment, the feature correspondences, and the result
    of the global and local (ICP) stages.
    """
    # visualize the point clouds together with feature correspondences
    print(method, 'instance: ', source['instance'], ', original voxel size: ', source['voxel_size'], ', noise sigma: ',
          source['sigma'], ', plane factor: ', source['plane'])
    # NOTE(review): the two point-count lines below print pc_tar_* under the
    # '#source_points...' label (and vice versa), and the "local" line reuses
    # the global correspondence count -- confirm whether this is intended.
    print('#source_points_global:', len(np.asarray(pc_tar_global.points)), '#target_points_global', len(np.asarray(pc_src_global.points)), 'num_correspondence: ',
          len(correspondence_set_global))
    print('#source_points_local:', len(np.asarray(pc_tar_local.points)), '#target_points_local', len(np.asarray(pc_src_local.points)), 'num_correspondence: ',
          len(correspondence_set_global))
    print('time_global:', time_global, 'time_local', time_local)
    print('global error', rigid_error(source['pose'], tf_global))
    print('local error', rigid_error(source['pose'], tf_final))
    print()
    draw_registration_result(source=pc_src_global, target=pc_tar_global, window_name='init')
    draw_correspondence(pc_src_global, pc_tar_global, correspondence_set_global, window_name='correspondence')
    draw_registration_result(source=pc_src_global, target=pc_tar_global, transformation=tf_global,
                             window_name='global reg')
    draw_registration_result(source=pc_src_global, target=pc_tar_global, transformation=tf_final,
                             window_name='local reg')
# Multi-scale schedules: one entry per registration stage (coarse voxel sizes
# for the global matcher, fine ones for ICP refinement).
VOXEL_SIZE_GLOBAL = [5, 5]
VOXEL_SIZE_LOCAL = [1, 1]
# VOXEL_SIZE_GLOBAL = [10]
# VOXEL_SIZE_LOCAL = [3]
# Registration pipelines to evaluate; alternatives are kept commented out.
# registrations = [ransac_icp, fgr_icp, fpfh_teaser_icp]
registrations = [ransac_icp]
# registrations = [fpfh_teaser_icp]
| StarcoderdataPython |
6530217 | <reponame>stormpath/stormpath-django
from django.conf.urls import url
from django.conf import settings
from django_stormpath import views
# Core Stormpath ID-site flow: login/logout/register/forgot-password redirect
# to the hosted ID site; the handle-callback route processes the return trip.
urlpatterns = [
    url(r'^login/$', views.stormpath_id_site_login, name='stormpath_id_site_login'),
    url(r'^logout/$', views.stormpath_id_site_logout, name='stormpath_id_site_logout'),
    url(r'^register/$', views.stormpath_id_site_register, name='stormpath_id_site_register'),
    url(r'^forgot-password/$', views.stormpath_id_site_forgot_password, name='stormpath_id_site_forgot_password'),
    url(r'^handle-callback/(?P<provider>stormpath)', views.stormpath_callback, name='stormpath_id_site_callback'),
]
# Social-login routes are identical per provider except for the provider name,
# so build them in one loop instead of four copy-pasted blocks.  Each provider
# is switched on via its STORMPATH_ENABLE_<PROVIDER> setting; the generated
# regexes and route names match the previous hand-written ones exactly.
for _provider in ('google', 'facebook', 'github', 'linkedin'):
    if getattr(settings, 'STORMPATH_ENABLE_' + _provider.upper(), False):
        urlpatterns += [
            url(r'handle-callback/(?P<provider>' + _provider + ')', views.stormpath_callback,
                name='stormpath_%s_login_callback' % _provider),
            url(r'^social-login/(?P<provider>' + _provider + ')/', views.stormpath_social_login,
                name='stormpath_%s_social_login' % _provider),
        ]
# Django < 1.8 still requires wrapping the url list with patterns().
# Fixed: `django` was referenced here without ever being imported, which
# raised NameError at import time.
import django
if django.VERSION[:2] < (1, 8):
    from django.conf.urls import patterns
    urlpatterns = patterns('django_stormpath.views', *urlpatterns)
| StarcoderdataPython |
1908856 | <reponame>eprouty/the_wheel
import json
import logging
import os
import requests
import requests_cache
from datetime import datetime, timedelta
from flask import session, redirect, request, url_for
from flask.json import jsonify
from flask_login import login_required, current_user
from requests_oauthlib import OAuth2Session
from the_wheel.handlers.login import User
# OAuth2 client credentials for the Yahoo Fantasy API, supplied via the
# environment so secrets stay out of source control.  Any of these may be
# None when the corresponding variable is unset.
credentials = {
    "consumer_secret": os.environ.get('CONSUMER_SECRET'),
    "consumer_key": os.environ.get('CONSUMER_KEY'),
    "callback": os.environ.get('CALLBACK')
}
def fantasy_request(query, user_override=None):
    """GET *query* from the Yahoo Fantasy v2 API and return the parsed JSON.

    Uses the logged-in user's OAuth token unless *user_override* is given.
    """
    base_url = 'https://fantasysports.yahooapis.com/fantasy/v2/'
    user = user_override if user_override else current_user
    token = str(user.oauth_token)
    response = requests.get(
        base_url + query + '?format=json',
        headers={'Authorization': 'Bearer ' + token,
                 'Content-type': 'application/xml'})
    return response.json()
def get_scoreboard(league, user_override):
    """Fetch and flatten the current-week scoreboard for *league*.

    Returns (matchups, week_start, week_end), where *matchups* is a list of
    dicts holding both managers' nicknames and scores.  Returns
    (None, None, None) during the playoffs.  The nested indices below mirror
    the shape of Yahoo's fantasy v2 JSON -- assumed stable; verify against
    the API documentation if lookups start failing.
    """
    output = []
    r = fantasy_request('league/{}/scoreboard'.format(league), user_override=user_override)
    # check to see if we're in the playoffs and this should be ignored
    if r['fantasy_content']['league'][1]['scoreboard']['0']['matchups']['0']['matchup']['is_playoffs'] == '1':
        return (None, None, None)
    logging.info("%s Scoreboard: %s", league, r)
    matchups = r['fantasy_content']['league'][1]['scoreboard']['0']['matchups']
    for key in matchups:
        # The matchups mapping carries a 'count' entry alongside the numeric
        # matchup keys; skip it.
        if key != 'count':
            match = matchups[key]['matchup']
            week_end = match['week_end']
            week_start = match['week_start']
            # Manager nicknames live deep inside the team metadata array;
            # index 19 is assumed to be the 'managers' entry -- TODO confirm.
            team0_name = match['0']['teams']['0']['team'][0][19]['managers'][0]['manager']['nickname']
            team0_score = float(match['0']['teams']['0']['team'][1]['team_points']['total'])
            # team0_projected = float(match['0']['teams']['0']['team'][1]['team_projected_points']['total'])
            team1_name = match['0']['teams']['1']['team'][0][19]['managers'][0]['manager']['nickname']
            team1_score = float(match['0']['teams']['1']['team'][1]['team_points']['total'])
            # team1_projected = float(match['0']['teams']['1']['team'][1]['team_projected_points']['total'])
            output.append({'week_end': week_end,
                           'week_start': week_start,
                           'team0_name': team0_name,
                           'team1_name': team1_name,
                           'team0_score': team0_score,
                           'team1_score': team1_score})
            # 'team0_projected': team0_projected,
            # 'team1_projected': team1_projected})
    # NOTE(review): raises IndexError when no matchup rows were collected.
    return (output, output[0]['week_start'], output[0]['week_end'])
def setup_yahoo(app):
    """Register the Yahoo OAuth2 routes on *app* and enable response caching.

    Routes: /yahoo_auth (start OAuth), /callback (code exchange),
    /api_test (debug proxy), /refresh and /refresh_system_token (token
    refresh for the current and the shared 'system' user respectively).
    """
    requests_cache.install_cache(cache_name='yahoo_cache', expire_after=600)

    def _store_token(user, token):
        # Persist a freshly issued OAuth token (and its expiry) on *user*.
        # Previously this logic was duplicated in three route handlers.
        user.oauth_token = token['access_token']
        user.refresh_token = token['refresh_token']
        user.token_expiry = datetime.now() + timedelta(seconds=token['expires_in'])
        user.save()

    def _refresh(user):
        # Exchange *user*'s refresh token for a new access token and store it.
        r = requests.post("https://api.login.yahoo.com/oauth2/get_token",
                          auth=(credentials['consumer_key'], credentials['consumer_secret']),
                          data={'refresh_token': user.refresh_token,
                                'grant_type': 'refresh_token',
                                'redirect_uri': credentials['callback']})
        _store_token(user, r.json())

    @app.route('/yahoo_auth')
    @login_required
    def yahoo_auth():
        # Kick off the OAuth2 authorization-code flow on Yahoo's side.
        yahoo = OAuth2Session(credentials['consumer_key'], redirect_uri=credentials['callback'])
        authorization_url, state = yahoo.authorization_url("https://api.login.yahoo.com/oauth2/request_auth")
        # State is used to prevent CSRF, keep this for later.
        session['oauth_state'] = state
        return redirect(authorization_url)

    @app.route('/callback', methods=["GET"])
    @login_required
    def yahoo_callback():
        # Exchange the authorization code for an access/refresh token pair.
        code = request.args.get('code')
        r = requests.post("https://api.login.yahoo.com/oauth2/get_token",
                          auth=(credentials['consumer_key'], credentials['consumer_secret']),
                          data={'code': code,
                                'grant_type': 'authorization_code',
                                'redirect_uri': credentials['callback']})
        _store_token(current_user, r.json())
        return redirect(url_for('home'))

    @app.route('/api_test')
    @login_required
    def api_trial():
        # Debug endpoint: proxy an arbitrary fantasy API query (?q=...).
        return jsonify(fantasy_request(request.args.get('q')))

    @app.route('/refresh')
    @login_required
    def refresh_token():
        _refresh(current_user)
        return redirect(url_for('home'))

    @app.route('/refresh_system_token')
    def refresh_system_token():
        # Refresh the shared 'system' account used by background updates.
        _refresh(User.objects(name='system').first())
        return redirect(url_for('update'))
| StarcoderdataPython |
1922335 | <reponame>jasonsatran/duckrun<filename>tests/test_process_report.py
import unittest
from duckrun.process_report import ProcessReport
class MainTest(unittest.TestCase):
    """Unit tests for ProcessReport.format_second."""

    def test_it_formats_seconds(self):
        # Every duration should be rendered with exactly four decimal places.
        cases = [1.112e-03, 12.1232, 0, 100.12]
        expected = ['0.0011', '12.1232', '0.0000', '100.1200']
        actual = [ProcessReport.format_second(value) for value in cases]
        self.assertEqual(actual, expected)
| StarcoderdataPython |
#!/usr/bin/env python
# Smoke test: run eigsh() on a scipy coo_matrix through a Frovedis server.
import sys
from frovedis.exrpc.server import FrovedisServer
from frovedis.linalg import eigsh
from scipy.sparse import coo_matrix
desc = "Testing eigsh() for coo_matrix: "
# initializing the Frovedis server
# The server launch command (e.g. an mpirun invocation) must be supplied as
# the first CLI argument.
argvs = sys.argv
argc = len(argvs)
if argc < 2:
    print ('Please give frovedis_server calling command as the first argument \n'
           '(e.g. "mpirun -np 2 /opt/nec/frovedis/ve/bin/frovedis_server")')
    quit()
FrovedisServer.initialize(argvs[1])
# sample square symmetric sparse data (6x6)
mat = coo_matrix([[ 2., -1., 0., 0.,-1., 0.], [-1., 3.,-1., 0.,-1., 0.],
                  [ 0., -1., 2.,-1., 0., 0.], [ 0., 0.,-1., 3.,-1.,-1],
                  [-1., -1., 0.,-1., 3., 0.], [ 0., 0., 0.,-1., 0., 1.]])
try:
    # Request 3 eigenpairs of the symmetric matrix via the Frovedis server.
    eigen_vals, eigen_vecs = eigsh(mat, k = 3)
    print(desc, "Passed")
except:
    # NOTE(review): the bare except reports any failure (even server or
    # transport errors) simply as "Failed", hiding the actual cause.
    print(desc, "Failed")
FrovedisServer.shut_down()
1622073 | from gtts import gTTS
from playsound import playsound
# Custome Modules
from remove_file import remove_file
def text_to_speech(text):
    """Synthesize *text* to speech with gTTS and play it aloud.

    Any previously generated audio file is removed first so the new clip
    can be written to the same path.
    """
    audio_path = "audio.mp3"
    remove_file(audio_path)
    speech = gTTS(text=text, lang='en', slow=False)
    speech.save(audio_path)
    playsound(audio_path)
| StarcoderdataPython |
8128411 | <gh_stars>0
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
"""
Predicitve_Analytics.py
"""
def Accuracy(y_true, y_pred):
    """Fraction of predictions that match the ground-truth labels.

    :type y_true: numpy.ndarray
    :type y_pred: numpy.ndarray
    :rtype: float
    """
    correct = np.count_nonzero(y_true == y_pred)
    return correct / y_true.shape[0]
def Recall(y_true, y_pred):
    """Macro-averaged recall over all classes.

    :type y_true: numpy.ndarray
    :type y_pred: numpy.ndarray
    :rtype: float
    """
    cm = ConfusionMatrix(y_true=y_true, y_pred=y_pred)
    true_positives = cm.diagonal()
    # Column sums: number of ground-truth examples per class (in this
    # confusion matrix rows index predictions and columns index true labels).
    support = cm.sum(axis=0)
    # Avoid division by zero for classes with no ground-truth examples.
    support[support == 0] = 1
    return (true_positives / support).mean()
def Precision(y_true, y_pred):
    """Macro-averaged precision over all classes.

    :type y_true: numpy.ndarray
    :type y_pred: numpy.ndarray
    :rtype: float
    """
    cm = ConfusionMatrix(y_true=y_true, y_pred=y_pred)
    true_positives = cm.diagonal()
    # Row sums: number of predictions made for each class.
    predicted = cm.sum(axis=1)
    # Avoid division by zero for classes that were never predicted.
    predicted[predicted == 0] = 1
    return (true_positives / predicted).mean()
def WCSS(Clusters):
    """Within-cluster sum of squares.

    :Clusters: iterable of (centroid, records) pairs, where *centroid* is a
        1-D numpy array and *records* is a 2-D array of that cluster's points.
    :rtype: float
    """
    # Idiom fix: the original looped via enumerate with an unused loop
    # variable and re-indexed the list; unpack the pairs directly instead.
    total = 0.0
    for centroid, records in Clusters:
        total += np.sum(np.square(records - centroid))
    return float(total)
def ConfusionMatrix(y_true, y_pred):
    """Confusion matrix with rows indexed by prediction, columns by truth.

    Classes are assumed to be the integers 0..max; entry [p, t] counts the
    examples predicted as class p whose true class is t.

    :type y_true: numpy.ndarray
    :type y_pred: numpy.ndarray
    :rtype: numpy.ndarray
    """
    num_classes = max(y_true.max() + 1, y_pred.max() + 1)
    # Flatten each (pred, true) pair to a single index: pred * C + true.
    flat_indices = y_pred * num_classes + y_true
    matrix = np.zeros((num_classes, num_classes))
    flat_view = matrix.reshape(-1)
    values, counts = np.unique(flat_indices, return_counts=True)
    for value, count in zip(values, counts):
        flat_view[value] = count
    return matrix
def KNN(X_train, X_test, Y_train, K):
    """k-nearest-neighbour classification with Euclidean distance.

    :type X_train: numpy.ndarray  # (n_train, n_features)
    :type X_test: numpy.ndarray   # (n_test, n_features)
    :type Y_train: numpy.ndarray  # (n_train,) integer class labels
    :type K: int
    @author <NAME>
    :rtype: numpy.ndarray  # (n_test,) predicted labels
    """
    assert len(X_train.shape) == 2, "Excepted an n by m matrix for training data."
    assert X_train.shape[1] == X_test.shape[1], "Training and testing data had different feature counts."
    # Pairwise squared distances via the expansion
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, which avoids materialising an
    # (n_train, n_test, n_features) difference tensor.
    train_squared = (X_train ** 2).sum(axis=1).reshape(-1, 1)
    test_squared = (X_test ** 2).sum(axis=1).reshape(1, -1)
    train_test = np.matmul(X_train, X_test.T)
    squared_distances = train_squared + test_squared - 2 * train_test
    # Fixed: floating-point cancellation can make near-zero distances come
    # out slightly negative, and sqrt of a negative yields NaN, which then
    # corrupts the argsort below.  Clamp at zero before the square root.
    distance_matrix = np.maximum(squared_distances, 0.0) ** 0.5
    # Rows index training examples, columns index test examples; the K
    # entries with the smallest distance per column are the neighbours.
    nearest_neighbors = np.argsort(distance_matrix, axis=0)[:K, :]
    neighbor_labels = Y_train.flatten()[nearest_neighbors]  # (K, n_test)
    # Majority vote per test example over the label range [min, max].
    min_label = neighbor_labels.min()
    num_labels = neighbor_labels.max() - min_label + 1
    votes = np.zeros((neighbor_labels.shape[1], num_labels), dtype=int)
    for class_label in range(min_label, min_label + num_labels):
        votes[:, class_label - min_label] = (neighbor_labels == class_label).sum(axis=0)
    # argmax breaks ties in favour of the smallest label, as before; shift
    # back so the returned labels align with the originals.
    return votes.argmax(axis=-1) + min_label
def RandomForest(X_train, Y_train, X_test):
    """Train a small hand-rolled random forest and classify X_test.

    The forest consists of 4 decision trees, each grown on a bootstrap
    sample of 800 rows and restricted to sqrt(n_features) randomly chosen
    candidate features per split. Final predictions are the per-row
    majority vote over the trees (bagging).

    :type X_train: numpy.ndarray
    :type X_test: numpy.ndarray
    :type Y_train: numpy.ndarray
    :rtype: numpy.ndarray  (integer-encoded class label per test row)
    """
    X_train = np.array(X_train)
    y_train = np.array(Y_train)
    X_test = np.array(X_test)
    # Encode the raw labels as consecutive integers 0..n_classes-1.
    cls = np.unique(y_train)
    y_mod_train = np.zeros(y_train.shape[0])
    cls_mod = dict(zip(cls, list(range(len(cls)))))
    for i in cls:
        idx = np.where(y_train == i)
        y_mod_train[idx] = cls_mod[i]
    # Gini impurity of a node; `records` carries the encoded label in its last column.
    def gini_impurity(records):
        target_var, target_counts = np.unique(records[:, -1], return_counts=True)
        probability = target_counts / target_counts.sum()
        return 1 - sum(np.square(probability))
    # Split quality is scored by the weighted child Gini impurities only; the
    # parent impurity is ignored because it is constant across candidate
    # splits of the same node, so it does not change the argmin.
    # Returns the best (feature index, threshold value) pair.
    def gain(records, splits):
        gini = np.inf
        for idx in splits:
            for val in splits[idx]:
                records_left, records_right = dataSplitting_along_a_Feature(records, idx, val)
                prob_records_left = records_left.shape[0] / ((records_right.shape[0] + records_left.shape[0]))
                prob_records_right = records_right.shape[0] / ((records_right.shape[0] + records_left.shape[0]))
                delta = (prob_records_left * gini_impurity(records_left)) + (
                    prob_records_right * gini_impurity(records_right))
                # NOTE(review): if `splits` is empty (or every values array is
                # empty), idx_best/val_best are never bound and the return
                # below raises UnboundLocalError — confirm callers always
                # supply at least one candidate split.
                if delta <= gini:
                    gini = delta
                    idx_best, val_best = idx, val
        return idx_best, val_best
    # True when the node is pure (only one class remains).
    def isMixing(records):
        if np.unique(records[:, -1]).shape[0] == 1:
            return True
        else:
            return False
    # Majority class of the records in a node (leaf label).
    def getClass(records):
        target_var, target_counts = np.unique(records[:, -1], return_counts=True)
        return target_var[target_counts.argmax()]
    # Candidate thresholds for a random subset of `numOfFeatures` features:
    # features are continuous, so thresholds are midpoints of adjacent sorted
    # values (the trailing midpoint-with-0 artifact is dropped by [:-1]).
    # NOTE(review): implicitly returns None when numOfFeatures exceeds the
    # number of available features, which would crash `gain` — confirm
    # callers always pass numOfFeatures <= n_features.
    def getSplits(records, numOfFeatures):
        if (records.shape[1] - 1) >= numOfFeatures:
            featureIndexes = np.random.randint(0, records.shape[1] - 1, numOfFeatures)
            splits = {}
            for idx in featureIndexes:
                sorted_records = np.sort(records[:, idx])
                splits[idx] = ((sorted_records + np.append(sorted_records[1:], 0)) / 2)[:-1]
            return splits
    # Partition records on feature `feature_idx` at threshold `feature_val`;
    # "left" holds rows strictly above the threshold, "right" the rest.
    def dataSplitting_along_a_Feature(records, feature_idx, feature_val):
        records_left = records[records[:, feature_idx] > feature_val]
        records_right = records[records[:, feature_idx] <= feature_val]
        return records_left, records_right
    # Decision Tree (dt): recursively grow a tree represented as nested dicts
    # {"<feature> <= <value>": [true_subtree, false_subtree]}; leaves are
    # plain class labels.
    def dt(dataset, count=0, min_samples=2, max_depth=5, numOfFeatures=None):
        if (isMixing(dataset)) or (dataset.shape[0] < min_samples) or (count == max_depth):
            target_var = getClass(dataset)
            return target_var
        else:
            count += 1
            splits = getSplits(dataset, numOfFeatures)
            idx_best, val_best = gain(dataset, splits)
            dataset_left, dataset_right = dataSplitting_along_a_Feature(dataset, idx_best, val_best)
            # Degenerate split (one side empty): emit a leaf instead.
            if (dataset_left.shape[0] == 0) or (dataset_right.shape[0] == 0):
                target_var = getClass(dataset)
                return target_var
            # Build the question this node asks.
            condition = "{} <= {}".format(idx_best, val_best)
            # New sub-tree keyed by that question.
            sub_tree = {condition: []}
            # Recurse into the "true" (<=) and "false" (>) partitions.
            trueAnswer = dt(dataset_right, count, min_samples, max_depth, numOfFeatures)
            falseAnswer = dt(dataset_left, count, min_samples, max_depth, numOfFeatures)
            if trueAnswer == falseAnswer:
                sub_tree = trueAnswer
            else:
                sub_tree[condition].append(trueAnswer)
                sub_tree[condition].append(falseAnswer)
            return sub_tree
    # Random Forest (rf): grow `numOftrees` trees, each on a bootstrap sample.
    def rf(training_set, numOftrees, rswr, numOfFeatures, max_depth):
        # Trees in forest (tif)
        tif = []
        for i in range(numOftrees):
            # Random Sampling With Replacement (rswr), i.e. a bootstrap sample.
            rswr_indexes = np.random.randint(low=0, high=training_set.shape[0], size=rswr)
            tree = dt(training_set[rswr_indexes], max_depth=max_depth, numOfFeatures=numOfFeatures)
            tif.append(tree)
        return tif
    # Bagging: run every tree over the test set and majority-vote per row.
    def bagging(test_set, rf_obj):
        predictions = {}
        for i in range(len(rf_obj)):
            feature_name = "tree_{}".format(i)
            test_set = pd.DataFrame(test_set)
            # Walk one instance down a (sub)tree until a leaf label is reached.
            # NOTE(review): assumes the tree is a dict of conditions; a tree
            # that collapsed to a single leaf label would fail on .keys() —
            # confirm that case cannot occur with these hyperparameters.
            def compute(instance, tree):
                condition = list(tree.keys())[0]
                feature_name, comparison_operator, value = condition.split(" ")
                if comparison_operator == "<=":
                    if instance[int(feature_name)] <= float(value):
                        answer = tree[condition][0]
                    else:
                        answer = tree[condition][1]
                if not isinstance(answer, dict):
                    return answer
                else:
                    leftover_tree = answer
                    return compute(instance, leftover_tree)
            # The outer `feature_name` ("tree_i") is the column key; the inner
            # one in compute() is a separate local.
            predictions[feature_name] = test_set.apply(compute, args=(rf_obj[i],), axis=1)
        # Mode across tree columns = majority vote per test row.
        rf_predictions = pd.DataFrame(predictions).mode(axis=1)[0]
        return rf_predictions
    rf_obj = rf(np.concatenate([X_train, y_mod_train.reshape(-1, 1)], axis=1), numOftrees=4, rswr=800,
                numOfFeatures=int(np.sqrt(X_train.shape[1])), max_depth=8)
    predictions = bagging(X_test, rf_obj)
    return predictions.values
def PCA(X_train, N):
    """Project X_train onto its top-N principal components.

    Centers the data column-wise, takes the SVD of the covariance matrix,
    and projects the centered data onto the N directions with the largest
    singular values.

    :type X_train: numpy.ndarray
    :type N: int
    :rtype: numpy.ndarray of shape (n_samples, N)
    """
    centered = X_train - X_train.mean(axis=0, keepdims=True)
    covariance = np.cov(centered.T)
    # V from the decomposition is not needed, so it is discarded.
    basis, spectrum, _ = np.linalg.svd(covariance, full_matrices=True)
    # The singular values/vectors of the covariance matrix stand in for
    # eigenvalues/eigenvectors here; sort descending and keep the top N.
    leading = np.argsort(spectrum)[::-1][:N]
    return np.matmul(centered, basis[:, leading])
def Kmeans(X_train, N):
    """Cluster X_train into N groups with a basic Lloyd's-algorithm K-means.

    Centroids are initialised from N rows drawn at random and iterated
    until the centroid array is exactly unchanged between two passes.

    :type X_train: numpy.ndarray
    :type N: int
    :rtype: List[numpy.ndarray] — one [centroid, member_rows] pair per cluster
    """
    X_train = np.array(X_train)
    # NOTE(review): np.random.randint can draw duplicate row indices, giving
    # coincident initial centroids — confirm this is acceptable.
    centroids = np.random.randint(0, X_train.shape[0], N)
    c_old = X_train[centroids].astype(float)
    arr_ = np.zeros(X_train.shape[0])
    # One column of squared distances per centroid, appended after a dummy
    # all-zero first column (sliced away below).
    for i in c_old:
        arr_ = np.c_[arr_, np.sum(np.square(X_train - i), axis=1)]
    idx_to_c = np.argmin(arr_[:, 1:], axis=1)  # Ignoring first column because it contains zeros
    data = np.c_[X_train, idx_to_c]
    # Group by on labels using numpy: new centroid = mean of assigned rows.
    c_new = np.zeros((N, X_train.shape[1]))
    for j, i in enumerate(np.unique(data[:, -1])):
        tmp = data[np.where(data[:, -1] == i)]
        c_new[j] = np.mean(tmp[:, :-1], axis=0)
    # NOTE(review): convergence relies on exact floating-point equality of
    # successive centroid arrays, and an empty cluster leaves its centroid
    # row all-zero (np.unique skips it) — verify termination on real data.
    while True:
        if c_new.tolist() == c_old.tolist():
            # Converged: package [centroid, member rows] per cluster index.
            cs = []
            for j, i in enumerate(c_new):
                cs.append(np.array([i, data[:, :-1][np.where(data[:, -1] == j)[0]]]))
            return cs
        else:
            # Re-assign every row to its nearest current centroid.
            arr_ = np.zeros(X_train.shape[0])
            for i in c_new:
                arr_ = np.c_[arr_, np.sum(np.square(X_train - i), axis=1)]
            idx_to_c = np.argmin(arr_[:, 1:], axis=1)
            data = np.c_[X_train, idx_to_c]
            c_old = c_new
            # Group by on labels using numpy (last column is the label).
            c_new = np.zeros((N, X_train.shape[1]))  # wherever we see -1 that means we are ignoring label
            for j, i in enumerate(np.unique(data[:, -1])):
                tmp = data[np.where(data[:, -1] == i)]
                c_new[j] = np.mean(tmp[:, :-1], axis=0)
def SklearnSupervisedLearning(X_train, Y_train, X_test):
    """Fit four sklearn classifiers and predict X_test with each.

    The classifiers are run with default hyperparameters, in the fixed
    order SVM, logistic regression, decision tree, KNN.

    :type X_train: numpy.ndarray
    :type X_test: numpy.ndarray
    :type Y_train: numpy.ndarray
    :rtype: List[numpy.ndarray] — one prediction array per classifier
    """
    # TODO: Must choose the hyperparameters that produce the best results based on grid search.
    models = [
        SVC(),
        LogisticRegression(),
        DecisionTreeClassifier(),
        KNeighborsClassifier(),
    ]
    predictions = []
    for model in models:
        fitted = model.fit(X=X_train, y=Y_train)
        predictions.append(fitted.predict(X_test))
    return predictions
def SklearnVotingClassifier(X_train, Y_train, X_test):
    """Majority-vote ensemble over the four sklearn classifiers.

    Runs SklearnSupervisedLearning, normalizes each prediction array to a
    1-D vector of class labels, and returns the per-sample majority vote.

    :type X_train: numpy.ndarray
    :type X_test: numpy.ndarray
    :type Y_train: numpy.ndarray
    :rtype: numpy.ndarray
    """
    raw_predictions = SklearnSupervisedLearning(X_train=X_train, Y_train=Y_train, X_test=X_test)
    label_vectors = []
    for prediction in raw_predictions:
        # Drop a trailing singleton axis first, then collapse any remaining
        # per-class axis to hard labels via argmax.
        if prediction.ndim > 1:
            prediction = prediction.squeeze(axis=1)
        if prediction.ndim > 1:
            prediction = prediction.argmax(axis=1)
        label_vectors.append(prediction)
    stacked = np.array(label_vectors)
    lowest, highest = stacked.min(), stacked.max()
    # Rows: test samples; columns: classes (shifted so `lowest` maps to 0).
    votes = np.zeros((stacked.shape[1], highest - lowest + 1), dtype=int)
    for class_label in range(lowest, highest + 1):
        for label_vector in label_vectors:
            # One vote per classifier that predicted this class for a sample.
            votes[:, class_label - lowest] = votes[:, class_label - lowest] + (
                label_vector == class_label
            ).astype(int)
    # Winner per sample, shifted back into the original label range.
    return votes.argmax(axis=-1) + lowest
"""
Create your own custom functions for Matplotlib visualization of hyperparameter search.
Make sure that plots are labeled and proper legends are used
"""
def ekstrum_plot_results(title, experiment_results, result_metric_name='Accuracy'):
    """Plot grid-search results against each hyperparameter and save PNGs.

    Numeric hyperparameters get a scatter plot; categorical ones get a box
    plot with one box per distinct value. One figure is saved per
    hyperparameter, named ``plot_<title>_<hyperparameter>.png``.

    :param title: figure title (also embedded in the output filename)
    :param experiment_results: list of dicts, each mapping hyperparameter
        names to values plus a 'results' metric entry
    :param result_metric_name: y-axis label for the metric
    """
    hyperparam_names = set()
    for record in experiment_results:
        hyperparam_names.update(record.keys())
    # 'results' is the dependent variable, not a hyperparameter.
    hyperparam_names.discard('results')
    for name in hyperparam_names:
        xs = [record[name] for record in experiment_results]
        ys = [record['results'] for record in experiment_results]
        if all(isinstance(x, (float, int)) for x in xs):
            # All values numeric: a simple scatter plot suffices.
            plt.clf()
            plt.scatter(xs, ys)
            plt.xlabel(name)
            plt.ylabel(result_metric_name)
            plt.title(title)
            plt.savefig('plot_{}_{}.png'.format(title, name))
        else:
            # Categorical: one box of metric values per distinct setting.
            categories = list(set(xs))
            grouped = [[] for _ in categories]
            for x, y in zip(xs, ys):
                grouped[categories.index(x)].append(y)
            plt.clf()
            plt.boxplot(grouped)
            plt.xlabel(name)
            plt.xticks(1 + np.arange(len(categories)), categories)
            plt.ylabel(result_metric_name)
            plt.title(title)
            plt.savefig('plot_{}_{}.png'.format(title, name))
def ekstrum_plot_confusion_matrix(confusion_matrix, filename='confusion_matrix.png', model_name='our Model'):
    """Render a confusion matrix as a grid of colored subplots and save it.

    Diagonal cells are shaded green in proportion to per-class accuracy;
    off-diagonal cells are shaded red in proportion to how often that
    misclassification occurred. Rows are predicted classes, columns are
    true classes (normalisation is by column sums).

    :param confusion_matrix: square 2-D array of counts
    :param filename: path the figure is saved to
    :param model_name: name embedded in the figure title
    :return: the matplotlib Figure
    """
    confusion_matrix = confusion_matrix.astype(int)
    nrows = confusion_matrix.shape[0]
    ncols = confusion_matrix.shape[1]
    # Per-column (true class) totals, used to normalize cell intensities.
    true_values = confusion_matrix.sum(axis=0)
    # The below is done to prevent divide by zero errors.
    true_values[true_values == 0] = 1
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(nrows, ncols), sharex=True, sharey=True)
    axes[nrows // 2, 0].set_ylabel('Predicted Classes')
    axes[-1, ncols // 2].set_xlabel('True Classes')
    fig.suptitle('Confusion Matrix for {}'.format(model_name))
    for row in range(nrows):
        for column in range(ncols):
            axes[row, column].set_xticklabels([])
            axes[row, column].set_yticklabels([])
            # Color to hex is done via a nice little hack from https://stackoverflow.com/a/3380739.
            if row == column:
                # These should be "green" based on how good they were. White if completely wrong.
                accuracy_at_class = confusion_matrix[row, column] / true_values[column]
                rgb = (255 - int(255 * accuracy_at_class), 255, 255 - int(255 * accuracy_at_class))
                color = '#%02x%02x%02x' % rgb
            else:
                # These should be "red" based on how bad they were. White if completely correct.
                inaccuracy_at_class = confusion_matrix[row, column] / true_values[column]
                rgb = (255, 255 - int(255 * inaccuracy_at_class), 255 - int(255 * inaccuracy_at_class))
                color = '#%02x%02x%02x' % rgb
            axes[row, column].set_facecolor(color)
            # Print the raw count centered in each cell.
            axes[row, column].text(
                0.5, 0.5, '{}'.format(confusion_matrix[row, column]),
                horizontalalignment='center', verticalalignment='center', transform=axes[row, column].transAxes
            )
    plt.show()
    plt.savefig(filename)
    return fig
def ekstrum_grid_search(
        algorithm,
        algorithm_kwargs,
        algorithm_predict_kwargs,
        y_true,
        hyperparameter_bounds,
        continuous_partitions=10
):
    """
    Performs grid search on the specified algorithm.

    Hyperparameter bounds are provided via a mapping of the form below. Lists represent categorical hyperparameters,
    while tuples represent continuous hyperparameters. Continuous hyperparameters are segmented into
    <continuous_partitions> different values and then treated like categorical hyperparameters.

    An example for SVC:
    {
        "kernel_function": ['rbf', 'linear', 'poly', 'sigmoid'],
        "C": (0.1, 10.0),
        "degree": [2, 3, 4, 5]
    }

    The caller's ``hyperparameter_bounds`` mapping is left unmodified.

    :param algorithm: The algorithm to use
    :param algorithm_kwargs: Keyword arguments for the algorithm being analyzed. Used during call to `fit`.
    :param algorithm_predict_kwargs: Keyword arguments for algorithm when predicting.
    :param y_true: True values for prediction, to be used by the metric function.
    :param hyperparameter_bounds: The hyperparameter bounds for the algorithm, in the format described above.
    :param continuous_partitions: How many different combinations for continuous features should be tried.
    :return: A List of mappings specifying configurations and their performance.
    """
    # Normalize into a local shallow copy so the caller's dict is not mutated
    # when continuous (tuple) bounds are expanded into value lists below.
    # (Previously the expansion wrote back into the caller's mapping.)
    bounds = dict(hyperparameter_bounds)
    for hyperparameter, values in bounds.items():
        if isinstance(values, tuple):
            assert len(values) == 2, "Continuous tuples should be specified as (low, high)"
            try:
                float(values[0]), float(values[1])
            except ValueError:
                raise ValueError("Continuous tuples should be specified via numerical bounds.")
            # Evenly spaced values from low to high inclusive; the `or 1`
            # guards against division by zero when only one partition is asked for.
            interval_step_size = (values[1] - values[0]) / ((continuous_partitions - 1) or 1)
            values = [values[0] + interval_step_size * i for i in range(continuous_partitions)]
            bounds[hyperparameter] = values
        assert isinstance(bounds[hyperparameter], list)

    def run_from(hparams, selected):
        # Copy so sibling branches of the recursion don't see our bindings.
        selection_copy = {k: v for k, v in selected.items()}
        if hparams:
            # Fix one hyperparameter, recurse over the remaining ones for
            # every candidate value; the recursion covers the full grid.
            for hparam, values_ in hparams.items():
                smaller_hparams = {k: v for k, v in hparams.items() if k != hparam}
                results = []
                for value in values_:
                    selection_copy[hparam] = value
                    results = results + run_from(smaller_hparams, selection_copy)
                return results
            raise ValueError("Didn't have enough arguments in a hyperparameter for grid search.")
        # Base case: every hyperparameter is fixed — fit, predict and score.
        model = algorithm(**selected)
        model.fit(**algorithm_kwargs)
        predictions = model.predict(**algorithm_predict_kwargs)
        # We use the accuracy metric, for now.
        selection_copy['results'] = Accuracy(y_true=y_true, y_pred=predictions)
        return [selection_copy]
    return run_from(bounds, {})
def ekstrum_grid_search_svc(x_train, y_train, x_test, y_test):
    """Grid-search an SVC over C/kernel/gamma and plot the results."""
    from sklearn.svm import SVC
    search_space = {
        'C': (0.1, 10.0),
        'kernel': ['linear', 'poly', 'rbf'],
        'gamma': ['auto'],
    }
    results = ekstrum_grid_search(
        algorithm=SVC,
        algorithm_kwargs={'X': x_train, 'y': y_train},
        algorithm_predict_kwargs={'X': x_test},
        y_true=y_test,
        hyperparameter_bounds=search_space,
        continuous_partitions=31,
    )
    ekstrum_plot_results(title='Support Vector Machine Performance', experiment_results=results)
def ekstrum_grid_search_tree(x_train, y_train, x_test, y_test):
    """Grid-search a decision tree over criterion/depth/features and plot."""
    from sklearn.tree import DecisionTreeClassifier
    search_space = {
        'criterion': ['gini', 'entropy'],
        'max_depth': [None, 1, 10],
        'max_features': ['auto', 'sqrt', 'log2', 0.1, 0.5, 1.0],
    }
    results = ekstrum_grid_search(
        algorithm=DecisionTreeClassifier,
        algorithm_kwargs={'X': x_train, 'y': y_train},
        algorithm_predict_kwargs={'X': x_test},
        y_true=y_test,
        hyperparameter_bounds=search_space,
        continuous_partitions=31,
    )
    ekstrum_plot_results(title='Decision Tree Performance', experiment_results=results)
def ekstrum_grid_search_knn(x_train, y_train, x_test, y_test):
    """Grid-search KNN over neighbor count/weighting/metric power and plot."""
    from sklearn.neighbors import KNeighborsClassifier
    search_space = {
        'n_neighbors': [1, 2, 4, 8, 16, 32],
        'weights': ['distance', 'uniform'],
        'p': [1, 2, 4],
    }
    results = ekstrum_grid_search(
        algorithm=KNeighborsClassifier,
        algorithm_kwargs={'X': x_train, 'y': y_train},
        algorithm_predict_kwargs={'X': x_test},
        y_true=y_test,
        hyperparameter_bounds=search_space,
        continuous_partitions=31,
    )
    ekstrum_plot_results(title='KNN Performance', experiment_results=results)
| StarcoderdataPython |
4960099 | <gh_stars>10-100
# Copyright 2021 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Tuple
import tensorflow as tf
from tensorflow.python.keras import layers
from tensorflow.python.keras.engine.keras_tensor import KerasTensor
def ResNet9(input_size: Tuple[int, int, int] = (32, 32, 3), classes: int = 10) -> tf.keras.Model:
    """A small 9-layer ResNet Tensorflow model for cifar10 image classification.

    The model architecture is from https://github.com/davidcpage/cifar10-fast

    Args:
        input_size: The size of the input tensor (height, width, channels).
        classes: The number of outputs the model should generate.

    Raises:
        ValueError: Length of `input_size` is not 3.
        ValueError: `input_size`[0] or `input_size`[1] is smaller than 16.

    Returns:
        A TensorFlow ResNet9 model.
    """
    _check_input_size(input_size)
    # prep layers
    inp = layers.Input(shape=input_size)
    x = layers.Conv2D(64, 3, padding='same')(inp)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer1: conv + pool, followed by a residual block
    x = layers.Conv2D(128, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 128)])
    # layer2: conv + pool only (no residual block in this stage)
    x = layers.Conv2D(256, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer3: conv + pool, followed by a residual block
    x = layers.Conv2D(512, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 512)])
    # layers4: global pooling head and classifier
    x = layers.GlobalMaxPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(classes)(x)
    # NOTE(review): the output activation is pinned to float32, presumably
    # for mixed-precision stability — confirm against the training config.
    x = layers.Activation('softmax', dtype='float32')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)
    return model
def residual(x: KerasTensor, num_channel: int) -> KerasTensor:
    """Apply two Conv2D(3x3) -> BatchNorm -> LeakyReLU stages to ``x``.

    Args:
        x: Input Keras tensor.
        num_channel: Number of convolution filters used by both stages.

    Return:
        Output Keras tensor.
    """
    out = x
    # Two identical stages; fresh layer instances are created per stage.
    for _ in range(2):
        out = layers.Conv2D(num_channel, 3, padding='same')(out)
        out = layers.BatchNormalization(momentum=0.8)(out)
        out = layers.LeakyReLU(alpha=0.1)(out)
    return out
def _check_input_size(input_size):
    """Validate a model input size of the form (height, width, channels).

    Args:
        input_size: Sequence of (height, width, channels).

    Raises:
        ValueError: If `input_size` does not have exactly 3 entries, or if
            its height or width is smaller than 16.
    """
    if len(input_size) != 3:
        # Fixed message: this function unpacks (height, width, channels),
        # not (channel, height, width) as the old message claimed.
        raise ValueError("Length of `input_size` is not 3 (height, width, channels)")
    height, width, _ = input_size
    if height < 16 or width < 16:
        raise ValueError("Both height and width of input_size need to be at least 16")
| StarcoderdataPython |
60631 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Routines related to the canonical Chandra ACA dark current model.
The model is based on smoothed twice-broken power-law fits of
dark current histograms from Jan-2007 though Aug-2017. This analysis
was done entirely with dark current maps scaled to -14 C.
See: /proj/sot/ska/analysis/dark_current_model/dark_model.ipynb
and other files in that directory.
Alternatively:
http://nbviewer.ipython.org/url/asc.harvard.edu/mta/ASPECT/analysis/dark_current_model/dark_model.ipynb
"""
import numpy as np
import warnings
from Chandra.Time import DateTime
# Define a common fixed binning of dark current distribution
from . import darkbins
# Global cache (e.g. for the initial dark current in synthetic_dark_image)
CACHE = {}
# Some constants and globals. Done this way to support sherpa fitting.
# Needs to be re-worked to be nicer.
# Fixed gaussian for smoothing the broken power law (kernel in log space)
dx = 0.1
sigma = 0.30  # Gaussian sigma in log space
xg = np.arange(-2.5 * sigma, 2.5 * sigma, dx, dtype=float)
yg = np.exp(-0.5 * (xg / sigma) ** 2)
yg /= np.sum(yg)  # normalize the kernel so convolution preserves totals
NPIX = 1024 ** 2  # total number of ACA CCD pixels
# Fixed binning shared by the histogram computations in this module
xbins = darkbins.bins
xall = darkbins.bin_centers
imin = 0
imax = len(xall)
# Warm threshold used in fitting acq prob model. This constant is
# not used in any configured code, but leave here just in case.
warm_threshold = 100.
# Increase in dark current per 4 degC increase in T_ccd
DARK_SCALE_4C = 1.0 / 0.70
def dark_temp_scale(t_ccd, t_ccd_ref=-19.0, scale_4c=None):
    """Multiplicative factor converting dark current at ``t_ccd`` to ``t_ccd_ref``.

    The factor is ``scale_4c ** ((t_ccd_ref - t_ccd) / 4.0)``, i.e. dark
    current is assumed to grow by a factor of ``scale_4c`` for every 4 degC
    increase in CCD temperature::

        >>> dark_temp_scale(t_ccd=-18, t_ccd_ref=-10, scale_4c=2.0)
        4.0

    Typical usage (note the parameter naming)::

        >>> scale = dark_temp_scale(t_ccd, t_ccd_ref, scale_4c)
        >>> dark_curr_at_t_ccd_ref = scale * dark_curr_at_t_ccd

    The default ``scale_4c`` is the module-level ``DARK_SCALE_4C``
    (1.0 / 0.70), written as a reciprocal because earlier code expressed the
    relation with 0.70. It comes from the best global fit for the dark
    current model in `plot_predicted_warmpix.py`.

    :param t_ccd: actual temperature (degC)
    :param t_ccd_ref: reference temperature (degC, default=-19.0)
    :param scale_4c: increase in dark current per 4 degC increase (default=1.0 / 0.7)
    :returns: scale factor
    """
    factor = DARK_SCALE_4C if scale_4c is None else scale_4c
    return factor ** ((t_ccd_ref - t_ccd) / 4.0)
def get_dark_hist(date, t_ccd):
    """
    Return the dark current histogram corresponding to ``date`` and ``t_ccd``.

    The smooth twice-broken power-law model is evaluated on the fixed
    ``darkbins`` grid and the dark-current axis is then rescaled from the
    model's -14 C calibration temperature.

    :param date: date in any DateTime format
    :param t_ccd: CCD temperature (deg C)
    :returns: bin_centers, bins, darkhist
    """
    pars = get_sbp_pars(date)
    x = darkbins.bin_centers
    y = smooth_twice_broken_pow(pars, x)
    # Model params are calibrated using reference temp. -14 C
    # NOTE(review): arguments here are (t_ccd=-14, t_ccd_ref=t_ccd), i.e. the
    # factor converts dark current *from* -14 C *to* t_ccd — confirm this
    # direction is the one intended.
    scale = dark_temp_scale(-14, t_ccd)
    xbins = darkbins.bins * scale
    x = x * scale
    return x, xbins, y
def smooth_broken_pow(pars, x):
    """
    Smoothed broken power-law. Pars are same as bpl1d (NOT + gaussian sigma):
    1: gamma1
    2: gamma2
    3: x_b (break point)
    4: x_r (normalization reference point)
    5: ampl1

    The model is evaluated on the full module-level grid ``xall``, smoothed
    by convolution with the fixed log-space gaussian kernel ``yg``, and then
    sliced down to the sub-range spanned by ``x`` (``x`` is assumed to be a
    contiguous subset of ``xall``).
    """
    (gamma1, gamma2, x_b, x_r, ampl1) = pars
    # Second-segment amplitude chosen so the two power laws meet at x_b.
    ampl2 = ampl1 * (x_b / x_r) ** (gamma2 - gamma1)
    ok = xall > x_b
    y = ampl1 * (xall / x_r) ** (-gamma1)
    y[ok] = ampl2 * (xall[ok] / x_r) ** (-gamma2)
    # Slice indices locating x within xall (1e-3 tolerance for float fuzz).
    imin = np.searchsorted(xall, x[0] - 1e-3)
    imax = np.searchsorted(xall, x[-1] + 1e-3)
    return np.convolve(y, yg, mode='same')[imin:imax]
def smooth_twice_broken_pow(pars, x):
    """
    Smoothed twice-broken power-law. Pars are same as bpl1d (NOT + gaussian sigma):
    1: gamma1
    2: gamma2
    3: gamma3
    4: x_b (break point)
    5: ampl1

    The second break point (1000) and the normalization reference point (50)
    are fixed. The model is evaluated on the full module-level grid ``xall``,
    smoothed with the log-space gaussian kernel ``yg``, and sliced down to
    the sub-range spanned by ``x``.
    """
    x_b2 = 1000
    x_r = 50
    (gamma1, gamma2, gamma3, x_b, ampl1) = pars
    y = ampl1 * (xall / x_r) ** (-gamma1)
    i0, i1 = np.searchsorted(xall, [x_b, x_b2])
    # Segment amplitudes chosen so the three power laws join continuously.
    ampl2 = ampl1 * (x_b / x_r) ** (gamma2 - gamma1)
    y[i0:i1] = ampl2 * (xall[i0:i1] / x_r) ** (-gamma2)
    # NOTE(review): redundant — i1 already holds this value from above.
    i1 = np.searchsorted(xall, x_b2)
    ampl3 = ampl2 * (x_b2 / x_r) ** (gamma3 - gamma2)
    y[i1:] = ampl3 * (xall[i1:] / x_r) ** (-gamma3)
    # Slice indices locating x within xall (1e-3 tolerance for float fuzz).
    imin = np.searchsorted(xall, x[0] - 1e-3)
    imax = np.searchsorted(xall, x[-1] + 1e-3)
    return np.convolve(y, yg, mode='same')[imin:imax]
def temp_scalefac(T_ccd):
    """
    Return the multiplicative scale factor to convert a CCD dark map from
    the nominal -19C temperature to the temperature T. Based on best global fit for
    dark current model in plot_predicted_warmpix.py. Previous value was 0.62 instead
    of 0.70.

    .. deprecated:: use :func:`dark_temp_scale` directly instead.

    If attempting to reproduce previous analysis, be aware that this is now calling
    chandra_aca.dark_model.dark_temp_scale and the value will be determined using the
    module DARK_SCALE_4C value which may differ from previous values of 1.0/0.70 or 1.0/0.62.
    """
    # Warn on every call; this wrapper exists only for backward compatibility.
    warnings.warn("temp_scalefac is deprecated. See chandra_aca.dark_model.dark_temp_scale.")
    return dark_temp_scale(-19, T_ccd)
def as_array(vals):
    """Coerce ``vals`` to an ndarray, flagging whether the input was scalar.

    :param vals: scalar or array-like
    :returns: tuple (array, is_scalar) — a scalar input is wrapped as a
        length-1 array with is_scalar True; otherwise the array form of
        ``vals`` with is_scalar False.
    """
    arr = np.array(vals)
    if arr.ndim == 0:
        return np.array([vals]), True
    return arr, False
def get_sbp_pars(dates):
    """
    Return smooth broken powerlaw parameter set(s) at ``dates``.

    Based on the sbp fits for the darkhist_zodi_m14 histograms in
    /proj/sot/ska/analysis/dark_current_model/dark_model.ipynb. Each
    parameter follows a bi-linear trend in year with a knee at 2012.0
    (fits plus by-hand inspection of fit trending). Only accurate for
    dates > 2007.0.

    :param dates: one or a list of date(s) in DateTime compatible format
    :returns: one or a list of parameter lists [g1, g2, g3, x_b, ampl]
    """
    dates, is_scalar = as_array(dates)
    mid_year = 2012.0  # Knee of the bi-linear fits (fixed in dark_model.ipynb)
    dyears = DateTime(dates).frac_year - mid_year
    # Per-parameter fit values: (value at mid_year, slope pre-2012, slope post-2012).
    par_fits = ((0.075, -0.00692, -0.0207),  # g1
                (3.32, 0.0203, 0 * 0.0047),  # g2
                (2.40, 0.061, 0.061),  # g3
                (192, 0.1, 0.1),  # x_b
                (18400, 1.45e3, 742),  # ampl
                )
    pars_list = [
        [y_mid + (slope_pre if dyear < 0 else slope_post) * dyear
         for y_mid, slope_pre, slope_post in par_fits]
        for dyear in dyears
    ]
    return pars_list[0] if is_scalar else pars_list
def get_warm_fracs(warm_threshold, date='2013:001:12:00:00', T_ccd=-19.0):
    """
    Calculate fraction of pixels in modeled dark current distribution
    above warm threshold(s).

    For each threshold the full histogram bins above it are summed, then the
    contribution of the partial bin containing the threshold is added using
    a power-law (log-log linear) interpolation within that bin.

    :param warm_threshold: scalar or list of threshold(s) in e-/sec
    :param date: date to use for modeled dark current distribution/histogram
    :param T_ccd: temperature (C) of modeled dark current distribution
    :returns: list or scalar of warm fractions (depends on warm_threshold type)
    """
    x, xbins, y = get_dark_hist(date, T_ccd)
    warm_thresholds, is_scalar = as_array(warm_threshold)
    warmpixes = []
    for warm_threshold in warm_thresholds:
        # First get the full bins to right of warm_threshold
        ii = np.searchsorted(xbins, warm_threshold)
        warmpix = np.sum(y[ii:])
        lx = np.log(warm_threshold)
        lx0 = np.log(xbins[ii - 1])
        lx1 = np.log(xbins[ii])
        ly0 = np.log(y[ii - 1])
        ly1 = np.log(y[ii])
        # Log-log slope of the histogram across the straddling bin.
        m = (ly1 - ly0) / (lx1 - lx0)
        # Portion of the straddling bin's counts lying above the threshold.
        partial_bin = y[ii] * (lx1 ** m - lx ** m) / (lx1 ** m - lx0 ** m)
        warmpix += partial_bin
        warmpixes.append(warmpix)
    if is_scalar:
        out = warmpixes[0]
    else:
        out = np.array(warmpixes)
    # Normalize by the total pixel count of the 1024x1024 CCD.
    return out / (1024.0 ** 2)
def synthetic_dark_image(date, t_ccd_ref=None):
    """
    Generate a synthetic dark current image corresponding to the specified
    ``date`` and ``t_ccd``.

    Starts from the 1999:223 dark cal image (scaled to -14 C), replaces
    warm pixels (> 40 e-/sec) with randomly sampled cool pixels, then adds
    synthetic warm pixels drawn from the smoothed twice-broken power-law
    model evaluated at ``date``.

    :param date: (DateTime compatible)
    :param t_ccd_ref: ACA CCD temperature (deg C); if given, the returned
        image is scaled from -14 C to this temperature.
    :returns: 1024 x 1024 ndarray of dark current values
    """
    from mica.archive.aca_dark import get_dark_cal_image
    if 'dark_1999223' not in CACHE:
        dark = get_dark_cal_image('1999:223:12:00:00', select='nearest', t_ccd_ref=-14).ravel()
        CACHE['dark_1999223'] = dark.copy()
    else:
        dark = CACHE['dark_1999223'].copy()
    # Fill any pixels above 40 e-/sec with a random sampling from a cool
    # pixel below 40 e-/sec
    warm = dark > 40
    warm_idx = np.flatnonzero(warm)
    not_warm_idx = np.flatnonzero(~warm)
    fill_idx = np.random.randint(0, len(not_warm_idx), len(warm_idx))
    # BUG FIX: `fill_idx` holds positions *within* not_warm_idx, so it must
    # be mapped through not_warm_idx before indexing `dark`. Previously
    # `dark[fill_idx]` indexed the raw array and could re-sample warm pixels.
    dark[warm_idx] = dark[not_warm_idx[fill_idx]]
    darkmodel = smooth_twice_broken_pow(get_sbp_pars(date), xall)
    darkran = np.random.poisson(darkmodel)
    nn = 0
    for ii, npix in enumerate(darkran):
        # Generate npix log-uniform variates within this histogram bin.
        if npix > 0:
            logdark = np.random.uniform(np.log(xbins[ii]), np.log(xbins[ii + 1]), npix)
            dark[nn:nn + npix] += np.exp(logdark)
        nn += npix
    # Scatter the synthetic warm pixels across the whole image.
    np.random.shuffle(dark)
    dark.shape = (1024, 1024)
    if t_ccd_ref is not None:
        dark *= dark_temp_scale(-14, t_ccd_ref)
    return dark
| StarcoderdataPython |
1984497 | <filename>staticgenerator/middleware.py
import re
from django.conf import settings
from staticgenerator import StaticGenerator
class StaticGeneratorMiddleware(object):
    """Publish successful responses for configured URLs as static files.

    Requires a ``settings.STATIC_GENERATOR_URLS`` tuple of regex strings
    matched against ``request.path_info``, e.g.::

        STATIC_GENERATOR_URLS = (
            r'^/$',
            r'^/blog',
        )
    """
    # Compiled once at import time from the project settings.
    urls = tuple(re.compile(url) for url in settings.STATIC_GENERATOR_URLS)
    gen = StaticGenerator()

    def process_response(self, request, response):
        # Only successful responses are worth freezing to disk.
        if response.status_code != 200:
            return response
        path = request.path_info
        if any(pattern.match(path) for pattern in self.urls):
            self.gen.publish_from_path(path, response.content)
        return response
| StarcoderdataPython |
286977 | <reponame>yage99/tensorflow
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.bounding_shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorBoundingShapeOp(test_util.TensorFlowTestCase,
                                  parameterized.TestCase):
  """Parameterized tests for `RaggedTensor.bounding_shape`."""

  @parameterized.named_parameters([
      # rank = 2
      dict(testcase_name='docstring_example',
           rt=[[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]],
           expected=[5, 4]),
      dict(testcase_name='shape_5_3',
           rt=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
           expected=[5, 3]),
      dict(testcase_name='shape_1_7',
           rt=[['a', 'b', 'c', 'd', 'e', 'f', 'g']],
           expected=[1, 7]),
      dict(testcase_name='shape_3_7',
           rt=[[], ['a', 'b', 'c', 'd', 'e', 'f', 'g'], []],
           expected=[3, 7]),
      dict(testcase_name='shape_5_3_row_splits_int32',
           rt=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
           rt_row_splits_dtype=dtypes.int32,
           expected=[5, 3]),
      dict(testcase_name='shape_0_0',
           rt=[],
           rt_ragged_rank=1,
           expected=[0, 0]),
      dict(testcase_name='shape_3_0',
           rt=[[], [], []],
           expected=[3, 0]),
      # rank = 3
      dict(testcase_name='shape_5_3_2',
           rt=[[[0, 1], [2]], [[3, 4], [], [5, 6]], [[7]], [], [[8, 9]]],
           expected=[5, 3, 2]),
      dict(testcase_name='shape_1_7_2',
           rt=[[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]]],
           expected=[1, 7, 2]),
      dict(testcase_name='shape_3_7_4',
           rt=[[], [[0, 1], [2], [], [3], [4], [5, 6, 7, 8], [9]], []],
           expected=[3, 7, 4]),
      dict(testcase_name='shape_1_7_2_ragged_rank_1',
           rt=[[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]]],
           rt_ragged_rank=1,
           expected=[1, 7, 2]),
      # axis != None
      dict(testcase_name='shape_5_3_axis_0',
           rt=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
           axis=0,
           expected=5),
      dict(testcase_name='shape_5_3_axis_1',
           rt=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
           axis=1,
           expected=3),
      dict(testcase_name='shape_5_3_axis_1_0',
           rt=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
           axis=[1, 0],
           expected=[3, 5]),
      # out_type != None
      dict(testcase_name='shape_5_3_row_splits_int64_out_type_int64',
           rt=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
           rt_row_splits_dtype=dtypes.int64,
           out_type=dtypes.int64,
           expected=[5, 3]),
      dict(testcase_name='shape_5_3_row_splits_int32_out_type_int32',
           rt=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
           rt_row_splits_dtype=dtypes.int32,
           out_type=dtypes.int32,
           expected=[5, 3]),
      dict(testcase_name='shape_5_3_row_splits_int64_out_type_int32',
           rt=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
           rt_row_splits_dtype=dtypes.int64,
           out_type=dtypes.int32,
           expected=[5, 3]),
      dict(testcase_name='shape_5_3_row_splits_int32_out_type_int64',
           rt=[['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']],
           rt_row_splits_dtype=dtypes.int32,
           out_type=dtypes.int64,
           expected=[5, 3]),
  ])  # pyformat: disable
  def testBoundingShape(self,
                        rt,
                        expected,
                        axis=None,
                        out_type=None,
                        rt_row_splits_dtype=dtypes.int64,
                        rt_ragged_rank=None):
    """Checks bounding_shape value and dtype for one parameter combination."""
    rt = ragged_factory_ops.constant(
        rt, ragged_rank=rt_ragged_rank, row_splits_dtype=rt_row_splits_dtype)
    bounding_shape = rt.bounding_shape(axis=axis, out_type=out_type)
    self.assertAllEqual(bounding_shape, expected)
    # With no explicit out_type the result inherits the row-splits dtype.
    if out_type is not None:
      self.assertEqual(bounding_shape.dtype, out_type)
    else:
      self.assertEqual(bounding_shape.dtype, rt_row_splits_dtype)
    # If we're testing a configuration that uses `axis`, then make sure
    # that it also works if `axis` is a tensor.
    if axis is not None:
      bounding_shape = rt.bounding_shape(
          axis=constant_op.constant(axis), out_type=out_type)
      self.assertAllEqual(bounding_shape, expected)
      if out_type is not None:
        self.assertEqual(bounding_shape.dtype, out_type)
      else:
        self.assertEqual(bounding_shape.dtype, rt_row_splits_dtype)
# Run the test suite when executed as a script.
if __name__ == '__main__':
  googletest.main()
| StarcoderdataPython |
12804601 | <reponame>jhkim-spa/CVNet
import copy
import numpy as np
import torch
from mmcv.cnn import ConvModule, build_conv_layer, kaiming_init
from mmcv.runner import force_fp32
from torch import nn
from mmdet3d.core import (circle_nms, draw_heatmap_gaussian, gaussian_radius,
xywhr2xyxyr)
from mmdet3d.models import builder
from mmdet3d.models.builder import HEADS, build_loss
from mmdet3d.models.utils import clip_sigmoid
from mmdet3d.ops.iou3d.iou3d_utils import nms_gpu
from mmdet.core import build_bbox_coder, multi_apply
from mmcv.cnn import bias_init_with_prob, normal_init
from mmdet3d.cv_utils import project, project_to_image, pad, resize
@HEADS.register_module()
class CenterHeadCV(nn.Module):
    """Center-style detection head operating on cylindrical-view feature maps.

    For each feature level the head predicts a per-pixel class heatmap and an
    8-channel box regression map: normalized (x, y, z, w, l, h) plus
    (cos yaw, sin yaw).
    """

    def __init__(self,
                 num_classes=1,
                 feat_channels=64,
                 train_cfg=None,
                 test_cfg=None,
                 loss_cls=dict(type='FocalLoss',
                               use_sigmoid=True,
                               gamma=2.0,
                               alpha=0.25,
                               loss_weight=1.0),
                 loss_bbox=dict(type='SmoothL1loss',
                                beta=1.0 / 9.0,
                                loss_weight=2.0)):
        """Build the head.

        Args:
            num_classes (int): number of object classes.
            feat_channels (int): channels of the incoming feature maps.
            train_cfg / test_cfg (dict | None): training / testing configs.
            loss_cls (dict): classification loss config.
            loss_bbox (dict): box regression loss config.
        """
        super(CenterHeadCV, self).__init__()
        self.num_classes = num_classes
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.feat_channels = feat_channels
        # BUGFIX: the default ``loss_cls`` is a plain dict, which has no
        # ``use_sigmoid`` attribute -- use item access, like the next line.
        self.use_sigmoid_cls = loss_cls['use_sigmoid']
        self.sampling = loss_cls['type'] not in ['FocalLoss', 'GHMC']
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        # 6 normalized box values + cos(yaw) + sin(yaw)
        self.reg_target_size = 8
        self._init_layers()

    def _init_layers(self):
        """Initialize neural network layers of the head."""
        self.cls_out_channels = self.num_classes
        self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)
        self.conv_reg = nn.Conv2d(self.feat_channels, self.reg_target_size, 1)

    def init_weights(self):
        """Initialize the weights of head."""
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.conv_cls, std=0.01, bias=bias_cls)
        normal_init(self.conv_reg, std=0.01)

    def forward_single(self, x):
        """Forward function on a single-scale feature map.

        Args:
            x (torch.Tensor): Input features.

        Returns:
            tuple[torch.Tensor]: class score map and bbox regression map.
        """
        cls_score = self.conv_cls(x)
        bbox_pred = self.conv_reg(x)
        return cls_score, bbox_pred

    def forward(self, feats):
        """Forward pass over multi-level features (e.g. FPN outputs).

        Returns:
            tuple[list[torch.Tensor]]: per-level class score and bbox maps.
        """
        return multi_apply(self.forward_single, feats)

    def loss_single(self, cls_score, bbox_pred, cls_targets, reg_targets):
        """Compute classification and box regression losses for one level."""
        # classification loss
        cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.num_classes)
        cls_targets = cls_targets.permute(0, 2, 3, 1).reshape(-1).to(torch.long)
        loss_cls = self.loss_cls(cls_score, cls_targets)
        # regression loss (positives only)
        bbox_pred = bbox_pred.permute(0, 2, 3,
                                      1).reshape(-1, self.reg_target_size)
        reg_targets = reg_targets.permute(0, 2, 3,
                                          1).reshape(-1, self.reg_target_size)
        pos_inds = (cls_targets == 1).reshape(-1)
        num_pos = pos_inds.sum()
        pos_bbox_pred = bbox_pred[pos_inds]
        pos_reg_targets = reg_targets[pos_inds]
        if num_pos > 0:
            loss_bbox = self.loss_bbox(pos_bbox_pred,
                                       pos_reg_targets,
                                       avg_factor=num_pos)
        else:
            # No positives: keep the graph connected with a zero-valued sum.
            loss_bbox = pos_bbox_pred.sum()
        return loss_cls, loss_bbox

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             input_metas,
             cv_size,
             pad_size,
             gt_bboxes_ignore=None):
        """Calculate losses.

        Args:
            cls_scores (list[torch.Tensor]): Multi-level class scores.
            bbox_preds (list[torch.Tensor]): Multi-level bbox predictions.
            gt_bboxes (list[:obj:`BaseInstance3DBoxes`]): Gt bboxes per sample.
            gt_labels (list[torch.Tensor]): Gt labels of each sample.
            input_metas (list[dict]): Contain pcd and img's meta info.
            cv_size (tuple): (height, width) of the cylindrical view.
            pad_size (tuple): horizontal padding applied to projected coords.
            gt_bboxes_ignore (None | list[torch.Tensor]): ignored boxes.

        Returns:
            dict[str, list[torch.Tensor]]: classification and bbox losses
            per level.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        device = cls_scores[0].device
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        cls_reg_targets = self.get_targets(
            device,
            self.num_classes,
            gt_bboxes,
            input_metas,
            cv_size,
            pad_size,
            featmap_sizes,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels)
        if cls_reg_targets is None:
            return None
        (cls_targets_list, reg_targets_list) = cls_reg_targets
        losses_cls, losses_bbox = multi_apply(
            self.loss_single,
            cls_scores,
            bbox_preds,
            cls_targets_list,
            reg_targets_list)
        return dict(
            loss_cls=losses_cls, loss_bbox=losses_bbox)

    def get_targets(self,
                    device,
                    num_classes,
                    gt_bboxes,
                    input_metas,
                    cv_size,
                    pad_size,
                    featmap_sizes,
                    gt_bboxes_ignore_list,
                    gt_labels_list,
                    label_channels):
        """Build per-level dense classification / regression target maps.

        Projects 3D box centers into the (padded) cylindrical view, scatters
        one-hot labels and normalized box parameters at those pixels, and
        resizes the resulting map to every feature-map resolution.
        """
        # Normalization constants for (x, y, z, w, l, h); values assume the
        # usual KITTI-style point-cloud range -- TODO confirm against config.
        norm = torch.tensor([70.4, 80, 4, 1.6, 3.9, 1.56], device=device)
        # Keep only valid, in-range class labels.
        valid_idxs = [torch.where((res != -1) & (res < num_classes))[0]
                      for res in gt_labels_list]
        gt_bboxes = [box[idx].to(device) for box, idx in zip(gt_bboxes, valid_idxs)]
        gt_labels_list = [label[idx].to(device) for label, idx in zip(gt_labels_list, valid_idxs)]
        proj_mats = [torch.tensor(res['lidar2img'][:3]).to(device)
                     for res in input_metas]
        centers_3d = [res.gravity_center for res in gt_bboxes]
        centers_2d = [project_to_image(res.transpose(1, 0), proj_mat).to(torch.long)
                      for res, proj_mat in zip(centers_3d, proj_mats)]
        ## shift x coords (padding)
        centers_2d = [res + torch.tensor([pad_size[0], 0], device=device).reshape(-1, 1)
                      for res in centers_2d]
        # BUGFIX: drop boxes whose projected centers fall outside the view.
        # The original only filtered when a negative coordinate was present
        # and used inclusive upper bounds, letting indices equal to the map
        # size slip through to the scatter below.
        for i, centers in enumerate(centers_2d):
            valid_idx = (0 <= centers[0]) & \
                        (centers[0] < cv_size[1]) & \
                        (0 <= centers[1]) & \
                        (centers[1] < cv_size[0])
            gt_labels_list[i] = gt_labels_list[i][valid_idx]
            gt_bboxes[i] = gt_bboxes[i][valid_idx]
            centers_2d[i] = centers_2d[i][:, valid_idx]
        # Per-box target rows: [x_px, y_px, label, 6 normalized box values,
        # cos(yaw), sin(yaw)].
        targets = [torch.cat((center.transpose(1, 0).to(torch.float32),
                              label.reshape(-1, 1).to(torch.float32),
                              box.tensor[:, :-1] / norm,
                              torch.cos(box.tensor[:, -1].reshape(-1, 1)),
                              torch.sin(box.tensor[:, -1].reshape(-1, 1))), dim=1)
                   for label, center, box in zip(gt_labels_list, centers_2d, gt_bboxes)]
        target_maps = []
        target_map_channel = label_channels + self.reg_target_size
        for target in targets:
            target_map = torch.zeros((cv_size[0], cv_size[1],
                                      target_map_channel), dtype=torch.float32, device=device)
            x_coords = target[:, 0].to(torch.long)
            y_coords = target[:, 1].to(torch.long)
            target = target[:, 2:]
            # Scatter regression targets, then set the one-hot class channel.
            target_map[y_coords, x_coords, label_channels:] = \
                target[:, label_channels:]
            target_map[y_coords, x_coords, target[:, 0].to(torch.long)] = 1.
            target_maps.append(target_map)
        # Resize the dense map to every feature-map resolution.
        mlvl_target_maps = []
        for featmap_size in featmap_sizes:
            des_size = (featmap_size[1], featmap_size[0])
            target_maps_resized = [resize(res, des_size, nonzero_idx=1)
                                   for res in target_maps]
            mlvl_target_maps.append(target_maps_resized)
        cls_targets = [[res[..., :label_channels].permute(2, 0, 1)
                        for res in target_maps] for target_maps in mlvl_target_maps]
        reg_targets = [[res[..., label_channels:label_channels +
                            self.reg_target_size].permute(2, 0, 1)
                        for res in target_maps] for target_maps in mlvl_target_maps]
        # stack batches
        cls_targets = [torch.stack(tuple(res), dim=0)
                       for res in cls_targets]
        reg_targets = [torch.stack(tuple(res), dim=0)
                       for res in reg_targets]
        return (cls_targets, reg_targets)
| StarcoderdataPython |
5102723 | """ Implements exhaustive best subset regression for ESL."""
import numpy as np
import copy
import itertools as itr
from typing import List
from sklearn.linear_model import LinearRegression
from .esl_regressor import EslRegressor
class BestSubsetRegression(EslRegressor):
    """Exhaustive best-subset regression for ESL."""

    def __init__(self, subset_size: int):
        """Create a best-subset regressor.

        Args:
            subset_size: number of predictors retained per response.
        """
        self.subset_size = subset_size
        self.__models = None  # type: List[LinearRegression]
        self.__preds = None   # type: np.ndarray  # shape: (n_responses, subset_size)

    def best_preds(self, i_resp: int):
        """Returns the array of best predictors for a specific response."""
        return self.__preds[i_resp, :]

    def _fit(self, X: np.ndarray, Y: np.ndarray = None):
        """Fit one exhaustive best-subset model per response column.

        Args:
            X: numpy matrix of input features, dimensions ``(N, n_features)``.
            Y: 2d numpy array of responses, dimensions ``(N, n_responses)``.
        """
        n_resp = self._n_responses
        self.__models = [None] * n_resp
        self.__preds = np.zeros((n_resp, self.subset_size), dtype=int)
        best_scores = np.full(shape=(n_resp,), fill_value=-np.inf)
        reg = LinearRegression(fit_intercept=True)
        # Enumerate every predictor subset once; for each response keep the
        # subset with the highest R^2 (ties resolved by enumeration order).
        for combo in itr.combinations(range(X.shape[1]), self.subset_size):
            cols = list(combo)
            for i_resp in range(n_resp):
                reg.fit(X[:, cols], Y[:, i_resp])
                r2 = reg.score(X[:, cols], Y[:, i_resp])
                if r2 > best_scores[i_resp]:
                    best_scores[i_resp] = r2
                    self.__models[i_resp] = copy.deepcopy(reg)
                    self.__preds[i_resp, :] = cols
        return self

    def _predict(self, X: np.ndarray) -> np.ndarray:
        """Predicts, returning a 2d array."""
        out = np.zeros((X.shape[0], self._n_responses))
        for i_resp, model in enumerate(self.__models):
            out[:, i_resp] = model.predict(X[:, self.__preds[i_resp, :]])
        return out

    @property
    def coeffs(self):
        """Dense coefficient matrix (zeros outside each selected subset)."""
        coef = np.zeros((self.n_responses, self.n_features))
        for i_resp in range(self._n_responses):
            coef[i_resp, self.__preds[i_resp, :]] = self.__models[i_resp].coef_
        if len(self._fit_responses_shape) == 2:
            return coef
        return coef[0, :]

    @property
    def intercept(self):
        """Fitted intercept(s), scalar when fit on a 1d response."""
        values = np.array([m.intercept_ for m in self.__models])
        return values if len(self._fit_responses_shape) == 2 else values[0]
| StarcoderdataPython |
1660496 | from scipy import linalg
import numpy as np
import matplotlib.cm as cm
from matplotlib.mlab import bivariate_normal
import matplotlib.pyplot as plt
# %matplotlib inline
# == Set up the Gaussian prior density p == #
# NOTE(review): np.matrix is deprecated in modern NumPy -- plain ndarrays
# with the @ operator are preferred; kept as-is here.
Σ = [[0.3**2, 0.0], [0.0, 0.3**2]]
Σ = np.matrix(Σ)
x_hat = np.matrix([0.5, -0.5]).T
# == Define the matrices G and R from the equation y = G x + N(0, R) == #
G = [[1, 0], [0, 1]]
G = np.matrix(G)
R = 0.5 * Σ
# == The matrices A and Q == #
A = [[1.0, 0], [0, 1.0]]
A = np.matrix(A)
Q = 0.3 * Σ
# == The observed value of y == #
y = np.matrix([2.3, -1.9]).T
# == Set up grid for plotting == #
x_grid = np.linspace(-1.5, 2.9, 100)
y_grid = np.linspace(-3.1, 1.7, 100)
X, Y = np.meshgrid(x_grid, y_grid)
def gen_gaussian_plot_vals(μ, C, grid_x=None, grid_y=None):
    """Z values for plotting the bivariate Gaussian N(μ, C).

    Evaluates the density on the supplied meshgrid, defaulting to the
    module-level (X, Y) grid for backward compatibility.  Implemented
    directly with NumPy because ``matplotlib.mlab.bivariate_normal`` was
    removed in matplotlib 3.1.
    """
    gx = X if grid_x is None else grid_x
    gy = Y if grid_y is None else grid_y
    m_x, m_y = float(μ[0]), float(μ[1])
    s_x, s_y = np.sqrt(C[0, 0]), np.sqrt(C[1, 1])
    s_xy = C[0, 1]
    # Standard bivariate normal pdf -- the same formula the removed mlab
    # helper implemented.
    rho = s_xy / (s_x * s_y)
    one_m_rho2 = 1.0 - rho ** 2
    z = (((gx - m_x) / s_x) ** 2
         - 2.0 * rho * (gx - m_x) * (gy - m_y) / (s_x * s_y)
         + ((gy - m_y) / s_y) ** 2)
    denom = 2.0 * np.pi * s_x * s_y * np.sqrt(one_m_rho2)
    return np.exp(-z / (2.0 * one_m_rho2)) / denom
fig, ax = plt.subplots(figsize=(10, 8))
ax.grid()
# Plot three densities: the prior, the filtered posterior, and the
# one-step-ahead predictive density.
# Density 1: prior N(x_hat, Σ)
Z = gen_gaussian_plot_vals(x_hat, Σ)
cs1 = ax.contour(X, Y, Z, 6, colors="black")
ax.clabel(cs1, inline=1, fontsize=10)
# Density 2: posterior after observing y
# Kalman gain
K = Σ * G.T * linalg.inv(G * Σ * G.T + R)
# Update the state estimate
x_hat_F = x_hat + K*(y - G * x_hat)
# update covariance estimation
Σ_F = Σ - K * G * Σ
Z_F = gen_gaussian_plot_vals(x_hat_F, Σ_F)
cs2 = ax.contour(X, Y, Z_F, 6, colors="black")
ax.clabel(cs2, inline=1, fontsize=10)
# Density 3: prediction step
# Predict next state of the feature with the last state and predicted motion
new_x_hat = A * x_hat_F
# predict next covariance (A Σ_F Aᵀ + Q)
new_Σ = A * Σ_F * A.T + Q
new_Z = gen_gaussian_plot_vals(new_x_hat, new_Σ)
cs3 = ax.contour(X, Y, new_Z, 6, colors="black")
ax.clabel(cs3, inline=1, fontsize=10)
ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
ax.text(float(y[0]), float(y[1]), "$y$", fontsize=20, color="black")
plt.show()
# --- Second, independent example: one step of a constant-velocity
# --- Kalman tracker at ~30 fps.
# NOTE(review): this re-binds A, Q, K and P, shadowing the matrices used
# in the plotting example above.
dt = 33.3e-3  # frame period (seconds)
# state update matrices
A = np.matrix( ((1, 0, dt, 0),(0, 1, 0, dt),(0, 0, 1, 0),(0, 0, 0, 1)) )
Q = np.matrix( (30, 68, 0, 0) ).transpose()  # initial state [x, y, vx, vy]
B = np.matrix( ((dt**2/2),(dt**2/2), dt, dt)).transpose()  # control (acceleration) input
C = np.matrix( ((1,0,0,0),(0,1,0,0)) ) #this is our measurement function C, that we apply to the state estimate Q to get our expect next/new measurement
Q_estimate = Q
u = .005 #define acceleration magnitude
marker_noise_mag = .1; #process noise: the variability in how fast the Hexbug is speeding up (stdv of acceleration: meters/sec^2)
tkn_x = 1; #measurement noise in the horizontal direction (x axis).
tkn_y = 1; #measurement noise in the horizontal direction (y axis).
Ez = np.matrix(((tkn_x,0),(0,tkn_y)))
Ex = np.matrix( ((dt**4/4,0,dt**3/2,0),(0,dt**4/4,0,dt**3/2),(dt**3/2,0,dt**2,0),(0,dt**3/2,0,dt**2)) )*marker_noise_mag**2# Ex convert the process noise (stdv) into covariance matrix
P = Ex; # estimate of initial Hexbug position variance (covariance matrix)
# Predict next state of the Hexbug with the last state and predicted motion.
Q_estimate = A*Q_estimate + B*u;
# predict next covariance
P = A*P*A.T + Ex;
# Kalman Gain
K = P*C.T*linalg.inv(C*P*C.T + Ez);
# Update the state estimate with a hard-coded measurement (32, 70).
x_avg = 32
y_avg = 70
Q_loc_meas = np.matrix( (x_avg, y_avg) ).transpose()
Q_estimate = Q_estimate + K * (Q_loc_meas - C*Q_estimate);
print(Q_estimate)
# update covariance estimation.
P = (np.identity(4) - K*C)*P;
import csv

float_list = [1.13, 0.25, 3.28]

# Write the sample row five times.  The context manager guarantees the file
# is flushed and closed even if a write fails (the original left the handle
# open); newline='' is the csv-module recommended open mode.
with open('./ANN_0.csv', 'w', newline='') as outfile:
    writer = csv.writer(outfile)
    for _ in range(5):
        writer.writerow(float_list)
4847475 | <reponame>ryankim5/burglar-alarm
from burglar_alarm import lcd, Keypad, wait, distance, buzzer
from datetime import datetime
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
# MatrixKeypad Settings
ROWS = 4
COLS = 4
# 4x4 keypad layout, row-major.
KEYS = [
    '1', '2', '3', 'A',
    '4', '5', '6', 'B',
    '7', '8', '9', 'C',
    '*', '0', '#', 'D']
# NOTE(review): pin numbers look like physical BOARD numbering -- confirm
# against the GPIO.setmode() call inside the burglar_alarm package.
ROWSPINS = [12, 16, 18, 22]
COLSPINS = [19, 15, 13, 11]
DEBOUNCE_TIME = 50  # keypad debounce, presumably milliseconds -- TODO confirm
# arming settings (mutated by loop() via `global`)
armed = False
password = ""
inputed_password = ""
mode = ""
# Distance Sensor Settings
trigPin = 40
echoPin = 37
MAX_DISTANCE = 220  # maximum measurable distance, cm
timeOut = MAX_DISTANCE * 60  # echo timeout derived from max distance
# Buzzer Settings
is_buzzing = False
buzzerPin = 29
def loop():
    """Main control loop: poll keypad and distance sensor, drive LCD + buzzer.

    Keypad modes:
      * 'A' -- security mode: press '*', then digits + '#'.  When unarmed
        this sets a new passcode and arms; when armed it checks the
        entered passcode and disarms.
      * 'B' -- buzzer mode: enter the passcode + '#' to silence the alarm.
    While armed, a distance reading above 10 cm latches the buzzer on.
    State is shared through module-level globals.
    """
    # NOTE(review): 'passwd' is declared global but never used anywhere --
    # probably a leftover typo for 'password'.
    global password, armed, mode, inputed_password, is_buzzing, passwd
    display = lcd.LCD((3, 1), (16, 2))
    keypad = Keypad.MatrixKeypad(
        DEBOUNCE_TIME, ROWS, COLS, KEYS, ROWSPINS, COLSPINS)
    distanceSensor = distance.DistanceSensor(
        echoPin, trigPin, MAX_DISTANCE, timeOut)
    bzr = buzzer.Buzzer(buzzerPin)
    if armed:
        display.display("ARMED")
    else:
        display.display("UNARMED")
    while True:
        cm_far = distanceSensor.getDistance()
        key_pressed = keypad.findPressedKey()
        # if key_pressed:
        #     display.display("KEYPAD PRESSED")
        #     display.display(f"KEY: {key_pressed}", (0, 1), clear=False)
        #     break
        if key_pressed == "A":
            mode = "a"
        if key_pressed == "B":
            mode = "b"
        if mode == "a":
            display.display("SECURITY MODE")
            # Inner loop blocks here until the mode is cleared; the clock and
            # buzzer below are not updated while it runs.
            while True:
                inputed_password = ""
                if mode != "a":
                    break
                key_pressed = keypad.findPressedKey()
                if key_pressed == "*":
                    if not armed:
                        # Arm: collect a new passcode terminated by '#'.
                        display.display("Enter your")
                        display.display("new passcode...", (0, 1), clear=False)
                        while True:
                            if mode != "a":
                                break
                            key_pressed = keypad.findPressedKey()
                            if key_pressed in ["A", "B", "C", "D"]:
                                display.display("Numbers Only")
                            elif key_pressed == "#":
                                if not password:
                                    display.display("Passphrase should")
                                    display.display(
                                        "be not empty...", (0, 1), clear=False)
                                else:
                                    armed = True
                                    mode = ""
                                    display.display(
                                        "ARMED" if armed else "UNARMED")
                            else:
                                if key_pressed:
                                    password += key_pressed
                    else:
                        # Disarm: collect a passcode attempt terminated by '#'.
                        display.display("Please enter")
                        display.display("passphrase...", (0, 1), clear=False)
                        while True:
                            if mode != "a":
                                break
                            key_pressed = keypad.findPressedKey()
                            if key_pressed in ["A", "B", "C", "D"]:
                                display.display("Numbers Only")
                            elif key_pressed == "#":
                                if not password:
                                    display.display("Passphrase should")
                                    display.display(
                                        "be not empty...", (0, 1), clear=False)
                                else:
                                    if inputed_password == password:
                                        armed = False
                                        mode = ""
                                        display.display(
                                            "ARMED" if armed else "UNARMED")
                                    else:
                                        mode = ""
                                        # NOTE(review): operator precedence makes the
                                        # conditional apply to the whole concatenation,
                                        # so this shows only "UNARMED" when not armed.
                                        display.display(
                                            "WRONG PASS" + "-" + "ARMED" if armed else "UNARMED")
                            else:
                                if key_pressed:
                                    inputed_password += key_pressed
        elif mode == "b":
            inputed_password = ""
            if not is_buzzing:
                # Buzzer is already quiet -- nothing to silence.
                msg = 'ARMED' if armed else 'UNARMED'
                display.display(f"BZR NT-{msg}")
                mode = ""
            else:
                display.display("Password...")
                while True:
                    if mode != "b":
                        break
                    key_pressed = keypad.findPressedKey()
                    if key_pressed in ["A", "B", "C", "D"]:
                        display.display("Numbers Only")
                    elif key_pressed == "#":
                        if not password:
                            display.display("Passphrase should")
                            display.display(
                                "be not empty...", (0, 1), clear=False)
                        else:
                            if inputed_password == password:
                                mode = ""
                                display.display("BZR OFF")
                                is_buzzing = False
                            else:
                                mode = ""
                                # NOTE(review): same precedence issue as above.
                                display.display(
                                    "WRONG PASS" + "-" + "ARMED" if armed else "UNARMED")
                    else:
                        if key_pressed:
                            inputed_password += key_pressed
        # Clock display on the second LCD row, zero-padded HH:MM:SS.
        time_now = datetime.now()
        hour, minute, second = (
            time_now.hour, time_now.minute, time_now.second)
        hour, minute, second = str(hour), str(minute), str(second)
        if len(hour) == 1:
            hour = "0" + hour
        if len(minute) == 1:
            minute = "0" + minute
        if len(second) == 1:
            second = "0" + second
        display.display(f"TIME: {hour}:{minute}:{second}", (0, 1), clear=False)
        # While armed, any reading beyond 10 cm latches the alarm.
        if armed:
            if cm_far > 10:
                is_buzzing = True
        if is_buzzing:
            bzr.on()
        else:
            bzr.off()
def destroy():
    """Release GPIO resources, then terminate with a non-zero status.

    BUGFIX: the original called exit(1) first, which made GPIO.cleanup()
    unreachable and left the pins configured on shutdown.
    """
    GPIO.cleanup()
    exit(1)
# Entry point: run the alarm loop until Ctrl-C, then clean up the GPIO pins.
if __name__ == "__main__":
    try:
        print("Running run.py...")
        loop()
    except KeyboardInterrupt:
        destroy()
| StarcoderdataPython |
9610286 |
def render_template(gadget):
    """Build a Payload whose request header embeds ``gadget``.

    ``gadget`` is an obfuscated Transfer-Encoding header variant; the
    __METHOD__/__ENDPOINT__/__RANDOM__/__HOST__/__REPLACE_CL__ placeholders
    are presumably substituted later by the caller -- the Payload class is
    not defined in this file's visible portion (TODO confirm).
    """
    RN = "\r\n"
    p = Payload()
    p.header = "__METHOD__ __ENDPOINT__?cb=__RANDOM__ HTTP/1.1" + RN
    p.header += gadget + RN
    p.header += "Host: __HOST__" + RN
    p.header += "User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.87 Safari/537.36" + RN
    p.header += "Content-type: application/x-www-form-urlencoded; charset=UTF-8" + RN
    p.header += "Content-Length: __REPLACE_CL__" + RN
    return p
# Generate one payload per obfuscated Transfer-Encoding header: a control
# byte (0x01-0x20, then 0x7F-0xFF) is injected before the header name,
# before/after the colon, or after the value.  Each key encodes which two
# positions carry the byte ('XX' = untouched).  NOTE(review): ``mutations``
# is presumably a dict defined earlier in the file -- not visible here.
for i in range(0x1,0x21):
    mutations["%02x-%02x-XX-XX"%(i,i)] = render_template("%cTransfer-Encoding%c: chunked"%(i,i))
    mutations["%02x-XX-%02x-XX"%(i,i)] = render_template("%cTransfer-Encoding:%cchunked"%(i,i))
    mutations["%02x-XX-XX-%02x"%(i,i)] = render_template("%cTransfer-Encoding: chunked%c"%(i,i))
    mutations["XX-%02x-%02x-XX"%(i,i)] = render_template("Transfer-Encoding%c:%cchunked"%(i,i))
    mutations["XX-%02x-XX-%02x"%(i,i)] = render_template("Transfer-Encoding%c: chunked%c"%(i,i))
    mutations["XX-XX-%02x-%02x"%(i,i)] = render_template("Transfer-Encoding:%cchunked%c"%(i,i))
# High-bit bytes 0x7F-0xFF, same injection positions.
for i in range(0x7F,0x100):
    mutations["%02x-%02x-XX-XX"%(i,i)] = render_template("%cTransfer-Encoding%c: chunked"%(i,i))
    mutations["%02x-XX-%02x-XX"%(i,i)] = render_template("%cTransfer-Encoding:%cchunked"%(i,i))
    mutations["%02x-XX-XX-%02x"%(i,i)] = render_template("%cTransfer-Encoding: chunked%c"%(i,i))
    mutations["XX-%02x-%02x-XX"%(i,i)] = render_template("Transfer-Encoding%c:%cchunked"%(i,i))
    mutations["XX-%02x-XX-%02x"%(i,i)] = render_template("Transfer-Encoding%c: chunked%c"%(i,i))
    mutations["XX-XX-%02x-%02x"%(i,i)] = render_template("Transfer-Encoding:%cchunked%c"%(i,i))
6559750 | <reponame>JeremySun1224/Algorithms-and-Data_Structures
# -*- coding: utf-8 -*-
# -*- author: JeremySun -*-
# -*- dating: 21/4/7 -*-
"""用树结构实现文件系统,树往往是通过链式结构存储的"""
class Node(object):
    """A node in the in-memory file-system tree."""

    def __init__(self, name, type='dir'):
        """Create an unattached node; ``type`` defaults to a directory."""
        self.name = name
        self.type = type
        self.parent = None
        self.children = []

    def __repr__(self):
        """Render the node as its name (handy when printing child lists)."""
        return self.name
class FileSystemTree(object):
    """Toy file system backed by a linked tree of Node objects."""

    def __init__(self):
        """Start with a root directory '/' as the current directory."""
        self.root = Node(name='/')
        self.now = self.root  # current working directory

    def mkdir(self, name):
        """Create a directory named ``name`` under the current directory.

        A trailing '/' is appended when missing, matching ls/cd conventions.
        """
        if name[-1] != '/':
            name += '/'
        node = Node(name=name)  # create the folder node
        node.parent = self.now
        self.now.children.append(node)

    def ls(self):
        """List the children of the current directory."""
        return self.now.children

    def cd(self, name):
        """Change the current directory ('..' moves up a level).

        Raises:
            ValueError: if no child directory with that name exists.
        """
        if name[-1] != '/':
            name += '/'
        if name == '../':
            # BUGFIX: the original unconditionally set ``now`` to its parent,
            # so cd('..') at the root left ``now`` as None and broke every
            # subsequent operation.  Stay at the root instead, like Unix cd.
            if self.now.parent is not None:
                self.now = self.now.parent
            return None
        for child in self.now.children:
            if child.name == name:
                self.now = child  # switch into the matching directory
                return None
        raise ValueError('Invalid Dir.')
# Demo: build /var, /bin, /usr, create /bin/python, return to the root,
# then print the root's children twice (ls() of the root after cd('../')).
if __name__ == '__main__':
    tree = FileSystemTree()
    tree.mkdir('var/')
    tree.mkdir('bin/')
    tree.mkdir('usr/')
    tree.cd('bin/')
    tree.mkdir('python/')
    tree.cd('../')
    print(tree.root.children)
    print(tree.ls())
| StarcoderdataPython |
1871009 | # Copyright (C) 2020 University of Glasgow
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import email.header
import email.utils
import os
import re
import string
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from dataclasses import dataclass, field
from pathlib import Path
from ietfdata.datatracker import *
from ietfdata.mailarchive import *
# =============================================================================
@dataclass
class ParticipantEmail:
    # One sender address seen in the mail archive, together with every
    # display name it appeared under, its message count, and the resolved
    # datatracker Person (None until/unless resolution succeeds).
    addr : str
    names : List[str]
    count : int
    person : Optional[Person]
# =============================================================================
# -- Pass 1: scan every mailing list and collect, per sender address, the
# -- display names seen and a message count.
dt = DataTracker(cache_dir=Path("cache"))
archive = MailArchive(cache_dir=Path("cache"))
addrs = {}
lists = list(archive.mailing_list_names())
print("*** Parsing messages:")
index = 0
for ml_name in lists:
    index += 1
    print(F"{index:5d} /{len(lists):5d} {ml_name:40}", end="")
    for msg_id, msg in archive.mailing_list(ml_name).messages():
        try:
            n, e = email.utils.parseaddr(msg.message["from"])
            # BUGFIX: the original compared with ``is not ""`` (identity on a
            # literal -- a SyntaxWarning and fragile); use equality, and skip
            # messages with no parseable address explicitly.
            if e == "":
                continue
            if e not in addrs:
                addrs[e] = ParticipantEmail(e, [], 0, None)
            if n != "" and n not in addrs[e].names:
                # Decode RFC 2047 encoded-words into a plain unicode name.
                name = str(email.header.make_header(email.header.decode_header(n)))
                addrs[e].names.append(name)
            addrs[e].count += 1
        except Exception:
            # Parsing is best-effort: malformed messages are skipped.  A bare
            # ``except:`` would also have swallowed KeyboardInterrupt.
            pass
    print(F" {len(addrs):6}")
    #if index == 10:
    #    break
print("")
# -- Pass 2: exact address lookup against the datatracker.
print("*** Resolving email addresses:")
for e in addrs.values():
    assert e.addr != ""
    e.person = dt.person_from_email(e.addr)
    if e.person is not None:
        print(F" {e.addr:40} -> {e.person.id:8} (exact email match)")
    else:
        print(F" {e.addr:40}")
print("")
# -- Pass 3: for still-unresolved addresses, try matching each display name
# -- (with and without a "Dr. " prefix) against datatracker person records.
print("*** Resolving names:")
for e in addrs.values():
    if e.person is None:
        for name in e.names:
            if name == "":
                break
            # Check against UTF-8 versions of names in datatracker:
            for person in dt.people(name_contains = name):
                if name == person.name:
                    e.person = person
                    print(F" {e.addr:40} -> {e.person.id:8} (UTF-8 name match: {name})")
                elif F"Dr. {name}" == person.name:
                    e.person = person
                    print(F" {e.addr:40} -> {e.person.id:8} (UTF-8 name match: {name} <-> {person.name})")
                if e.person is not None:
                    break
            # Check against ASCII versions of names in datatracker:
            for person in dt.people(ascii_contains = name):
                if name == person.ascii:
                    e.person = person
                    print(F" {e.addr:40} -> {e.person.id:8} (ASCII name match: {name})")
                elif F"Dr. {name}" == person.ascii:
                    e.person = person
                    print(F" {e.addr:40} -> {e.person.id:8} (ASCII name match: {name} <-> {person.name})")
                if e.person is not None:
                    break
    if e.person is None:
        print(F" {e.addr:40}")
print("")
# -- Pass 4: the reverse direction -- walk every datatracker person and try
# -- to match their name forms against the remaining unresolved names.
print("*** Resolving People:")
for person in dt.people():
    # "First M. Last" -> "First Last" (strip the middle initial).
    pattern = re.compile("[A-Za-z]+ [A-Z]\. [A-Za-z]+")
    if pattern.match(person.name):
        split = person.name.split(" ")
        person_name_initial = F"{split[0]} {split[2]}"
    else:
        person_name_initial = person.name
    for e in addrs.values():
        if e.person is None:
            for name in e.names:
                pattern = re.compile("[A-Za-z], [A-Za-z]")
                if pattern.match(person.name):
                    # Convert "surname, name" into "name surname" and match
                    split = person.name.split(", ")
                    name_reversed = F"{split[1]} {split[0]}"
                    # NOTE(review): both comparisons below compare two strings
                    # derived from person.name, never the archive name -- the
                    # first is always False, and ``name`` was almost certainly
                    # intended on one side.  Flagged, not changed.
                    if name_reversed == person.name:
                        e.person = person
                        print(F" {e.addr:40} -> {e.person.id:8} (UTF-8 name match: {name} <-> {person.name})")
                    if name_reversed == person_name_initial:
                        e.person = person
                        print(F" {e.addr:40} -> {e.person.id:8} (UTF-8 name match: {name} <-> {person.name})")
                # Does it match the name without a middle initial?
                if name == person_name_initial:
                    e.person = person
                    print(F" {e.addr:40} -> {e.person.id:8} (UTF-8 name match: {name} <-> {person.name})")
# -- Summary: count resolved/unresolved addresses and messages, and list the
# -- documents authored from each unresolved address (if the datatracker has
# -- an Email record for it).
total_resolved = 0
total_notfound = 0
email_resolved = 0
email_notfound = 0
print("")
print("*** Unresolved:")
for e in addrs.values():
    if e.person is None:
        email_notfound += 1
        total_notfound += e.count
        print(F" {e.addr:40} ({e.count})")
        for name in e.names:
            print(F" {name}")
        # NOTE(review): this rebinds the loop variable ``e`` from a
        # ParticipantEmail to a datatracker Email, which is confusing.
        # Also, replace('/', '%40') looks wrong: %40 is the encoding of
        # '@', so this was probably meant to escape '@' -- TODO confirm.
        e = dt.email(EmailURI(F"/api/v1/person/email/{e.addr.replace('/', '%40')}/"))
        if e is not None:
            for d in dt.documents_authored_by_email(e):
                print(d.name)
    else:
        email_resolved += 1
        total_resolved += e.count
print(F"Resolved: {total_resolved:8} messages; {email_resolved:6} addresses")
print(F"NotFound: {total_notfound:8} messages; {email_notfound:6} addresses")
# -- Dump the per-address resolution results to addrs.json.
data : Dict[str, Any] = {}
for e in addrs.values():
    item : Dict[str, Any] = {}
    item["addr"] = e.addr
    item["names"] = e.names
    item["count"] = e.count
    if e.person is not None:
        item["person_url"] = e.person.resource_uri.uri
        item["person_id"] = e.person.id
        item["person_name"] = e.person.name
        item["person_name_from_draft"] = e.person.name_from_draft
        item["person_ascii"] = e.person.ascii
        item["person_ascii_short"] = e.person.ascii_short
    else:
        # BUGFIX: the original wrote "person_uri" here but "person_url" in
        # the resolved branch, giving the two record shapes different keys.
        item["person_url"] = ""
        item["person_id"] = ""
        item["person_name"] = ""
        item["person_name_from_draft"] = ""
        item["person_ascii"] = ""
        item["person_ascii_short"] = ""
    data[e.addr] = item

with open(Path("addrs.json"), "w") as outf:
    json.dump(data, outf)
# =============================================================================
| StarcoderdataPython |
332577 | #!/usr/bin/env python3
import numpy as np
import os
import sys
from subprocess import run
from astropy.io import fits
import time
from argparse import ArgumentParser
# Command-line interface for the slurm-script generator below.
parser = ArgumentParser()
parser.add_argument('-n' ,'--nnodes', type=int, default=30,
                    help='Number of nodes to take up')
parser.add_argument('-r', '--reverse', default=False, action='store_true',
                    help='Whether to run galaxies in reverse order')
parser.add_argument('-t', '--hours', type=int, default=12,
                    help='Max number of hours to run. Only applies to reverse')
parser.add_argument('--start', type=int, default=0,
                    help='Index of DRPall file to start at')
parser.add_argument('--stop', type=int, default=-1,
                    help='Index of DRPall file to stop at')
parser.add_argument('--clobber', action='store_true',
                    help='Overwrite existing slurm files')
parser.add_argument('--ad', action='store_true',
                    help='Use the plateifus from nirvana_test_sample.txt')
parser.add_argument('--nosmear', action='store_true',
                    help="Don't smear with PSF")
args = parser.parse_args()
# Partition the galaxy list across nnodes slurm jobs and submit each one.
if __name__ == '__main__':
    # Galaxy list: either the fixed test sample or the full DRPall catalog.
    if args.ad:
        plates, ifus = np.genfromtxt('/home/bdigiorg/nirvana_testing_sample.txt').T
        drp = {'plate':plates, 'ifudsgn':ifus}
    else:
        drp = fits.open('/home/bdigiorg/drpall-v3_1_1.fits')[1].data
    lendrp = len(drp['plate'])
    args.stop = lendrp if args.stop == -1 else args.stop
    # NOTE(review): floor division drops up to nnodes-1 trailing galaxies
    # from the schedule; and if nnodes exceeds the galaxy count the empty
    # slice makes platesi[0] raise IndexError below.
    galpernode = (args.stop - args.start)//args.nnodes
    print(galpernode, 'galaxies per file')
    rootdir = '/data/users/bdigiorg/'
    outdir = '/data/users/bdigiorg/fits/'
    remotedir = '/data/users/bdigiorg/download/'
    progressdir = '/data/users/bdigiorg/progress/'
    plates = np.array(drp['plate'], dtype=int)[args.start:args.stop]
    ifus = np.array(drp['ifudsgn'], dtype=int)[args.start:args.stop]
    if args.reverse:
        plates = plates[::-1]
        ifus = ifus[::-1]
        # Reverse runs are time-boxed so they stop before colliding with
        # the forward runs.
        timelimit = f'#SBATCH --time={args.hours}:00:00'
    else: timelimit = ''
    for i in range(args.nnodes):
        platesi = plates[galpernode * i:galpernode * (i+1)]
        ifusi = ifus[galpernode * i:galpernode * (i+1)]
        fname = f'/home/bdigiorg/slurms/nirvana_{platesi[0]}-{platesi[-1]}.slurm'
        if os.path.isfile(fname):
            if args.clobber:
                os.remove(fname)
            else:
                raise FileExistsError(f'File already exists: {fname}')
        # Write the sbatch preamble, then one gas + stellar nirvana run per
        # galaxy, each leaving .start/.finish markers in the progress tree.
        with open(fname, 'a') as f:
            f.write(f'\
#!/bin/bash \n\
#SBATCH --job-name={args.reverse * "r"}{platesi[0]}-{platesi[-1]}_nirvana \n\
#SBATCH --partition=windfall \n\
#SBATCH --account=windfall \n\
#SBATCH --mail-type=END,FAIL,{"REQUEUE" * (1-args.reverse)} \n\
#SBATCH --mail-user=<EMAIL> \n\
#SBATCH --ntasks=1 \n\
#SBATCH --cpus-per-task=40 \n\
#SBATCH --nodes=1 \n\
#SBATCH --requeue \n \n\
#SBATCH --output=/data/users/bdigiorg/logs/nirvana_{args.reverse * "r"}{platesi[0]}-{platesi[-1]}.log \n\
{timelimit} \n\
pwd; hostname; date \n\n\
module load python/3.9.0 \n\
module load nirvana/0.0.1dev \n\
module load fftw/3.3.8 \n\n\
export SAS_BASE_DIR=/data/users/bdigiorg/\n\
export MANGA_SPECTRO=/data/users/bdigiorg/mangawork/manga/spectro\n\
export MANGA_SPECTRO_REDUX=$MANGA_SPECTRO/redux/\n\
export MANGA_SPECTRO_ANALYSIS=$MANGA_SPECTRO/analysis/\n\n')
            for j in range(len(platesi)):
                progresspath = f'{progressdir}/{platesi[j]}/{ifusi[j]}/'
                f.write(f'\
echo {platesi[j]} {ifusi[j]} gas \n\
mkdir {progressdir}/{platesi[j]}/ \n\
mkdir {progressdir}/{platesi[j]}/{ifusi[j]}/ \n\
touch {progresspath}/gas.start \n\
nirvana {platesi[j]} {ifusi[j]} -c 40 --root {rootdir} --dir {outdir} --remote {remotedir} {"--nosmear" * args.nosmear} > {progresspath}/gas.log 2> {progresspath}/gas.err\n\
touch {progressdir}/{platesi[j]}/{ifusi[j]}/gas.finish \n \n\
ln -s {outdir}/nirvana_{platesi[j]}-{ifusi[j]}_Gas.fits {progresspath}/gas.fits\n\
echo {platesi[j]} {ifusi[j]} stellar \n\
touch {progresspath}/stellar.start \n\
nirvana {platesi[j]} {ifusi[j]} -s -c 40 --root {rootdir} --dir {outdir} --remote {remotedir} {"--nosmear" * args.nosmear} > {progresspath}/stellar.log 2> {progresspath}/stellar.err\n\
touch {progresspath}/stellar.finish \n\
ln -s {outdir}/nirvana_{platesi[j]}-{ifusi[j]}_Stars.fits {progresspath}/stellar.fits\n\
date\n\n')
        run(['sbatch',fname])
        # Brief pause between submissions to avoid hammering the scheduler.
        time.sleep(.1)
| StarcoderdataPython |
from flask import Flask
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from flask_jwt_extended import JWTManager, get_jwt
from flaskconfig import *
from util.logger import logger

app = Flask(__name__)

# Select the configuration class matching the current ENV name; abort on
# an unknown environment.  (configmap — and presumably datetime/os used
# below — come in via the star import from flaskconfig; TODO confirm.)
try:
    app.config.from_object(configmap[app.config['ENV']]())
except KeyError:
    logger.error('Improper ENV name for config')
    exit()

# initialize db
# Bind the shared SQLAlchemy instance to this app and create all tables.
with app.app_context():
    from model import db_base
    db_base.db.init_app(app)
    db_base.db.create_all()

# enable CORS
# NOTE(review): every origin is allowed with credentials — tighten for
# production deployments.
CORS(app, resources={r'*': {'origins': '*'}}, supports_credentials=True)
bcrypt = Bcrypt(app)
jwt = JWTManager(app)

# test entry for development
# Seed a company, a root admin user and one device so the API is usable
# immediately when running in development mode.
if app.env == 'development':
    from service.data.company_service import create_company
    from service.data.user_service import create_user, read_user
    from service.data.device_service import create_device
    with app.app_context():
        create_company({
            'name': 'illo',
            'subscription': True,
            'expiration_date': datetime.datetime.now() + datetime.timedelta(days=365)
        })
        create_user(
            userid='root',
            password='<PASSWORD>',
            username='root',
            company='illo',
            is_admin=True
        )
        root_user = read_user(userid='root')[0]
        create_device(
            model='testmodel',
            serial='testserial',
            company='illo',
            owner='root',
            ip='172.16.58.3',
            is_deleted=False,
            created_by=root_user,
            edited_by=root_user,
        )

if __name__ == '__main__':
    # Register blueprints/routers, then start the development server.
    logger.info('Loaded ENV:' + str(list(os.environ)))
    with app.app_context():
        from router.cv_router import cv_route
        from router.task_callback_router import task_callback_route
        from router import api
        api.init_app(app)
        app.register_blueprint(task_callback_route, url_prefix='/task_callback')
        app.register_blueprint(cv_route, url_prefix='/cv')
    app.run(host='0.0.0.0',
            port=os.getenv('FLASK_RUN_PORT'),
            debug=os.getenv('FLASK_DEBUG'))
| StarcoderdataPython |
12859524 | #!/bin/usr/python
# -*- coding:utf-8 -*-
# 628.三个数的最大乘积
class Solution:
    """LeetCode 628: maximum product of three numbers."""

    def maximumProduct(self, nums):
        """Return the largest product of any three numbers in ``nums``.

        Preserves the original API: returns None when fewer than three
        numbers are supplied.  The answer is either the product of the
        three largest values, or of the two smallest (most negative)
        values with the single largest one.

        Bug fix: the previous sign-splitting implementation raised
        IndexError on inputs such as [-3, -2, 1, 5] (only two positives,
        so z_num[2] did not exist) and on all-negative arrays (empty
        z_num indexed by z_num[0]).
        """
        if len(nums) < 3:
            return None
        ordered = sorted(nums)
        return max(ordered[-1] * ordered[-2] * ordered[-3],
                   ordered[0] * ordered[1] * ordered[-1])
# Quick smoke test: expected output is 720 ((-4) * (-3) * 60).
solver = Solution()
best_product = solver.maximumProduct([-4, -3, -2, -1, 60])
print(best_product)
| StarcoderdataPython |
11284106 | import codecs
import os
import re
from setuptools import setup
HERE = os.path.abspath(os.path.dirname(__file__))
'''Next two functions borrowed from pip's setup.py'''
def read(*parts):
    """Read and return the contents of the file at HERE/parts.

    Intentionally does *not* pass an encoding option to open;
    see: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
    """
    # Close the handle deterministically (the original left it to GC).
    with codecs.open(os.path.join(HERE, *parts), 'r') as fh:
        return fh.read()
def find_version(*file_paths):
    """Extract the ``__version__`` string from the given source file."""
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
def read_long_description():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst', 'r') as readme:
        return readme.read()
# Package metadata for image-diet2.  Fix: the 'Framework :: Django'
# classifier was listed twice; the duplicate has been removed.
setup(
    author="<NAME>",
    author_email="<EMAIL>",
    name='image-diet2',
    version=find_version('image_diet', '__init__.py'),
    description='Remove unnecessary bytes from images',
    long_description=read_long_description(),
    url='https://github.com/samastur/image-diet2/',
    platforms=['OS Independent'],
    license='MIT License',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.7',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Utilities',
    ],
    install_requires=[
        'Django>=1.7',
        'pyimagediet>=1.1.0',
    ],
    include_package_data=True,
    packages=['image_diet'],
    zip_safe=False
)
| StarcoderdataPython |
9641100 | <reponame>LeLocTai/keypirinha-currency
# Keypirinha launcher (keypirinha.com)
import keypirinha as kp
import keypirinha_util as kpu
import keypirinha_net as kpnet
from .exchange import ExchangeRates, UpdateFreq
import re
import json
import traceback
import urllib.error
import urllib.parse
from html.parser import HTMLParser
class Currency(kp.Plugin):
    """Currency conversion plugin for Keypirinha.

    Parses free-form queries such as ``"100 USD to EUR, GBP"`` and
    suggests converted amounts using exchange rates provided by
    :class:`exchange.ExchangeRates`.  Results can be copied to the
    clipboard with or without the currency code, and the cached rates
    can be refreshed on demand via a dedicated catalog item.
    """
    # NOTE(review): these two constants look like leftovers from an older
    # YQL-based implementation; rate fetching now lives in ExchangeRates.
    API_URL = "http://query.yahooapis.com/v1/public/yql"
    API_USER_AGENT = "Mozilla/5.0"
    # Item categories used to tag catalog and suggestion entries.
    ITEMCAT_CONVERT = kp.ItemCategory.USER_BASE + 1
    ITEMCAT_UPDATE = kp.ItemCategory.USER_BASE + 2
    ITEMCAT_RESULT = kp.ItemCategory.USER_BASE + 3
    # Configuration section name and fallback values.
    DEFAULT_SECTION = 'defaults'
    DEFAULT_ITEM_ENABLED = True
    DEFAULT_UPDATE_FREQ = 'daily'
    DEFAULT_ALWAYS_EVALUATE = True
    DEFAULT_ITEM_LABEL = 'Convert Currency'
    DEFAULT_CUR_IN = 'USD'
    DEFAULT_CUR_OUT = 'EUR, GBP'
    # Effective settings (overwritten by _read_config).
    default_item_enabled = DEFAULT_ITEM_ENABLED
    update_freq = UpdateFreq(DEFAULT_UPDATE_FREQ)
    always_evaluate = DEFAULT_ALWAYS_EVALUATE
    default_item_label = DEFAULT_ITEM_LABEL
    default_cur_in = DEFAULT_CUR_IN
    default_cur_out = DEFAULT_CUR_OUT
    # Names of the clipboard actions offered on result items.
    ACTION_COPY_RESULT = 'copy_result'
    ACTION_COPY_AMOUNT = 'copy_amount'
    # ExchangeRates instance, created in _read_config.
    broker = None
    def __init__(self):
        super().__init__()
    def on_start(self):
        # Load settings and register the clipboard actions for results.
        self._read_config()
        actions = [
            self.create_action(
                name=self.ACTION_COPY_AMOUNT,
                label="Copy result",
                short_desc="Copy the result to clipboard"),
            self.create_action(
                name=self.ACTION_COPY_RESULT,
                label="Copy result with code",
                short_desc="Copy result (with code) to clipboard")]
        self.set_actions(self.ITEMCAT_RESULT, actions)
    def on_catalog(self):
        # Publish the default converter item (if enabled) plus the
        # "Update Currency" item.
        catalog = []
        if self.default_item_enabled:
            catalog.append(self._create_translate_item(
                label=self.default_item_label))
        self.set_catalog(catalog)
        self._update_update_item()
    def on_suggest(self, user_input, items_chain):
        """Build conversion suggestions for the current query."""
        suggestions = []
        # Keep existing suggestions when drilling into a result item.
        if items_chain and items_chain[-1].category() == self.ITEMCAT_RESULT:
            self.set_suggestions(items_chain, kp.Match.ANY, kp.Sort.NONE)
            return
        if not items_chain or items_chain[-1].category() != self.ITEMCAT_CONVERT:
            # Outside the converter item, only react when enabled and the
            # raw input already names at least one currency.
            if not self.always_evaluate:
                return
            query = self._parse_and_merge_input(user_input, True)
            if 'from_cur' not in query and 'to_cur' not in query:
                return
        # Debounce: abandon if the user kept typing.
        if self.should_terminate(0.25):
            return
        try:
            query = self._parse_and_merge_input(user_input)
            if not query['from_cur'] or not query['to_cur'] or not user_input:
                return
            # Refresh rates if they are stale; mirror the new timestamp.
            if self.broker.tryUpdate():
                self._update_update_item()
            if self.broker.error:
                suggestions.append(self.create_error_item(
                    label=user_input,
                    short_desc="Webservice failed ({})".format(self.broker.error)))
            else:
                results = self.broker.convert(query['amount'], query['from_cur'], query['to_cur'])
                for result in results:
                    suggestions.append(self._create_result_item(
                        label=result['title'],
                        short_desc= result['source'] + ' to ' + result['destination'],
                        target=result['title']
                    ))
        except Exception as exc:
            suggestions.append(self.create_error_item(
                label=user_input,
                short_desc="Error: " + str(exc)))
        self.set_suggestions(suggestions, kp.Match.ANY, kp.Sort.NONE)
        # else
        #     suggestions = [self._create_keyword_item(
        #         label=user_input,
        #         short_desc="Convert values between currencies")]
        #     self.set_suggestions(suggestions)
    def on_execute(self, item, action):
        """Execute an item: refresh the rates or copy a result."""
        if item.category() == self.ITEMCAT_UPDATE:
            self.broker.update()
            self._update_update_item()
            return
        if item.category() != self.ITEMCAT_RESULT:
            return
        # browse or copy url
        if action and action.name() == self.ACTION_COPY_AMOUNT:
            # Strip the trailing " XYZ" currency code before copying.
            amount = item.data_bag()[:-4]
            kpu.set_clipboard(amount)
        # default action: copy result (ACTION_COPY_RESULT)
        else:
            kpu.set_clipboard(item.data_bag())
    def on_activated(self):
        pass
    def on_deactivated(self):
        pass
    def on_events(self, flags):
        # Reload configuration when app/package/network settings change.
        if flags & (kp.Events.APPCONFIG | kp.Events.PACKCONFIG |
                    kp.Events.NETOPTIONS):
            self._read_config()
            self.on_catalog()
    def _parse_and_merge_input(self, user_input=None, empty=False):
        """Parse *user_input* into a query dict.

        Returns a dict with (optional) keys 'terms', 'from_cur', 'to_cur'
        and 'amount'.  With empty=False the configured defaults are
        pre-filled; with empty=True only what the input explicitly names
        is set (used to detect whether the input mentions a currency).
        """
        if empty:
            query = {}
        else:
            query = {
                'from_cur': self.default_cur_in,
                'to_cur': self.default_cur_out,
                'amount': 1
            }
        # parse user input
        # * supported formats:
        #  <amount> [[from_cur][( to | in |:)to_cur]]
        if user_input:
            user_input = user_input.lstrip()
            query['terms'] = user_input.rstrip()
            # Three-letter code, optionally a comma-separated list.
            symbolRegex = r'[a-zA-Z]{3}(,\s*[a-zA-Z]{3})*'
            m = re.match(
                (r"^(?P<amount>\d*([,.]\d+)?)?\s*" +
                 r"(?P<from_cur>" + symbolRegex + ")?\s*" +
                 r"(( to | in |:)\s*(?P<to_cur>" + symbolRegex +"))?$"),
                user_input)
            if m:
                if m.group('from_cur'):
                    from_cur = self.broker.validate_codes(m.group('from_cur'))
                    if from_cur:
                        query['from_cur'] = from_cur
                if m.group('to_cur'):
                    to_cur = self.broker.validate_codes(m.group('to_cur'))
                    if to_cur:
                        query['to_cur'] = to_cur
                if m.group('amount'):
                    # Accept both comma and dot as decimal separator.
                    query['amount'] = float(m.group('amount').rstrip().replace(',', '.'))
        return query
    def _update_update_item(self):
        # Refresh the catalog entry showing the last rates-update time.
        self.merge_catalog([self.create_item(
            category=self.ITEMCAT_UPDATE,
            label='Update Currency',
            short_desc='Last updated at ' + self.broker.last_update.isoformat(),
            target="updatecurrency",
            args_hint=kp.ItemArgsHint.FORBIDDEN,
            hit_hint=kp.ItemHitHint.IGNORE)])
    def _create_translate_item(self, label):
        """Create the main catalog item describing the default conversion."""
        def joinCur(lst):
            # Human-readable list: "A", "A and B", "A, B and C".
            if len(lst) == 1:
                return lst[0]
            else:
                return ', '.join(lst[:-1]) + ' and ' + lst[-1]
        desc = 'Convert from {} to {}'.format(joinCur(self.default_cur_in), joinCur(self.default_cur_out))
        return self.create_item(
            category=self.ITEMCAT_CONVERT,
            label=label,
            short_desc=desc,
            target="convertcurrency",
            args_hint=kp.ItemArgsHint.REQUIRED,
            hit_hint=kp.ItemHitHint.NOARGS)
    def _create_result_item(self, label, short_desc, target):
        # Result items carry the full label in their data bag so the
        # clipboard actions can retrieve it later.
        return self.create_item(
            category=self.ITEMCAT_RESULT,
            label=label,
            short_desc=short_desc,
            target=target,
            args_hint=kp.ItemArgsHint.REQUIRED,
            hit_hint=kp.ItemHitHint.NOARGS,
            data_bag=label)
    def _read_config(self):
        """Load package settings and (re)create the ExchangeRates broker."""
        def _warn_cur_code(name, fallback):
            # Warn once about an invalid currency code in the config.
            fmt = (
                "Invalid {} value in config. " +
                "Falling back to default: {}")
            self.warn(fmt.format(name, fallback))
        settings = self.load_settings()
        self.always_evaluate = settings.get_bool(
            "always_evaluate",
            section=self.DEFAULT_SECTION,
            fallback=self.DEFAULT_ALWAYS_EVALUATE)
        # [default_item]
        self.default_item_enabled = settings.get_bool(
            "enable",
            section=self.DEFAULT_SECTION,
            fallback=self.DEFAULT_ITEM_ENABLED)
        self.default_item_label = settings.get_stripped(
            "item_label",
            section=self.DEFAULT_SECTION,
            fallback=self.DEFAULT_ITEM_LABEL)
        update_freq_string = settings.get_enum(
            'update_freq',
            section=self.DEFAULT_SECTION,
            fallback=self.DEFAULT_UPDATE_FREQ,
            enum = [freq.value for freq in UpdateFreq]
        )
        self.update_freq = UpdateFreq(update_freq_string)
        # The broker caches rates in the package cache directory.
        path = self.get_package_cache_path(create=True)
        self.broker = ExchangeRates(path, self.update_freq, self)
        # default input currency
        input_code = settings.get_stripped(
            "input_cur",
            section=self.DEFAULT_SECTION,
            fallback=self.DEFAULT_CUR_IN)
        validated_input_code = self.broker.validate_codes(input_code)
        if not validated_input_code:
            _warn_cur_code("input_cur", self.DEFAULT_CUR_IN)
            self.default_cur_in = self.broker.format_codes(self.DEFAULT_CUR_IN)
        else:
            self.default_cur_in = validated_input_code
        # default output currency
        output_code = settings.get_stripped(
            "output_cur",
            section=self.DEFAULT_SECTION,
            fallback=self.DEFAULT_CUR_OUT)
        validated_output_code = self.broker.validate_codes(output_code)
        if not validated_output_code:
            _warn_cur_code("output_cur", self.DEFAULT_CUR_OUT)
            self.default_cur_out = self.broker.format_codes(self.DEFAULT_CUR_OUT)
        else:
            self.default_cur_out = validated_output_code
| StarcoderdataPython |
5018053 |
# NOTE(review): orphan fragment — a, b and c are not defined in this
# chunk; presumably mutable sequences defined earlier.  TODO confirm
# against the full file before relying on this snippet.
a.insert(0, 'x')
b.append(a.pop())
del c[4]
| StarcoderdataPython |
5034003 | from output.models.nist_data.atomic.long.schema_instance.nistschema_sv_iv_atomic_long_min_exclusive_2_xsd.nistschema_sv_iv_atomic_long_min_exclusive_2 import NistschemaSvIvAtomicLongMinExclusive2
__all__ = [
"NistschemaSvIvAtomicLongMinExclusive2",
]
| StarcoderdataPython |
3237843 | <filename>volt_err_corr_sim_data.py
# -*- coding: utf-8 -*-
"""
Created by <NAME>
This script aims to reproduce the results shown in Fig.2 (open symbols)
from Ref. Specifically, currents generated from PKA-treated GluR6 receptors.
Simulated data are generated importing "exponential.py" and are corrected
following the paper guidelines.
Reference
Traynelis SF (1998) Software-based correction of single compartment series
resistance errors. J Neurosci Methods 86:25–34
"""
# built-in Python library
import numpy as np
import matplotlib.pyplot as plt
# Custom files
import exponential as exp
# Data structure is as follow:
# <raw/new>_data variables are two-dimentional arrays:
# 1. time datapoints are on the first dimension (<raw/new>_data[0])
# 2. current datapoints are on the second dimension (<raw/new>_data[1])
# <new>_currents variable is a one-dimensional array:
# 1. current datapoints
######## SAMPLE DATA GENERATION ########
# Simulated unfiltered data from Fig.2 open symbols.
# Time in ms and currents in pA.
raw_data = exp.simulated_data()
# Print the values of time and current every 5 (total 11)
#print(raw_data[0][1::5])
#print(raw_data[1][1::5])
# Print the shape of the array
#print(np.shape(raw_data))
# Transform the time values from ms to seconds
raw_data[0] /= 1000 # from ms to seconds
#print(raw_data[0][1::5])
# Transform the current values from pA to Amperes
raw_data[1] /= 1000000000000 # from pA to Amperes
# Print the transformed values of current every 5 (total 11)
#print(raw_data[1][1::5])
# What is the time interval between datapoints?
#for i in range(1, np.shape(raw_data)[1]):
# print(i)
# print((raw_data[0][i] - raw_data[0][i-1]))
######## DRAW DATA ########
# Draw single datapoints and line through datapoints in red
plt.plot(raw_data[0], raw_data[1], 'r-o')
plt.show()
######## VOLTAGE CLAMP ERROR CORRECTION ########
# This section contains the C code shown in the paper at page 32
# We simply converted the code to Python changing some variables name
# Required parameters
# Number of datapoints
num_data_points = np.shape(raw_data)[1]
# Holding potential in Volts from Fig. 2
v_hold = 0.06
# Reversal potential in Volts
v_rev = 0.4
# Series resistance in Ohms from Fig. 2
Rs = 60000000
# Cell capacitance in Farads from Fig. 2
Cm = 6 / 1000000000000
# Time interval in seconds
adinterval = 0.00003
# Initialize an empty array filled with 0 to store the corrected data
new_currents = np.zeros_like(raw_data[1])
######## Start of computation to correct series resistance error ########
######## Computation concerning the first datapoint ########
# Calculate the value of actual clamped voltage for the first data point
volt_last_point = v_hold - raw_data[1][0] * Rs
# This if is necessary to avoid division by 0
if (volt_last_point != v_rev):
# Calculate the correction factor (between 0 and 1)
v_correct = 1 * (1 - (v_hold - v_rev) / (volt_last_point - v_rev))
# In case the demonitor is equal to 0, no correction is applied
else:
v_correct = 0
# First data point of corrected current
first_value_current = raw_data[1][0] - raw_data[1][0] * v_correct
# Store first datapoint in the array
new_currents[0] = first_value_current
# Iterate on all subsequent current datapoints and perform correction
for i in range(1, num_data_points):
# Calculate the value of actual clamped voltage for the data point
volt_this_point = v_hold - raw_data[1][i] * Rs
# This if is necessary to avoid division by 0
if (volt_this_point != v_rev):
# Calculate the correction factor (between 0 and 1)
v_correct = 1 * (1 - (v_hold - v_rev) / (volt_this_point - v_rev))
# In case the demonitor is equal to 0, no correction is applied
else:
v_correct = 0
# Correction for capacitive current
Icap = Cm * (volt_this_point - volt_last_point) / adinterval
# Apply a digital filter with cutoff frequency of 20 kHz to Icap
# Cutoff frequency has been chosen arbitrarly
Icap *= 1 - np.exp(-2 * 3.14 * adinterval * 20000)
# Correct raw data for capacitive current
new_currents[i-1] = raw_data[1][i-1] - 1 * Icap
# Correct raw data for resistive current
new_currents[i-1] = raw_data[1][i-1] * v_correct
# Update the value of voltage for the next data point
volt_last_point = volt_this_point
# Add x-axis
new_data = np.stack((raw_data[0], new_currents), axis = 0)
#print(raw_data)
#print(new_data)
# Draw
plt.plot(raw_data[0], raw_data[1], 'r-o')
plt.plot(new_data[0], new_data[1], 'b-o')
plt.show()
# Draw subset of data to appreciate the effect of series resistance
# on time to peak
plt.plot(raw_data[0][1:50], raw_data[1][1:50], 'r-o')
plt.plot(new_data[0][1:50], new_data[1][1:50], 'b-o')
plt.show()
#WRITING TO FILE SECTION
#perform check that both arrays have the same size
#if len(currents) != len(new_currents):
# print('Error: arrays have different length')
#
##write to file
##not corrected currents
#file = open('currents_' + abf_file_name + '_notcorrected', 'w')
#for line in currents:
# file.write(str(line))
# file.write('\n')
#file.close()
##corrected currents
#file = open('currents_' + abf_file_name + '_corrected', 'w')
#for line in new_currents:
# file.write(str(line))
# file.write('\n')
#file.close()
| StarcoderdataPython |
11255939 | <reponame>hoppfull/Legacy-Python
import numpy as np
def myLinear_Regression(DATASETlinreg, parameters, iterations, learningrate = 0.1):
#Loading data into appropriate variables:
m = DATASETlinreg.shape[0] #Number of training examples
n = DATASETlinreg.shape[1] #Number of features
y_raw = np.array([ DATASETlinreg[:,0] ]).transpose() #target variables
x_raw = np.column_stack(( np.array([ np.ones(m) ]).transpose(), DATASETlinreg[:,1:] )) #feature variables
#feature scaling:
feature_scale = m/np.absolute(x_raw).sum(0)
target_scale = m/np.absolute(y_raw).sum(0)
X = x_raw * feature_scale
Y = y_raw * target_scale
for i in range(iterations):
J = np.zeros(n)
for j in range(n):
J[j] = sum( ( np.column_stack(( (parameters * X), -Y )) ).sum(1) * X[:, j] )/m
parameters = parameters - (learningrate * J)
parameters = parameters * feature_scale / target_scale
return parameters
def myLogical_Regression(DATASETlogreg, parameters, iterations, learningrate = 0.5):
#Loading data into appropriate variables:
m = DATASETlogreg.shape[0] #Number of training examples
n = DATASETlogreg.shape[1] #Number of features
e = 2.71828182846 #Eulers number for use in the sigmoid function
Y = np.array([ DATASETlogreg[:,0] ]).transpose() #target variables
x_raw = np.column_stack(( np.array([ np.ones(m) ]).transpose(), DATASETlogreg[:,1:] )) #feature variables
#feature scaling:
feature_scale = m/np.absolute(x_raw).sum(0)
X = x_raw * feature_scale
for i in range(iterations):
J = np.zeros(n)
for j in range(n):
J[j] = sum( np.column_stack(( 1/(1+np.e**(-parameters * X).sum(1)), -Y)).sum(1) * X[:, j] )/m
parameters = parameters - (learningrate * J)
parameters = parameters * feature_scale
return parameters
| StarcoderdataPython |
4992945 | """Blockly Games: Turtle/Movie to Reddit Submission
Copyright 2014 Google Inc.
https://github.com/google/blockly-games
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Store XML with App Engine. Store thumbnail with Memcache. Send to Reddit.
"""
__author__ = "<EMAIL> (<NAME>)"
import base64
import re
import storage
from google.appengine.api import images
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class Reddit(webapp.RequestHandler):
  """Handler for '/<app>-reddit' URLs.

  GET serves a minimal page whose og:image meta tag points at the cached
  thumbnail (so Reddit can preview it) and immediately redirects into the
  game at the stored program's UUID.  POST stores the submitted XML,
  caches the thumbnail for an hour, then redirects to Reddit's submit page.
  """
  def get(self):
    # Derive the thumbnail URL and the app name from the request URL;
    # the query string carries the stored program's UUID.
    url = re.sub(r"\w+-reddit\?", "thumb?", self.request.url)
    app = re.search(r"(\w+)-reddit\?", self.request.url).group(1)
    uuid = self.request.query_string
    self.response.out.write("""
<html>
<head>
<meta property="og:image" content="%s" />
<meta http-equiv="refresh" content="0; url=/%s?level=10#%s">
</head>
<body>
<p>Loading Blockly Games : %s : %s...</p>
</body>
</html>
""" % (url, app, uuid, app.title(), uuid))
  def post(self):
    # Persist the program XML and cache its thumbnail for one hour.
    xml = str(self.request.get("xml"))
    thumb = str(self.request.get("thumb"))
    uuid = storage.xmlToKey(xml)
    memcache.add("THUMB_" + uuid, thumb, 3600)
    self.redirect("https://www.reddit.com/r/BlocklyGames/submit?url=%s?%s"
        % (self.request.url, uuid))
class Thumb(webapp.RequestHandler):
  """Serves a cached PNG thumbnail for the UUID in the query string."""
  def get(self):
    uuid = self.request.query_string
    thumbnail = memcache.get("THUMB_" + uuid)
    if not thumbnail:
      raise Exception("Thumbnail not found: %s" % uuid)
    # The cached value is a data URI; strip the header and decode.
    header = "data:image/png;base64,"
    if not thumbnail.startswith(header):
      raise Exception("Bad header: %s" % thumbnail[:len(header)])
    thumbnail = base64.b64decode(thumbnail[len(header):])
    # Resize image to prove that this is an image, not random content.
    # Tested to verify that this throws BadImageError if not a valid PNG.
    thumbnail = images.resize(thumbnail, 100, 100)
    self.response.headers["Content-Type"] = "image/png"
    self.response.out.write(thumbnail)
self.response.out.write(thumbnail)
# WSGI routing: any '<app>-reddit' path goes to Reddit; '/thumb' serves
# cached thumbnails.
application = webapp.WSGIApplication([(r"/\w+-reddit", Reddit),
                                      ("/thumb", Thumb),
                                     ],
                                     debug=True)
run_wsgi_app(application)
| StarcoderdataPython |
294742 | <filename>ibata/downloaders/TransactionDownloaderResolver.py
import sys
from ibata.downloaders.FioTransactionDownloader import FioTransactionDownloader
class TransactionDownloaderResolver:
    """Maps bank names to TransactionDownloader implementations.

    Any newly created TransactionDownloader must be registered in BANKS.
    """
    BANKS = {
        "FIO": FioTransactionDownloader
    }

    @staticmethod
    def get_transaction_downloader(config, bank):
        """Instantiate the TransactionDownloader registered for *bank*.

        :param config: Config file name
        :param bank: Name of bank
        :return: TransactionDownloader for given Bank
        """
        downloader_cls = TransactionDownloaderResolver.BANKS.get(bank)
        if downloader_cls is None:
            # Unknown bank name: report and terminate, as before.
            print("Not valid bank name", file=sys.stderr)
            exit(1)
        return downloader_cls(config)
| StarcoderdataPython |
11303723 | <reponame>coderMaruf/leetcode-1
'''
Description:
Given an integer array arr, and an integer target, return the number of tuples i, j, k such that i < j < k and arr[i] + arr[j] + arr[k] == target.
As the answer can be very large, return it modulo 109 + 7.
Example 1:
Input: arr = [1,1,2,2,3,3,4,4,5,5], target = 8
Output: 20
Explanation:
Enumerating by the values (arr[i], arr[j], arr[k]):
(1, 2, 5) occurs 8 times;
(1, 3, 4) occurs 8 times;
(2, 2, 4) occurs 2 times;
(2, 3, 3) occurs 2 times.
Example 2:
Input: arr = [1,1,2,2,2,2], target = 5
Output: 12
Explanation:
arr[i] = 1, arr[j] = arr[k] = 2 occurs 12 times:
We choose one 1 from [1,1] in 2 ways,
and two 2s from [2,2,2,2] in 6 ways.
Constraints:
3 <= arr.length <= 3000
0 <= arr[i] <= 100
0 <= target <= 300
'''
from typing import List
from collections import Counter
class Solution:
    def threeSumMulti(self, arr: List[int], target: int) -> int:
        """Count index triples i < j < k with arr[i]+arr[j]+arr[k] == target.

        All values lie in [0, 100], so we count occurrences of each value
        and enumerate *value* triples with a two-pointer sweep, adding the
        number of index combinations each value triple represents.  The
        count is reported modulo 10**9 + 7.
        """
        occurrences = Counter(arr)
        MOD = 10 ** 9 + 7
        total = 0
        for first in range(101):
            if not occurrences[first]:
                # Value never appears in the input.
                continue
            lo, hi = first, 100
            needed = target - first
            while lo <= hi:
                if lo + hi > needed:
                    # Pair sum too large; shrink from the right.
                    hi -= 1
                    continue
                if lo + hi < needed:
                    # Pair sum too small; grow from the left.
                    lo += 1
                    continue
                # Exact match: add the combination count for this triple.
                a, b, c = occurrences[first], occurrences[lo], occurrences[hi]
                if first == lo == hi:
                    # All three values equal: C(a, 3).
                    total += a * (a - 1) * (a - 2) // 6
                elif first == lo:
                    # First two equal: C(a, 2) * c.
                    total += a * (a - 1) * c // 2
                elif lo == hi:
                    # Last two equal: a * C(b, 2).
                    total += a * b * (b - 1) // 2
                else:
                    # All distinct values.
                    total += a * b * c
                lo += 1
                hi -= 1
        return total % MOD
## n : the length of the input array
## Time Complexity: O( n )
#
# The overhead in time is the cost of nested loop with O( n ) * O( 100 ) = O( n )
## Space Complexity: O( n )
#
# The overhead in space is the storage for dictionary, which is of O( n )
import unittest
class Testing(unittest.TestCase):
    """Unit tests covering both examples from the problem statement."""

    def setUp(self) -> None:
        self.solver = Solution().threeSumMulti

    def test_case_1(self):
        # Example 1: 20 valid index triples.
        result = self.solver(arr=[1, 1, 2, 2, 3, 3, 4, 4, 5, 5], target=8)
        self.assertEqual(result, 20)

    def test_case_2(self):
        # Example 2.  Bug fix: this method was previously also named
        # test_case_1, which silently shadowed the first test so the
        # 20-triple case never ran.
        result = self.solver(arr=[1, 1, 2, 2, 2, 2], target=5)
        self.assertEqual(result, 12)

if __name__ == '__main__':
    unittest.main()
149498 | <filename>utils/findpeaks/callfindpeaksdialog.py
from PyQt5.QtWidgets import QDialog
from PyQt5.QtCore import pyqtSlot, QUrl
from PyQt5.QtGui import QDesktopServices
from utils.findpeaks.findpeaksdialog import Ui_findpeaksdialog
from utils.findpeaks.lib import *
# Documentation links for each supported peak-detection algorithm,
# opened by the dialog's help button.
detect_peaks_help_url = "https://nbviewer.jupyter.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb"
Janko_Slavic_findpeaks_help_url = "https://github.com/jankoslavic/py-tools/blob/master/findpeaks/Findpeaks%20example.ipynb"
tony_beltramelli_detect_peaks_help_url = "https://github.com/MonsieurV/py-findpeaks/blob/master/tests/libs/tony_beltramelli_detect_peaks.py"
# Default parameter sets (display name -> default value) per algorithm.
parameters_detect_peaks = {"Minimum distance": 1, "Minimum height": 0.2, "Relative threshold": 0}
parameters_Janko_Slavic_findpeaks = {"spacing": 1, "limit": 0.2}
parameters_tony_beltramelli_detect_peaks = {"threshold": 0.8}
class findpeaksdialog(QDialog, Ui_findpeaksdialog):
def __init__(self, *args, **kwargs):
super(findpeaksdialog, self).__init__(*args, **kwargs)
self.setupUi(self)
self.comboBox.currentIndexChanged.connect(self.selectionchange)
# 获取标签和数字框数组方便后期调整
self.parameters_lable_list = [self.parameter1label,
self.parameter2label,
self.parameter3label,
self.parameter4label]
self.parameters_values_list = [self.parameter1doubleSpinBox,
self.parameter2doubleSpinBox,
self.parameter3doubleSpinBox,
self.parameter4doubleSpinBox]
# 初始化默认选择的项目参数
self.selectionchange()
# 初始化一些参数供RSA主窗体使用
self.peaksmarklist = []
def selectionchange(self):
current_selection = self.comboBox.currentText()
if current_selection == "detect_peaks":
self.set_parameters(parameters_detect_peaks)
if current_selection == "Janko_Slavic_findpeaks":
self.set_parameters(parameters_Janko_Slavic_findpeaks)
if current_selection == "tony_beltramelli_detect_peaks":
self.set_parameters(parameters_tony_beltramelli_detect_peaks)
def set_parameters(self, parameters):
# 重置所有选项到可用状态
for i in range(0, len(self.parameters_lable_list), 1):
self.parameters_lable_list[i].setEnabled(True)
self.parameters_values_list[i].setEnabled(True)
# 填入参数
i = 0
for parameter_name, parameter_value in parameters.items():
self.parameters_lable_list[i].setText(parameter_name)
# 如果原始方法中对函数的初始值定义为None,则跳过
if parameter_value is not None:
self.parameters_values_list[i].setValue(parameter_value)
i += 1
# 使超出方法所需要的参数选项框关闭
if i <= len(self.parameters_lable_list):
for i in range(i, len(self.parameters_lable_list), 1):
self.parameters_lable_list[i].setText("Parameter{}".format(i+1))
self.parameters_lable_list[i].setEnabled(False)
self.parameters_values_list[i].setValue(0.00)
self.parameters_values_list[i].setEnabled(False)
def get_parameters(self):
parameters_lable_list_text = []
for lable in self.parameters_lable_list:
parameters_lable_list_text.append(lable.text())
parameters_values_list_values = []
for value in self.parameters_values_list:
if value.value() is None:
parameters_values_list_values.append(None)
else:
parameters_values_list_values.append(value.value())
data = dict(map(lambda x,y:[x,y], parameters_lable_list_text, parameters_values_list_values))
return data
def find_peaks(self, data):
parameters = self.get_parameters()
current_selection = self.comboBox.currentText()
peaks_index_list = []
if current_selection == "detect_peaks":
mph = parameters["Minimum height"]
mpd = parameters["Minimum distance"]
threshold = parameters["Relative threshold"]
peaks_index_list = detect_peaks(x=data, mph=mph, mpd=mpd, threshold=threshold)
if current_selection == "Janko_Slavic_findpeaks":
spacing = round(parameters["spacing"]) # 源代码中要求对这个数字必须是整数
limit = parameters["limit"]
peaks_index_list = Janko_Slavic_findpeaks(data=data, spacing=spacing, limit=limit)
if current_selection == "tony_beltramelli_detect_peaks":
threshold = parameters["threshold"]
peaks_index_list = tony_beltramelli_detect_peaks(signal=data, threshold=threshold)
return peaks_index_list
def clear(self):
self.textEdit.clear()
self.textEdit.setEnabled(False)
self.peaksmarklist.clear()
@pyqtSlot()
def on_helpbutton_clicked(self):
    """Open the online documentation for the currently selected algorithm."""
    selection = self.comboBox.currentText()
    if selection == "detect_peaks":
        url = detect_peaks_help_url
    elif selection == "Janko_Slavic_findpeaks":
        url = Janko_Slavic_findpeaks_help_url
    elif selection == "tony_beltramelli_detect_peaks":
        url = tony_beltramelli_detect_peaks_help_url
    else:
        url = None
    if url is not None:
        QDesktopServices.openUrl(QUrl(url))
@pyqtSlot()
def on_closebutton_clicked(self):
    """Close the dialog when the Close button is pressed."""
    self.close()
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication

    # Standalone entry point: show the dialog and hand control to Qt's
    # event loop; the loop's exit status becomes the process exit code.
    application = QApplication(sys.argv)
    dialog = findpeaksdialog()
    dialog.show()
    sys.exit(application.exec_())
| StarcoderdataPython |
1691367 | #!/usr/bin/env python3
#
# MIT License
#
# (C) Copyright 2020-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import argparse
import os
import pathlib
import subprocess
import sys
import tempfile
# Destination archive for everything this tool collects.
outfile_dir = "/tmp"
outfile_base = "cmslogs.tar"
outfile = "%s/%s" % (outfile_dir, outfile_base)
# Files and directories that may be collected into the archive.
cray_release = "/etc/cray-release"
motd = "/etc/motd"
opt_cray_tests = "/opt/cray/tests"
tmp_cray_tests = "/tmp/cray/tests"
cmsdev = "/usr/local/bin/cmsdev"
# Commands whose output may be captured into temp files for collection.
cksum_cmd_string = "cksum %s" % cmsdev
rpm_cmd_string = "rpm -qa"
class cmslogsError(Exception):
    """Raised for any expected failure while collecting logs for the archive."""
def get_tmpfile(tempfile_list, **kwargs):
    """Create a temp file under /tmp, register its path for cleanup, return the path.

    Extra keyword arguments (e.g. prefix/suffix) are forwarded to
    ``tempfile.mkstemp``.  The path is appended to *tempfile_list* so the
    caller can remove it later via remove_tempfiles().
    """
    fd, tpath = tempfile.mkstemp(dir="/tmp", **kwargs)
    # mkstemp returns an open OS-level file descriptor; the original code
    # discarded it without closing, leaking one descriptor per call.
    os.close(fd)
    tempfile_list.append(tpath)
    return tpath
def error_exit(msg):
    """Print an ``ERROR:``-prefixed message and terminate with a nonzero status."""
    sys.exit("ERROR: {}".format(msg))
def create_collect_list(args, mylogfilepath, tempfile_list):
    """Build the list of paths to archive, logging every decision to *mylogfilepath*.

    Returns paths relative to / (leading slash stripped) so tar can later be
    run with ``-C /``.  Command outputs requested for collection are captured
    into temp files that are registered in *tempfile_list*.  Raises
    cmslogsError if a captured command fails or cmsdev is not a regular file.
    """
    with open(mylogfilepath, "wt") as mylog:
        collect_list = []
        def printmylog(s):
            # Record a line in our own collection log; flush so the log
            # survives even if collection aborts partway through.
            print(s, file=mylog, flush=True)
        def does_not_exist(thing):
            printmylog("%s does not exist" % thing)
        def skipping(thing):
            printmylog("Skipping %s, as specified" % thing)
        def collecting(thing):
            printmylog("Will collect %s" % thing)
            # [1:] to strip the leading / from the absolute path
            collect_list.append(thing[1:])
        def add_to_collection_list(thing, skip_arg):
            # Collect *thing* unless the corresponding --no-... flag was given.
            if os.path.exists(thing):
                if skip_arg:
                    skipping(thing)
                else:
                    collecting(thing)
            else:
                does_not_exist(thing)
        def record_cmd_to_file(cmdstring, **tmpfile_args):
            # Run a command (no shell), capture its stdout into a fresh temp
            # file, and mark that file for collection.
            cmdoutfilepath = get_tmpfile(tempfile_list=tempfile_list, **tmpfile_args)
            printmylog("# %s > %s" % (cmdstring, cmdoutfilepath))
            with open(cmdoutfilepath, "wt") as cmdoutfile:
                print("# %s" % cmdstring, file=cmdoutfile, flush=True)
                try:
                    subprocess.run(cmdstring.split(), stdout=cmdoutfile, stderr=subprocess.PIPE, check=True, universal_newlines=True)
                except subprocess.CalledProcessError as e:
                    raise cmslogsError("%s command failed with return code %d: %s" % (cmdstring, e.returncode, e.stderr))
            collecting(cmdoutfilepath)
        # Start the log with the exact command line that was run.
        cmdline = ' '.join(sys.argv)
        printmylog(cmdline)
        printmylog("#" * len(cmdline))
        add_to_collection_list(cray_release, args.no_cray_release)
        add_to_collection_list(motd, args.no_motd)
        add_to_collection_list(tmp_cray_tests, args.no_tmp_cray_tests)
        if not args.no_opt_cray_tests_all:
            if args.no_opt_cray_tests:
                # Need to find log files in /opt/cray/tests
                for path in pathlib.Path(opt_cray_tests).rglob('*.log'):
                    if path.is_file():
                        collecting(str(path))
            else:
                # Collecting all of /opt/cray/tests
                add_to_collection_list(opt_cray_tests, False)
        else:
            add_to_collection_list(opt_cray_tests, True)
        # cmsdev must be a regular file before we cksum it.
        if os.path.exists(cmsdev):
            if os.path.isfile(cmsdev):
                if args.no_cmsdev_sum:
                    printmylog("Not recording cksum of %s, as specified" % cmsdev)
                else:
                    record_cmd_to_file(cmdstring=cksum_cmd_string, prefix="cmslogs-cmsdev-cksum-", suffix=".txt")
            else:
                raise cmslogsError("%s exists but is not a regular file" % cmsdev)
        else:
            does_not_exist(cmsdev)
        if not args.no_rpms:
            record_cmd_to_file(cmdstring=rpm_cmd_string, prefix="cmslogs-rpmlist-", suffix=".txt")
        else:
            printmylog("Not recording output of %s command, as specified" % rpm_cmd_string)
        return collect_list
def do_collect(collect_list):
    """Create the tar archive at *outfile* from the collected paths.

    Paths in *collect_list* are relative to /, so tar is run with ``-C /``.
    Raises cmslogsError if the tar command fails.
    """
    tar_cmd_list = ["tar", "-C", "/", "-cf", outfile]
    tar_cmd_list.extend(collect_list)
    try:
        print("Running command: %s" % ' '.join(tar_cmd_list), flush=True)
        subprocess.run(tar_cmd_list, check=True, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        # Original code did '"... %d" % (cmsdev, e.returncode)' -- a
        # format/tuple mismatch that raised TypeError instead of the
        # intended cmslogsError whenever tar failed.
        raise cmslogsError("tar command failed with return code %d" % e.returncode)
def remove_tempfiles(tempfile_list):
    """Delete every temporary file whose path was recorded in *tempfile_list*."""
    for path in tempfile_list:
        os.remove(path)
if __name__ == '__main__':
    # Paths of temp files created during collection; removed before exit.
    tempfile_list = list()
    parser = argparse.ArgumentParser(
        description="Collect files for test debug and stores them in %s" % outfile)
    parser.add_argument("-f", dest="overwrite",
        action="store_true",
        help="Overwrite outfile (%s) if it exists" % outfile)
    parser.add_argument("--no-cmsdev-sum", dest="no_cmsdev_sum",
        action="store_true",
        help="Do not record output of %s command" % cksum_cmd_string)
    parser.add_argument("--no-cray-release", dest="no_cray_release",
        action="store_true",
        help="Do not collect %s" % cray_release)
    parser.add_argument("--no-motd", dest="no_motd", action="store_true",
        help="Do not collect %s" % motd)
    parser.add_argument("--no-opt-cray-tests", dest="no_opt_cray_tests",
        action="store_true",
        help="Do not collect %s directory (except logs)" % opt_cray_tests)
    parser.add_argument("--no-opt-cray-tests-all",
        dest="no_opt_cray_tests_all", action="store_true",
        help="Do not collect %s directory (including logs)" % opt_cray_tests)
    parser.add_argument("--no-rpms", dest="no_rpms",
        action="store_true",
        help="Do not collect output of %s command" % rpm_cmd_string)
    parser.add_argument("--no-tmp-cray-tests", dest="no_tmp_cray_tests",
        action="store_true",
        help="Do not collect %s directory" % tmp_cray_tests)
    args = parser.parse_args()
    # Refuse to clobber an existing archive unless -f was given, and never
    # overwrite something that is not a regular file.
    if os.path.exists(outfile):
        if not args.overwrite:
            error_exit("Output file already exists: %s" % outfile)
        elif not os.path.isfile(outfile):
            error_exit("Output file already exists and is not a regular file: %s" % outfile)
    mylogfilepath = get_tmpfile(tempfile_list=tempfile_list, prefix="cmslogs-", suffix=".log")
    try:
        collect_list = create_collect_list(args, mylogfilepath, tempfile_list)
        if not collect_list:
            raise cmslogsError("Nothing to collect!")
        # [1:] to strip the leading / from the absolute path
        collect_list.append(mylogfilepath[1:])
        do_collect(collect_list)
    except cmslogsError as e:
        # On any expected failure: clean up temp files, then exit nonzero.
        remove_tempfiles(tempfile_list)
        error_exit(str(e))
    remove_tempfiles(tempfile_list)
    print("\nRequested data successfully collected: %s" % outfile, flush=True)
    sys.exit(0)
| StarcoderdataPython |
6579033 | '''
A simulated all-in-one site
'''
class simAllInOneSite:
    """A simulated all-in-one site that reports a fixed power consumption."""

    def __init__(self, siteId, siteDevices, updateInterval):
        self.sId = siteId
        self.siteDevices = siteDevices
        self.updateInterval = updateInterval

    def setUpdateInterval(self, updateInterval):
        """Replace the site's update interval with *updateInterval*."""
        self.updateInterval = updateInterval

    def getConsumption(self, currentTime):
        """Return the simulated consumption at *currentTime* (always 1000)."""
        return 1000
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.