id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1794327 | <filename>modules/m_etc.py<gh_stars>1-10
import psutil, base64, os, sys, hashlib, datetime, discord, random
from PIL import Image, ImageDraw, ImageFont
import configparser
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
# Refuse to run standalone: this module is loaded by the bot core, not executed.
if __name__=="__main__":
    print("FATAL : Run this bot from right way.")
    sys.exit(1)

# Humorous banners appended to error reports; err_txt() picks one at random.
e_txt = ["오류 발생!",
         "개발진들이 마실 카페인이 늘어났어요!",
         "기계식 루냥이.EXE는 '우에엥 도와줘'를 사용했다!",
         "개발진들이 C++의 놀라움을 경험했습니다",
         "개발진들이 파이썬의 놀라움을 경험했습니다",
         "동작 중이던 코드가 이세계행 트럭과 부딪혔습니다",
         "개발진들이 현실을 부정하기 시작했습니다"]

# INI-style store mapping Discord user ids to their last seen usernames;
# loaded once at import and rewritten by set_name().
db_path = "db/username_db.dat"
db = configparser.ConfigParser()
db.read(db_path)
def getHash(path, blocksize=65536):
    """Return the hex MD5 digest of the file at *path*.

    The file is read in *blocksize* chunks so arbitrarily large files
    can be hashed without loading them into memory.
    """
    hasher = hashlib.md5()
    # "with" guarantees the handle is closed even if a read raises;
    # the original leaked the descriptor on error
    with open(path, 'rb') as afile:
        while True:
            buf = afile.read(blocksize)
            if not buf:
                break
            hasher.update(buf)
    return hasher.hexdigest()
def base64e(s):
    """Return the Base64 encoding of *s* (UTF-8) as a str.

    Replaces the fragile str(bytes) + replace("b'") hack with the
    idiomatic bytes.decode; output is identical for all inputs since
    the Base64 alphabet contains no quote characters.
    """
    return base64.b64encode(s.encode('utf-8')).decode('ascii')
def base64d(b):
    """Decode the Base64 string *b* and return it as UTF-8 text.

    The redundant str() wrapper from the original is dropped:
    bytes.decode already returns str.
    """
    return base64.b64decode(b).decode('utf-8')
def checkIfProcessRunning(processName):
    """Return True if any running process name contains *processName*
    (case-insensitive substring match), else False."""
    needle = processName.lower()  # hoisted out of the per-process loop
    for proc in psutil.process_iter():
        try:
            if needle in proc.name().lower():
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # a process may exit or deny inspection between listing and name()
            pass
    return False  # removed the stray trailing semicolon
def checkTrait(text):
    """Return the Korean object particle ('을' or '를') for *text*.

    Hangul syllables encode their final consonant (jongseong) as
    (codepoint - 0xAC00) % 28: non-zero means the last syllable ends in
    a consonant and takes '을'; zero (open syllable) takes '를'.
    """
    if not text:
        # original crashed on ord('') for empty input; default to '를'
        return "를"
    last = text[-1]
    if (ord(last) - 0xAC00) % 28 != 0:
        return "을"
    return "를"
def outline_draw(d, text, x, y, rb=0, gb=0, bb=0, rf=255, gf=255, bf=255):
    """Draw *text* at (x, y) with a 1-pixel outline.

    d        : a PIL ImageDraw (anything with a .text(xy, text, fill=...) method)
    rb/gb/bb : outline (background) colour, default black
    rf/gf/bf : face (foreground) colour, default white

    The original listed the eight offsets by hand, drew (-1, -1) twice and
    never drew (-1, +1), leaving a gap in the outline; iterating all eight
    neighbours fixes that.
    """
    outline = (rb, gb, bb)
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx or dy:
                d.text((x + dx, y + dy), text, fill=outline)
    # face text drawn last so it sits on top of the outline
    d.text((x, y), text, fill=(rf, gf, bf))
def make_color(text, head, mode):
    """Render a 200x120 colour-swatch PNG for a 6-digit hex colour code.

    text : full command text; the last six characters are the hex code
    head : command prefix stripped from the text
    mode : truthy -> command form '<head>색상 ...', falsy -> '<head>색상코드 ...'

    Saves the swatch to 'pil_color.png' and returns that filename.
    Raises ValueError when the code is not valid hexadecimal.
    """
    if mode:
        m = text.replace(head + "색상 ", "")
    else:
        m = text.replace(head + "색상코드 ", "")
    m = m[-6:].upper()
    ms = "color hex #" + m
    try:
        # split "RRGGBB" into an (r, g, b) tuple
        h = tuple(int(m[i:i + 2], 16) for i in (0, 2, 4))
    except ValueError as exc:
        # narrowed from a bare except: only malformed hex should become
        # ValueError; any other failure is a real bug and must propagate
        raise ValueError("invalid colour code: {!r}".format(m)) from exc
    img = Image.new('RGB', (200, 120), color=h)
    d = ImageDraw.Draw(img)
    outline_draw(d, ms, 10, 10)
    # per-channel readout with percentage of full intensity
    outline_draw(d, "red : " + str(h[0]) + "(" + str(int((h[0] / 255) * 100)) + "%)", 10, 24, 255, 0, 0)
    outline_draw(d, "green : " + str(h[1]) + "(" + str(int((h[1] / 255) * 100)) + "%)", 10, 38, 0, 255, 0)
    outline_draw(d, "blue : " + str(h[2]) + "(" + str(int((h[2] / 255) * 100)) + "%)", 10, 52, 0, 0, 255)
    # contrast samples so the user can judge text legibility on this colour
    d.text((10, 66), "white text", fill=(255, 255, 255))
    d.text((10, 80), "black text", fill=(0, 0, 0))
    img.save('pil_color.png')
    return "pil_color.png"
def make_pil(text, head):
    """Render the message after '<head>받아쓰기 ' onto a white 320x240 image.

    Saves the image to 'pil_color.png' and returns that filename.
    The original encoded the message to UTF-8 bytes and immediately
    decoded it again; that round trip was a no-op and is removed.
    """
    message = text.replace(head + "받아쓰기 ", "")
    font = ImageFont.truetype("font/kopub.ttf", 20, encoding='unic')
    img = Image.new('RGB', (320, 240), color=(255, 255, 255))
    d = ImageDraw.Draw(img)
    d.text((10, 10), message, fill=(0, 0, 0), font=font)
    img.save('pil_color.png')
    return "pil_color.png"
def get_name(id):
    """Return the cached username for Discord user *id*, or None if unknown."""
    try:
        return db.get("name", str(id))
    except (configparser.NoSectionError, configparser.NoOptionError):
        # narrowed from a bare except: only "not in the db" means None;
        # any other failure should surface instead of being swallowed
        return None
def set_name(message):
    """Cache the author's current username, keyed by their Discord id.

    message: a discord.Message; author id/name are written to the [name]
    section and the whole db file is rewritten to disk.
    """
    try:
        db.set("name", str(message.author.id), str(message.author.name))
        with open(db_path, 'w') as configfile:
            db.write(configfile)
    except:
        # best-effort cache: a failed write must never break the caller
        pass
def err_txt():
    """Return a random humorous error banner from the module-level e_txt pool."""
    return random.choice(e_txt)
| StarcoderdataPython |
1779333 | # Generated by Django 2.1.15 on 2020-06-24 15:39
from django.db import migrations
def forwards(apps, schema_editor):
    """Create the placeholder category (forward data migration)."""
    # use the historical model so fields match this migration's state,
    # not the current models.py
    Category = apps.get_model("news", "Category")
    Category.objects.create(name="Dummy Category", slug="dummy-category")


def backwards(apps, schema_editor):
    """Delete the placeholder category (reverse data migration)."""
    Category = apps.get_model("news", "Category")
    obj = Category.objects.get(slug="dummy-category")
    obj.delete()


class Migration(migrations.Migration):
    # Pure data migration: no schema changes, just RunPython in both directions.
    dependencies = [
        ("news", "0002_auto_20200624_1539"),
    ]

    operations = [migrations.RunPython(code=forwards, reverse_code=backwards)]
| StarcoderdataPython |
128028 | <reponame>fengwanwan/st_analysis
#! /usr/bin/env python
"""
This script performs a supervised prediction in ST datasets
using a training set and a test set.
The training set will be one or more matrices of
with counts (genes as columns and spots as rows)
and the test set will be one matrix of counts.
One file or files with class labels for the training set is needed
so the classifier knows what class each spot(row) in
the training set belongs to, the file should
be tab delimited :
SPOT_NAME(as it in the matrix) CLASS_NUMBER
It will then try to predict the classes of the spots(rows) in the
test set. If class labels for the test sets
are given the script will compute accuracy of the prediction.
The script allows to normalize the train/test counts using different
methods.
The script will output the predicted classes and the spots
plotted on top of an image if the image is given.
@Author <NAME> <<EMAIL>>
"""
import argparse
import sys
import os
import numpy as np
import pandas as pd
#from sklearn.feature_selection import VarianceThreshold
from stanalysis.preprocessing import *
from sklearn.svm import LinearSVC, SVC
from sklearn import metrics
from sklearn.multiclass import OneVsRestClassifier
from stanalysis.visualization import scatter_plot, color_map
from stanalysis.alignment import parseAlignmentMatrix
from stanalysis.analysis import weighted_color, composite_colors
from cProfile import label
from matplotlib.colors import LinearSegmentedColormap
def main(train_data,
         test_data,
         classes_train,
         classes_test,
         use_log_scale,
         normalization,
         outdir,
         alignment,
         image,
         spot_size):
    """Supervised class prediction on Spatial Transcriptomics datasets.

    Trains a one-vs-rest linear SVM on one or more training count matrices
    (spots as rows, genes as columns) with per-spot class labels, then
    predicts classes for the spots of a test matrix.

    train_data    : list of paths to training count matrices
    test_data     : path to the test count matrix
    classes_train : one label file per training matrix ("SPOT CLASS" lines)
    classes_test  : optional label file for the test set (enables a report)
    use_log_scale : apply log2(counts + 1) after normalization
    normalization : method name forwarded to normalize_data()
    outdir        : output directory (defaults to the cwd)
    alignment     : optional 3x3 array->pixel alignment matrix file
    image         : optional tissue image to plot on
    spot_size     : plotted spot size

    Writes predicted_classes.txt plus two scatter-plot PDFs into outdir.
    """
    # ---------- validate inputs
    # Fix: the original called os.path.isfile(classes_test) unconditionally,
    # which raises TypeError when --test-classes is omitted (None) even
    # though the rest of the script treats it as optional.
    if len(train_data) == 0 or any([not os.path.isfile(f) for f in train_data]) \
            or len(train_data) != len(classes_train) \
            or len(classes_train) == 0 \
            or any([not os.path.isfile(f) for f in classes_train]) \
            or (classes_test is not None and not os.path.isfile(classes_test)):
        sys.stderr.write("Error, input file/s not present or invalid format\n")
        sys.exit(1)
    if not outdir or not os.path.isdir(outdir):
        outdir = os.getcwd()
    print("Output folder {}".format(outdir))
    # ---------- merge input train datasets (spots are rows, genes are columns)
    train_data_frame = aggregate_datatasets(train_data)
    train_genes = list(train_data_frame.columns.values)
    # ---------- load training labels, keyed "<dataset index>_<spot>" to match
    # the row names produced by the aggregation
    train_labels_dict = dict()
    for i, labels_file in enumerate(classes_train):
        with open(labels_file) as filehandler:
            for line in filehandler.readlines():
                tokens = line.split()
                train_labels_dict["{}_{}".format(i, tokens[0])] = int(tokens[1])
    # keep only labelled spots, preserving the data frame's row order
    train_labels = list()
    for spot in train_data_frame.index:
        try:
            train_labels.append(train_labels_dict[spot])
        except KeyError:
            train_data_frame.drop(spot, axis=0, inplace=True)
    if len(train_labels) != len(train_data_frame.index):
        sys.stderr.write("Error, none of the train labels were not found in the train data\n")
        sys.exit(1)
    # ---------- load the test set (spots are rows, genes are columns)
    test_data_frame = pd.read_table(test_data, sep="\t", header=0, index_col=0)
    test_genes = list(test_data_frame.columns.values)
    # ---------- optional test labels: drop unlabelled spots and keep the
    # label order aligned with the data frame
    test_labels = list()
    if classes_test is not None:
        spot_label = dict()
        with open(classes_test) as filehandler:
            for line in filehandler.readlines():
                tokens = line.split()
                assert(len(tokens) == 2)
                spot_label[tokens[0]] = int(tokens[1])
        for spot in test_data_frame.index:
            try:
                test_labels.append(spot_label[spot])
            except KeyError:
                test_data_frame.drop(spot, axis=0, inplace=True)
        if len(test_labels) != len(test_data_frame.index):
            sys.stderr.write("Error, none of the test labels were not found in the test data\n")
            sys.exit(1)
    # ---------- restrict both sets to the genes they share
    print("Training genes {}".format(len(train_genes)))
    print("Test genes {}".format(len(test_genes)))
    intersect_genes = np.intersect1d(train_genes, test_genes)
    if len(intersect_genes) == 0:
        sys.stderr.write("Error, there are no genes intersecting the train and test datasets\n")
        sys.exit(1)
    print("Intersected genes {}".format(len(intersect_genes)))
    # Fix: .loc replaces DataFrame.ix, which is deprecated and removed in
    # modern pandas; the selection here is purely label-based.
    train_data_frame = train_data_frame.loc[:, intersect_genes]
    test_data_frame = test_data_frame.loc[:, intersect_genes]
    print("Training elements {}".format(len(train_labels)))
    print("Test elements {}".format(len(test_labels)))
    print("Class labels {}".format(sorted(set(train_labels))))
    # ---------- normalize (and optionally log-transform) the counts
    train_data_frame = normalize_data(train_data_frame, normalization)
    test_data_frame = normalize_data(test_data_frame, normalization)
    test_counts = test_data_frame.values
    train_counts = train_data_frame.values
    if use_log_scale:
        train_counts = np.log2(train_counts + 1)
        test_counts = np.log2(test_counts + 1)
    # ---------- train a one-vs-rest linear SVM and predict
    # TODO optimize parameters of the classifier (kernel="rbf" or "sigmoid")
    classifier = OneVsRestClassifier(SVC(probability=True, random_state=0,
                                         decision_function_shape="ovr",
                                         kernel="linear"), n_jobs=4)
    classifier = classifier.fit(train_counts, train_labels)
    predicted_class = classifier.predict(test_counts)
    predicted_prob = classifier.predict_proba(test_counts)
    # ---------- accuracy report when ground truth is available
    if classes_test is not None:
        print("Classification report for classifier {0}:\n{1}\n".
              format(classifier, metrics.classification_report(test_labels, predicted_class)))
        print("Confusion matrix:\n{}".format(metrics.confusion_matrix(test_labels, predicted_class)))
    # ---------- write spots with their predicted classes/probabilities
    x_points = list()
    y_points = list()
    merged_prob_colors = list()
    unique_colors = [color_map[i] for i in set(sorted(predicted_class))]
    with open(os.path.join(outdir, "predicted_classes.txt"), "w") as filehandler:
        labels = list(test_data_frame.index)
        for i, label in enumerate(predicted_class):
            probs = predicted_prob[i].tolist()
            # blend the class colours weighted by the predicted probabilities
            merged_prob_colors.append(composite_colors(unique_colors, probs))
            tokens = labels[i].split("x")  # spot names are "XxY" coordinates
            assert(len(tokens) == 2)
            y = float(tokens[1])
            x = float(tokens[0])
            x_points.append(x)
            y_points.append(y)
            # renamed comprehension variable (was x, shadowing the coordinate)
            filehandler.write("{0}\t{1}\t{2}\n".format(
                labels[i], label,
                "\t".join(['{:.6f}'.format(p) for p in probs])))
    # ---------- plot the spots coloured by prediction confidence
    # The plotted colour is taken from a linear space over the unique class
    # colours so it shows how strong the prediction is for each spot.
    # alignment_matrix will be identity if alignment file is None
    alignment_matrix = parseAlignmentMatrix(alignment)
    cm = LinearSegmentedColormap.from_list("CustomMap", unique_colors, N=100)
    scatter_plot(x_points=x_points,
                 y_points=y_points,
                 colors=merged_prob_colors,
                 output=os.path.join(outdir, "predicted_classes_tissue_probability.pdf"),
                 alignment=alignment_matrix,
                 cmap=cm,
                 title='Computed classes tissue (probability)',
                 xlabel='X',
                 ylabel='Y',
                 image=image,
                 alpha=1.0,
                 size=spot_size,
                 show_legend=False,
                 show_color_bar=False)
    # ---------- plot each spot in the colour of its most likely class
    scatter_plot(x_points=x_points,
                 y_points=y_points,
                 colors=[int(c) for c in predicted_class],
                 output=os.path.join(outdir, "predicted_classes_tissue.pdf"),
                 alignment=alignment_matrix,
                 cmap=None,
                 title='Computed classes tissue',
                 xlabel='X',
                 ylabel='Y',
                 image=image,
                 alpha=1.0,
                 size=spot_size,
                 show_legend=True,
                 show_color_bar=False)
# Command-line entry point: parse arguments and delegate to main().
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("--train-data", required=True, nargs='+', type=str,
                        help="One or more data frames with normalized counts")
    parser.add_argument("--test-data", required=True,
                        help="One data frame with normalized counts")
    parser.add_argument("--train-classes", required=True, nargs='+', type=str,
                        help="One of more files with the class of each spot in the train data as: XxY INT")
    parser.add_argument("--test-classes", default=None,
                        help="One file with the class of each spot in the test data as: XxY INT")
    parser.add_argument("--use-log-scale", action="store_true", default=False,
                        help="Use log2 + 1 for the training and test set instead of raw/normalized counts.")
    parser.add_argument("--normalization", default="DESeq2", metavar="[STR]",
                        type=str,
                        choices=["RAW", "DESeq2", "DESeq2Linear", "DESeq2PseudoCount",
                                 "DESeq2SizeAdjusted", "REL", "TMM", "RLE", "Scran"],
                        help="Normalize the counts using:\n" \
                        "RAW = absolute counts\n" \
                        "DESeq2 = DESeq2::estimateSizeFactors(counts)\n" \
                        "DESeq2PseudoCount = DESeq2::estimateSizeFactors(counts + 1)\n" \
                        "DESeq2Linear = DESeq2::estimateSizeFactors(counts, linear=TRUE)\n" \
                        "DESeq2SizeAdjusted = DESeq2::estimateSizeFactors(counts + lib_size_factors)\n" \
                        "RLE = EdgeR RLE * lib_size\n" \
                        "TMM = EdgeR TMM * lib_size\n" \
                        "Scran = Deconvolution Sum Factors\n" \
                        "REL = Each gene count divided by the total count of its spot\n" \
                        "(default: %(default)s)")
    parser.add_argument("--alignment", default=None,
                        help="A file containing the alignment image " \
                        "(array coordinates to pixel coordinates) as a 3x3 matrix in tab delimited format\n" \
                        "This is only useful if you want to plot the image in original size or the image " \
                        "is not cropped to the array boundaries")
    parser.add_argument("--image", default=None,
                        help="When given the data will plotted on top of the image, \
                        if the alignment matrix is given the data points will be transformed to pixel coordinates")
    parser.add_argument("--outdir", help="Path to output dir")
    parser.add_argument("--spot-size", default=20, metavar="[INT]", type=int, choices=range(1, 100),
                        help="The size of the spots when generating the plots. (default: %(default)s)")
    args = parser.parse_args()
    # forward the parsed options positionally, matching main()'s signature
    main(args.train_data, args.test_data, args.train_classes,
         args.test_classes, args.use_log_scale, args.normalization,
         args.outdir, args.alignment, args.image, args.spot_size)
| StarcoderdataPython |
3279696 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pngwriter(CMakePackage):
    """PNGwriter is a very easy to use open source graphics library that uses
    PNG as its output format. The interface has been designed to be as simple
    and intuitive as possible. It supports plotting and reading pixels in the
    RGB (red, green, blue), HSV (hue, saturation, value/brightness) and CMYK
    (cyan, magenta, yellow, black) colour spaces, basic shapes, scaling,
    bilinear interpolation, full TrueType antialiased and rotated text support,
    bezier curves, opening existing PNG images and more.
    """

    homepage = "http://pngwriter.sourceforge.net/"
    url = "https://github.com/pngwriter/pngwriter/archive/0.5.6.tar.gz"
    git = "https://github.com/pngwriter/pngwriter.git"

    maintainers = ['ax3l']

    # moving targets first, then released tarballs (second arg = md5 checksum)
    version('develop', branch='dev')
    version('master', branch='master')
    version('0.7.0', 'a68aa0889f120f5bb07848afce278a95')
    version('0.6.0', '0a19bc55c5f6379fea7343752fd3ffae')
    version('0.5.6', 'c13bd1fdc0e331a246e6127b5f262136')

    depends_on('libpng')
    depends_on('zlib')
    depends_on('freetype')

    def cmake_args(self):
        """Return extra CMake flags; FreeType support became an explicit
        CMake option in 0.7.0, so it is only passed for those versions."""
        spec = self.spec
        args = []

        if spec.satisfies('@0.7.0:'):
            args += ['-DPNGwriter_USE_FREETYPE:BOOL=ON']

        return args
| StarcoderdataPython |
3296902 | <reponame>monishshah18/python-cp-cheatsheet<gh_stars>100-1000
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Maximum profit from at most two buy/sell stock transactions.

        Dynamic programming over transaction count k (1..2):
          t0[k] = best profit holding no stock after at most k transactions
          t1[k] = best balance while holding stock in the k-th transaction
        Iterating k downward lets both arrays be updated in place per price.
        O(n) time, O(1) space.
        """
        # Fix: the original used float(-inf), which needs an undefined
        # name `inf`; the literal string form is always valid.
        t0 = [0] * 3
        t1 = [float('-inf')] * 3
        for price in prices:
            for k in range(2, 0, -1):
                t0[k] = max(t0[k], t1[k] + price)   # sell the k-th buy today
                t1[k] = max(t1[k], t0[k - 1] - price)  # make the k-th buy today
        return t0[2]
| StarcoderdataPython |
1646825 | <gh_stars>1-10
#!/usr/bin/python3
from pwn import *
# Telnet
# Connect to the remote CTF service (replace "ip" with the real host).
sh = remote("ip", 30888)
# SSH:
# sh = ssh('user', 'ip', password='<PASSWORD>', port=22)
# Exec
# process('./exec')
# conn.sendlineafter(b"> ", b"1")
sh.sendline(b'ls')  # run a command on the remote shell
flag = sh.recvline(timeout=5)  # first line of output, hopefully the flag
log.success(flag)
sh.interactive()  # hand the session over for manual exploration
sh.close()
1628292 | <gh_stars>0
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
import os
import docker
from sandesh.nodeinfo.cpuinfo.ttypes import ProcessCpuInfo
class DockerMemCpuUsageData(object):
    """Samples one docker container's memory/CPU statistics and converts the
    cumulative CPU counters into a percentage share since the previous poll."""

    def __init__(self, _id, last_cpu, last_time):
        # previous sample (cumulative cpu_stats dict and wall-clock seconds);
        # carried by the caller between polls so deltas can be computed
        self.last_cpu = last_cpu
        self.last_time = last_time
        self.client = docker.from_env()
        # NOTE(review): hex(_id)[2:-1] strips Python 2's trailing 'L' from a
        # long literal; on Python 3 it would drop the last hex digit —
        # confirm this module only runs under Python 2.
        self._id = hex(_id)[2:-1].zfill(64)

    def _get_container_stats(self):
        # one-shot (non-streaming) stats snapshot for this container
        return self.client.stats(self._id, decode=True, stream=False)

    def _get_process_cpu_share(self, current_cpu):
        """Return CPU usage (%) averaged over all CPUs since the last sample,
        or 0 when there is no previous sample / no elapsed time."""
        last_cpu = self.last_cpu
        last_time = self.last_time
        current_time = os.times()[4]  # elapsed wall-clock time in seconds
        cpu_count = len(current_cpu["cpu_usage"]["percpu_usage"])
        # docker returns current/previous cpu stats in call
        # but it previous data can't be used cause we don't know who calls
        # stat previously
        interval_time = 0
        if last_cpu and (last_time != 0):
            # cumulative nanosecond counters -> seconds spent since last poll
            sys_time = float(current_cpu['cpu_usage']['usage_in_kernelmode'] -
                             last_cpu['cpu_usage']['usage_in_kernelmode']) / 1e9
            usr_time = float(current_cpu['cpu_usage']['usage_in_usermode'] -
                             last_cpu['cpu_usage']['usage_in_usermode']) / 1e9
            interval_time = current_time - last_time
        self.last_cpu = current_cpu
        self.last_time = current_time
        if interval_time > 0:
            sys_percent = 100 * sys_time / interval_time
            usr_percent = 100 * usr_time / interval_time
            # average across CPUs so the share stays 0-100 on any core count
            cpu_share = round((sys_percent + usr_percent) / cpu_count, 2)
            return cpu_share
        else:
            return 0

    def get_process_mem_cpu_info(self):
        """Return a ProcessCpuInfo with the cpu share (%) and memory figures."""
        stats = self._get_container_stats()
        cpu_stats = stats['cpu_stats']
        mem_stats = stats['memory_stats']
        process_mem_cpu = ProcessCpuInfo()
        process_mem_cpu.cpu_share = self._get_process_cpu_share(cpu_stats)
        # docker reports bytes; the sandesh fields are filled in KiB
        process_mem_cpu.mem_virt = mem_stats['usage'] / 1024
        process_mem_cpu.mem_res = mem_stats['stats']['rss'] / 1024
        return process_mem_cpu
| StarcoderdataPython |
39660 | import re
from data.scrape.link_extractors.create_extractor import create_extractor
from data.scrape.utils import clean_url
from .constants import ID
class Strategy:
    """Per-site scraping strategy: recognises URLs that match a pattern and
    derives link extractors / guideline URLs from the match."""

    def __init__(self, url_pattern, template=None, **extractor_args):
        # substitute the shared named-group placeholder, then escape literal
        # dots so domains in the pattern match only real dots
        self.url_pattern = url_pattern.format(ID=ID)
        self.url_regex = re.compile(
            self.url_pattern.replace(".", r"\."), flags=re.IGNORECASE
        )
        self.extractor_args = extractor_args
        self.guideline_url_template = template

    def match_url(self, url):
        """Return the regex match object for the cleaned URL, or None."""
        url = clean_url(url)
        return self.url_regex.search(url)

    def matches_url(self, url):
        """True when *url* matches this strategy's pattern."""
        return bool(self.match_url(url))

    def create_link_extractor(self, url):
        # domain restrictions come from the strategy-specific extractor args
        return create_extractor(url, allow_domains=[], **self.extractor_args)

    def generate_guideline_urls(self, url, row):
        """Fill the guideline template from the URL's named groups and *row*;
        returns [] when no template is set or the rendered URL is empty."""
        if self.guideline_url_template is None:
            return []
        match = self.match_url(url)
        urls = [self.guideline_url_template.format(**match.groupdict(), **row)]
        urls = [url for url in urls if url]
        return urls

    def __repr__(self):
        return f"<Strategy: {self.url_pattern}>"
| StarcoderdataPython |
153066 | <filename>app/questionnaire/routing_path.py
class RoutingPath:
    """An immutable, ordered sequence of block ids for one section, together
    with the section id and optional list context (list_item_id, list_name).

    Supports len(), indexing, iteration, reversal and comparison against
    other RoutingPath objects, lists or tuples of block ids.
    """

    def __init__(self, block_ids, section_id, list_item_id=None, list_name=None):
        self.block_ids = tuple(block_ids)
        self.section_id = section_id
        self.list_item_id = list_item_id
        self.list_name = list_name

    def _identity(self):
        # single tuple capturing everything whole-object equality compares
        return (self.block_ids, self.section_id, self.list_item_id, self.list_name)

    def __len__(self):
        return len(self.block_ids)

    def __getitem__(self, index):
        return self.block_ids[index]

    def __iter__(self):
        return iter(self.block_ids)

    def __reversed__(self):
        return reversed(self.block_ids)

    def __eq__(self, other):
        if isinstance(other, RoutingPath):
            return self._identity() == other._identity()
        if isinstance(other, list):
            # a plain list compares against the block ids only
            return self.block_ids == tuple(other)
        return self.block_ids == other

    def index(self, *args):
        return self.block_ids.index(*args)
| StarcoderdataPython |
4838390 | # coding:utf8
from setuptools import setup
long_desc = """
easyquotation
===============
* easy to use to get stock info in China Stock
Installation
--------------
pip install easyquotation
Upgrade
---------------
pip install easyquotation --upgrade
Quick Start
--------------
::
import easyquotation
#### 选择 sina 行情
```python
quotation = easyquotation.use('sina')
```
#### 获取所有股票行情
```python
quotation.all
```
**return**
```python
{'000159': {'name': '国际实业', # 股票名
'buy': '8.87', # 竞买价
'sell': '8.88', # 竞卖价
'now': '8.88', # 现价
'open': '8.99', # 开盘价
'close': '8.96', # 昨日收盘价
'high': '9.15', # 今日最高价
'low': '8.83', # 今日最低价
'turnover': '22545048', # 交易股数
'volume': '202704887.74', # 交易金额
'ask1': '8.88', # 卖一价
'ask1_volume': '111900', # 卖一量
'ask2': '8.89',
'ask2_volume': '54700',
'bid1': '8.87', # 买一价
'bid1_volume': '21800', # 买一量
...
'bid2': '8.86',
'bid2_volume': '78400',
'date': '2016-02-19',
'time': '14:30:00',
...},
......
}
```
#### 选择 leverfun 免费十档行情
```
quotation = easyquotation.use('lf') # ['leverfun', 'lf']
```
#### 获取十档行情
##### 单只股票
```
quotation.stocks('162411')
```
##### 多只股票
```
quotation.stocks(['000001', '162411'])
```
**return**
```python
{'000159': {'buy': '8.87', # 竞买价
'sell': '8.88', # 竞卖价
'now': '8.88', # 现价
'close': '8.96', # 昨日收盘价
'ask1': '8.88', # 卖一价
'ask1_volume': '111900', # 卖一量
'ask2': '8.89',
'ask2_volume': '54700',
'bid1': '8.87', # 买一价
'bid1_volume': '21800', # 买一量
...
'bid2': '8.86',
'bid2_volume': '78400',
...},
......
}
```
#### 选择 jsl 行情
```
quotation = easyquotation.use('jsl') # ['jsl']
```
##### 获取分级基金信息
```
quotation.funda() # 参数可选择利率、折价率、交易量、有无下折、是否永续来过滤
quotation.fundb() # 参数如上
```
*****return**
```
{ 150020:
{'abrate': '5:5',
'calc_info': None,
'coupon_descr': '+3.0%',
'coupon_descr_s': '+3.0%',
'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折',
'funda_amount': 178823,
'funda_amount_increase': '0',
'funda_amount_increase_rt': '0.00%',
'funda_base_est_dis_rt': '2.27%',
'funda_base_est_dis_rt_t1': '2.27%',
'funda_base_est_dis_rt_t2': '-0.34%',
'funda_base_est_dis_rt_tip': '',
'funda_base_fund_id': '163109',
'funda_coupon': '5.75',
'funda_coupon_next': '4.75',
'funda_current_price': '0.783',
'funda_discount_rt': '24.75%',
'funda_id': '150022',
'funda_increase_rt': '0.00%',
'funda_index_id': '399001',
'funda_index_increase_rt': '0.00%',
'funda_index_name': '深证成指',
'funda_left_year': '永续',
'funda_lower_recalc_rt': '1.82%',
'funda_name': '深成指A',
'funda_nav_dt': '2015-09-14',
'funda_profit_rt': '7.74%',
'funda_profit_rt_next': '6.424%',
'funda_value': '1.0405',
'funda_volume': '0.00',
'fundb_upper_recalc_rt': '244.35%',
'fundb_upper_recalc_rt_info': '深成指A不参与上折',
'last_time': '09:18:22',
'left_recalc_year': '0.30411',
'lower_recalc_profit_rt': '-',
'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>',
'owned': 0,
'status_cd': 'N'}>'}}
```
#### 更新股票代码
```
easyquotation.update_stock_codes()
```
"""
# Packaging metadata; the markdown string above doubles as the PyPI page.
setup(
    name="easyquotation",
    version="0.5.13",
    description="A utility for Fetch China Stock Info",
    long_description=long_desc,
    author="shidenggui",
    author_email="<EMAIL>",
    license="BSD",
    url="https://github.com/shidenggui/easyquotation",
    keywords="China stock trade",
    install_requires=[
        "requests",
        "aiohttp>=1.1.1",
        "yarl",
        "six",
        "easyutils",
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3.5",
        "License :: OSI Approved :: BSD License",
    ],
    packages=["easyquotation"],
    # ship the bundled stock-code configuration files with the package
    package_data={"": ["*.conf"]},
)
| StarcoderdataPython |
3238339 | <filename>CrySPY/gen_struc/random/with_spg/fw.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
#
# This code partly includes find_wy (https://github.com/nim-hrkn/find_wy)
# which is distributed under the Apache License, Version 2.0.
#
# --------------------------------------------------------------------------
from __future__ import print_function
import copy
import json
import numpy as np
from pymatgen import Structure
from ..dist import check_min_dist
def fw_input(atype, nat, spg, a, b, c, cosa, cosb, cosg):
    """Write the 'input' control file consumed by find_wy.

    atype : species symbols, nat : atom count per species, spg : space
    group number; a/b/c and the cosines describe the target cell.
    Note the cell angle gamma is written under the key 'cosc', matching
    find_wy's expected keyword.
    """
    species_names = ''.join(' {}'.format(name) for name in atype)
    species_counts = ''.join(' {}'.format(count) for count in nat)
    content = (
        'nspecies {}\n'.format(len(atype))
        + 'species_name' + species_names + '\n'
        + 'species_num' + species_counts + '\n'
        + 'spacegroup {}\n'.format(spg)
        + 'originchoice 1\n'
        + '\n'
        + 'a {}\n'.format(a)
        + 'b {}\n'.format(b)
        + 'c {}\n'.format(c)
        + 'cosa {}\n'.format(cosa)
        + 'cosb {}\n'.format(cosb)
        + 'cosc {}\n'.format(cosg)
        + '\n'
        + 'randomseed auto\n'
    )
    with open('input', 'w') as f:
        f.write(content)
def gen_wypos(cumul_nat, mindist, maxcnt):
    '''
    Generate atom positions on the Wyckoff sites described by
    POS_WY_SKEL_ALL.json (written by find_wy), retrying each orbit up to
    maxcnt times until the minimum-distance constraint is satisfied.

    Success --> return True, structure data
    Failure --> return False, _
    '''
    # ---------- load POS_WY_SKEL_ALL.json
    with open('POS_WY_SKEL_ALL.json', 'r') as f:
        wydata = json.load(f)
    # ---------- generate structure
    plat = wydata['primitivevector']
    clat = wydata['conventionalvector']
    atomnames = []
    positions = []
    for specie in wydata['atoms']:
        for wydata2 in specie:    # equivalent atom loop
            cnt = 0
            while True:
                tmp_atomnames, tmp_positions = gen_eq_atoms(wydata2, atomnames, positions)
                # ------ Cartesian coordinate
                # the fractional -> Cartesian conversion must use clat (the
                # conventional vectors), not plat, or the coordinates come
                # out wrong
                cart = []
                for p in tmp_positions:
                    v = np.zeros(3)
                    for i in range(3):
                        a = np.array(clat[i])
                        v += p[i] * a
                    cart.append(v)
                # ------ check minimum distance
                spgstruc = Structure(plat, tmp_atomnames, cart, coords_are_cartesian=True)
                if check_min_dist(spgstruc, cumul_nat, mindist) is False:
                    cnt += 1
                    if maxcnt < cnt:
                        return False, spgstruc    # spgstruc is dummy
                else:
                    # accept this orbit and move on to the next one
                    atomnames, positions = tmp_atomnames, tmp_positions
                    break
    # NOTE(review): if wydata['atoms'] were empty, spgstruc would be
    # unbound here — presumably find_wy always emits at least one orbit
    return True, spgstruc
def gen_eq_atoms(wydata2, atomnames, positions):
    """Append the equivalent atoms of one Wyckoff orbit to copies of the
    given name/position lists and return the extended copies.

    One random (x, y, z) triple parameterises every coordinate expression
    of the orbit, so all its equivalent sites share the same parameters.
    The inputs are deep-copied and never mutated.
    Raises ValueError on an unrecognised coordinate expression.
    """
    new_names = copy.deepcopy(atomnames)
    new_positions = copy.deepcopy(positions)
    x, y, z = np.random.random_sample(3)
    # value of every coordinate expression find_wy may emit
    expr_value = {
        '-2x': -2.0 * x,
        '-x+y': -x + y,
        '-z': -z,
        '-y': -y,
        '-x': -x,
        '0': 0.0,
        'x': x,
        'y': y,
        'z': z,
        'x-y': x - y,
        '2x': 2.0 * x,
    }
    for site in wydata2:
        try:
            coords = np.array([expr_value[ch] for ch in site['xyzch']])
        except KeyError:
            raise ValueError('unknown ch in conversion in gen_wycoord')
        new_positions.append(coords + site['add'])
        new_names.append(site['name'])
    return new_names, new_positions
| StarcoderdataPython |
45992 | <gh_stars>0
import os
import sys
import platform
import shutil
import flopy
import pymake
# make sure exe extension is used on windows
eext = ''
soext = '.so'
if sys.platform.lower() == 'win32':
eext = '.exe'
soext = '.dll'
binpth, temppth = os.path.join('..', 'bin'), os.path.join('temp')
# some flags to check for errors in the code
# add -Werror for compilation to terminate if errors are found
strict_flags = ('-Wtabs -Wline-truncation -Wunused-label '
'-Wunused-variable -pedantic -std=f2008')
def get_zipname():
    """Return the release-zip basename for the current platform.

    Maps sys.platform: 'linux2' -> 'linux', 'darwin' -> 'mac',
    'win32' -> 'win64' on a 64-bit interpreter (otherwise 'win32');
    anything else passes through unchanged.
    """
    platform_key = sys.platform.lower()
    if platform_key == "win32":
        return "win64" if platform.architecture()[0] == "64bit" else "win32"
    return {"linux2": "linux", "darwin": "mac"}.get(platform_key, platform_key)
def relpath_fallback(pth):
    """Best-effort relative path: return *pth* relative to the cwd, or the
    absolute path when no relative form exists."""
    try:
        rel = os.path.relpath(pth)
    except ValueError:
        # on Windows relpath raises when pth lives on a different drive
        return os.path.abspath(pth)
    return rel
def create_dir(pth):
    """(Re)create directory *pth* from scratch.

    Any existing directory at *pth* is removed first, so the result is
    always a fresh, empty directory. Progress is printed and an assertion
    verifies the directory exists afterwards.
    """
    abs_pth = os.path.abspath(pth)
    if os.path.exists(pth):
        print('removing... {}'.format(abs_pth))
        shutil.rmtree(pth)
    print('creating... {}'.format(abs_pth))
    os.makedirs(pth)
    assert os.path.exists(pth), 'could not create... {}'.format(abs_pth)
    return
def test_update_version():
    """Regenerate the version metadata files via the release tooling."""
    from make_release import update_version
    update_version()
    return


def test_create_dirs():
    """Create fresh ../bin and temp/ working directories for the build."""
    pths = [binpth, temppth]
    for pth in pths:
        create_dir(pth)
    return
def test_build_modflow6():
    """Compile the MODFLOW 6 executable into ../bin (skipped with --nomf6)."""
    # determine if app should be build
    for idx, arg in enumerate(sys.argv):
        if arg.lower() == '--nomf6':
            txt = 'Command line cancel of MODFLOW 6 build'
            print(txt)
            return

    # set source and target paths
    srcdir = os.path.join('..', 'src')
    target = os.path.join('..', 'bin', 'mf6')
    target += eext
    fc, cc = pymake.set_compiler('mf6')

    fflags = None
    if fc == 'gfortran':
        # the strict warning set is gfortran-specific
        fflags = strict_flags

    pymake.main(srcdir, target, fc=fc, cc=cc, include_subdirs=True,
                fflags=fflags)

    msg = '{} does not exist.'.format(relpath_fallback(target))
    assert os.path.isfile(target), msg

    return


def test_build_modflow6_so():
    """Compile the MODFLOW 6 shared object / DLL (skipped with --nomf6so)."""
    # determine if app should be build
    for idx, arg in enumerate(sys.argv):
        if arg.lower() == '--nomf6so':
            txt = 'Command line cancel of MODFLOW 6 shared object build'
            print(txt)
            return

    # set source and target paths
    srcdir = os.path.join('..', 'srcbmi')
    comdir = os.path.join('..', 'src')
    # the BMI library reuses the main sources minus the program entry point
    excludefiles = [os.path.join(comdir, 'mf6.f90')]
    target = os.path.join('..', 'bin', 'libmf6')
    target += soext
    fc, cc = pymake.set_compiler('mf6')

    fflags = None
    if fc == 'gfortran':
        fflags = strict_flags

    pymake.main(srcdir, target, fc=fc, cc=cc, include_subdirs=True,
                fflags=fflags, srcdir2=comdir, excludefiles=excludefiles,
                sharedobject=True)

    msg = '{} does not exist.'.format(relpath_fallback(target))
    assert os.path.isfile(target), msg

    return


def test_build_mf5to6():
    """Compile the MODFLOW 5-to-6 converter (skipped with --nomf5to6)."""
    # determine if app should be build
    for idx, arg in enumerate(sys.argv):
        if arg.lower() == '--nomf5to6':
            txt = 'Command line cancel of MODFLOW 5 to 6 converter build'
            print(txt)
            return

    # set source and target paths
    srcdir = os.path.join('..', 'utils', 'mf5to6', 'src')
    target = os.path.join('..', 'bin', 'mf5to6')
    target += eext
    # extra non-source files the converter build needs
    extrafiles = os.path.join('..', 'utils', 'mf5to6', 'pymake',
                              'extrafiles.txt')
    fc, cc = pymake.set_compiler('mf6')

    # build modflow 5 to 6 converter
    pymake.main(srcdir, target, fc=fc, cc=cc, include_subdirs=True,
                extrafiles=extrafiles)

    msg = '{} does not exist.'.format(relpath_fallback(target))
    assert os.path.isfile(target), msg

    return


def test_build_zonebudget():
    """Compile ZONEBUDGET for MODFLOW 6 (skipped with --nozonebudget)."""
    # determine if app should be build
    for idx, arg in enumerate(sys.argv):
        if arg.lower() == '--nozonebudget':
            txt = 'Command line cancel of ZONEBUDGET for MODFLOW 6 build'
            print(txt)
            return

    # set source and target paths
    srcdir = os.path.join('..', 'utils', 'zonebudget', 'src')
    target = os.path.join('..', 'bin', 'zbud6')
    target += eext
    extrafiles = os.path.join('..', 'utils', 'zonebudget', 'pymake',
                              'extrafiles.txt')
    fc, cc = pymake.set_compiler('mf6')

    fflags = None
    if fc == 'gfortran':
        fflags = strict_flags

    pymake.main(srcdir, target, fc=fc, cc=cc, extrafiles=extrafiles,
                fflags=fflags)

    msg = '{} does not exist.'.format(relpath_fallback(target))
    assert os.path.isfile(target), msg

    return
def test_update_mf6io():
    """Regenerate the mf6io LaTeX inputs by building and writing a tiny
    example model with flopy."""
    from mkdist import update_mf6io_tex_files

    if not os.path.isdir(temppth):
        os.makedirs(temppth)

    # build simple model
    name = 'mymodel'
    ws = os.path.join(temppth, name)
    exe_name = 'mf6'
    if sys.platform.lower() == 'win32':
        exe_name += '.exe'
    exe_name = os.path.join(binpth, exe_name)
    # minimal single-layer model: 10x10 grid with fixed heads in two corners
    sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=ws, exe_name=exe_name)
    tdis = flopy.mf6.ModflowTdis(sim)
    ims = flopy.mf6.ModflowIms(sim)
    gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
    dis = flopy.mf6.ModflowGwfdis(gwf, nrow=10, ncol=10)
    ic = flopy.mf6.ModflowGwfic(gwf)
    npf = flopy.mf6.ModflowGwfnpf(gwf, save_specific_discharge=True)
    chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=[[(0, 0, 0), 1.],
                                                           [(0, 9, 9), 0.]])
    oc = flopy.mf6.ModflowGwfoc(gwf,
                                printrecord=[('BUDGET', 'ALL')])
    sim.write_simulation()

    # update the mf6io simulation output for LaTeX
    update_mf6io_tex_files(None, exe_name, expth=ws)

    return


def test_zip_assets():
    """Zip the built binaries for release upload."""
    # create temppth if it does not exist
    if not os.path.isdir(temppth):
        os.makedirs(temppth)

    # zip assets
    env = 'GITHUB_ACTIONS'
    # NOTE(review): this unconditionally sets the CI variable, making the
    # guard below always true — confirm local runs should also produce a zip
    os.environ[env] = "true"
    if env in os.environ:
        fpth = get_zipname() + '.zip'
        # zip up exe's using directories
        zip_pth = os.path.join(temppth, fpth)
        success = pymake.zip_all(zip_pth, dir_pths=binpth)
        assert success, "could not create '{}'".format(zip_pth)
    return


# run the full build pipeline when executed as a script
if __name__ == "__main__":
    test_update_version()
    test_create_dirs()
    test_build_modflow6()
    test_build_modflow6_so()
    test_build_mf5to6()
    test_build_zonebudget()
    test_update_mf6io()
    test_zip_assets()
| StarcoderdataPython |
3340895 | <reponame>JohnGriffiths/dipy
# Init file for visualization package
from __future__ import division, print_function, absolute_import
# We make the visualization requirements optional imports:
try:
    import matplotlib
    has_mpl = True
except ImportError:
    e_s = "You do not have Matplotlib installed. Some visualization functions"
    e_s += " might not work for you."
    print(e_s)
    has_mpl = False
# only expose the matplotlib-based submodule when the dependency is present
if has_mpl:
    from . import projections
| StarcoderdataPython |
1793496 | import unittest
import torch
import alpa.torch.optim as torchoptim
import alpa
from alpa.torch.trainer import train_torch_module
class MyModule(torch.nn.Module):
    """Two stacked 16->16 linear layers followed by shape-preserving reshapes.

    The three consecutive reshapes leave the values and final (batch, 16)
    shape unchanged; they exist to exercise reshape handling downstream.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = torch.nn.Linear(16, 16)
        self.linear2 = torch.nn.Linear(16, 16)

    def forward(self, x):
        out = self.linear2(self.linear1(x))
        batch = out.shape[0]
        # round-trip through intermediate shapes, ending back at (batch, 16)
        for shape in ((batch, 2, -1), (batch, -1, 2), (batch, 16)):
            out = out.reshape(shape)
        return out
def weight_init_func(pt_module, name_map, params, bufs):
    """Identity weight initializer: returns params and bufs untouched.

    Kept as a hook point; the commented-out code below shows the intended
    xavier/normal initialization for Linear layers.
    """
    # for k, m in pt_module.named_modules():
    #     if isinstance(m, torch.nn.Linear):
    #         params[name_map[f"{k}.weight"]] = torch.nn.init.xavier_uniform(params[name_map[f"{k}.weight"]])
    #         params[name_map[f"{k}.bias"]] = torch.nn.init.normal(params[name_map[f"{k}.bias"]], std=1e-6)
    return params, bufs
class TorchReshapeTest(unittest.TestCase):
    """End-to-end check that alpa can shard-train a model containing reshapes."""
    def setUp(self):
        # fixed seeds so weight init and synthetic data are reproducible
        torch.manual_seed(123)
        alpa.set_seed(123)
    def test_reshape(self):
        B = 64  # batch size
        pt_module_gen = lambda: MyModule()
        # two synthetic (input, target) batches of shape (B, 16)
        dataloader = [
            (torch.randn(B, 16), torch.randn(B, 16)),
            (torch.randn(B, 16), torch.randn(B, 16)),
        ]
        loss_func = lambda *args, **kwargs: torch.nn.functional.mse_loss(
            *args, **kwargs)
        optim_gen = torchoptim.adam(lr=1e-3)
        parallel_method = alpa.ShardParallel()
        # smoke test: training must run to completion without raising
        train_torch_module(pt_module_gen, weight_init_func, dataloader,
                           loss_func, optim_gen, parallel_method)
def suite():
    """Build the unittest suite containing the reshape test."""
    # local renamed so it no longer shadows this function's own name
    tests = unittest.TestSuite()
    tests.addTest(TorchReshapeTest("test_reshape"))
    return tests
if __name__ == '__main__':
    # run the suite directly, without pytest
    runner = unittest.TextTestRunner()
    runner.run(suite())
| StarcoderdataPython |
3313945 | from urllib import request
from url_helper import HeaderHelper
import json
import gzip
import time
import math
class Comment:
    """A single AcFun comment: id, floor number, author id and text."""

    def __init__(self, cid: int, floor_num: int, content: str, uid: int) -> None:
        self.cid = cid          # comment id
        self.floor_num = floor_num
        self.uid = uid          # user id
        self.content = content

    def __str__(self):
        return (f'floor: {self.floor_num} cid: {self.cid} uid: {self.uid}\n'
                f'content: {self.content}\n\n')
class Comments:
    """Container for the comments scraped from one article."""

    def __init__(self):
        # Instance-level state: the original class-level ``data = list()``
        # was a mutable class attribute shared between ALL instances.
        self.data = []
        self.article_id = 0

    def init(self):
        """Reset the container so it can be reused for another article."""
        self.data.clear()
        self.article_id = 0

    def __str__(self):
        res = str()
        for d in self.data:
            res += d.__str__()
        res += 'Article ID: ' + str(self.article_id)
        return res

    def latest_floor(self):
        """Return the floor of the first stored comment, or -1 when empty."""
        if len(self.data) == 0:
            return -1
        return self.data[0].floor_num

    def size(self):
        """Return the number of stored comments."""
        return len(self.data)
class CommentHelper:
    """Fetches and parses AcFun article comments page by page."""
    # NOTE(review): class-level attributes mutated per call — instances
    # share this state; confirm single-instance usage is intended.
    comments = Comments()
    url_template = 'https://www.acfun.cn/rest/pc-direct/comment/listByFloor?sourceId={_aid}&sourceType=3&' \
                   'page={_page}&pivotCommentId=0&newPivotCommentId=0&_ts={_ts}'
    __comments_js_list = list()
    __is_updated = False
    def __init__(self):
        self.header_helper = HeaderHelper()
        return
    def __get_comments_json(self, aid: int, curr_page: int, ts: int = None) -> dict:
        """
        Fetch the comment JSON for one page.
        :param aid: article ID
        :param curr_page: current page number
        :param ts: timestamp in milliseconds; defaults to the current time
        :return: parsed JSON dict
        """
        if ts is None:
            ts = int(math.floor(time.time() * 1000))
        header = self.header_helper.get_comments_header(aid)
        url = self.url_template.format(_aid=aid, _page=curr_page, _ts=ts)
        data = None
        rq = request.Request(url, data=data, headers=header)
        res = request.urlopen(rq)
        respond = res.read()
        # the response body is gzip-compressed; decompress before decoding
        respond = gzip.decompress(respond)
        result = str(respond, encoding="utf-8")
        return json.loads(result)
    def __get_comment_floor_in_curr_page_js(self, js: dict, index: int) -> int:
        """
        Return the floor number of the index-th comment on this page.
        :param js: comment JSON
        :param index: comment index within the page
        :return: floor number
        """
        return js['commentsMap']['c' + str(js['commentIds'][index])]['floor']
    def __get_comments_js_list(self, aid: int, last_floor: int = 0) -> list:
        """
        Fetch the JSON of every comment page for an article.
        :param aid: article ID
        :return: list of parsed JSON dicts
        """
        self.__comments_js_list.clear()
        # first request to learn the total page count
        js = self.__get_comments_json(aid, 1)
        if js['totalCount'] > 0:
            total_page_num = js['totalPage']
            curr_page = js['curPage']
            # cache every comment page's JSON
            while curr_page <= total_page_num:
                js = self.__get_comments_json(aid, curr_page)
                self.__comments_js_list.append(js)
                # check whether last_floor falls inside the current page
                first_floor_in_curr_page = self.__get_comment_floor_in_curr_page_js(js, 0)
                last_floor_in_curr_page = self.__get_comment_floor_in_curr_page_js(js, -1)
                # last_floor may have been deleted, so it may not lie inside
                # [last_floor_in_curr_page, first_floor_in_curr_page]
                if first_floor_in_curr_page < last_floor or \
                        first_floor_in_curr_page >= last_floor >= last_floor_in_curr_page:
                    break
                # re-read the total page count each pass (it can change)
                total_page_num = js['totalPage']
                curr_page = curr_page + 1
        return self.__comments_js_list
    def __get_comments_from_js(self, js: dict, aid: int, last_floor: int = 0) -> Comments:
        """
        Extract comments from one parsed page JSON.
        :param js: parsed JSON
        :param aid: article ID
        :param last_floor: floor of the last previously-seen comment; parsing
                           stops when it is reached
        :return: accumulated Comments container
        """
        dic_comments_map = js['commentsMap']
        # js['commentsMap'] also contains quoted floors; js['commentIds']
        # lists the bottom-level comment cid of each floor on this page
        for cid in js['commentIds']:
            dic_comment = dic_comments_map['c' + str(cid)]
            # stop once we reach the previously-seen floor (it may be deleted)
            if dic_comment['floor'] <= last_floor:
                return self.comments
            # store the comment; skip placeholder entries with cid == 0
            if dic_comment['cid'] == 0:
                continue
            self.comments.data.append(
                Comment(dic_comment['cid'], dic_comment['floor'], dic_comment['content'], dic_comment['userId']))
        return self.comments
    def get_comments_by_aid(self, aid: int, last_floor: int = 0) -> Comments:
        """
        Fetch an article's comments newer than last_floor.
        :param aid: article ID
        :param last_floor: most recent floor seen last time
        :return: Comments container holding comments above last_floor
        """
        self.comments.init()
        self.comments.article_id = aid
        self.__get_comments_js_list(aid)
        for cjl in self.__comments_js_list:
            self.comments = self.__get_comments_from_js(cjl, aid, last_floor)
        return self.comments
| StarcoderdataPython |
1637114 | <reponame>Nathanlauga/transparentai-ui
from transparentai import sustainable
from os.path import dirname, abspath
from ....utils.db import update_in_db, select_from_db
from ....utils.errors import get_errors
from ....utils import key_in_dict_not_empty, is_empty
from ....utils.components import clean_errors, format_str, format_float
from ....models import Project
from ....models.modules import ModuleSustainable
# ======= FORMAT MODULE FUNCTIONS ======= #
def format_module_time(form_data):
    """Extract the 'time' form field as a float via format_float."""
    return format_float(form_data, key='time')
def format_module_location(form_data):
    """Extract the 'location' form field as a string via format_str."""
    return format_str(form_data, key='location')
def format_module_watts(form_data):
    """Extract the 'watts' form field as a float via format_float."""
    return format_float(form_data, key='watts')
# ======= CONTROL MODULE FUNCTIONS ======= #
def control_module_time(form_data):
    """Validate the 'time' field; return an error message, or None when valid."""
    # TODO : create a function with not set and not valid error
    errors = get_errors()
    if not key_in_dict_not_empty('time', form_data):
        return errors['SustainableTimeNotSet']
    value = format_module_time(form_data)
    if value is None:
        return errors['SustainableTimeNotValid']
    return errors['SustainableTimeNotPositive'] if value <= 0 else None
def control_module_location(form_data):
    """Validate the 'location' field; return an error message, or None when valid."""
    # TODO : create a function with not set and not valid error
    errors = get_errors()
    if not key_in_dict_not_empty('location', form_data):
        return errors['SustainableLocationNotSet']
    value = format_module_location(form_data)
    if value is None:
        return errors['SustainableLocationNotValid']
    # membership in the energy-data mapping checks its keys directly
    if value not in sustainable.get_energy_data():
        return errors['SustainableLocationNotInList']
    return None
def control_module_watts(form_data):
    """Validate the 'watts' field; return an error message, or None when valid."""
    # TODO : create a function with not set and not valid error
    errors = get_errors()
    if not key_in_dict_not_empty('watts', form_data):
        return errors['SustainableWattsNotSet']
    value = format_module_watts(form_data)
    if value is None:
        return errors['SustainableWattsNotValid']
    return errors['SustainableWattsNotPositive'] if value <= 0 else None
def control_module(form_data, create=False, obj=None):
    """Run every field validator and return the cleaned dict of errors."""
    checks = {
        'time': control_module_time(form_data),
        'location': control_module_location(form_data),
        'watts': control_module_watts(form_data),
    }
    return clean_errors(checks)
def format_module(form_data, create=False, obj=None):
    """Convert the raw form values into their typed representations."""
    return {
        'time': format_module_time(form_data),
        'location': format_module_location(form_data),
        'watts': format_module_watts(form_data),
    }
# LOAD MODULE FUNCTIONS
def compute_co2_estimation(project):
    """Compute and persist the CO2 estimate for a project's sustainable module.

    Marks the module 'loading', validates its three inputs, computes the
    estimate with transparentai's sustainable.estimate_co2, then stores the
    result; any missing input or update failure sets status to 'failed'.
    """
    module = select_from_db(ModuleSustainable, 'project_id', project.id)
    update_in_db(module, {'status': 'loading'})
    # all three inputs are required by estimate_co2; `or` short-circuits
    # (the original used bitwise `|`) and stray debug prints were removed
    if is_empty(module.time) or is_empty(module.location) or is_empty(module.watts):
        update_in_db(module, {'status': 'failed'})
        return
    co2_emited = sustainable.estimate_co2(
        hours=module.time, location=module.location, watts=module.watts)
    data = {'status': 'loaded', 'result': co2_emited}
    try:
        res = update_in_db(module, data)
        if res != 'updated':
            update_in_db(module, {'status': 'failed'})
    except Exception:
        # narrow the original bare except; any update failure marks 'failed'
        update_in_db(module, {'status': 'failed'})
| StarcoderdataPython |
4814058 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
from . import dc
from .. import __version__, __author__
class xlref(object):
    """Handle for one or more Excel cell references.

    Each reference is a [Workbook, Worksheet, Range] triple.  Instances
    support + / - (and += / -=) to merge or remove references, and calling
    the instance returns the reference(s) as tuples.
    """
    def __init__(self, Workbook, Worksheet, Range):
        """ xlref
        Creates a XL reference handler:
        Params(3):
            Workbook as string or integer -> XL Workbook name or equivalent number in .data
            Worksheet as string or integer -> XL Worksheet name or equivalent number in .data
            Range as string -> XL R1C1- or A1- type ranges
        """
        self.__reference = [[Workbook, Worksheet, Range]]
        self.__type__ = 'SingleCell'
        super(xlref, self).__init__()
    def __add__(self, obj):
        """Return a new xlref holding this object's references plus obj's."""
        if not isinstance(obj, xlref):
            # was print('Object type-mismatch') + implicit None return;
            # raise like the other operators for consistency
            raise Exception('Object type-mismatch.')
        oobj = dc(self)
        for ref in obj.__reference:
            oobj.__reference.append(ref)
        oobj.__type__ = 'MultipleCell'
        return oobj
    def __sub__(self, obj):
        """Return a new xlref with obj's references removed (when present)."""
        if not isinstance(obj, xlref):
            raise Exception('Object type-mismatch.')
        oobj = dc(self)
        for ref in obj.__reference:
            if ref in oobj.__reference:
                oobj.__reference.remove(ref)
        # was `is not 'SingleCell'`: identity comparison with a str literal is
        # interning-dependent and a SyntaxWarning on CPython >= 3.8
        if len(oobj.__reference) == 1 and oobj.__type__ != 'SingleCell':
            oobj.__type__ = 'SingleCell'
        return oobj
    def __iadd__(self, obj):
        """In-place merge of obj's references into this object."""
        if not isinstance(obj, xlref):
            raise Exception('Object type-mismatch.')
        for ref in obj.__reference:
            self.__reference.append(ref)
        self.__type__ = 'MultipleCell'
        return self
    def __isub__(self, obj):
        """In-place removal of obj's references from this object."""
        if not isinstance(obj, xlref):
            raise Exception('Object type-mismatch.')
        for ref in obj.__reference:
            if ref in self.__reference:
                self.__reference.remove(ref)
        if len(self.__reference) == 1 and self.__type__ != 'SingleCell':
            self.__type__ = 'SingleCell'
        return self
    def __call__(self):
        """Return the single reference as a tuple, or a tuple of tuples."""
        if len(self.__reference) == 1:
            return tuple(self.__reference[0])
        return tuple(tuple(ref) for ref in self.__reference)
| StarcoderdataPython |
115734 | <filename>model.py
# Copyright 2019 <NAME>
# Licensed under the Apache License, Version 2.0
import tensorflow as tf
import numpy as np
def gru(units):
    """Return a GRU layer that emits both the full sequence and final state."""
    return tf.keras.layers.GRU(units,
                               return_sequences=True,
                               return_state=True,
                               recurrent_activation='sigmoid',
                               recurrent_initializer='glorot_uniform')
class BahdanauAttention(tf.keras.Model):
    """Additive (Bahdanau) attention: scores keys against a query vector."""
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)  # projects the keys
        self.W2 = tf.keras.layers.Dense(units)  # projects the query
        self.V = tf.keras.layers.Dense(1)       # collapses to one score per key
    def call(self, key, query):
        # features(CNN_encoder output) shape: (batch_size, 64, embedding_dim)
        #print("Key Shape:", key.shape)
        #print("Query Shape:", query.shape)
        # additive score, then normalize over the key axis (axis=1)
        score = tf.nn.tanh(self.W1(key) + self.W2(query))
        attention_weights = tf.nn.softmax(self.V(score), axis=1)
        # context vector = attention-weighted sum of the keys
        context_vector = attention_weights*key
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
class CNN_Encoder(tf.keras.Model):
    """Projects pre-extracted CNN features into the embedding space."""
    def __init__(self, embedding_dim):
        super(CNN_Encoder, self).__init__()
        # initial shape: (batch_size, 64, 2048)
        # shape after passing through fc: (batch_size, 64, embedding_dim)
        self.fc = tf.keras.layers.Dense(embedding_dim)
    def call(self, x):
        x = self.fc(x)
        x = tf.nn.relu(x)
        return x
class Sentence_Encoder(tf.keras.Model):
    """Pools per-word hidden states into a single sentence vector."""
    def __init__(self, units):
        super(Sentence_Encoder, self).__init__()
        self.attention = BahdanauAttention(units)
        self.fc = tf.keras.layers.Dense(units)
    def call(self, hidden_states, features):
        # hidden_states: (batch_size, max_sentence_length, units + units)
        # features: (batch_size, 64, embedding_dim)
        # flatten the image features into one query vector per batch element
        features = tf.expand_dims(features, 1)
        features = tf.reshape(features, (features.shape[0], features.shape[1], -1))
        # context_vector: (batch_size, units + units)
        # word_weights: (batch_size, max_sentence_length)
        context_vector, word_weights = self.attention(hidden_states, features)
        # encoded_sentence: (batch_size, units)
        encoded_sentence = self.fc(context_vector)
        return encoded_sentence, word_weights
class Paragraph_Encoder(tf.keras.Model):
    """Pools encoded sentence vectors into a single paragraph vector."""
    def __init__(self, units):
        super(Paragraph_Encoder, self).__init__()
        self.attention = BahdanauAttention(units)
    def call(self, encoded_sentences, features):
        # encoded_sentences: (batch_size, MAX_PARAGRAPH_LENGTH, units)
        # features: (batch_size, 64, embedding_dim)
        # flatten image features into one query vector per batch element
        features = tf.expand_dims(features, 1)
        features = tf.reshape(features, (features.shape[0], features.shape[1], -1))
        # encoded_paragraph: (batch_size, units)
        # sentence_weights: (batch_size, MAX_PARAGRAPH_LENGTH)
        encoded_paragraph, sentence_weights = self.attention(encoded_sentences, features)
        return encoded_paragraph, sentence_weights
class Word_Decoder(tf.keras.Model):
    """GRU decoder that predicts one word conditioned on visual attention
    over image features and the previously-encoded sentence vector."""
    def __init__(self, embedding_dim, units, vocab_size):
        super(Word_Decoder, self).__init__()
        self.attention = BahdanauAttention(units)
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = gru(units)
        self.fc1 = tf.keras.layers.Dense(units)
        self.fc2 = tf.keras.layers.Dense(vocab_size)  # logits over vocabulary
    def call(self, x, features, prev_sentence, hidden):
        # x: (batch_size, 1)
        # features: (batch_size, 64, embedding_dim)
        # prev_sentence: (batch_size, units)
        # hidden: (batch_size, units)
        # visual_context: (batch_size, embedding)
        # visual_weights: (batch_size, 64)
        hidden_with_time_axis = tf.expand_dims(hidden, 1)
        visual_context, visual_weights = self.attention(features, hidden_with_time_axis)
        # x shape after passing through embedding: (batch_size, 1, embedding_dim)
        x = self.embedding(x)
        # x shape after concatenation:(batch_size, 1, embedding_dim + embedding_dim + units)
        x = tf.concat([tf.expand_dims(visual_context, 1), tf.expand_dims(prev_sentence, 1), x], axis=-1)
        # passing the concatenated vector to the GRU
        # output: (batch_size, 1, units)
        output, state = self.gru(x)
        # shape: (batch_size, 1, units)
        x = self.fc1(output)
        # x shape: (batch_size * 1, units)
        x = tf.reshape(x, (-1, x.shape[2]))
        # output shape: (batch_size * 1, vocab_size)
        x = self.fc2(x)
        return x, state, visual_weights
class Trainer():
    """Wires the encoders and bidirectional word decoders into one training
    step over (findings, impressions) report paragraphs."""
    def __init__(self, tokenizer, embedding_dim, units):
        self.tokenizer = tokenizer
        self.units = units
        self.image_encoder = CNN_Encoder(embedding_dim)
        self.sentence_encoder = Sentence_Encoder(units)
        self.paragraph_encoder = Paragraph_Encoder(units)
        # independent forward and backward (reversed-order) word decoders
        self.fwd_decoder = Word_Decoder(embedding_dim, units, len(tokenizer.word_index))
        self.bwd_decoder = Word_Decoder(embedding_dim, units, len(tokenizer.word_index))
    def loss_function(self, real, pred):
        """Masked sparse softmax cross-entropy; padding id 0 is ignored."""
        mask = 1 - np.equal(real, 0)
        loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask
        return tf.reduce_mean(loss_)
    def tensors_are_same(self, a, b):
        """Element-wise equality of two tensors, as a Python bool.

        NOTE(review): parses the Tensor's string repr and reads character 10
        ('T' of True) — fragile against repr changes; presumably a workaround
        for .numpy() being unavailable here. Confirm and replace if possible.
        """
        r = str(tf.reduce_all(tf.equal(a, b))) # In a perfect world, I would just compare tf.reduce_all(tf.equal(a, b)).numpy()
        return r[10] == 'T'
    def train_word_decoder(self, batch_size, loss, features, findings, i, \
                           prev_sentence, fwd_hidden, bwd_hidden):
        """Decode sentence i word-by-word in both directions, accumulating loss.

        For findings sentences also re-encodes the concatenated fwd/bwd hidden
        states into the next prev_sentence vector.
        """
        # sentences in the second half of axis 1 belong to the impressions
        is_training_impressions = (i >= int(findings.shape[1]/2))
        fwd_input = tf.expand_dims([self.tokenizer.word_index['<start>']] * batch_size, 1)
        bwd_input = tf.expand_dims([self.tokenizer.word_index['<pad>']] * batch_size, 1)
        hidden_states = tf.zeros((batch_size, 1, self.units + self.units)) # concatenated fwd and bwd hidden states
        for j in range(findings.shape[2]): # generate each word (each sentence has a fixed # of words)
            print("j", j)
            # forward decoder predicts word j; backward decoder predicts word -(j+1)
            predictions, fwd_hidden, _ = self.fwd_decoder(fwd_input, features, prev_sentence, fwd_hidden)
            loss += self.loss_function(findings[:, i, j], predictions)
            fwd_input = tf.expand_dims(findings[:, i, j], 1)
            predictions, bwd_hidden, _ = self.bwd_decoder(bwd_input, features, prev_sentence, bwd_hidden)
            loss += self.loss_function(findings[:, i, -(j+1)], predictions)
            bwd_input = tf.expand_dims(findings[:, i, -(j+1)], 1)
            # Concat the bwd anf fwd hidden states
            # (batch_size, 1, units + units)
            if not is_training_impressions is True:
                hidden = tf.concat([tf.expand_dims(fwd_hidden, 1), tf.expand_dims(bwd_hidden, 1)], axis=-1)
                # first iteration replaces the zero placeholder; later ones append
                if self.tensors_are_same(hidden_states, tf.zeros((batch_size, 1, self.units + self.units))) is True:
                    hidden_states = hidden
                else:
                    hidden_states = tf.concat([hidden_states, hidden], axis = 1)
        if not is_training_impressions is True:
            prev_sentence, _ = self.sentence_encoder(hidden_states, features)
            print(hidden_states.shape, prev_sentence.shape)
        return loss, prev_sentence, fwd_hidden, bwd_hidden
    def train_fn(self, batch_size, img_tensor, findings):
        """One training step: returns (loss, gradients, variables)."""
        loss = 0
        with tf.GradientTape() as tape:
            features = self.image_encoder(img_tensor)
            encoded_sentences = tf.zeros((batch_size, 1, self.units))
            prev_sentence = tf.zeros((batch_size, self.units))
            fwd_hidden = tf.zeros((batch_size, self.units))
            bwd_hidden = tf.zeros((batch_size, self.units))
            # Generate Findings
            for i in range(int(findings.shape[1]/2)): # for each sentence in "findings" (each batch has a fixed # of sentences)
                print("-------------------------------------i:", i)
                loss, prev_sentence, fwd_hidden, bwd_hidden = self.train_word_decoder(batch_size, loss, features, findings, i, \
                                                                                     prev_sentence, fwd_hidden, bwd_hidden)
                # accumulate each sentence encoding for the paragraph encoder
                if self.tensors_are_same(encoded_sentences, tf.zeros((batch_size, 1, self.units))) is True:
                    encoded_sentences = tf.expand_dims(prev_sentence, 1)
                else:
                    encoded_sentences = tf.concat([encoded_sentences, tf.expand_dims(prev_sentence, 1)], axis = 1)
            encoded_paragraph, _ = self.paragraph_encoder(encoded_sentences, features)
            # Generate Impressions
            # the paragraph vector seeds the impressions decoding
            prev_sentence = encoded_paragraph
            fwd_hidden = tf.zeros((batch_size, self.units))
            bwd_hidden = tf.zeros((batch_size, self.units))
            for i in range(int(findings.shape[1]/2), findings.shape[1]): # for each sentence in "impressions" (each batch has a fixed # of sentences)
                print("-------------------------------------i:", i)
                loss, _, fwd_hidden, bwd_hidden = self.train_word_decoder(batch_size, loss, features, findings, i, \
                                                                          prev_sentence, fwd_hidden, bwd_hidden)
        # Outside of "With tf.GradientTape()"
        variables = self.image_encoder.variables + self.sentence_encoder.variables + self.paragraph_encoder.variables + \
                    self.fwd_decoder.variables + self.bwd_decoder.variables
        gradients = tape.gradient(loss, variables)
        return loss, gradients, variables
1750330 | <filename>exoral/admin.py
from django.contrib import admin
from .models import (
Fach,
Dozent,
Testat,
Frage,
)
# registration via the @admin.register decorator (modern equivalent of the
# separate admin.site.register() call)
@admin.register(Fach)
class FachAdmin(admin.ModelAdmin):
    """Admin configuration for Fach: list name and its lecturers."""
    model = Fach
    list_display = ('name', 'admin_list_dozent')
@admin.register(Dozent)
class DozentAdmin(admin.ModelAdmin):
    """Admin configuration for Dozent (lecturer), searchable by name/subject."""
    model = Dozent
    list_display = ('full_name', 'aktiv', 'fach')
    search_fields = ['vorname', 'nachname', 'fach__name']
@admin.register(Testat)
class TestatAdmin(admin.ModelAdmin):
    """Admin configuration for Testat with horizontal m2m filters."""
    model = Testat
    list_display = (
        'name',
        'active',
        'admin_list_fach',
        'admin_list_studienabschnitt',
        'admin_list_studiengang',
    )
    filter_horizontal = ('fach', 'studiengang', 'studienabschnitt')
    search_fields = ['name']
@admin.register(Frage)
class FrageAdmin(admin.ModelAdmin):
    """Admin configuration for Frage (exam question)."""
    model = Frage
    list_display = ('__str__', 'testat', 'pruefer', 'punkte', 'datum')
    filter_horizontal = ('abgestimmte_benutzer',)
    search_fields = [
        'text',
        'testat__name',
        'pruefer__nachname',
        'pruefer__fach__name',
    ]
109122 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# (stray import-time debug print removed)
def main(filename):
    """Load the frame data from *filename*, echo the first frame, return it all."""
    frames = load_data(filename)
    # TODO - Clean data
    print(frames[0])
    return frames
def load_data(filename):
    """Read *filename* and return its contents split into lines.

    Returns a list of strings, one per newline-separated segment (a trailing
    newline therefore yields a final empty string, as with str.split).
    """
    with open(filename, 'r') as file:
        raw = file.read()
    # the original `raw_data = frames = ...` left a dead alias behind
    return raw.split('\n')
if __name__ == "__main__":
    print(sys.argv)
    # first CLI argument names the data file inside sonic_pi_face/data
    args = sys.argv[1:]
    import os
    filename = os.path.join('sonic_pi_face', 'data', args[0])
    main(filename)
| StarcoderdataPython |
144510 | import sys
import os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'textrank'))
from summa.preprocessing.textcleaner import get_sentences # Uses textrank's method for extracting sentences.
BASELINE_WORD_COUNT = 100

def baseline(text):
    """ Creates a baseline summary to be used as reference.
    The baseline is set to an extract of the first 100 words.
    """
    parts = []
    words_taken = 0
    for sentence in list(get_sentences(text)):
        for word in sentence.split():
            parts.append(word + " ")
            words_taken += 1
            # cut off as soon as the word budget is exhausted
            if words_taken == BASELINE_WORD_COUNT:
                return "".join(parts)
        parts.append("\n")
    return "".join(parts)
3338850 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/12/14 15:08
# @Author : glacier
# @Site :
# @File : tianmao.py
# @Software: PyCharm Edu
import requests
import re
if __name__ == '__main__':
urls = []
for i in range(400):
urls.append("https://rate.tmall.com/list_detail_rate.htm?itemId=544539102631&spuId=719737009&sellerId=3077671836&order=100¤tPage=1"+str(i)+"")
# 构建字段容器
nickname = []
ratedate = []
color = []
ratecontent = []
# 循环抓取数据
for url in urls:
content = requests.get(url).text
# 借助正则表达式使用findall进行匹配查询
nickname.extend(re.findall('"displayUserNick":"(.*?)"',content))
color.extend(re.findall(re.compile('"颜色分类:(.*?)"'),content))
ratecontent.extend(re.findall(re.compile('"rateContent":"(.*?)","rateDate"'),content))
ratedate.extend(re.findall(re.compile('"rateDate":"(.*?)","reply"'),content))
# 写入数据
file =open('time.txt','w',encoding='utf-8')
for i in list(range(0,len(nickname))):
print("正在写入第 %s 条数据"% i)
file.write(''.join(ratedate[i])+'\n')
file.close()
print("爬取完成")
| StarcoderdataPython |
4806157 | import pygame, sys, pymunk
def create_circle(space, pos):
    """Add a dynamic circle (mass 1, moment 100, radius 80) to the space."""
    circle_body = pymunk.Body(1, 100, body_type=pymunk.Body.DYNAMIC)
    circle_body.position = pos
    circle_shape = pymunk.Circle(circle_body, 80)
    space.add(circle_body, circle_shape)
    return circle_shape
def draw_circles(circles):
    """Render each physics circle onto the module-level `screen` surface."""
    for circle in circles:
        pos_x = int(circle.body.position.x)
        pos_y = int(circle.body.position.y)
        # radius 80 matches the physics shape created in create_circle
        pygame.draw.circle(screen,(153, 170, 181),(pos_x,pos_y),80)
def static_ball(space, pos):
    """Add an immovable circle of radius 50 to the physics space at pos."""
    anchor = pymunk.Body(body_type=pymunk.Body.STATIC)
    anchor.position = pos
    ball_shape = pymunk.Circle(anchor, 50)
    space.add(anchor, ball_shape)
    return ball_shape
def draw_static_ball(balls):
    """Render each static ball onto the module-level `screen` surface."""
    for ball in balls:
        pos_x = int(ball.body.position.x)
        pos_y = int(ball.body.position.y)
        # radius 50 matches the physics shape created in static_ball
        pygame.draw.circle(screen, (153, 170, 181), (pos_x, pos_y), 50)
pygame.init()
screen = pygame.display.set_mode((1000,1000))
clock = pygame.time.Clock()
# physics world with downward gravity
space = pymunk.Space()
space.gravity = (0,500)
circles = []
balls = []
# main loop: left click drops a dynamic circle, any other mouse button
# pins a static ball at the cursor position
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        if event.type == pygame.MOUSEBUTTONUP:
            if event.button == 1:
                circles.append(create_circle(space,event.pos))
            else:
                balls.append(static_ball(space, event.pos))
    screen.fill((44,47,51))
    draw_circles(circles)
    draw_static_ball(balls)
    # advance the simulation by a fixed 1/50 s step, capped at 120 fps
    space.step(1/50)
    pygame.display.update()
    clock.tick(120)
| StarcoderdataPython |
1742115 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.opset1.ops import absolute
from openvino.opset1.ops import absolute as abs
from openvino.opset1.ops import acos
from openvino.opset4.ops import acosh
from openvino.opset1.ops import add
from openvino.opset1.ops import asin
from openvino.opset4.ops import asinh
from openvino.opset3.ops import assign
from openvino.opset1.ops import atan
from openvino.opset4.ops import atanh
from openvino.opset1.ops import avg_pool
from openvino.opset5.ops import batch_norm_inference
from openvino.opset2.ops import batch_to_space
from openvino.opset1.ops import binary_convolution
from openvino.opset3.ops import broadcast
from openvino.opset3.ops import bucketize
from openvino.opset1.ops import ceiling
from openvino.opset1.ops import ceiling as ceil
from openvino.opset1.ops import clamp
from openvino.opset1.ops import concat
from openvino.opset1.ops import constant
from openvino.opset1.ops import convert
from openvino.opset1.ops import convert_like
from openvino.opset1.ops import convolution
from openvino.opset1.ops import convolution_backprop_data
from openvino.opset1.ops import cos
from openvino.opset1.ops import cosh
from openvino.opset1.ops import ctc_greedy_decoder
from openvino.opset4.ops import ctc_loss
from openvino.opset3.ops import cum_sum
from openvino.opset3.ops import cum_sum as cumsum
from openvino.opset1.ops import deformable_convolution
from openvino.opset1.ops import deformable_psroi_pooling
from openvino.opset1.ops import depth_to_space
from openvino.opset1.ops import detection_output
from openvino.opset1.ops import divide
from openvino.opset1.ops import elu
from openvino.opset3.ops import embedding_bag_offsets_sum
from openvino.opset3.ops import embedding_bag_packed_sum
from openvino.opset3.ops import embedding_segments_sum
from openvino.opset3.ops import extract_image_patches
from openvino.opset1.ops import equal
from openvino.opset1.ops import erf
from openvino.opset1.ops import exp
from openvino.opset1.ops import fake_quantize
from openvino.opset1.ops import floor
from openvino.opset1.ops import floor_mod
from openvino.opset1.ops import gather
from openvino.opset5.ops import gather_nd
from openvino.opset1.ops import gather_tree
from openvino.opset2.ops import gelu
from openvino.opset1.ops import greater
from openvino.opset1.ops import greater_equal
from openvino.opset1.ops import grn
from openvino.opset1.ops import group_convolution
from openvino.opset1.ops import group_convolution_backprop_data
from openvino.opset3.ops import gru_cell
from openvino.opset5.ops import gru_sequence
from openvino.opset1.ops import hard_sigmoid
from openvino.opset5.ops import hsigmoid
from openvino.opset4.ops import hswish
from openvino.opset1.ops import interpolate
from openvino.opset1.ops import less
from openvino.opset1.ops import less_equal
from openvino.opset1.ops import log
from openvino.opset1.ops import logical_and
from openvino.opset1.ops import logical_not
from openvino.opset1.ops import logical_or
from openvino.opset1.ops import logical_xor
from openvino.opset5.ops import log_softmax
from openvino.opset5.ops import loop
from openvino.opset1.ops import lrn
from openvino.opset4.ops import lstm_cell
from openvino.opset1.ops import lstm_sequence
from openvino.opset1.ops import matmul
from openvino.opset1.ops import max_pool
from openvino.opset1.ops import maximum
from openvino.opset1.ops import minimum
from openvino.opset4.ops import mish
from openvino.opset1.ops import mod
from openvino.opset1.ops import multiply
from openvino.opset2.ops import mvn
from openvino.opset1.ops import negative
from openvino.opset5.ops import non_max_suppression
from openvino.opset3.ops import non_zero
from openvino.opset1.ops import normalize_l2
from openvino.opset1.ops import not_equal
from openvino.opset1.ops import one_hot
from openvino.opset1.ops import pad
from openvino.opset1.ops import parameter
from openvino.opset1.ops import power
from openvino.opset1.ops import prelu
from openvino.opset1.ops import prior_box
from openvino.opset1.ops import prior_box_clustered
from openvino.opset1.ops import psroi_pooling
from openvino.opset4.ops import proposal
from openvino.opset1.ops import range
from openvino.opset3.ops import read_value
from openvino.opset4.ops import reduce_l1
from openvino.opset4.ops import reduce_l2
from openvino.opset1.ops import reduce_logical_and
from openvino.opset1.ops import reduce_logical_or
from openvino.opset1.ops import reduce_max
from openvino.opset1.ops import reduce_mean
from openvino.opset1.ops import reduce_min
from openvino.opset1.ops import reduce_prod
from openvino.opset1.ops import reduce_sum
from openvino.opset1.ops import region_yolo
from openvino.opset2.ops import reorg_yolo
from openvino.opset1.ops import relu
from openvino.opset1.ops import reshape
from openvino.opset1.ops import result
from openvino.opset1.ops import reverse_sequence
from openvino.opset3.ops import rnn_cell
from openvino.opset5.ops import rnn_sequence
from openvino.opset3.ops import roi_align
from openvino.opset2.ops import roi_pooling
from openvino.opset5.ops import round
from openvino.opset3.ops import scatter_elements_update
from openvino.opset3.ops import scatter_update
from openvino.opset1.ops import select
from openvino.opset1.ops import selu
from openvino.opset3.ops import shape_of
from openvino.opset3.ops import shuffle_channels
from openvino.opset1.ops import sigmoid
from openvino.opset1.ops import sign
from openvino.opset1.ops import sin
from openvino.opset1.ops import sinh
from openvino.opset1.ops import softmax
from openvino.opset4.ops import softplus
from openvino.opset2.ops import space_to_batch
from openvino.opset1.ops import space_to_depth
from openvino.opset1.ops import split
from openvino.opset1.ops import sqrt
from openvino.opset1.ops import squared_difference
from openvino.opset1.ops import squeeze
from openvino.opset1.ops import strided_slice
from openvino.opset1.ops import subtract
from openvino.opset4.ops import swish
from openvino.opset1.ops import tan
from openvino.opset1.ops import tanh
from openvino.opset1.ops import tensor_iterator
from openvino.opset1.ops import tile
from openvino.opset3.ops import topk
from openvino.opset1.ops import transpose
from openvino.opset1.ops import unsqueeze
from openvino.opset1.ops import variadic_split
| StarcoderdataPython |
3277184 | import io
from abc import ABC
from typing import Generator, Any, TypeVar, Iterable, Iterator, Optional
from ijson import items as ijson_items
from requests import Response
class AbcRest(ABC):
    """Empty abstract base marking REST client classes."""
    __slots__ = ()
_T = TypeVar('_T')

def debug_iter(iterable: Iterable[_T], file_name: str) -> Generator[_T, None, None]:
    """Yield items from *iterable* unchanged while mirroring each to a file.

    Debug helper: every chunk is written as binary to *file_name* as it
    streams through the generator.
    """
    with open(file_name, 'wb') as sink:
        for chunk in iterable:
            sink.write(chunk)
            yield chunk
class AbcData(ABC):
    """Abstract base for data resources streamed from the REST API."""
    __slots__ = ()
    @staticmethod
    def items_generator(response: Response) -> Generator[Any, None, None]:
        """Lazily yield the elements of the JSON 'items' array in *response*.

        The HTTP body is streamed chunk by chunk and parsed incrementally
        with ijson, so large responses are never held in memory at once.
        """
        with response:
            res_iter = response.iter_content(chunk_size=None)
            # res_iter = debug_iter(res_iter, 'http_response_bytes.json')
            readable = ReadableIterator(res_iter)
            item_iter = ijson_items(readable, prefix='items.item')
            yield from item_iter
class AbcModel(ABC):
    """Empty abstract base marking model classes."""
    __slots__ = ()
class ReadableIterator(io.RawIOBase):
    """Adapts an iterator of byte chunks into a readable raw binary stream.

    Lets incremental parsers (e.g. ijson) consume an HTTP chunk iterator
    through the standard file API.
    """
    # https://github.com/j-planet/Kaggle/blob/master/ValuedShoppers/IterStreamer.py

    def __init__(self, iterator: Iterator[bytes]) -> None:
        super().__init__()
        self.iterator = iterator
        self.partial_chunk = b''  # bytes received but not yet handed out

    def readable(self) -> bool:
        return True

    def readinto(self, buffer: bytearray) -> Optional[int]:
        """Fill *buffer* with up to len(buffer) bytes; return the count read."""
        buffer_size = len(buffer)
        chunk = bytearray(self.partial_chunk)
        stopped = False
        while len(chunk) < buffer_size:
            try:
                chunk += next(self.iterator)
            except StopIteration:
                stopped = True
                break
        self.partial_chunk = bytes(chunk[buffer_size:])
        # Copy at most buffer_size bytes WITHOUT resizing the caller's buffer:
        # the original `buffer[::] = chunk[:buffer_size]` relied on bytearray
        # slice-resizing and raised ValueError for memoryview targets when
        # fewer bytes than buffer_size were available.
        read = min(len(chunk), buffer_size)
        buffer[:read] = chunk[:read]
        if read == 0 and stopped:
            self.close()
        return read
| StarcoderdataPython |
3395747 | # -*- coding: utf-8 -*-
import pili
# 替换成自己 Qiniu 账号的 AccessKey
access_key = "..."
# 替换成自己 Qiniu 账号的 SecretKey
secret_key = "..."
domain = '...'
hub_name = '...'
stream_title = '...'
expire = 3600
mac = pili.Mac(access_key, secret_key)
client = pili.Client(mac)
hub = client.hub(hub_name)
stream = hub.get("...")
print pili.rtmp_publish_url(domain, hub_name, stream_title, mac, expire)
publishKey = ''
print pili.rtmp_publish_url_v1(domain, hub_name, stream_title, expire, publishKey)
print pili.rtmp_play_url(domain, hub_name, stream_title)
print pili.hls_play_url(domain, hub_name, stream_title)
print pili.hdl_play_url(domain, hub_name, stream_title)
print pili.snapshot_play_url(domain, hub_name, stream_title)
| StarcoderdataPython |
1798649 | from collections import OrderedDict
from itertools import product
from sympy import Basic
from sympy.core.singleton import Singleton
from sympy.core.compatibility import with_metaclass
from sympy.core.containers import Tuple
from sympy import AtomicExpr
from sympde.topology import ScalarTestFunction, VectorTestFunction
from sympde.topology import (dx1, dx2, dx3)
from sympde.topology import Mapping
from sympde.topology import SymbolicDeterminant
from sympde.topology import SymbolicInverseDeterminant
from sympde.topology import SymbolicWeightedVolume
from sympde.topology import IdentityMapping
#==============================================================================
# TODO move it
import string
import random
def random_string(n):
    """Return a cryptographically random tag of *n* lowercase letters and digits."""
    rng = random.SystemRandom()
    alphabet = string.ascii_lowercase + string.digits
    picked = [rng.choice(alphabet) for _ in range(n)]
    return ''.join(picked)
#==============================================================================
class ArityType(with_metaclass(Singleton, Basic)):
"""Base class representing a form type: bilinear/linear/functional"""
pass
class BilinearArity(ArityType):
pass
class LinearArity(ArityType):
pass
class FunctionalArity(ArityType):
pass
#==============================================================================
class IndexNode(with_metaclass(Singleton, Basic)):
"""Base class representing one index of an iterator"""
pass
class IndexElement(IndexNode):
pass
class IndexQuadrature(IndexNode):
pass
class IndexDof(IndexNode):
pass
class IndexDofTrial(IndexNode):
pass
class IndexDofTest(IndexNode):
pass
class IndexDerivative(IndexNode):
pass
index_element = IndexElement()
index_quad = IndexQuadrature()
index_dof = IndexDof()
index_dof_trial = IndexDofTrial()
index_dof_test = IndexDofTest()
index_deriv = IndexDerivative()
#==============================================================================
class LengthNode(with_metaclass(Singleton, Basic)):
"""Base class representing one length of an iterator"""
pass
class LengthElement(LengthNode):
pass
class LengthQuadrature(LengthNode):
pass
class LengthDof(LengthNode):
pass
class LengthDofTrial(LengthNode):
pass
class LengthDofTest(LengthNode):
pass
length_element = LengthElement()
length_quad = LengthQuadrature()
length_dof = LengthDof()
length_dof_trial = LengthDofTrial()
length_dof_test = LengthDofTest()
#==============================================================================
class RankNode(with_metaclass(Singleton, Basic)):
"""Base class representing a rank of an iterator"""
pass
class RankDimension(RankNode):
pass
rank_dim = RankDimension()
#==============================================================================
class BaseNode(Basic):
"""
"""
pass
#==============================================================================
class Element(BaseNode):
"""
"""
pass
#==============================================================================
class Pattern(Tuple):
"""
"""
pass
#==============================================================================
class IteratorBase(BaseNode):
"""
"""
def __new__(cls, target, dummies=None):
if not dummies is None:
if not isinstance(dummies, (list, tuple, Tuple)):
dummies = [dummies]
dummies = Tuple(*dummies)
return Basic.__new__(cls, target, dummies)
@property
def target(self):
return self._args[0]
@property
def dummies(self):
return self._args[1]
#==============================================================================
class TensorIterator(IteratorBase):
pass
#==============================================================================
class ProductIterator(IteratorBase):
pass
#==============================================================================
# TODO dummies should not be None
class GeneratorBase(BaseNode):
    """Base class for generators: a *target* array/matrix traversed along *dummies* indices."""

    def __new__(cls, target, dummies):
        # normalize dummies to a sympy Tuple so downstream code can rely on it
        if not isinstance(dummies, (list, tuple, Tuple)):
            dummies = [dummies]
        dummies = Tuple(*dummies)

        if not isinstance(target, (ArrayNode, MatrixNode)):
            # message kept in sync with the actual check above (MatrixNode is accepted too)
            raise TypeError('expecting an ArrayNode or MatrixNode')

        return Basic.__new__(cls, target, dummies)

    @property
    def target(self):
        """Array/matrix being traversed."""
        return self._args[0]

    @property
    def dummies(self):
        """Tuple of index nodes used for the traversal."""
        return self._args[1]
#==============================================================================
class TensorGenerator(GeneratorBase):
pass
#==============================================================================
class ProductGenerator(GeneratorBase):
pass
#==============================================================================
class Grid(BaseNode):
"""
"""
pass
#==============================================================================
class ScalarNode(BaseNode, AtomicExpr):
"""
"""
pass
#==============================================================================
class ArrayNode(BaseNode, AtomicExpr):
    """Base class for multi-dimensional array nodes.

    Subclasses declare a ``_rank`` and a ``_positions`` mapping from index
    nodes (element/quad/dof/...) to the axis they occupy in the array.
    """
    # rank (number of axes) of the array; set by subclasses
    _rank = None
    # mapping {IndexNode: axis position}; set by subclasses
    _positions = None
    # subset of positions exposed for iteration; defaults to all keys
    _free_positions = None

    @property
    def rank(self):
        return self._rank

    @property
    def positions(self):
        return self._positions

    @property
    def free_positions(self):
        if self._free_positions is None:
            return list(self.positions.keys())
        else:
            return self._free_positions

    def pattern(self, args=None):
        """Return a Pattern with the given index nodes placed at their declared
        axes and ``None`` at every remaining axis."""
        if args is None:
            args = self.free_positions

        positions = {}
        for a in args:
            positions[a] = self.positions[a]

        # build the rank-sized slot list, filling only the requested axes
        args = [None]*self.rank
        for k,v in positions.items():
            args[v] = k

        return Pattern(*args)
#==============================================================================
class MatrixNode(BaseNode, AtomicExpr):
"""
"""
_rank = None
@property
def rank(self):
return self._rank
def pattern(self, positions):
raise NotImplementedError('TODO')
#==============================================================================
class GlobalTensorQuadrature(ArrayNode):
"""
"""
_rank = 2
_positions = {index_element: 0, index_quad: 1}
_free_positions = [index_element]
#==============================================================================
class LocalTensorQuadrature(ArrayNode):
# TODO add set_positions
"""
"""
_rank = 1
_positions = {index_quad: 0}
#==============================================================================
class TensorQuadrature(ScalarNode):
"""
"""
pass
#==============================================================================
class MatrixQuadrature(MatrixNode):
"""
"""
_rank = rank_dim
def __new__(cls, target):
# TODO check target
return Basic.__new__(cls, target)
@property
def target(self):
return self._args[0]
#==============================================================================
class WeightedVolumeQuadrature(ScalarNode):
"""
"""
pass
#==============================================================================
class GlobalTensorQuadratureBasis(ArrayNode):
"""
"""
_rank = 4
_positions = {index_quad: 3, index_deriv: 2, index_dof: 1, index_element: 0}
_free_positions = [index_element]
def __new__(cls, target):
if not isinstance(target, (ScalarTestFunction, VectorTestFunction)):
raise TypeError('Expecting a scalar/vector test function')
return Basic.__new__(cls, target)
@property
def target(self):
return self._args[0]
#==============================================================================
class LocalTensorQuadratureBasis(ArrayNode):
"""
"""
_rank = 3
_positions = {index_quad: 2, index_deriv: 1, index_dof: 0}
_free_positions = [index_dof]
def __new__(cls, target):
if not isinstance(target, (ScalarTestFunction, VectorTestFunction)):
raise TypeError('Expecting a scalar/vector test function')
return Basic.__new__(cls, target)
@property
def target(self):
return self._args[0]
#==============================================================================
class TensorQuadratureBasis(ArrayNode):
"""
"""
_rank = 2
_positions = {index_quad: 1, index_deriv: 0}
_free_positions = [index_quad]
def __new__(cls, target):
if not isinstance(target, (ScalarTestFunction, VectorTestFunction)):
raise TypeError('Expecting a scalar/vector test function')
return Basic.__new__(cls, target)
@property
def target(self):
return self._args[0]
#==============================================================================
class CoefficientBasis(ScalarNode):
"""
"""
def __new__(cls, target):
if not isinstance(target, (ScalarTestFunction, VectorTestFunction)):
raise TypeError('Expecting a scalar/vector test function')
return Basic.__new__(cls, target)
@property
def target(self):
return self._args[0]
#==============================================================================
class TensorBasis(CoefficientBasis):
pass
#==============================================================================
class GlobalTensorQuadratureTestBasis(GlobalTensorQuadratureBasis):
_positions = {index_quad: 3, index_deriv: 2, index_dof_test: 1, index_element: 0}
#==============================================================================
class LocalTensorQuadratureTestBasis(LocalTensorQuadratureBasis):
_positions = {index_quad: 2, index_deriv: 1, index_dof_test: 0}
_free_positions = [index_dof_test]
#==============================================================================
class TensorQuadratureTestBasis(TensorQuadratureBasis):
pass
#==============================================================================
class TensorTestBasis(TensorBasis):
pass
#==============================================================================
class GlobalTensorQuadratureTrialBasis(GlobalTensorQuadratureBasis):
_positions = {index_quad: 3, index_deriv: 2, index_dof_trial: 1, index_element: 0}
#==============================================================================
class LocalTensorQuadratureTrialBasis(LocalTensorQuadratureBasis):
_positions = {index_quad: 2, index_deriv: 1, index_dof_trial: 0}
_free_positions = [index_dof_trial]
#==============================================================================
class TensorQuadratureTrialBasis(TensorQuadratureBasis):
pass
#==============================================================================
class TensorTrialBasis(TensorBasis):
pass
#==============================================================================
class MatrixLocalBasis(MatrixNode):
"""
used to describe local dof over an element
"""
_rank = rank_dim
def __new__(cls, target):
# TODO check target
return Basic.__new__(cls, target)
@property
def target(self):
return self._args[0]
#==============================================================================
class StencilMatrixLocalBasis(MatrixNode):
    """
    used to describe local dof over an element as a stencil matrix
    """
    def __new__(cls, pads):
        # pads: one padding per spatial dimension; a stencil matrix has two
        # axes (row/column dofs) per dimension, hence rank = 2 * dim
        if not isinstance(pads, (list, tuple, Tuple)):
            raise TypeError('Expecting an iterable')

        pads = Tuple(*pads)
        rank = 2*len(pads)
        # random tag: makes the generated variable name unique in printed code
        tag = random_string( 6 )
        return Basic.__new__(cls, pads, rank, tag)

    @property
    def pads(self):
        return self._args[0]

    @property
    def rank(self):
        return self._args[1]

    @property
    def tag(self):
        return self._args[2]
#==============================================================================
class StencilVectorLocalBasis(MatrixNode):
"""
used to describe local dof over an element as a stencil vector
"""
def __new__(cls, pads):
if not isinstance(pads, (list, tuple, Tuple)):
raise TypeError('Expecting an iterable')
pads = Tuple(*pads)
rank = len(pads)
tag = random_string( 6 )
return Basic.__new__(cls, pads, rank, tag)
@property
def pads(self):
return self._args[0]
@property
def rank(self):
return self._args[1]
@property
def tag(self):
return self._args[2]
#==============================================================================
class StencilMatrixGlobalBasis(MatrixNode):
"""
used to describe local dof over an element as a stencil matrix
"""
def __new__(cls, pads):
if not isinstance(pads, (list, tuple, Tuple)):
raise TypeError('Expecting an iterable')
pads = Tuple(*pads)
rank = 2*len(pads)
tag = random_string( 6 )
return Basic.__new__(cls, pads, rank, tag)
@property
def pads(self):
return self._args[0]
@property
def rank(self):
return self._args[1]
@property
def tag(self):
return self._args[2]
#==============================================================================
class StencilVectorGlobalBasis(MatrixNode):
"""
used to describe local dof over an element as a stencil vector
"""
def __new__(cls, pads):
if not isinstance(pads, (list, tuple, Tuple)):
raise TypeError('Expecting an iterable')
pads = Tuple(*pads)
rank = len(pads)
tag = random_string( 6 )
return Basic.__new__(cls, pads, rank, tag)
@property
def pads(self):
return self._args[0]
@property
def rank(self):
return self._args[1]
@property
def tag(self):
return self._args[2]
#==============================================================================
class GlobalSpan(ArrayNode):
"""
"""
_rank = 1
_positions = {index_element: 0}
def __new__(cls, target):
if not isinstance(target, (ScalarTestFunction, VectorTestFunction)):
raise TypeError('Expecting a scalar/vector test function')
return Basic.__new__(cls, target)
@property
def target(self):
return self._args[0]
#==============================================================================
class Span(ScalarNode):
"""
"""
def __new__(cls, target=None):
if not( target is None ):
if not isinstance(target, (ScalarTestFunction, VectorTestFunction)):
raise TypeError('Expecting a scalar/vector test function')
return Basic.__new__(cls, target)
@property
def target(self):
return self._args[0]
#==============================================================================
class Evaluation(BaseNode):
"""
"""
pass
#==============================================================================
class FieldEvaluation(Evaluation):
"""
"""
pass
#==============================================================================
class MappingEvaluation(Evaluation):
"""
"""
pass
#==============================================================================
class ComputeNode(Basic):
"""
"""
def __new__(cls, expr):
return Basic.__new__(cls, expr)
@property
def expr(self):
return self._args[0]
#==============================================================================
class ComputePhysical(ComputeNode):
"""
"""
pass
#==============================================================================
class ComputePhysicalBasis(ComputePhysical):
"""
"""
pass
#==============================================================================
class ComputeKernelExpr(ComputeNode):
"""
"""
pass
#==============================================================================
class ComputeLogical(ComputeNode):
"""
"""
pass
#==============================================================================
class ComputeLogicalBasis(ComputeLogical):
"""
"""
pass
#==============================================================================
class Reduction(Basic):
"""
"""
def __new__(cls, op, expr, lhs=None):
# TODO add verification on op = '-', '+', '*', '/'
return Basic.__new__(cls, op, expr, lhs)
@property
def op(self):
return self._args[0]
@property
def expr(self):
return self._args[1]
@property
def lhs(self):
return self._args[2]
#==============================================================================
class Reduce(Basic):
"""
"""
def __new__(cls, op, rhs, lhs, loop):
# TODO add verification on op = '-', '+', '*', '/'
if not isinstance(loop, Loop):
raise TypeError('Expecting a Loop')
return Basic.__new__(cls, op, rhs, lhs, loop)
@property
def op(self):
return self._args[0]
@property
def rhs(self):
return self._args[1]
@property
def lhs(self):
return self._args[2]
@property
def loop(self):
return self._args[3]
#==============================================================================
class Reset(Basic):
"""
"""
def __new__(cls, expr):
return Basic.__new__(cls, expr)
@property
def expr(self):
return self._args[0]
#==============================================================================
class ElementOf(Basic):
"""
"""
def __new__(cls, target):
return Basic.__new__(cls, target)
@property
def target(self):
return self._args[0]
#==============================================================================
class ExprNode(Basic):
"""
"""
pass
#==============================================================================
class AtomicNode(ExprNode, AtomicExpr):
"""
"""
@property
def expr(self):
return self._args[0]
#==============================================================================
class ValueNode(ExprNode):
"""
"""
def __new__(cls, expr):
return Basic.__new__(cls, expr)
@property
def expr(self):
return self._args[0]
#==============================================================================
class PhysicalValueNode(ValueNode):
"""
"""
pass
#==============================================================================
class LogicalValueNode(ValueNode):
"""
"""
pass
#==============================================================================
class PhysicalBasisValue(PhysicalValueNode):
"""
"""
pass
#==============================================================================
class LogicalBasisValue(LogicalValueNode):
"""
"""
pass
#==============================================================================
class PhysicalGeometryValue(PhysicalValueNode):
"""
"""
pass
#==============================================================================
class LogicalGeometryValue(LogicalValueNode):
"""
"""
pass
#==============================================================================
class BasisAtom(AtomicNode):
    """Atomic wrapper around an expression containing exactly one test function."""

    def __new__(cls, expr):
        ls = list(expr.atoms(ScalarTestFunction))
        ls += list(expr.atoms(VectorTestFunction))
        if not(len(ls) == 1):
            # include the offending expression in the error instead of
            # printing to stdout (keeps library code silent)
            raise ValueError('Expecting an expression with one test function, '
                             'got {} in {!r} (type {})'.format(ls, expr, type(expr)))
        u = ls[0]

        obj = Basic.__new__(cls, expr)
        obj._atom = u
        return obj

    @property
    def expr(self):
        return self._args[0]

    @property
    def atom(self):
        """The unique test function appearing in ``expr``."""
        return self._atom
#==============================================================================
class GeometryAtom(AtomicNode):
    """Atomic wrapper around an expression containing exactly one Mapping."""

    def __new__(cls, expr):
        ls = list(expr.atoms(Mapping))
        if not(len(ls) == 1):
            # include the offending expression in the error instead of
            # printing to stdout (keeps library code silent)
            raise ValueError('Expecting an expression with one mapping, '
                             'got {} in {!r} (type {})'.format(ls, expr, type(expr)))

        # TODO
        u = ls[0]

        obj = Basic.__new__(cls, expr)
        obj._atom = u
        return obj

    @property
    def expr(self):
        return self._args[0]

    @property
    def atom(self):
        """The unique mapping appearing in ``expr``."""
        return self._atom
#==============================================================================
class GeometryExpr(Basic):
"""
"""
def __new__(cls, expr):
# TODO assert on expr
atom = GeometryAtom(expr)
expr = MatrixQuadrature(expr)
return Basic.__new__(cls, atom, expr)
@property
def atom(self):
return self._args[0]
@property
def expr(self):
return self._args[1]
#==============================================================================
class Loop(BaseNode):
    """
    class to describe a loop of an iterator over a generator.

    Each entry of *iterable* is paired (via ``construct_itergener``) with an
    (iterator, generator) couple; *index* selects the traversal axis and
    *stmts* is the loop body.
    """

    def __new__(cls, iterable, index, stmts=None):
        # ... normalize iterable to a sympy Tuple
        if not( isinstance(iterable, (list, tuple, Tuple)) ):
            iterable = [iterable]

        iterable = Tuple(*iterable)
        # ...

        # ... replace GeometryExpressions by a list of expressions
        others = [i for i in iterable if not isinstance(i, GeometryExpressions)]
        geos = [i.arguments for i in iterable if isinstance(i, GeometryExpressions)]

        with_geo = False # TODO remove
        if len(geos) == 1:
            geos = list(geos[0])

        elif len(geos) > 1:
            raise NotImplementedError('TODO')

        iterable = others + geos
        iterable = Tuple(*iterable)
        # ...

        # ... index must be one of the IndexNode singletons (element/quad/dof/...)
        if not( isinstance(index, IndexNode) ):
            print(type(index), index)
            raise TypeError('Expecting an index node')
        # ...

        # ... TODO - add assert w.r.t index type
        #          - this should be splitted/moved somewhere
        # pair every iterable entry with its (iterator, generator) couple
        iterator = []
        generator = []
        for a in iterable:
            i,g = construct_itergener(a, index)
            iterator.append(i)
            generator.append(g)
        # ...

        # ...
        iterator = Tuple(*iterator)
        generator = Tuple(*generator)
        # ...

        # ... normalize the loop body to a Tuple of statements
        if stmts is None:
            stmts = []

        elif not isinstance(stmts, (tuple, list, Tuple)):
            stmts = [stmts]

        stmts = Tuple(*stmts)
        # ...

        obj = Basic.__new__(cls, iterable, index, stmts)
        # iterator/generator are cached on the instance, not part of the args
        obj._iterator = iterator
        obj._generator = generator

        return obj

    @property
    def iterable(self):
        return self._args[0]

    @property
    def index(self):
        return self._args[1]

    @property
    def stmts(self):
        return self._args[2]

    @property
    def iterator(self):
        return self._iterator

    @property
    def generator(self):
        return self._generator

    def get_geometry_stmts(self, mapping):
        """Return the statements evaluating geometry quantities (weighted
        volume, mapping determinants) needed inside this loop."""
        args = []

        l_quad = list(self.generator.atoms(LocalTensorQuadrature))
        if len(l_quad) == 0:
            return Tuple(*args)

        assert(len(l_quad) == 1)
        l_quad = l_quad[0]

        if isinstance(mapping, IdentityMapping):
            # identity mapping: only the quadrature weights are needed
            args += [ComputeLogical(WeightedVolumeQuadrature(l_quad))]
            return Tuple(*args)

        args += [ComputeLogical(WeightedVolumeQuadrature(l_quad))]

        # add stmts related to the geometry
        # TODO add other expressions
        args += [ComputeLogical(SymbolicDeterminant(mapping))]
        args += [ComputeLogical(SymbolicInverseDeterminant(mapping))]
        args += [ComputeLogical(SymbolicWeightedVolume(mapping))]

        return Tuple(*args)
#==============================================================================
class TensorIteration(BaseNode):
"""
"""
def __new__(cls, iterator, generator):
# ...
if not( isinstance(iterator, TensorIterator) ):
raise TypeError('Expecting an TensorIterator')
if not( isinstance(generator, TensorGenerator) ):
raise TypeError('Expecting a TensorGenerator')
# ...
return Basic.__new__(cls, iterator, generator)
@property
def iterator(self):
return self._args[0]
@property
def generator(self):
return self._args[1]
#==============================================================================
class ProductIteration(BaseNode):
"""
"""
def __new__(cls, iterator, generator):
# ...
if not( isinstance(iterator, ProductIterator) ):
raise TypeError('Expecting an ProductIterator')
if not( isinstance(generator, ProductGenerator) ):
raise TypeError('Expecting a ProductGenerator')
# ...
return Basic.__new__(cls, iterator, generator)
@property
def iterator(self):
return self._args[0]
@property
def generator(self):
return self._args[1]
#==============================================================================
class SplitArray(BaseNode):
"""
"""
def __new__(cls, target, positions, lengths):
if not isinstance(positions, (list, tuple, Tuple)):
positions = [positions]
positions = Tuple(*positions)
if not isinstance(lengths, (list, tuple, Tuple)):
lengths = [lengths]
lengths = Tuple(*lengths)
return Basic.__new__(cls, target, positions, lengths)
@property
def target(self):
return self._args[0]
@property
def positions(self):
return self._args[1]
@property
def lengths(self):
return self._args[2]
#==============================================================================
def construct_logical_expressions(u, nderiv):
    """Build ComputeLogicalBasis statements for every logical derivative of
    *u* up to total order *nderiv* (mixed derivatives included)."""
    dim = u.space.ldim
    ops = [dx1, dx2, dx3][:dim]

    # every multi-index (n1, ..., ndim) with n1 + ... + ndim <= nderiv
    multi_indices = [ijk for ijk in product(range(nderiv+1), repeat=dim)
                     if sum(ijk) <= nderiv]

    stmts = []
    for ijk in multi_indices:
        atom = u
        # apply each directional operator as many times as its order says
        for order, op in zip(ijk, ops):
            for _ in range(order):
                atom = op(atom)
        stmts.append(ComputeLogicalBasis(atom))

    return stmts
#==============================================================================
class GeometryExpressions(Basic):
"""
"""
def __new__(cls, M, nderiv):
dim = M.rdim
ops = [dx1, dx2, dx3][:dim]
r = range(nderiv+1)
ranges = [r]*dim
indices = product(*ranges)
indices = list(indices)
indices = [ijk for ijk in indices if sum(ijk) <= nderiv]
args = []
for d in range(dim):
for ijk in indices:
atom = M[d]
for n,op in zip(ijk, ops):
for i in range(1, n+1):
atom = op(atom)
args.append(atom)
args = [GeometryExpr(i) for i in args]
args = Tuple(*args)
return Basic.__new__(cls, args)
@property
def arguments(self):
return self._args[0]
#==============================================================================
def construct_itergener(a, index):
    """Return the (iterator, generator) pair associated with the array *a*
    traversed along *index*.

    The generator wraps the source array; the iterator wraps the element
    produced at each step (e.g. a GlobalTensorQuadrature generates
    LocalTensorQuadrature elements).
    """
    # ... create generator
    # NOTE: subclasses (Trial/Test variants) must be tested before their
    #       parent Basis classes, since isinstance matches subclasses too.
    if isinstance(a, GlobalTensorQuadrature):
        generator = TensorGenerator(a, index)
        element = LocalTensorQuadrature()

    elif isinstance(a, LocalTensorQuadrature):
        generator = TensorGenerator(a, index)
        element = TensorQuadrature()

    elif isinstance(a, GlobalTensorQuadratureTrialBasis):
        generator = TensorGenerator(a, index)
        element = LocalTensorQuadratureTrialBasis(a.target)

    elif isinstance(a, LocalTensorQuadratureTrialBasis):
        generator = TensorGenerator(a, index)
        element = TensorQuadratureTrialBasis(a.target)

    elif isinstance(a, TensorQuadratureTrialBasis):
        generator = TensorGenerator(a, index)
        element = TensorTrialBasis(a.target)

    elif isinstance(a, GlobalTensorQuadratureTestBasis):
        generator = TensorGenerator(a, index)
        element = LocalTensorQuadratureTestBasis(a.target)

    elif isinstance(a, LocalTensorQuadratureTestBasis):
        generator = TensorGenerator(a, index)
        element = TensorQuadratureTestBasis(a.target)

    elif isinstance(a, TensorQuadratureTestBasis):
        generator = TensorGenerator(a, index)
        element = TensorTestBasis(a.target)

    elif isinstance(a, GlobalTensorQuadratureBasis):
        generator = TensorGenerator(a, index)
        element = LocalTensorQuadratureBasis(a.target)

    elif isinstance(a, LocalTensorQuadratureBasis):
        generator = TensorGenerator(a, index)
        element = TensorQuadratureBasis(a.target)

    elif isinstance(a, TensorQuadratureBasis):
        generator = TensorGenerator(a, index)
        element = TensorBasis(a.target)

    elif isinstance(a, GlobalSpan):
        generator = TensorGenerator(a, index)
        element = Span(a.target)

    elif isinstance(a, MatrixLocalBasis):
        generator = ProductGenerator(a, index)
        element = CoefficientBasis(a.target)

    elif isinstance(a, GeometryExpr):
        generator = ProductGenerator(a.expr, index)
        element = a.atom

    else:
        raise TypeError('{} not available'.format(type(a)))
    # ...

    # ... create iterator
    # tensor-structured elements get a TensorIterator, scalar/atomic ones a
    # ProductIterator
    if isinstance(element, LocalTensorQuadrature):
        iterator = TensorIterator(element)

    elif isinstance(element, TensorQuadrature):
        iterator = TensorIterator(element)

    elif isinstance(element, LocalTensorQuadratureTrialBasis):
        iterator = TensorIterator(element)

    elif isinstance(element, TensorQuadratureTrialBasis):
        iterator = TensorIterator(element)

    elif isinstance(element, TensorTrialBasis):
        iterator = TensorIterator(element)

    elif isinstance(element, LocalTensorQuadratureTestBasis):
        iterator = TensorIterator(element)

    elif isinstance(element, TensorQuadratureTestBasis):
        iterator = TensorIterator(element)

    elif isinstance(element, TensorTestBasis):
        iterator = TensorIterator(element)

    elif isinstance(element, LocalTensorQuadratureBasis):
        iterator = TensorIterator(element)

    elif isinstance(element, TensorQuadratureBasis):
        iterator = TensorIterator(element)

    elif isinstance(element, TensorBasis):
        iterator = TensorIterator(element)

    elif isinstance(element, Span):
        iterator = TensorIterator(element)

    elif isinstance(element, CoefficientBasis):
        iterator = ProductIterator(element)

    elif isinstance(element, GeometryAtom):
        iterator = ProductIterator(element)

    else:
        raise TypeError('{} not available'.format(type(element)))
    # ...

    return iterator, generator
#==============================================================================
class Block(Basic):
"""
"""
def __new__(cls, body):
if not isinstance(body, (list, tuple, Tuple)):
body = [body]
body = Tuple(*body)
return Basic.__new__(cls, body)
@property
def body(self):
return self._args[0]
#==============================================================================
def is_scalar_field(expr):
    """Return True if *expr* is a ScalarField, possibly wrapped in (logical)
    partial-derivative operators."""
    if isinstance(expr, (_partial_derivatives, _logical_partial_derivatives)):
        # unwrap one derivative layer and test the operand
        return is_scalar_field(expr.args[0])
    return isinstance(expr, ScalarField)
#==============================================================================
def is_vector_field(expr):
    """Return True if *expr* is a (possibly indexed) VectorField, possibly
    wrapped in (logical) partial-derivative operators."""
    if isinstance(expr, (_partial_derivatives, _logical_partial_derivatives)):
        # unwrap one derivative layer and test the operand
        return is_vector_field(expr.args[0])
    return isinstance(expr, (VectorField, IndexedVectorField))
#==============================================================================
from sympy import Matrix, ImmutableDenseMatrix
from sympy import symbols
from pyccel.ast.core import _atomic
from sympde.expr import TerminalExpr
from sympde.expr import LinearForm
from sympde.expr import BilinearForm
from sympde.topology import element_of
from sympde.topology import ScalarField
from sympde.topology import VectorField, IndexedVectorField
from sympde.topology.space import ScalarTestFunction
from sympde.topology.space import VectorTestFunction
from sympde.topology.space import IndexedTestTrial
from sympde.topology.derivatives import _partial_derivatives
from sympde.topology.derivatives import _logical_partial_derivatives
from sympde.topology.derivatives import get_max_partial_derivatives
class AST(object):
    """Build the intermediate AST (loops/reductions/blocks) of a linear or
    bilinear form from its symbolic SymPDE expression."""

    def __init__(self, expr):
        # ... compute terminal expr
        # TODO check that we have one single domain/interface/boundary
        terminal_expr = TerminalExpr(expr)
        domain = terminal_expr[0].target
        terminal_expr = terminal_expr[0].expr

        # print('> terminal expr = ', terminal_expr)
        # ...

        # ... compute max deriv appearing anywhere in the terminal expression
        nderiv = 0
        if isinstance(terminal_expr, Matrix):
            n_rows, n_cols = terminal_expr.shape
            for i_row in range(0, n_rows):
                for i_col in range(0, n_cols):
                    d = get_max_partial_derivatives(terminal_expr[i_row,i_col])
                    nderiv = max(nderiv, max(d.values()))
        else:
            d = get_max_partial_derivatives(terminal_expr)
            nderiv = max(nderiv, max(d.values()))

        # print('> nderiv = ', nderiv)
        # ...

        # ... classify the form and collect its test/trial functions
        is_bilinear = False
        is_linear = False
        is_functional = False
        tests = []
        trials = []

        if isinstance(expr, LinearForm):
            is_linear = True
            tests = expr.test_functions

        elif isinstance(expr, BilinearForm):
            is_bilinear = True
            tests = expr.test_functions
            trials = expr.trial_functions

        else:
            raise NotImplementedError('TODO')
        # ...

        # ... atoms of interest: derivatives, test functions and fields
        atoms_types = (_partial_derivatives,
                       VectorTestFunction,
                       ScalarTestFunction,
                       IndexedTestTrial,
                       ScalarField,
                       VectorField, IndexedVectorField)

        atoms = _atomic(expr, cls=atoms_types)
        # ...

        # ... split atoms into scalar-field / vector-field / basis expressions
        atomic_expr_field = [atom for atom in atoms if is_scalar_field(atom)]
        atomic_expr_vector_field = [atom for atom in atoms if is_vector_field(atom)]
        atomic_expr = [atom for atom in atoms if not( atom in atomic_expr_field ) and
                       not( atom in atomic_expr_vector_field)]
        # ...

        # ... per-test-function quadrature basis arrays and spans
        d_tests = {}
        for v in tests:
            d = {}
            d['global'] = GlobalTensorQuadratureTestBasis(v)
            d['local'] = LocalTensorQuadratureTestBasis(v)
            d['array'] = TensorQuadratureTestBasis(v)
            d['basis'] = TensorTestBasis(v)
            d['span'] = GlobalSpan(v)

            d_tests[v] = d
        # ...

        # ... per-trial-function quadrature basis arrays and spans
        d_trials = {}
        for v in trials:
            d = {}
            d['global'] = GlobalTensorQuadratureTrialBasis(v)
            d['local'] = LocalTensorQuadratureTrialBasis(v)
            d['array'] = TensorQuadratureTrialBasis(v)
            d['basis'] = TensorTrialBasis(v)
            d['span'] = GlobalSpan(v)

            d_trials[v] = d
        # ...

        # ... dispatch to the specialized AST builder
        if is_linear:
            ast = _create_ast_linear_form(terminal_expr, atomic_expr, tests, d_tests,
                                          nderiv, domain.dim)

        elif is_bilinear:
            ast = _create_ast_bilinear_form(terminal_expr, atomic_expr,
                                            tests, d_tests,
                                            trials, d_trials,
                                            nderiv, domain.dim)

        else:
            raise NotImplementedError('TODO')
        # ...

        self._expr = ast
        self._nderiv = nderiv
        self._domain = domain

    @property
    def expr(self):
        return self._expr

    @property
    def nderiv(self):
        return self._nderiv

    @property
    def domain(self):
        return self._domain

    @property
    def dim(self):
        return self.domain.dim
#==============================================================================
def _create_ast_linear_form(terminal_expr, atomic_expr, tests, d_tests, nderiv, dim):
    """Build the assembly AST of a linear form: element loop > test-dof loop >
    quadrature loop, with reductions into local/global stencil vectors."""
    pads = symbols('p1, p2, p3')[:dim]
    g_quad = GlobalTensorQuadrature()
    l_quad = LocalTensorQuadrature()

    # ... innermost statements: logical derivatives of the tests, then the
    #     physical basis expressions
    stmts = []
    for v in tests:
        stmts += construct_logical_expressions(v, nderiv)

    stmts += [ComputePhysicalBasis(i) for i in atomic_expr]
    # ...

    # ... quadrature loop
    a_basis = tuple([d['array'] for v,d in d_tests.items()])
    loop = Loop((l_quad, *a_basis), index_quad, stmts)
    # ...

    # ... TODO
    l_vec = StencilVectorLocalBasis(pads)
    # ...

    # ... accumulate the kernel expression into the local vector
    loop = Reduce('+', ComputeKernelExpr(terminal_expr), ElementOf(l_vec), loop)
    # ...

    # ... loop over tests
    l_basis = tuple([d['local'] for v,d in d_tests.items()])
    stmts = [loop]
    loop = Loop(l_basis, index_dof_test, stmts)
    # ...

    # ... TODO
    body = (Reset(l_vec), loop)
    stmts = Block(body)
    # ...

    # ... loop over elements
    g_basis = tuple([d['global'] for v,d in d_tests.items()])
    g_span = tuple([d['span'] for v,d in d_tests.items()])
    loop = Loop((g_quad, *g_basis, *g_span), index_element, stmts)
    # ...

    # ... TODO
    g_vec = StencilVectorGlobalBasis(pads)
    # ...

    # ... TODO
    body = (Reset(g_vec), Reduce('+', l_vec, g_vec, loop))
    stmt = Block(body)
    # ...

    return stmt
#==============================================================================
def _create_ast_bilinear_form(terminal_expr, atomic_expr,
                              tests, d_tests,
                              trials, d_trials,
                              nderiv, dim):
    """
    Build the AST evaluating a bilinear form: quadrature-point evaluation,
    reduction of the kernel into a local element matrix, loops over trial and
    test degrees of freedom, and a final element loop reducing local matrices
    into the global matrix.
    """
    pads = symbols('p1, p2, p3')[:dim]
    g_quad = GlobalTensorQuadrature()
    l_quad = LocalTensorQuadrature()
    # Per-quadrature-point statements: logical derivatives of the test
    # functions, then their physical counterparts.
    stmts = []
    for v in tests:
        stmts += construct_logical_expressions(v, nderiv)
    stmts += [ComputePhysicalBasis(i) for i in atomic_expr]
    # Innermost loop: quadrature points with test and trial basis arrays.
    a_basis_tests = tuple([d['array'] for v,d in d_tests.items()])
    a_basis_trials = tuple([d['array'] for v,d in d_trials.items()])
    loop = Loop((l_quad, *a_basis_tests, *a_basis_trials), index_quad, stmts)
    # ... TODO
    l_mat = StencilMatrixLocalBasis(pads)
    # Accumulate the kernel expression into the local element matrix.
    loop = Reduce('+', ComputeKernelExpr(terminal_expr), ElementOf(l_mat), loop)
    # ... loop over trials
    l_basis = tuple([d['local'] for v,d in d_trials.items()])
    stmts = [loop]
    loop = Loop(l_basis, index_dof_trial, stmts)
    # ... loop over tests
    l_basis = tuple([d['local'] for v,d in d_tests.items()])
    stmts = [loop]
    loop = Loop(l_basis, index_dof_test, stmts)
    # Reset the local matrix before each element is assembled. TODO
    body = (Reset(l_mat), loop)
    stmts = Block(body)
    # Element loop over global quadrature, bases and spans.
    g_basis_tests = tuple([d['global'] for v,d in d_tests.items()])
    g_basis_trials = tuple([d['global'] for v,d in d_trials.items()])
    # TODO d_trials or d_tests here? (spans currently come from the trial spaces)
    g_span = tuple([d['span'] for v,d in d_trials.items()])
    loop = Loop((g_quad, *g_basis_tests, *g_basis_trials, *g_span),
                index_element, stmts)
    # ... TODO
    g_mat = StencilMatrixGlobalBasis(pads)
    # Reduce local matrices into the global matrix across the element loop. TODO
    body = (Reset(g_mat), Reduce('+', l_mat, g_mat, loop))
    stmt = Block(body)
    return stmt
| StarcoderdataPython |
11259 | # -*- coding: utf-8 -*-
"""API routes config for notifai_recruitment project.
REST framework adds support for automatic URL routing to Django, and provides simple, quick and consistent
way of wiring view logic to a set of URLs.
For more information on this file, see
https://www.django-rest-framework.org/api-guide/routers/
"""
from rest_framework import routers
from textify.api.views import NoteViewSet
# DefaultRouter auto-generates list/detail URL patterns plus a browsable API root.
router = routers.DefaultRouter()
# Expose NoteViewSet under the /notes/ prefix.
router.register(r'notes', NoteViewSet)
| StarcoderdataPython |
28498 | <reponame>foropolo/task
from rest_framework import serializers
class HelloSerializer(serializers.Serializer):
    """Serializes a single ``city_name`` field for testing out APIView."""
    # max_length is enforced during DRF validation.
    city_name = serializers.CharField(max_length=30)
| StarcoderdataPython |
3226457 | <gh_stars>0
# Ask the user to enter a word. Have the program keep asking them to
# enter one while the user writes "continue" as their word.
keyword = "continue"
# Bug fix: current_word previously started as "" so the while-condition was
# False on the very first check and the user was never prompted. Seeding it
# with the keyword guarantees at least one prompt; the loop then repeats only
# while the user keeps typing "continue".
current_word = keyword
while current_word == keyword:
    current_word = input("Enter a word: ").strip()
| StarcoderdataPython |
159306 | # File name: __init__.py
# Author: <NAME>
# Date created: 27-07-2018
# TODO: should I import the entire module, or just the relevant functions?
from . import chemical, recipe, make, help | StarcoderdataPython |
97468 | from typing import List
from logging import getLogger
import notify
logger = getLogger(__name__)
class _RegisteredNotifyTaskList:
    """Module-level registry of the notify tasks currently being tracked."""

    def __init__(self):
        # Tasks in registration order.
        self._task_list: List[notify.NotifyTask] = []

    def append_task(self, task: notify.NotifyTask) -> None:
        """Register *task*."""
        self._task_list.append(task)

    def remove_task(self, task: notify.NotifyTask) -> None:
        """Unregister *task* (raises ValueError if it is not registered)."""
        self._task_list.remove(task)

    def find_task(self, message_id: int) -> notify.NotifyTask:
        """Return the task whose registration message has id *message_id*, or None."""
        match = next(
            (candidate for candidate in self._task_list
             if candidate._register_send_message.id == message_id),
            None,
        )
        if match is None:
            logger.debug(f"not found:{len(self._task_list)=}")
        return match


registered_notify_task_list: _RegisteredNotifyTaskList = _RegisteredNotifyTaskList()
| StarcoderdataPython |
76572 | <reponame>naviocean/imgclsmob
import random
import threading
import numpy as np
from PIL import Image, ImageOps, ImageFilter
from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator
class SegDataset(object):
    """
    Segmentation base dataset.

    Parameters:
    ----------
    root : str
        Path to data folder.
    mode : str
        'train', 'val', 'test', or 'demo'.
    transform : callable
        A function that transforms the image.
    """
    def __init__(self,
                 root,
                 mode,
                 transform,
                 base_size=520,
                 crop_size=480):
        super(SegDataset, self).__init__()
        assert (mode in ("train", "val", "test", "demo"))
        # NOTE(review): this second assert contradicts both the docstring and
        # the assert above by rejecting "train" and "val". Presumably only
        # inference modes are supported in this port -- confirm before use.
        assert (mode in ("test", "demo"))
        self.root = root
        self.mode = mode
        self.transform = transform
        # base_size: reference size for random rescaling in _sync_transform.
        self.base_size = base_size
        # crop_size: side length of the square crops produced below.
        self.crop_size = crop_size
    def _val_sync_transform(self, image, mask):
        """Resize so the short side equals crop_size, center-crop image and
        mask jointly, then convert both to arrays (validation transform)."""
        outsize = self.crop_size
        short_size = outsize
        w, h = image.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        image = image.resize((ow, oh), Image.BILINEAR)
        # NEAREST keeps label values intact (no interpolation between classes).
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = image.size
        x1 = int(round(0.5 * (w - outsize)))
        y1 = int(round(0.5 * (h - outsize)))
        image = image.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask
    def _sync_transform(self, image, mask):
        """Training augmentation applied jointly to image and mask: random
        mirror, random rescale, pad-and-random-crop, optional Gaussian blur."""
        # random mirror
        if random.random() < 0.5:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = image.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = image.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            image = image.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask
    @staticmethod
    def _img_transform(image):
        # PIL image -> numpy array (dtype inferred from the image mode).
        return np.array(image)
    @staticmethod
    def _mask_transform(mask):
        # Label mask -> int32 array suitable for class indices.
        return np.array(mask).astype(np.int32)
    def __getitem__(self, index):
        raise NotImplementedError
    def __len__(self):
        raise NotImplementedError
class SegDirectoryIterator(DirectoryIterator):
    """
    Keras-style iterator yielding (image, mask) batches drawn from `dataset`
    rather than from scanning `directory`.
    """
    allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}
    def __init__(self,
                 directory,
                 image_data_generator,
                 target_size=(256, 256),
                 color_mode='rgb',
                 classes=None,
                 class_mode='categorical',
                 batch_size=32,
                 shuffle=True,
                 seed=None,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest',
                 dtype='float32',
                 dataset=None):
        # Deliberately bypasses DirectoryIterator.__init__ (which would scan
        # the directory tree): only the processing attributes are configured,
        # and the Iterator bookkeeping is replicated by hand below.
        super(SegDirectoryIterator, self).set_processing_attrs(
            image_data_generator,
            target_size,
            color_mode,
            data_format,
            save_to_dir,
            save_prefix,
            save_format,
            subset,
            interpolation)
        self.dataset = dataset
        self.class_mode = class_mode
        self.dtype = dtype
        # State normally initialized by keras' Iterator.__init__:
        self.n = len(self.dataset)
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()
    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.

        # Arguments
            index_array: Array of sample indices to include in batch.
        # Returns:
            A batch of transformed samples (image batch, mask batch).
        """
        # Batch arrays are allocated lazily: their shapes are only known once
        # the first (image, mask) pair has been fetched from the dataset.
        batch_x = None
        batch_y = None
        for i, j in enumerate(index_array):
            x, y = self.dataset[j]
            if batch_x is None:
                batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype)
                batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.int32)
            batch_x[i] = x
            batch_y[i] = y
        return batch_x, batch_y
class SegImageDataGenerator(ImageDataGenerator):
    """ImageDataGenerator whose `flow_from_directory` yields segmentation
    batches via SegDirectoryIterator (samples come from `dataset`)."""
    def flow_from_directory(self,
                            directory,
                            target_size=(256, 256),
                            color_mode='rgb',
                            classes=None,
                            class_mode='categorical',
                            batch_size=32,
                            shuffle=True,
                            seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest',
                            dataset=None):
        """Build a SegDirectoryIterator. Arguments mirror the keras API; the
        extra `dataset` supplies the (image, mask) pairs."""
        return SegDirectoryIterator(
            directory,
            self,
            target_size=target_size,
            color_mode=color_mode,
            classes=classes,
            class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size,
            shuffle=shuffle,
            seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            follow_links=follow_links,
            subset=subset,
            interpolation=interpolation,
            dataset=dataset)
| StarcoderdataPython |
4835522 | """This module is responsible for running the Flask restful API to
communicate the the values of basic_math to a localhost on port 5000 """
from flask import Flask, request
from flask_restful import Resource, Api
import basic_math
APP = Flask(__name__)
API = Api(APP)
def verify_list(list1, list2):
    """
    Check that every element of list2 is also present in list1.

    Args:
        list1: list of values to search in
        list2: list of required values

    Returns:
        True when list2 is fully contained in list1, False otherwise
        (note: containment, not equality of the two lists)

    Raises:
        None
    """
    for required in list2:
        if required not in list1:
            return False
    return True
def convert_data(json_data):
    """
    Convert a JSON payload into a list of operation dictionaries.

    Args:
        json_data: list of dicts, e.g. [{'num_1': 2, 'num_2': 3, 'operation': 'add'}]

    Returns:
        On success, a list of dicts with keys:
            num_1: first operand the operation is applied to
            num_2: second operand the operation is applied to
            operation: name of the mathematical operation to apply
        On failure, a dict with a single 'message' key:
            - the first datapoint lacks one of the required keys, or
            - the payload is empty / a later datapoint is missing a field.
    """
    required_keys = ['num_1', 'num_2', 'operation']
    data = json_data
    if not data:
        # Robustness fix: an empty payload used to crash on data[0] (IndexError).
        return {'message': 'could not read in data properly'}
    if not set(required_keys).issubset(data[0]):
        # (typo fixed: 'spesified' -> 'specified')
        return {'message':
                'the json does not contain required fields in the specified format, namely '
                + str(required_keys)
                }
    try:
        return [{'num_1': datapoint['num_1'],
                 'num_2': datapoint['num_2'],
                 'operation': datapoint['operation']}
                for datapoint in data]
    except KeyError:
        # Bug fix: missing dict keys raise KeyError, not IndexError, so this
        # fallback message was previously unreachable and the request crashed.
        return {'message': 'could not read in data properly'}
class ApiManager(Resource):
    '''
    Flask-RESTful resource that evaluates arithmetic requests.

    Attributes
    ----------
    None, this is the format required for the flask restful api standard

    Methods
    -------
    post(self)
        Reads the request JSON, converts it via convert_data, and applies the
        requested basic_math operation to each datapoint.
    '''

    def post(self):
        '''
        Process a POST request (default flask-restful entry point).

        Args:
            Resources from the URL request (operands come from the JSON body).

        Returns:
            A (results, 201) tuple; each entry is the computed value, or the
            string 'operation not supported' for an unknown operation.
        '''
        json_format = request.get_json()
        # NOTE(review): convert_data may return an error dict instead of a
        # list; iterating it below would then yield its keys and fail. Confirm
        # upstream payloads are always well-formed or handle that case here.
        converted_format = convert_data(json_format)
        result = []
        # Dispatch table mapping operation names to basic_math callables
        # (replaces the commented-out if/elif chain flagged by the old TODO).
        functions = {'add': basic_math.adding,
                     'subtract': basic_math.subtraction,
                     'multiplication': basic_math.multiplication,
                     'integer_divide': basic_math.integer_divide,
                     'remainder': basic_math.remainder,
                     'power': basic_math.power,
                     'root': basic_math.root}
        for datapoint in converted_format:
            try:
                result.append(
                    functions[datapoint['operation']]
                    (datapoint['num_1'], datapoint['num_2'])
                )
            except KeyError:
                # Bug fix: an unknown operation raises KeyError on the dict
                # lookup (never IndexError), so this fallback was unreachable
                # and unsupported operations crashed the request.
                result.append('operation not supported')
        return result, 201
# Mount the resource at the service root.
API.add_resource(ApiManager, '/')
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger; dev use only.
    APP.run(debug=True)
| StarcoderdataPython |
56343 | <reponame>Execut3/django-discount
# Generated by Django 2.2.5 on 2020-12-02 02:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds optional per-user scoping to
    # Discount and adds db indexes on existing fields. Do not edit operations
    # by hand; create a follow-up migration instead.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('django_discount', '0002_auto_20201201_1425'),
    ]
    operations = [
        # New nullable FK: when set, the code is valid only for that user.
        migrations.AddField(
            model_name='discount',
            name='user',
            field=models.ForeignKey(blank=True, help_text="When empty discount will be used for everyone and should not be any duplicates. But if User set it can have discount's with different users and duplicate codes. And that specific discount will only be available for that user.", null=True, on_delete=django.db.models.deletion.CASCADE, related_name='discounts', to=settings.AUTH_USER_MODEL, verbose_name='Specific user to use this discount'),
        ),
        # db_index speeds up lookups by code.
        migrations.AlterField(
            model_name='discount',
            name='code',
            field=models.CharField(db_index=True, max_length=20, verbose_name='Code'),
        ),
        migrations.AlterField(
            model_name='discount',
            name='is_deleted',
            field=models.BooleanField(db_index=True, default=False, verbose_name='حذف شد'),
        ),
        migrations.AlterField(
            model_name='useddiscount',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='used_discounts', to=settings.AUTH_USER_MODEL, verbose_name='User who used discount code'),
        ),
    ]
| StarcoderdataPython |
4814592 | import os
import networkx as nx
import pandas as pd
def induced_subgraph(
    G, filter_type, filter_attribute, filter_values, ignore_attrs=False
):
    """
    Create custom induced subgraph.

    Args:
        G: source graph (copied into a MultiDiGraph).
        filter_type: 'node' or 'edge'
        filter_attribute: attribute to filter on
        filter_values: attribute values to evaluate to `True`
        ignore_attrs: for edge filtering, drop node attributes when True.

    Returns:
        The induced nx.MultiDiGraph; its 'name' attribute records the filter.

    Raises:
        ValueError: if filter_type is neither 'node' nor 'edge'.
    """
    G = nx.MultiDiGraph(G)
    if filter_type == "node":
        nodes = [
            n for n in G.nodes() if G.nodes[n].get(filter_attribute) in filter_values
        ]
        sG = nx.induced_subgraph(G, nodes)
    elif filter_type == "edge":
        sG = nx.MultiDiGraph()
        sG.add_nodes_from(G.nodes(data=not ignore_attrs))
        sG.add_edges_from(
            [
                (e[0], e[1], e[-1])
                for e in G.edges(data=True)
                if e[-1][filter_attribute] in filter_values
            ]
        )
    else:
        # Bug fix: a bare `raise` outside an active exception handler raises
        # "RuntimeError: No active exception to re-raise"; raise an explicit,
        # informative error instead.
        raise ValueError(f"filter_type must be 'node' or 'edge', got {filter_type!r}")
    sG.graph["name"] = "_".join(
        [G.graph["name"], filter_type, filter_attribute, str(*filter_values)]
    )
    return sG
def hierarchy_graph(G: nx.DiGraph, ignore_attrs=False):
    """
    Restrict G to its 'containment' edges, i.e. remove reference edges.
    Wrapper around induced_subgraph.
    """
    return induced_subgraph(G, "edge", "edge_type", ["containment"], ignore_attrs)
def multi_to_weighted(G: nx.MultiDiGraph):
    """
    Collapse a multidigraph into a DiGraph whose edge 'weight' attribute
    counts the parallel edges merged into each (u, v) pair.
    """
    weighted = nx.DiGraph(G)
    weighted.name = G.name + "_weighted_nomulti"
    counts = {}
    for u, v, _key in G.edges:
        counts[(u, v)] = counts.get((u, v), 0) + 1
    nx.set_edge_attributes(weighted, counts, "weight")
    return weighted
def get_leaves(G: nx.DiGraph):
    """
    Args:
        G: a tree as a directed graph with edges pointing from root to leaves

    Returns: set of leaves of the containment hierarchy of G (nodes with no
        outgoing containment edge)
    """
    hierarchy = hierarchy_graph(G, ignore_attrs=True)
    return {node for node in hierarchy.nodes if hierarchy.out_degree(node) == 0}
def decay_function(key: int):
    """
    Return f(distance) = (distance - 1) ** (-key), used to weight sequence
    edges by the hierarchical distance between neighboring nodes.
    """
    def _decay(distance):
        return (distance - 1) ** (-key)
    return _decay
def sequence_graph(
    G: nx.MultiDiGraph, seq_decay_func=decay_function(1), seq_ref_ratio=1
):
    """
    Creates sequence graph for G, consisting of seqitems and their cross-references
    only,
    where neighboring seqitems are connected via edges in both directions.

    Args:
        seq_decay_func: function to calculate sequence edge weight based on distance
            between neighboring nodes
        seq_ref_ratio: ratio between a sequence edge weight when nodes in the
            sequence are at minimum distance from each other and a reference edge
            weight; a falsy value skips sequence edges and gives all edges weight 1
    """
    hG = hierarchy_graph(G, ignore_attrs=True)
    # make sure we get _all_ seqitems as leaves, not only the ones without outgoing
    # references
    leaves = [n for n in hG.nodes() if hG.out_degree(n) == 0]
    sG = nx.MultiDiGraph(nx.induced_subgraph(G, leaves))
    if seq_ref_ratio:
        nx.set_edge_attributes(sG, 1 / seq_ref_ratio, name="weight")
        node_headings = dict(sG.nodes(data="heading"))
        # keys sorted lexicographically -- presumably this reproduces document
        # order within each law; confirm against the node key scheme
        ordered_seqitems = sorted(list(node_headings.keys()))
        # connect neighboring seqitems sequentially
        new_edges = get_new_edges(G, ordered_seqitems, seq_decay_func)
        sG.add_edges_from(new_edges)
    else:
        nx.set_edge_attributes(sG, 1, name="weight")
    sG.graph["name"] = f'{G.graph["name"]}_sequence_graph_seq_ref_ratio_{seq_ref_ratio}'
    return sG
def get_new_edges(G, ordered_seqitems, seq_decay_func):
    """
    Convenience function to avoid list comprehension over four lines.

    Builds forward ('there') and backward ('back') sequence edges between
    consecutive seqitems belonging to the same law, weighted by seq_decay_func
    applied to their distance in the (undirected) containment hierarchy.
    """
    there = []
    back = []
    hG = hierarchy_graph(G).to_undirected()
    for idx, n in enumerate(ordered_seqitems[:-1]):
        next_item = ordered_seqitems[idx + 1]
        if (
            n.split("_")[0] == next_item.split("_")[0]
        ): # n and next_item are in the same law
            distance = nx.shortest_path_length(hG, source=n, target=next_item)
            weight = seq_decay_func(distance)
            there.append(
                (
                    n,
                    next_item,
                    {"edge_type": "sequence", "weight": weight, "backwards": False},
                )
            )
            back.append(
                (
                    next_item,
                    n,
                    {"edge_type": "sequence", "weight": weight, "backwards": True},
                )
            )
    return there + back
def quotient_graph(
    G,
    node_attribute,
    edge_types=["reference", "cooccurrence"],
    self_loops=False,
    root_level=-1,
    aggregation_attrs=("chars_n", "chars_nowhites", "tokens_n", "tokens_unique"),
):
    """
    Generate the quotient graph with all nodes sharing the same node_attribute condensed
    into a single node. Simplest use case is aggregation by law_name.

    Args:
        G: source MultiDiGraph.
        node_attribute: node attribute whose values define the partition.
        edge_types: edge types copied into the quotient graph.
        self_loops: keep intra-community edges when True.
        root_level: 'level' value identifying the root node to exclude
            (None disables the special root handling).
        aggregation_attrs: node attributes summed per community.

    Returns:
        The quotient nx.MultiDiGraph.
    """
    # NOTE(review): the mutable default for edge_types is only read, never
    # mutated, so the shared-default pitfall does not bite here.
    # node_key:attribute_value map
    attribute_data = dict(G.nodes(data=node_attribute))
    # set cluster -1 if they were not part of the clustering
    # (guess: those are empty laws)
    attribute_data = {
        k: (v if v is not None else -1) for k, v in attribute_data.items()
    }
    # remove the root if root_level is given
    root = None
    if root_level is not None:
        roots = [x for x in G.nodes() if G.nodes[x]["level"] == root_level]
        if roots:
            root = roots[0]
    # unique values in that map with root node
    unique_values = sorted({v for k, v in attribute_data.items() if k != root})
    # build a new MultiDiGraph
    nG = nx.MultiDiGraph()
    # add nodes
    new_nodes = {x: [] for x in unique_values}
    nG.add_nodes_from(unique_values)
    # sort nodes into buckets
    for n in attribute_data.keys():
        if n != root:
            mapped_to = attribute_data[n]
            new_nodes[mapped_to].append(n)
            # a community inherits the attributes of the node whose heading
            # equals the community value (its "representative")
            if G.nodes[n].get("heading") == mapped_to:
                for x in G.nodes[n].keys():
                    nG.nodes[mapped_to][x] = G.nodes[n][x]
    # add edges
    for e in G.edges(data=True):
        if e[-1]["edge_type"] not in edge_types:
            continue
        if (True if self_loops else attribute_data[e[0]] != attribute_data[e[1]]) and (
            True if root_level is None else G.nodes[e[0]]["level"] != root_level
        ): # special treatment for root
            k = nG.add_edge(
                attribute_data[e[0]], attribute_data[e[1]], edge_type=e[-1]["edge_type"]
            )
            # sequence edges keep their distance-based weight
            if e[-1]["edge_type"] == "sequence":
                nG.edges[attribute_data[e[0]], attribute_data[e[1]], k]["weight"] = e[
                    -1
                ]["weight"]
    nG.graph["name"] = f'{G.graph["name"]}_quotient_graph_{node_attribute}'
    if aggregation_attrs:
        aggregate_attr_in_quotient_graph(nG, G, new_nodes, aggregation_attrs)
    return nG
def aggregate_attr_in_quotient_graph(nG, G, new_nodes, aggregation_attrs):
    """
    Sum node attributes of the original graph per community and write the sums
    onto the corresponding quotient-graph nodes.

    Args:
        nG: quotient graph (mutated in place).
        G: original graph.
        new_nodes: mapping quotient node -> iterable of original nodes it
            represents.
        aggregation_attrs: names of the attributes to aggregate.
    """
    for attr_name in aggregation_attrs:
        values = nx.get_node_attributes(G, attr_name)
        for quotient_node, members in new_nodes.items():
            nG.nodes[quotient_node][attr_name] = sum(values.get(m) for m in members)
def load_graph_from_csv_files(
    crossreference_folder,
    file_basename,
    filter="exclude_subseqitems",
    filter_by_edge_types=None,
):
    """
    Loads a networkx MultiDiGraph from a nodelist and edgelist
    formatted as .csv.gz files.
    The node csv must have a 'key' column that serves as a node key.
    Other columns are added as node attributes.
    The edge csv must have a the columns 'u', 'v', 'edge_type'.
    If filter is node all nodes will be loaded.
    By default subeqitems will be excluded.
    If filter is a callable, it is called with a pandas.DataFrame loaded
    from the csv as the only argument.
    The callable must return values to filter the DataFrame.
    Args:
        crossreference_folder: Folder containing the edgelists
        file_basename: base filename of the edgelists
            (will be suffixed with '.nodes.csv.gz' and '.edges.csv.gz')
        filter: Filters the nodes to load. Options "exclude_subseqitems",
            None or a function that filters a pandas.DataFrame
            (NOTE(review): parameter name shadows the builtin `filter`;
            kept for interface compatibility)
        filter_by_edge_types: Filters the edges to load. None in includes all
            edges. You can also provide a list of edge_types.
            E.g. `['containment', 'reference']`.
    """
    nodes_csv_path = os.path.join(
        crossreference_folder, f"{file_basename}.nodes.csv.gz"
    )
    edges_csv_path = os.path.join(
        crossreference_folder, f"{file_basename}.edges.csv.gz"
    )
    G = nx.MultiDiGraph(name=str(file_basename))
    nodes_df = pd.read_csv(nodes_csv_path)
    if filter == "exclude_subseqitems":
        nodes_df = nodes_df[nodes_df.type != "subseqitem"]
    elif callable(filter):
        nodes_df = nodes_df[filter(nodes_df)]
    G.add_nodes_from(list(nodes_df.key))
    # attach every csv column as a node attribute, skipping NaN cells
    for column in nodes_df.columns:
        attrs_dict = {
            k: v for k, v in zip(nodes_df.key, nodes_df[column]) if not pd.isna(v)
        }
        nx.set_node_attributes(
            G,
            attrs_dict,
            column,
        )
    all_nodes = set(nodes_df.key)
    # free the node table before loading the (potentially large) edge list
    del nodes_df
    edges_df = pd.read_csv(edges_csv_path)
    # keep only edges of the requested types whose endpoints survived filtering
    edges = [
        (u, v, {"edge_type": edge_type})
        for u, v, edge_type in zip(edges_df.u, edges_df.v, edges_df.edge_type)
        if (
            (filter_by_edge_types is None or edge_type in filter_by_edge_types)
            and u in all_nodes
            and v in all_nodes
        )
    ]
    del edges_df
    G.add_edges_from(edges)
    return G
| StarcoderdataPython |
1751374 | #Import modules
import os
import pandas as pd
import numpy as np
from pandas import DatetimeIndex
import dask
import scipy
from scipy.optimize import minimize, LinearConstraint
import time
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import pickle
#Define Column Name
indexName = 'date'
indexExpiry = 'optionExpiry'
indexTenor = 'underlyingTerm'
indexStrike = 'Strike'
indexRelStrike = 'RelativeStrike'
def getTTMFromCoordinates(dfList):
    """Extract the time-to-maturity (first element of each coordinate tuple)."""
    coordinates = dfList[1]
    return coordinates.applymap(lambda coord: coord[0])
def getMoneynessFromCoordinates(dfList):
    """Extract the moneyness (second element of each coordinate tuple)."""
    coordinates = dfList[1]
    return coordinates.applymap(lambda coord: coord[1])
def readfile(file):
    # Placeholder: just log the directory entry handed over by
    # iterateOnFolderContent.
    print("file")
    print(file)
def iterateOnFolderContent(folderName):
    """
    Recursively walk folderName, printing folders and delegating files to
    readfile.

    Args:
        folderName: path (or os.DirEntry) accepted by os.scandir.
    """
    for entry in os.scandir(folderName):
        # Idiom fix: call is_dir() on the entry instead of the unbound
        # os.DirEntry.is_dir(entry) form.
        if entry.is_dir():
            print("Folder")
            print(entry)
            iterateOnFolderContent(entry)
        else:
            readfile(entry)
def parseTerm(stringTerm):
    """
    Convert a tenor string into a year fraction.

    An 'M' suffix means months (e.g. '6M' -> 0.5); 'Y' means years
    ('2Y' -> 2.0).

    Raises:
        ValueError: when the suffix is neither 'M' nor 'Y' (also covers the
            empty string, which previously raised IndexError).
    """
    if stringTerm.endswith('M'):
        return float(stringTerm[:-1]) / 12
    if stringTerm.endswith('Y'):
        return float(stringTerm[:-1])
    # Bug fix: raise a specific exception type instead of the generic
    # Exception (ValueError is still caught by callers catching Exception).
    raise ValueError("Can not parse term")
def parseTenor(row):
    """Return [tenor, expiry] of a quote row as year fractions."""
    tenor = parseTerm(row['underlyingTerm'])
    expiry = parseTerm(row['optionExpiry'])
    return [tenor, expiry]
def smileFromSkew(skew):
    """Rebuild the smile: add the ATM vol ('A') to every non-ATM skew offset."""
    atm_level = skew['A']
    return skew.drop('A') + atm_level
def parseStrike(relStrike):
    """
    Convert a relative-strike row into an absolute strike.

    The fourth index level holds the relative-strike label: 'A' is the
    forward itself, while 'A+N' / 'A-N' shift the forward by N/1000.
    """
    label = relStrike.name[3]
    if label == 'A':
        return relStrike['forward']
    if "+" in label:
        offset = int(label.split("+")[1])
        return relStrike['forward'] + offset / 1000
    if "-" in label:
        offset = int(label.split("-")[1])
        return relStrike['forward'] - offset / 1000
    raise Exception(' Can not parse Strike ')
#intersection of all dates across history
def intersectionGrid(grid) :
    """
    Recursively intersect the grids observed on every date (level 0 of the
    index): split the date range in two, recurse on each half, and intersect
    the results, so only points quoted on *all* dates survive.
    """
    nbDates = grid.index.get_level_values(0).unique().shape[0]
    if nbDates <= 1:
        # Single date: its grid (index without the date level) is the answer.
        return grid.index.droplevel(0)
    else :
        midDate = grid.index.get_level_values(0).unique()[int(nbDates/2)]
        g1 = grid[grid.index.get_level_values(0) < midDate]
        g2 = grid[grid.index.get_level_values(0) >= midDate]
        return intersectionGrid(g1).intersection(intersectionGrid(g2))
def splitTrainTestDataRandomly(gridHistory, trainingSetPercentage):
    """
    Split observation dates (level 0 of the index) at random into a training
    and a testing set; the remaining two index levels are dropped so both
    outputs are indexed by date only. Uses the global numpy RNG state.
    """
    nbDates = gridHistory.index.get_level_values(0).unique().shape[0]
    trainingDates = np.random.choice(gridHistory.index.get_level_values(0).unique(),
                                     replace=False,
                                     size=int(nbDates * trainingSetPercentage))
    trainingData = gridHistory.loc[pd.IndexSlice[trainingDates,:,:], :]
    testingData = gridHistory.drop(trainingData.index)
    trainingData.index = trainingData.index.droplevel([1,2])
    testingData.index = testingData.index.droplevel([1,2])
    return trainingData, testingData
def splitTrainTestDataChronologically(gridHistory, trainingSetPercentage):
    """
    Split history into a training period followed by a testing period.

    The first trainingSetPercentage of observation dates (order of appearance
    in index level 0) form the training set; the remainder is the test set.
    Index levels 1 and 2 are dropped so both outputs are indexed by date only.
    """
    dates = gridHistory.index.get_level_values(0).unique()
    cutoff = int(dates.shape[0] * trainingSetPercentage)
    trainingData = gridHistory.loc[pd.IndexSlice[dates[:cutoff], :, :], :]
    testingData = gridHistory.drop(trainingData.index)
    trainingData.index = trainingData.index.droplevel([1, 2])
    testingData.index = testingData.index.droplevel([1, 2])
    return trainingData, testingData
def sampleBatchOfDays(dataSet, nbDrawn):
    # Draw nbDrawn distinct observation dates uniformly at random (global
    # numpy RNG) and return every row observed on those dates.
    trainingDates = np.random.choice(dataSet.index.get_level_values(0).unique(),
                                     replace=False,
                                     size=nbDrawn)
    return dataSet.loc[trainingDates, :]
def splitHistory(history, colName):
    """Pivot colName into a table with one column per (Expiry, Tenor) pair,
    keeping the existing index levels as rows."""
    pivot_columns = ['Expiry', 'Tenor']
    return pd.pivot_table(history,
                          values=colName,
                          index=history.index.names,
                          columns=pivot_columns)
def extractDataFromCSV(dataSetPath):
    """
    Load raw volatility quotes from csv and reshape them into a long format
    indexed by (expiry, tenor, date, relative strike), with the absolute
    strike attached as a column.
    """
    #Read csv file
    data = pd.read_csv(dataSetPath)
    #Parse tenor and expiry as float years
    data['Tenor'],data['Expiry'] = zip(*data.apply(parseTenor,axis=1))
    #Parse date as a datetime
    data[indexName] = pd.to_datetime(data['businessDate'], dayfirst=True)
    #Set Index as as a three dimension vector and sort observation
    indexedData = data.set_index([indexExpiry, indexTenor, indexName]).sort_index()
    #Keep relevant features
    #Columns used for representing a Strike Value
    # NOTE(review): the test matches any column containing 'A' anywhere in its
    # name -- confirm against the csv column naming scheme.
    skewShift = [shift for shift in indexedData.columns if ('A' in shift )]#and 'A' != shift
    #Other Columns to keep
    otherColumns = ['forward', 'Tenor', 'Expiry']
    #Get columns indexed by a relative strike
    skewHistory = indexedData[skewShift + otherColumns]#.apply(smileFromSkew,axis=1)
    #Merge with other useful columns
    #Stacking Smile
    #Left outer Join on (tenor, expiry, date)
    joinColumns = skewHistory.index.names
    leftTable = skewHistory.drop(otherColumns, axis = 1).stack().rename("Vol")#Features depending on strike value
    leftTable.index.names = [leftTable.index.names[0],
                             leftTable.index.names[1],
                             leftTable.index.names[2],
                             'RelativeStrike']
    formattedHistory = leftTable.reset_index().merge(skewHistory[otherColumns].reset_index(),
                                                     on=joinColumns,
                                                     validate = "m:1").set_index(leftTable.index.names).sort_index()
    #Convert strike shift as a float from a stringTerm
    formattedHistory[indexStrike] = formattedHistory.apply(parseStrike,axis=1)
    return formattedHistory
def equalDf(df1, df2):
    """
    Compare two dataframes: same shape, same NaN count, and the sum of
    absolute differences (NaNs ignored) below 1e-6. Logs the reason for any
    mismatch and returns a bool.
    """
    if df1.shape != df2.shape:
        print("Not the same shape")
        return False
    if np.sum(np.isnan(df1.values)) != np.sum(np.isnan(df2.values)):
        print("Not the same number of nan")
        return False
    tol = 1e-6
    gap = np.nansum(np.abs(df1.values - df2.values))
    if gap < tol:
        return True
    print("Large df error : ", gap)
    return False
def sampleSwaptionsToDelete(dataSet, completionRate):
    # Randomly pick a fraction (completionRate) of the first day's grid
    # points; their coordinates will be masked to simulate an incomplete
    # surface. Uses pandas' default RNG.
    return dataSet.iloc[0].sample(frac = completionRate).index
def removeSwaptionsToDelete(dataSet):
    """Return the first day's grid coordinates minus a fixed list of
    (expiry, tenor) points to mask."""
    excluded = [
        (0.08333333333333333, 0.25),
        (0.08333333333333333, 10.0),
        (0.08333333333333333, 30.0),
        (0.5, 2.0),
        (0.5, 15.0),
        (5.0, 1.0),
        (5.0, 20.0),
        (10.0, 5.0),
    ]
    return dataSet.iloc[0].index.difference(excluded)
#Different from minmax scaler of scikit learn
#Min and Max are computed on the dataset, not column wise
class customMinMaxScale:
    """Affine scaler mapping the *global* dataset range onto feature_range
    (unlike sklearn's MinMaxScaler, min and max are not computed per column)."""

    def __init__(self, feature_range = (0,1)):
        self.min, self.max = feature_range

    def fit(self, dataset,
            enforceDataSetMin = None,
            enforceDataSetMax = None):
        """Record the dataset-wide min/max, optionally widened by the
        enforce* bounds when smaller/larger values are expected later."""
        observed_min = dataset.min().min()
        observed_max = dataset.max().max()
        if enforceDataSetMin is not None:
            observed_min = min(enforceDataSetMin, observed_min)
        if enforceDataSetMax is not None:
            observed_max = max(enforceDataSetMax, observed_max)
        self.datasetMin = observed_min
        self.datasetMax = observed_max

    def _scale(self):
        # Slope of the affine map from the data range to the feature range.
        return (self.max - self.min) / (self.datasetMax - self.datasetMin)

    def transform(self, dataset):
        """Map dataset from [datasetMin, datasetMax] onto [min, max]."""
        return (dataset - self.datasetMin) * self._scale() + self.min

    def inverse_transform(self, scaledDataset):
        """Invert transform(): map scaled values back to the original range."""
        return (scaledDataset - self.min) / self._scale() + self.datasetMin
#Encapsulation class for Sklearn Standard scaling
class customMeanStdScale:
def __init__(self, feature_range = (0,1)):
self.scalerList = []
#We can enforce the minimum if we expect smaller data in the testing set
def fit(self, dataset,
enforceDataSetMin = None,
enforceDataSetMax = None):
hasTupleElt = (type(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])==type(tuple()))
if hasTupleElt :
tupleSize = len(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])
self.scalerList = [StandardScaler() for i in range(tupleSize)]
for k in range(tupleSize):
funcAccess = lambda x : x[k]
scaler = self.scalerList[k]
dfElt = dataset.applymap(funcAccess) if (type(dataset) != type(pd.Series())) else dataset.map(funcAccess)
scaler.fit(dfElt)
else :
self.scalerList = []
self.scalerList.append(StandardScaler())
self.scalerList[0].fit(dataset)
return
def transformSingleDf(self, scaler, dfElt):
totalVariance = np.sum(scaler.var_)
if totalVariance <= 1e-6 : #Avoid mean scaling for constant data
return dfElt
if type(dfElt) == type(pd.Series()):
return pd.Series(np.ravel(scaler.transform(dfElt.values.reshape(1, -1))),
index = dfElt.index).rename(dfElt.name)
return pd.DataFrame(scaler.transform(dfElt),
index = dfElt.index,
columns = dfElt.columns)
def transform(self, dataset):
hasTupleElt = (type(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])==type(tuple()))
if hasTupleElt :
tupleSize = len(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])
scaledDfList = []
for k in range(tupleSize):
funcAccess = lambda x : x[k]
dfElt = dataset.applymap(funcAccess) if (type(dataset) != type(pd.Series())) else dataset.map(funcAccess)
scaler = self.scalerList[k]
scaledDfList.append(np.ravel(self.transformSingleDf(scaler, dfElt).values))
#Flattened list of tuples
tupleList= list(zip(*scaledDfList))
#Merge all datasets into a single structure
if dataset.ndim==2 :
reshapedList = [tupleList[(i*dataset.shape[1]):((i+1)*dataset.shape[1])] for i in range(dataset.shape[0])]
return pd.DataFrame(reshapedList,
index = dataset.index,
columns = dataset.columns)
else :
reshapedList = tupleList
return pd.Series(reshapedList, index = dataset.index)
else :
return self.transformSingleDf(self.scalerList[0], dataset)
return None
def inverTransformSingleDf(self, scaler, dfElt):
totalVariance = np.sum(scaler.var_)
if totalVariance <= 1e-6 : #Avoid mean scaling for constant data
return dfElt
if type(dfElt) == type(pd.Series()):
return pd.Series(np.ravel(scaler.inverse_transform(dfElt.values.reshape(1, -1))),
index = dfElt.index).rename(dfElt.name)
return pd.DataFrame(scaler.inverse_transform(dfElt),
index = dfElt.index,
columns = dfElt.columns)
    def inverse_transform(self, scaledDataset):
        """Map *scaledDataset* back to the original data scale.

        Mirror image of transform(): tuple-valued cells are split per
        component, each component is inverse-transformed with its dedicated
        scaler, and the results are zipped back into tuples. Scalar data is
        delegated to inverTransformSingleDf with the single scaler.
        """
        hasTupleElt = (type(scaledDataset.iloc[0,0] if scaledDataset.ndim==2 else scaledDataset.iloc[0])==type(tuple()))
        if hasTupleElt :
            tupleSize = len(scaledDataset.iloc[0,0] if scaledDataset.ndim==2 else scaledDataset.iloc[0])
            scaledDfList = []
            for k in range(tupleSize):
                #Extract component k of every tuple cell
                funcAccess = lambda x : x[k]
                dfElt = scaledDataset.applymap(funcAccess) if (type(scaledDataset) != type(pd.Series())) else scaledDataset.map(funcAccess)
                scaler = self.scalerList[k]
                scaledDfList.append(np.ravel(self.inverTransformSingleDf(scaler, dfElt).values))
            #Flattened list of tuples
            tupleList= list(zip(*scaledDfList))
            #Merge all datasets into a single structure
            if scaledDataset.ndim==2 :
                #Rebuild one row of tuples per original row (row-major order)
                reshapedList = [tupleList[(i*scaledDataset.shape[1]):((i+1)*scaledDataset.shape[1])] for i in range(scaledDataset.shape[0])]
                return pd.DataFrame(reshapedList,
                                    index = scaledDataset.index,
                                    columns = scaledDataset.columns)
            else :
                reshapedList = tupleList
                return pd.Series(reshapedList, index = scaledDataset.index)
        else :
            return self.inverTransformSingleDf(self.scalerList[0], scaledDataset)
        return None
#Encapsulation class for Sklearn min max scaling
class standardMinMaxScale(customMeanStdScale):
    """Encapsulation class for sklearn min-max scaling.

    Shares the tuple-aware transform/inverse_transform machinery of the
    parent class; only the fitting step differs (MinMaxScaler instead of
    StandardScaler).
    """
    def __init__(self, feature_range = (0,1)):
        super().__init__()
        # Bug fix: feature_range used to be accepted and silently ignored;
        # keep it so fit() can forward it to the underlying MinMaxScaler.
        self.feature_range = feature_range
    #We can enforce the minimum if we expect smaller data in the testing set
    def fit(self, dataset,
            enforceDataSetMin = None,
            enforceDataSetMax = None):
        """Fit one MinMaxScaler per tuple component, or a single one for scalar data.

        enforceDataSetMin/enforceDataSetMax are accepted for interface
        compatibility with the parent class; they are currently unused here.
        """
        hasTupleElt = (type(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])==type(tuple()))
        if hasTupleElt :
            tupleSize = len(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])
            self.scalerList = [MinMaxScaler(feature_range = self.feature_range) for i in range(tupleSize)]
            for k in range(tupleSize):
                #Fit scaler k on component k of every tuple cell
                funcAccess = lambda x : x[k]
                scaler = self.scalerList[k]
                dfElt = dataset.applymap(funcAccess) if (type(dataset) != type(pd.Series())) else dataset.map(funcAccess)
                scaler.fit(dfElt)
        else :
            self.scalerList = []
            self.scalerList.append(MinMaxScaler(feature_range = self.feature_range))
            self.scalerList[0].fit(dataset)
        return
def selectLessCorrelatedFeatures(featureCorr, nbPoints):
    """Select the *nbPoints* least mutually correlated features.

    Minimizes x^T C x over the box-constrained budget set
    {0 <= x_i <= 1, sum(x) == nbPoints} with scipy's trust-constr solver,
    restarting from several random points, then keeps the nbPoints features
    carrying the largest optimal weights.

    featureCorr : square feature-correlation DataFrame (features x features)
    nbPoints    : number of features to retain
    Returns the Index of the selected features.
    """
    objectiveFunction = lambda x : x.T @ featureCorr.values @ x
    gradient = lambda x : (featureCorr.values + featureCorr.values.T) @ x
    hessian = lambda x : featureCorr.values + featureCorr.values.T
    #Random multi-start to mitigate local minima; starts are rescaled onto the budget
    nbRestart = 5
    x0s = np.random.uniform(size=(nbRestart , featureCorr.shape[1]))
    x0s = x0s * nbPoints / np.sum(x0s, axis = 1, keepdims=True)
    bestSol = x0s[0,:]
    bestVar = featureCorr.shape[1]
    bounds = [[0,1]] * featureCorr.shape[1]
    budgetAllocation = LinearConstraint(np.ones((1,featureCorr.shape[1])), [nbPoints], [nbPoints], keep_feasible = True)
    for k in range(nbRestart):
        res = minimize(objectiveFunction, x0s[k,:],
                       bounds = bounds,
                       constraints = budgetAllocation,
                       method = "trust-constr",
                       jac = gradient,
                       hess = hessian)
        #Keep the best restart; k==0 guards against the arbitrary bestVar initialization
        if (res.fun < bestVar) or (k==0) :
            bestSol = res.x
            bestVar = res.fun
        print("Attempt no ", k, " ; best solution : ", bestSol, " ; best inertia : ", bestVar)
    #Threshold at the nbPoints-th largest weight and keep everything above it
    topnbPointsValue = -(np.sort(-bestSol)[nbPoints - 1])
    optimalAllocation = pd.Series(bestSol, index = featureCorr.index)
    return optimalAllocation[optimalAllocation >= topnbPointsValue].index
def isCSVFile(filename):
    """Return True when *filename* has a .csv extension (case-insensitive).

    The previous implementation compared the last three characters to "csv",
    which wrongly accepted names like "datacsv" and rejected "DATA.CSV".
    """
    return filename.lower().endswith(".csv")
#These classes are responsible for :
# - passing the right data to the model for training
# - converting data to the original format for plotting
class datasetATM:
    """Base container for a volatility history split into training/testing sets.

    Concrete subclasses implement loadData() to fill the raw train/test
    members (vol, coordinates, forward, strike); this base class then
    optionally scales them and exposes model-format accessors returning
    [vol, coordinates, forward, strike] quadruples.
    """
    def __init__(self, pathToDataset,
                 trainingSetPercentage,
                 minExpiry,
                 completionRate,
                 scaleFeatures = False):
        # NOTE(review): minExpiry and completionRate are not stored here;
        # subclasses that need them keep their own copies — confirm intended.
        self.trainingSetPercentage = trainingSetPercentage
        self.pathToDataset = pathToDataset
        self.activateScaling = scaleFeatures
        self.isGridStable = True
        #Raw and scaled volatility members
        self.testVol = None
        self.trainVol = None
        self.VolSerie = None
        self.volScaler = None
        self.scaledTrainVol = None
        self.scaledTestVol = None
        #Raw and scaled coordinates (expiry/tenor tuples) members
        self.testCoordinates = None
        self.trainCoordinates = None
        self.CoordinatesSerie = None
        self.coordinatesScaler = None
        self.scaledTrainCoordinates = None
        self.scaledTestCoordinates = None
        #Raw and scaled forward members
        self.testFwd = None
        self.trainFwd = None
        self.FwdSerie = None
        self.fwdScaler = None
        self.scaledTrainFwd = None
        self.scaledTestFwd = None
        #Strike members (never scaled)
        self.testStrike = None
        self.trainStrike = None
        self.StrikeSerie = None
        self.loadData()
        self.scaleDataSets()
        # NOTE(review): DataFrame.append was removed in pandas 2.x — this
        # relies on an older pandas; would need pd.concat there.
        lambdaAppend = (lambda x : x[0].append(x[1]) if x[0] is not None else None)
        self.fullHistory = list(map(lambdaAppend, zip(self.getTrainingDataForModel(),self.getTestingDataForModel())))
        self.fullScaler = [self.volScaler, self.coordinatesScaler, self.fwdScaler, None]
        self.gridSize = self.getTestingDataForModel()[0].shape[1]
        return
    def loadData(self):
        """Fill the raw train/test members; must be overridden by subclasses."""
        raise NotImplementedError("Abstract class")
        return
    def sanityCheck(self):
        """Assert that scaling/formatting round-trips reproduce the raw data."""
        print("Testing formatModelDataAsDataSet")
        assert(equalDf(self.testVol.dropna(how="all").head(),
                       self.formatModelDataAsDataSet(self.getTestingDataForModel())[0].head()))
        origData = self.formatModelDataAsDataSet(self.getTrainingDataForModel())
        print("Testing coordinates")
        assert(equalDf(self.trainCoordinates.head().applymap(lambda x : x[0]),
                       origData[1].head().applymap(lambda x : x[0])))
        assert(equalDf(self.trainCoordinates.head().applymap(lambda x : x[1]),
                       origData[1].head().applymap(lambda x : x[1])))
        print("Testing Forward")
        assert(equalDf(self.getTrainingDataForModel()[2].head(),
                       self.convertRealDataToModelFormat(self.formatModelDataAsDataSet(self.getTrainingDataForModel()))[2].head()))
        print("Testing masking function")
        maskedDf = self.maskDataset(self.getTrainingDataForModel()[1]).dropna(how="all",axis=1).head()
        assert(maskedDf.shape[1] == (self.gridSize - self.maskedPoints.size))
        print("Testing convertRealDataToModelFormat")
        assert(equalDf(self.trainVol.loc[origData[0].index].head(),
                       self.formatModelDataAsDataSet(self.convertRealDataToModelFormat(origData))[0].head()))
        print("Success")
        return
    #When the grid is not fixed - i.e. volatilities time to maturities are sliding -
    #we need to decide which instruments can be compared between two dates
    def decideInvestableInstruments(self):
        """Return, per date, the pair of column indexes hedgeable between day t and t+1.

        On a stable grid every point is comparable, so each pair is simply
        (all columns, all columns).
        """
        coordinatesDf = self.formatModelDataAsDataSet(self.getDataForModel())[1]
        pairIndexHistory = []#series of pair of index
        nextTTMDf = coordinatesDf.shift(-1).dropna(how = "all")
        for serie in coordinatesDf.head(-1).iterrows():
            currentDay = serie[1]
            nextDay = nextTTMDf.loc[serie[0]]
            currentRankForHedgeablePoints = currentDay.index
            nextRankForHedgeablePoints = nextDay.index
            pairIndexHistory.append((currentRankForHedgeablePoints, nextRankForHedgeablePoints))
        #Last day reuses the final day's own index on both sides
        pairIndexHistory.append((nextRankForHedgeablePoints, nextRankForHedgeablePoints))
        pairIndexHistory = pd.Series(pairIndexHistory, index = coordinatesDf.index)
        return pairIndexHistory
    #List Format : First position vol, second position coordinates, third position forward, fourth position strike
    def getTestingDataForModel(self):
        """Return the scaled testing quadruple [vol, coordinates, forward, strike]."""
        return [self.scaledTestVol, self.scaledTestCoordinates, self.scaledTestFwd, self.testStrike]
    def getTrainingDataForModel(self):
        """Return the scaled training quadruple [vol, coordinates, forward, strike]."""
        return [self.scaledTrainVol, self.scaledTrainCoordinates, self.scaledTrainFwd, self.trainStrike]
    def getDataForModel(self, dates = None):
        """Return the full (train+test) quadruple, optionally restricted to *dates*."""
        if dates is None :
            return self.fullHistory
        funcExtractDate = lambda x : x.loc[dates] if x is not None else None
        return list(map(funcExtractDate, self.fullHistory))
    #Tranform synthetic surfaces as model data
    #Name of surfaces should be the date
    def convertRealDataToModelFormat(self, unformattedSurface):
        """Scale real-world data (quadruple or single vol surface) into model format."""
        if(self.activateScaling):
            if (type(unformattedSurface)==type(list())) and (len(unformattedSurface)==4):
                lambdaTransform = lambda x : x[0] if x[1] is None else x[1].transform(x[0])
                return list(map(lambdaTransform, zip(unformattedSurface, self.fullScaler)))
            elif (type(unformattedSurface)!=type(list())) :
                return self.volScaler.transform(unformattedSurface)
            else :
                # NOTE(review): raising a plain string is a TypeError in
                # Python 3 — should be e.g. raise ValueError(...).
                raise("Can not format as model data")
            return
        return unformattedSurface
    #Format data returned by a model to format
    #For instance variation are transformed as level with yesterday volatilities
    def formatModelDataAsDataSet(self, modelData):
        """Inverse of convertRealDataToModelFormat: un-scale model output."""
        if(self.activateScaling):
            if (type(modelData)==type(list())) and (len(modelData)==4):
                lambdaTransform = lambda x : x[0] if x[1] is None else x[1].inverse_transform(x[0])
                return list(map(lambdaTransform, zip(modelData, self.fullScaler)))
            elif (type(modelData)!=type(list())) :
                return self.volScaler.inverse_transform(modelData)
            else :
                # NOTE(review): raising a plain string is a TypeError in
                # Python 3 — should be e.g. raise ValueError(...).
                raise("Can not format as model data")
            return
        return modelData
    def scaleDataSets(self):
        """Fit scalers on the training set and populate the scaled members."""
        if(self.activateScaling):
            #Define MinMax scaling for volatility
            self.volScaler = customMeanStdScale() #customMinMaxScale()
            self.volScaler.fit(self.trainVol, enforceDataSetMin = 0)#Positive volatilities of course
            self.scaledTrainVol = self.volScaler.transform(self.trainVol)
            self.scaledTestVol = self.volScaler.transform(self.testVol)
            #Define MinMax scaling for volatility
            self.coordinatesScaler = customMeanStdScale() #customMinMaxScale()
            self.coordinatesScaler.fit(self.trainCoordinates, enforceDataSetMin = 0)#Positive volatilities of course
            self.scaledTrainCoordinates = self.coordinatesScaler.transform(self.trainCoordinates)
            self.scaledTestCoordinates = self.coordinatesScaler.transform(self.testCoordinates)
            #Define MinMax scaling for forward swap rates
            self.fwdScaler = customMeanStdScale() # customMinMaxScale()
            self.fwdScaler.fit(self.trainFwd)
            self.scaledTrainFwd = self.fwdScaler.transform(self.trainFwd)
            self.scaledTestFwd = self.fwdScaler.transform(self.testFwd)
        else :
            #No scaling requested: scaled members alias the raw ones
            self.scaledTrainVol = self.trainVol
            self.scaledTestVol = self.testVol
            self.scaledTrainCoordinates = self.trainCoordinates
            self.scaledTestCoordinates = self.testCoordinates
            self.scaledTrainFwd = self.trainFwd
            self.scaledTestFwd = self.testFwd
        return
def getATMDataFromCSV(dataSetPath, trainingSetPercentage=0.8):
    """Load an ATM swaption history from CSV and split it chronologically.

    Keeps only the ATM slice ('A' relative strike) on the expiry/tenor grid
    common to every date, then splits into train/test and separates vol,
    forward and coordinate features.
    Returns (testVol, trainVol, testFwd, trainFwd, testCoordinates,
    trainCoordinates, testStrike, trainStrike); strikes are None for ATM.
    """
    formattedHistory = extractDataFromCSV(dataSetPath)
    #Filter only ATM volatility
    ATMHistory = (formattedHistory[formattedHistory.index.get_level_values(indexRelStrike)=='A']
                  .reorder_levels([indexName, indexExpiry, indexTenor, indexRelStrike])
                  .sort_index())
    #Remove strike from index as we consider only ATM
    ATMHistory.index = ATMHistory.index.droplevel(3)
    #Get Expiry and tenors shared by all dates
    commonGridPoints = intersectionGrid(ATMHistory)
    #Get indexer for multiindex
    idx = pd.IndexSlice
    #Filter data for Expiry and tenors common to all dates
    commonATMHistory = ATMHistory.loc[idx[:,commonGridPoints.get_level_values(0),
                                          commonGridPoints.get_level_values(1)],:]
    #Feeding Data
    #Take the first 80% dates as training set and the remaining ones as testing set
    trainTmp,testTmp = splitTrainTestDataChronologically(commonATMHistory,trainingSetPercentage)
    #Separate features between volatility, forward rate and Strike
    testVol = splitHistory(testTmp,"Vol")
    trainVol = splitHistory(trainTmp,"Vol")
    testFwd = splitHistory(testTmp,"forward")
    trainFwd = splitHistory(trainTmp,"forward")
    testStrike = None
    trainStrike = None
    #Coordinates are the (expiry, tenor) index values attached to each column
    indexFunc = lambda x : pd.Series(x.index.values,
                                     index = x.index)
    trainCoordinates = trainVol.apply(indexFunc, axis=1)
    testCoordinates = testVol.apply(indexFunc, axis=1)
    #Re-index columns by integer rank so vol and coordinates align positionally
    trainVol = pd.DataFrame(trainVol.values, index=trainVol.index)
    testVol = pd.DataFrame(testVol.values, index=testVol.index)
    trainCoordinates = pd.DataFrame(trainCoordinates.values, index=trainCoordinates.index)
    testCoordinates = pd.DataFrame(testCoordinates.values, index=testCoordinates.index)
    return testVol, trainVol, testFwd, trainFwd, testCoordinates, trainCoordinates, testStrike, trainStrike
class dataSetATMCSV(datasetATM):
    """ATM swaption dataset loaded from CSV on a stable expiry/tenor grid."""
    def __init__(self, pathToDataset,
                 trainingSetPercentage,
                 minExpiry,
                 completionRate,
                 scaleFeatures = False):
        self.nbExpiry = 0
        self.nbTenors = 0
        self.minExpiry = minExpiry
        self.expiryTenorToRankSerie = None
        super().__init__(pathToDataset,
                         trainingSetPercentage,
                         minExpiry,
                         completionRate,
                         scaleFeatures = scaleFeatures)
        #Hard-coded (expiry, tenor) points kept observable; everything else is masked
        listTokeep = [(0.08333333333333333,0.25),(0.08333333333333333,10.0),
                      (0.08333333333333333,30.0),(0.5,2.0),(0.5,15.0),
                      (5.0,1.0),(5.0,20.0),(10.0,5.0)]
        self.setMaskedPoints(listTokeep)
    def setMaskedPoints(self, completionPoints):
        """Mark every grid point not in *completionPoints* as masked (to be completed)."""
        # self.maskedPoints = sampleSwaptionsToDelete(self.getTestingDataForModel(),
        #                                             completionRate)
        fullObs = self.getTestingDataForModel()[1]
        self.maskedPoints = fullObs.columns.difference(completionPoints)
        if self.isGridStable :#Surface coordinates are the same for each day
            #Matrix where True indicates that this point is completed (i.e. hidden on the market), false otherwise
            maskMatrix = pd.Series(False, index = self.expiryTenorToRankSerie.index)
            maskMatrix.loc[fullObs.iloc[0].loc[self.maskedPoints]] = True
            self.maskSerie = pd.Series(maskMatrix.values, index = self.expiryTenorToRankSerie.values)
            self.maskMatrix = maskMatrix.unstack(level=-1)
    #Return a deep copy with masked values
    def maskDataset(self, completeDataset):
        """Return a copy of *completeDataset* with masked points set to NaN."""
        maskedRank = self.maskedPoints
        maskedDataset = completeDataset.copy()
        if completeDataset.ndim == 1 :
            maskedDataset.loc[maskedRank] = np.NaN
        elif completeDataset.ndim == 2 :
            maskedDataset[maskedRank] = np.NaN
        return maskedDataset
    def removeShortestExpiry(self, dataset):
        """Drop every column whose expiry is below self.minExpiry; None passes through."""
        if dataset is None :
            return
        #remove data with expiry inferior than minExpiry
        hasExpiryColumn = ("Expiry" in dataset.columns.names)
        #Datasets without an Expiry level are filtered via the rank mapping serie
        columnsFilter = ((dataset.columns.get_level_values("Expiry")>=self.minExpiry) if hasExpiryColumn else
                         self.expiryTenorToRankSerie[self.expiryTenorToRankSerie.index.get_level_values("Expiry")>=self.minExpiry].values)
        return dataset.filter(items=dataset.columns[columnsFilter])
    def loadData(self):
        """Load the CSV history, build the (expiry, tenor) -> rank mapping and filter short expiries."""
        tmp = getATMDataFromCSV(self.pathToDataset, self.trainingSetPercentage)
        self.expiryTenorToRankSerie = pd.Series(tmp[4].columns,
                                                index = pd.MultiIndex.from_tuples(tmp[4].iloc[0].values,
                                                                                  names=('Expiry', 'Tenor')))
        self.expiryTenorToRankSerie = self.expiryTenorToRankSerie[self.expiryTenorToRankSerie.index.get_level_values("Expiry")>=self.minExpiry]
        self.testVol = self.removeShortestExpiry(tmp[0])
        self.trainVol = self.removeShortestExpiry(tmp[1])
        self.testCoordinates = self.removeShortestExpiry(tmp[4])
        self.trainCoordinates = self.removeShortestExpiry(tmp[5])
        self.testFwd = self.removeShortestExpiry(tmp[2])
        self.trainFwd = self.removeShortestExpiry(tmp[3])
        self.testStrike = self.removeShortestExpiry(tmp[6])
        self.trainStrike = self.removeShortestExpiry(tmp[7])
        self.nbExpiry = self.trainFwd.columns.get_level_values("Expiry").unique().size
        self.nbTenors = self.trainFwd.columns.get_level_values("Tenor").unique().size
        self.gridSize = self.trainFwd.columns.size
        return
    def datasetSummary(self):
        """Print a human-readable summary of the loaded grid and splits."""
        print("Number of days in dataset",
              self.getDataForModel()[0].shape[0])
        print("Number of days for testing", self.getTestingDataForModel()[0].shape[0])
        print("Number of days for training", self.getTrainingDataForModel()[0].shape[0])
        print("Working on ATM volatility level")
        print("Number of points in the grid : ", self.gridSize)
        print("Number of expiries : ", self.nbExpiry)
        print("List : ", self.getTrainingDataForModel()[2].columns.get_level_values("Expiry").unique())
        print("Number of tenors : ", self.nbTenors)
        print("List : ", self.getTrainingDataForModel()[2].columns.get_level_values("Tenor").unique())
        return
def getATMDataFromPickle(dataSetPath,
                         trainingSetPercentage=0.8,
                         minStrikeIndex = 0,
                         maturityStrikeIndex = 0):
    """Load an equity volatility history from a pickle of daily DataFrames.

    Each daily frame carries moneyness columns plus nBizDays/Forwards columns;
    maturities are tracked by their business-day rank so the grid can slide.
    Returns (testVol, trainVol, testFwd, trainFwd, testCoordinates,
    trainCoordinates, testStrike, trainStrike); strikes are None.
    """
    with open(dataSetPath, "rb") as f :
        objectRead = pickle.load(f)
    def rankCalDays(dfDay):
        #Rank maturities of a single day by business days
        return dfDay["nBizDays"].rank()
    listRank = list(map(rankCalDays, objectRead))
    dfRank = pd.concat(listRank)
    dfConcat = pd.concat(objectRead)
    dfConcat["Rank"] = dfRank
    volDf = dfConcat.reset_index().set_index(["index", "Rank"]).drop(["Date", "Forwards", "nBizDays", "nCalDays", "diff Days"], axis=1, errors="ignore").unstack()
    volDf.columns = volDf.columns.set_names("Moneyness",level=0)
    volDf = volDf.dropna(how="all",axis=1).astype("float64")
    fwdDf = dfConcat.reset_index().set_index(["index", "Rank"])["Forwards"].unstack()
    coordinatesRankDf = dfConcat.reset_index().set_index(["index", "Rank"])["nBizDays"].unstack()
    def bindBizDays(rows):
        #Build (time-to-maturity in years, log-moneyness) tuples for one day
        bizDays = coordinatesRankDf.loc[rows.name].astype("float64")
        return pd.Series(list(zip(bizDays[rows.index.get_level_values("Rank")].values / 252.0,
                                  np.log(rows.index.get_level_values("Moneyness").astype("float64")) )),
                         index = rows.index)
    coordinatesDf = volDf.apply(bindBizDays, axis=1)
    def getFwd(rowVol):
        ttmRank = rowVol.index.get_level_values("Rank")
        return pd.Series(fwdDf.loc[rowVol.name, ttmRank].values, index = rowVol.index)
    #Search for point in the vol dataframe the corresponding forward
    fwdDf = volDf.apply(getFwd, axis=1).dropna(how="all",axis=1).astype("float64")
    firstTestingDate = int(volDf.index.shape[0] * trainingSetPercentage)
    trainingDates = volDf.index[:firstTestingDate]
    trainVol = volDf.loc[trainingDates]
    testVol = volDf.drop(trainVol.index)
    #Re-index columns by integer rank so all features align positionally
    trainVol = pd.DataFrame(trainVol.values, index=trainVol.index)
    testVol = pd.DataFrame(testVol.values, index=testVol.index)
    trainFwd = fwdDf.loc[trainVol.index]
    trainFwd = pd.DataFrame(trainFwd.values, index=trainFwd.index)[trainVol.columns]
    testFwd = fwdDf.drop(trainVol.index)
    testFwd = pd.DataFrame(testFwd.values, index=testFwd.index)[testVol.columns]
    testStrike = None
    trainStrike = None
    trainCoordinates = coordinatesDf.loc[trainingDates]
    trainCoordinates = pd.DataFrame(trainCoordinates.values, index=trainCoordinates.index)[trainVol.columns]
    testCoordinates = coordinatesDf.drop(trainVol.index)
    testCoordinates = pd.DataFrame(testCoordinates.values, index=testCoordinates.index)[testVol.columns]
    #Drop the smallest strikes, then the shortest maturities among the kept strikes
    strikeDf = trainCoordinates.applymap(lambda x : x[1]).iloc[0]
    strikeList = np.sort(strikeDf.unique())
    minStrike = strikeList[minStrikeIndex]
    strikesKept = strikeDf[strikeDf >= minStrike].index
    maturityDf = trainCoordinates.applymap(lambda x : x[0]).iloc[0][strikesKept]
    maturityList = np.sort(maturityDf.unique())
    # NOTE(review): minStrikeIndex is reused here while the parameter
    # maturityStrikeIndex is never read — looks like a copy-paste bug;
    # confirm against callers before changing.
    minMaturity = maturityList[minStrikeIndex]
    maturityKept = maturityDf[maturityDf >= minMaturity].index
    testVol = testVol[maturityKept]
    trainVol = trainVol[maturityKept]
    trainCoordinates = trainCoordinates[maturityKept]
    testCoordinates = testCoordinates[maturityKept]
    trainFwd = trainFwd[maturityKept]
    testFwd = testFwd[maturityKept]
    return testVol, trainVol, testFwd, trainFwd, testCoordinates, trainCoordinates, testStrike, trainStrike
def saveInterpolationResult(pathFile, paramDf, interpDf):
    """Persist interpolation parameters and the interpolated surface to pathFile + 'Interp'."""
    payload = {"InterpParam": paramDf,
               "InterpolatedDf": interpDf}
    # Protocol 3 keeps the file readable from any Python 3 interpreter.
    with open(pathFile + 'Interp', "wb") as outFile:
        pickle.dump(payload, outFile, protocol=3)
    return
def removePointsWithInvalidCoordinates(incompleteSurface, coordinates):
    """Drop every observation whose coordinate contains a NaN component.

    Returns the filtered (surface, coordinates) pair; order is preserved.
    """
    def isValidCoordinate(point):
        # Tuple coordinates are valid only when no component is NaN;
        # scalar coordinates are treated as one-component tuples.
        components = point if isinstance(point, tuple) else (point,)
        return not any(np.isnan(c) for c in components)
    keepMask = np.array([isValidCoordinate(p) for p in coordinates])
    return incompleteSurface[keepMask], coordinates[keepMask]
def readInterpolationResult(pathFile):
    """Load the (parameters, interpolated surface) pair written by saveInterpolationResult."""
    with open(pathFile + 'Interp', "rb") as inFile:
        payload = pickle.load(inFile)
    return payload["InterpParam"], payload["InterpolatedDf"]
class dataSetATMPickle(datasetATM):
    """Equity volatility dataset loaded from pickle, on a sliding (unstable) grid."""
    def __init__(self, pathToDataset,
                 trainingSetPercentage,
                 minExpiry,
                 completionRate,
                 scaleFeatures = False):
        self.nbMoneyness = 0
        self.MoneynessList = []
        self.nbTTM = 0
        self.ttmList = []
        self.minTTM = None
        #Maturities slide day to day, so the grid is not stable
        self.isGridStable = False
        self.minStrike = 4
        self.minMaturity = 0
        self.logTransform = True
        super().__init__(pathToDataset,
                         trainingSetPercentage,
                         minExpiry,
                         completionRate,
                         scaleFeatures = scaleFeatures)
        #Column ranks kept observable; every other point is masked
        listTokeep = [1.0, 2.0, 3.0, 4.0]
        self.setMaskedPoints(listTokeep)
    def datasetSummary(self):
        """Print a human-readable summary of the loaded grid and splits."""
        print("Number of days in dataset",
              self.getDataForModel()[0].shape[0])
        print("Number of days for testing", self.getTestingDataForModel()[0].shape[0])
        print("Number of days for training", self.getTrainingDataForModel()[0].shape[0])
        print("Working on Equity volatility level")
        print("Number of points in the grid : ", self.gridSize)
        print("Number of Moneyness : ", self.nbMoneyness)
        print("List : ", self.MoneynessList)
        print("Number of Time to maturities : ", self.nbTTM)
        print("List : ", self.ttmList)
        return
    def loadData(self):
        """Load the pickle history and extract the moneyness/TTM axis values."""
        tmp = getATMDataFromPickle(self.pathToDataset, self.trainingSetPercentage, self.minStrike, self.minMaturity)
        self.testVol = tmp[0]
        self.trainVol = tmp[1]
        self.testCoordinates = tmp[4]
        self.trainCoordinates = tmp[5]
        self.testFwd = tmp[2]
        self.trainFwd = tmp[3]
        self.testStrike = tmp[6]
        self.trainStrike = tmp[7]
        def extractSingleton(df, coordIndex):
            #Unique, non-NaN values of component coordIndex across all tuple cells
            valueList = np.unique(list(map(lambda x : x[coordIndex], np.ravel(df.values))))
            return valueList[~np.isnan(valueList)]
        # NOTE(review): Series/DataFrame.append was removed in pandas 2.x.
        fullCoordinatedDf = self.testCoordinates.append(self.trainCoordinates)
        self.MoneynessList = extractSingleton(fullCoordinatedDf, 1)
        self.ttmList = extractSingleton(fullCoordinatedDf, 0)
        self.nbMoneyness = self.MoneynessList.size
        self.nbTTM = self.ttmList.size
        self.gridSize = self.trainVol.columns.size
        return
    def setMaskedPoints(self, completionPoints):
        """Mark every column rank not in *completionPoints* as masked (to be completed)."""
        # self.maskedPoints = sampleSwaptionsToDelete(self.getTestingDataForModel(),
        #                                             completionRate)
        fullObs = self.getTestingDataForModel()[0].iloc[0]
        self.maskedPoints = fullObs.index.difference(completionPoints)
        #Matrix where True indicates that this point is completed (i.e. hidden on the market), false otherwise
        maskMatrix = pd.Series(False, index = fullObs.index)
        maskMatrix.loc[self.maskedPoints] = True
        self.maskSerie = maskMatrix
        #self.maskMatrix = maskMatrix.unstack(level=-1)
    #Return a deep copy with masked values
    def maskDataset(self, completeDataset):
        """Return a copy of *completeDataset* with masked points set to NaN."""
        maskedRank = self.maskedPoints
        maskedDataset = completeDataset.copy()
        if completeDataset.ndim == 1 :
            maskedDataset.loc[maskedRank] = np.NaN
        elif completeDataset.ndim == 2 :
            maskedDataset[maskedRank] = np.NaN
        return maskedDataset
    #When the grid is not fixed - i.e. volatilities time to maturities are sliding -
    #we need to decide which instruments can be compared between two dates
    def decideInvestableInstruments(self):
        """Return, per date, the column indexes of instruments hedgeable between t and t+1.

        A point is hedgeable when tomorrow's grid contains the same maturity
        one business day shorter; when no such point exists (holidays), fall
        back to matching equal maturities.
        """
        ttmDf = getTTMFromCoordinates(self.formatModelDataAsDataSet(self.getDataForModel()))
        pairIndexHistory = []#series of pair of index
        nextTTMDf = ttmDf.shift(-1).dropna(how = "all")
        for serie in ttmDf.head(-1).iterrows():
            currentDay = serie[1]
            nextDay = nextTTMDf.loc[serie[0]]
            currentRankForHedgeablePoints = currentDay[(currentDay - 1).isin(nextDay) & (~currentDay.isna())].index
            nextRankForHedgeablePoints = nextDay[(nextDay).isin(currentDay - 1) & (~nextDay.isna())].index
            if currentRankForHedgeablePoints.empty :#case where current or day is not considered as a business day
                currentRankForHedgeablePoints = currentDay[(currentDay).isin(nextDay) & (~currentDay.isna())].index
                nextRankForHedgeablePoints = nextDay[(nextDay).isin(currentDay) & (~nextDay.isna())].index
            pairIndexHistory.append((currentRankForHedgeablePoints, nextRankForHedgeablePoints))
        #Last day
        pairIndexHistory.append((nextRankForHedgeablePoints, nextRankForHedgeablePoints))
        pairIndexHistory = pd.Series(pairIndexHistory, index = ttmDf.index)
        return pairIndexHistory
class datasetATMVariation(dataSetATMCSV):
    """ATM CSV dataset where vol and forward are modeled as daily variations.

    Levels are recovered by adding back the previous day's value, kept in the
    yesterday* series.
    """
    def __init__(self, pathToDataset,
                 trainingSetPercentage,
                 minExpiry,
                 completionRate,
                 scaleFeatures = False):
        #Daily variations and the matching previous-day level series
        self.trainingVolVariation = None
        self.testingVolVariation = None
        self.yesterdayVolSerie = None
        self.trainingCoordinatesVariation = None
        self.testingCoordinatesVariation = None
        self.trainingFwdVariation = None
        self.testingFwdVariation = None
        self.yesterdayFwdSerie = None
        self.trainingStrikeVariation = None
        self.testingStrikeVariation = None
        self.yesterdayStrikeSerie = None
        #No variation
        super().__init__(pathToDataset,
                         trainingSetPercentage,
                         minExpiry,
                         completionRate,
                         scaleFeatures = scaleFeatures)
    def addYesterdayLevel(self, variationDataset, levelDataSet):
        """Turn a variation back into a level by adding yesterday's value."""
        if variationDataset.ndim == 1 :
            return variationDataset + levelDataSet.loc[variationDataset.name]
        elif variationDataset.ndim == 2 :
            return variationDataset + levelDataSet.loc[variationDataset.index]
        raise ValueError("Incorrect tensor order !")
        return None
    def removeYesterdayLevel(self, todayDataset, yesterdayDataSet):
        """Turn a level into a variation by subtracting yesterday's value."""
        if todayDataset.ndim == 1 :
            return todayDataset - yesterdayDataSet.loc[todayDataset.name]
        elif todayDataset.ndim == 2 :
            return todayDataset - yesterdayDataSet.loc[todayDataset.index]
        raise ValueError("Incorrect tensor order !")
        return None
    #Apply scaling and various transform to fall on model data
    #Name of surface should be the date
    def convertRealDataToModelFormat(self, unformattedSurface):
        """Convert level data (quadruple or single vol surface) into scaled variations."""
        if (type(unformattedSurface)==type(list())) and (len(unformattedSurface)==4):
            date = unformattedSurface[0].index
            variation = [unformattedSurface[0] - self.yesterdayVolSerie.loc[date],
                         unformattedSurface[1],
                         unformattedSurface[2] - self.yesterdayFwdSerie.loc[unformattedSurface[2].index],
                         unformattedSurface[3]]
            if(self.activateScaling):
                lambdaTransform = lambda x : x[0] if x[1] is None else x[1].transform(x[0])
                return list(map(lambdaTransform, zip(variation, self.fullScaler)))
            else :
                return variation
        elif (type(unformattedSurface)!=type(list())) :
            date = unformattedSurface.name
            variation = unformattedSurface - self.yesterdayVolSerie.loc[date]
            if(self.activateScaling):
                return self.volScaler.transform(variation)
            else :
                return variation
        else :
            # NOTE(review): raising a plain string is a TypeError in
            # Python 3 — should be e.g. raise ValueError(...).
            raise("Can not format as model data")
        return None
    #Format data returned by a model to format
    #For instance variation are transformed as level with yesterday
    def formatModelDataAsDataSet(self,modelData):
        """Un-scale model output and add back yesterday's levels."""
        unScaledModelData = super().formatModelDataAsDataSet(modelData)
        if (type(modelData)==type(list())) and (len(modelData)==4):
            originalFormat = [self.addYesterdayLevel(unScaledModelData[0], self.yesterdayVolSerie),
                              unScaledModelData[1],
                              self.addYesterdayLevel(unScaledModelData[2], self.yesterdayFwdSerie),
                              unScaledModelData[3]]
        elif (type(modelData)!=type(list())) :
            originalFormat = self.addYesterdayLevel(unScaledModelData, self.yesterdayVolSerie)
        else :
            # NOTE(review): raising a plain string is a TypeError in
            # Python 3 — should be e.g. raise ValueError(...).
            raise("Can not format as model data")
        return originalFormat
    def formatDataAsVariation(self, trainingDataSet, testingDataSet):
        """Return (train variations, test variations, previous-day level serie)."""
        trainingVariation = trainingDataSet.diff().dropna(how='all')
        testingVariation = testingDataSet.diff()
        #First testing variation bridges the last training day
        testingVariation.iloc[0] = testingDataSet.iloc[0] - trainingDataSet.iloc[-1]
        #Shift date to have a serie of past values
        yesterdayTraining = trainingDataSet.shift().dropna(how='all')
        yesterdayTesting = testingDataSet.shift()
        yesterdayTesting.iloc[0] = trainingDataSet.iloc[-1]
        return trainingVariation, testingVariation, yesterdayTraining.append(yesterdayTesting)
    def loadData(self):
        """Load the CSV levels then derive variations and previous-day series."""
        super().loadData()
        tmp1 = self.formatDataAsVariation(self.trainVol, self.testVol)
        self.trainingVolVariation = tmp1[0]
        self.testingVolVariation = tmp1[1]
        self.yesterdayVolSerie = tmp1[2]
        #Coordiantes are not formatted as variation
        self.trainingCoordinatesVariation = self.trainCoordinates.loc[self.trainingVolVariation.index]
        self.testingCoordinatesVariation = self.testCoordinates.loc[self.testingVolVariation.index]
        tmp2 = self.formatDataAsVariation(self.trainFwd, self.testFwd)
        self.trainingFwdVariation = tmp2[0]
        self.testingFwdVariation = tmp2[1]
        self.yesterdayFwdSerie = tmp2[2]
        # tmp3 = self.formatDataAsVariation(self.trainStrike, self.testStrike)
        # self.trainingStrikeVariation = tmp3[0]
        # self.testingStrikeVariation = tmp3[1]
        # self.yesterdayStrikeSerie = tmp3[2]
        return
    def scaleDataSets(self):
        """Fit scalers on the variation series and populate the scaled members."""
        if(self.activateScaling):
            #Define MinMax scaling for volatility
            self.volScaler = customMeanStdScale() #customMinMaxScale()
            self.volScaler.fit(self.trainingVolVariation)#Positive volatilities of course
            self.scaledTrainVol = self.volScaler.transform(self.trainingVolVariation)
            self.scaledTestVol = self.volScaler.transform(self.testingVolVariation)
            #Define MinMax scaling for volatility
            self.coordinatesScaler = customMeanStdScale() #customMinMaxScale()
            self.coordinatesScaler.fit(self.trainCoordinates, enforceDataSetMin = 0)#Positive volatilities of course
            self.scaledTrainCoordinates = self.coordinatesScaler.transform(self.trainingCoordinatesVariation)
            self.scaledTestCoordinates = self.coordinatesScaler.transform(self.testingCoordinatesVariation)
            #Define MinMax scaling for forward swap rates
            self.fwdScaler = customMeanStdScale() #customMinMaxScale()
            self.fwdScaler.fit(self.trainingFwdVariation)
            self.scaledTrainFwd = self.fwdScaler.transform(self.trainingFwdVariation)
            self.scaledTestFwd = self.fwdScaler.transform(self.testingFwdVariation)
        else :
            #No scaling requested: scaled members alias the variation series
            self.scaledTrainVol = self.trainingVolVariation
            self.scaledTestVol = self.testingVolVariation
            self.scaledTrainCoordinates = self.trainingCoordinatesVariation
            self.scaledTestCoordinates = self.testingCoordinatesVariation
            self.scaledTrainFwd = self.trainingFwdVariation
            self.scaledTestFwd = self.testingFwdVariation
        return
def getSkewDataFromCSV(dataSetPath, trainingSetPercentage=0.8):
    """Load the full skew (all strikes) history from CSV and split it chronologically.

    Same pipeline as getATMDataFromCSV but keeps the relative-strike dimension,
    so strike features are returned instead of None.
    Returns (testVol, trainVol, testFwd, trainFwd, testCoordinates,
    trainCoordinates, testStrike, trainStrike).
    """
    formattedHistory = (extractDataFromCSV(dataSetPath)
                        .reorder_levels([indexName, indexExpiry, indexTenor, indexRelStrike])
                        .sort_index())
    #Get Expiry and tenors shared by all dates
    commonGridPoints = intersectionGrid(formattedHistory)
    #Get indexer for multiindex
    idx = pd.IndexSlice
    #Filter data for Expiry, Tenors and Strike common to all dates
    commonHistory = formattedHistory.loc[idx[:,commonGridPoints.get_level_values(0),
                                             commonGridPoints.get_level_values(1),
                                             commonGridPoints.get_level_values(2)],:]
    #Feeding Data
    #Take the first 80% dates as training set and the remaining ones as testing set
    trainTmp,testTmp = splitTrainTestDataChronologically(commonHistory,trainingSetPercentage)
    #Separate features between volatility, forward rate and Strike
    testVol = splitHistory(testTmp,"Vol")
    trainVol = splitHistory(trainTmp,"Vol")
    trainVol = pd.DataFrame(trainVol.values, index=trainVol.index)
    testVol = pd.DataFrame(testVol.values, index=testVol.index)
    testFwd = splitHistory(testTmp,"forward")
    trainFwd = splitHistory(trainTmp,"forward")
    testStrike = splitHistory(testTmp,indexStrike)
    trainStrike = splitHistory(trainTmp,indexStrike)
    #Coordinates are the index values attached to each column
    indexFunc = lambda x : pd.Series(x.index.values,
                                     index = x.index)
    trainCoordinates = trainVol.apply(indexFunc, axis=1)
    testCoordinates = testVol.apply(indexFunc, axis=1)
    trainCoordinates = pd.DataFrame(trainCoordinates.values, index=trainCoordinates.index)
    testCoordinates = pd.DataFrame(testCoordinates.values, index=testCoordinates.index)
    return testVol, trainVol, testFwd, trainFwd, testCoordinates, trainCoordinates, testStrike, trainStrike
class datasetStrike(dataSetATMCSV):
    """Swaption dataset variant that keeps the relative-strike dimension of the cube."""
    def __init__(self, pathToDataset,
                 trainingSetPercentage,
                 minExpiry,
                 completionRate,
                 scaleFeatures = False):
        self.nbStrike = 0
        super().__init__(pathToDataset,
                         trainingSetPercentage,
                         minExpiry,
                         completionRate,
                         scaleFeatures = scaleFeatures)
    def loadData(self):
        """Load the skew history and split it into vol/forward/strike/coordinates.

        Bug fix: the call used the undefined global name "pathToDataset"
        instead of the instance attribute, raising NameError on every call.
        """
        tmp = getSkewDataFromCSV(self.pathToDataset, self.trainingSetPercentage)
        #Map each (expiry, tenor) pair to its integer column rank
        self.expiryTenorToRankSerie = pd.Series(tmp[4].columns,
                                                index = pd.MultiIndex.from_tuples(tmp[4].iloc[0].values,
                                                                                  names=('Expiry', 'Tenor')))
        self.testVol = self.removeShortestExpiry(tmp[0])
        self.trainVol = self.removeShortestExpiry(tmp[1])
        self.testCoordinates = self.removeShortestExpiry(tmp[4])
        self.trainCoordinates = self.removeShortestExpiry(tmp[5])
        self.testFwd = self.removeShortestExpiry(tmp[2])
        self.trainFwd = self.removeShortestExpiry(tmp[3])
        self.testStrike = self.removeShortestExpiry(tmp[6])
        self.trainStrike = self.removeShortestExpiry(tmp[7])
        self.nbExpiry = self.testFwd.columns.get_level_values("Expiry").unique().size
        self.nbTenors = self.testFwd.columns.get_level_values("Tenor").unique().size
        self.nbStrike = self.testStrike.columns.get_level_values(3).unique().size
        return
    def datasetSummary(self):
        """Extend the parent summary with strike-dimension information."""
        super().datasetSummary()
        print("Number of relative strikes : ", self.nbStrike)
        print("List : ", self.testVol.columns.get_level_values(3).unique())
        return
class datasetATMLogVariation(datasetATMVariation):
    """Variation dataset working on daily *log* variations instead of raw differences.

    Levels are recovered multiplicatively: level_t = exp(variation) * level_{t-1}.
    """
    def __init__(self, pathToDataset,
                 trainingSetPercentage,
                 minExpiry,
                 completionRate,
                 scaleFeatures = False):
        super().__init__(pathToDataset,
                         trainingSetPercentage,
                         minExpiry,
                         completionRate,
                         scaleFeatures = scaleFeatures)
    def formatDataAsVariation(self, trainingDataSet, testingDataSet):
        """Return (train log-variations, test log-variations, previous-day level serie)."""
        trainingVariation = np.log(trainingDataSet).diff().dropna(how='all')
        testingVariation = np.log(testingDataSet).diff()
        #First testing variation bridges the last training day
        testingVariation.iloc[0] = np.log(testingDataSet).iloc[0] - np.log(trainingDataSet).iloc[-1]
        #Shift date to have a serie of past values
        yesterdayTraining = trainingDataSet.shift().dropna(how='all')
        yesterdayTesting = testingDataSet.shift()
        yesterdayTesting.iloc[0] = trainingDataSet.iloc[-1]
        return trainingVariation, testingVariation, yesterdayTraining.append(yesterdayTesting)
    def addYesterdayLevel(self, variationDataset, levelDataSet):
        """Turn a log-variation back into a level: exp(variation) * yesterday's level."""
        if variationDataset.ndim == 1 :
            return np.exp(variationDataset) * levelDataSet.loc[variationDataset.name]
        elif variationDataset.ndim == 2 :
            return np.exp(variationDataset) * levelDataSet.loc[variationDataset.index]
        raise ValueError("Incorrect tensor order !")
        return variationDataset
    #Apply scaling and various transform to fall on model data
    #Name of surface should be the date
    def convertRealDataToModelFormat(self, unformattedSurface):
        """Convert level data (quadruple or single vol surface) into scaled log-variations."""
        if (type(unformattedSurface)==type(list())) and (len(unformattedSurface)==4):
            date = unformattedSurface[0].index
            variation = [np.log(unformattedSurface[0]) - np.log(self.yesterdayVolSerie.loc[date]),
                         unformattedSurface[1],
                         np.log(unformattedSurface[2]) - np.log(self.yesterdayFwdSerie.loc[date]),
                         unformattedSurface[3]]
            if(self.activateScaling):
                lambdaTransform = lambda x : x[0] if x[1] is None else x[1].transform(x[0])
                return list(map(lambdaTransform, zip(variation, self.fullScaler)))
            else :
                return variation
        elif (type(unformattedSurface)!=type(list())) :
            date = unformattedSurface.name
            variation = np.log(unformattedSurface) - np.log(self.yesterdayVolSerie.loc[date])
            if(self.activateScaling):
                return self.volScaler.transform(variation)
            else :
                return variation
        else :
            # NOTE(review): raising a plain string is a TypeError in
            # Python 3 — should be e.g. raise ValueError(...).
            raise("Can not format as model data")
        return None
| StarcoderdataPython |
18760 | class ToolNameAPI:
thing = 'thing'
toolname_tool = 'example'
tln = ToolNameAPI()
the_repo = "reponame"
author = "authorname"
profile = "authorprofile" | StarcoderdataPython |
1697251 | <reponame>NeuralFlux/greenwheels
from django import forms
from Eprint_users.models import PrintDocs
from . models import RatePerPage
class UpdateForm(forms.ModelForm):
    """ModelForm for updating the status flags of a PrintDocs print job."""
    class Meta:
        model = PrintDocs
        # Expose only the workflow/status fields, not the document itself.
        fields = ['task_by', 'completed', 'paid', 'collected', 'id']
class ChangeRate(forms.ModelForm):
    """ModelForm to edit the per-page printing rate (black & white only)."""
    class Meta:
        model = RatePerPage
        # Colour rate ('rppC') is currently disabled -- see commented lines below.
        fields = ['rppBW']
    def __init__(self, *args, **kwargs):
        super(ChangeRate, self).__init__(*args, **kwargs)
        # Human-friendly label and help text for the black-and-white rate field.
        self.fields['rppBW'].label = "Black and White"
        # self.fields['rppC'].label = "Colour"
        self.fields['rppBW'].help_text = "Per Page (One Side)"
        # self.fields['rppC'].help_text = "Per Page (One Side)"
| StarcoderdataPython |
81923 | #!/usr/bin/env python
import sys
def get_output_dir(target_arch, component):
  """Return the build output directory for an arch/component combination."""
  # ffmpeg (Chromium branding) always builds in its own dedicated directory.
  if component == 'ffmpeg':
    return 'out_ffmpeg'
  suffix = ''
  # Component (shared library) builds get their own directory suffix...
  if component == 'shared_library':
    suffix += '_component'
  # ...and non-default target architectures are separated further.
  suffix += {'ia32': '_32', 'arm': '_arm'}.get(target_arch, '')
  return 'out' + suffix
def get_configuration(target_arch):
  """Return the build configuration name.

  64-bit builds on Windows/Cygwin use the dedicated 'Release_x64'
  configuration; everything else builds plain 'Release'.
  """
  on_windows = sys.platform in ('win32', 'cygwin')
  if target_arch == 'x64' and on_windows:
    return 'Release_x64'
  return 'Release'
| StarcoderdataPython |
5451 | <reponame>Ayansam1152/translate
#!/usr/bin/env python3
import importlib
import os
# Automatically import every Python module in this models/ directory so each
# model registers itself. Sorting makes import order deterministic across
# filesystems.
for file in sorted(os.listdir(os.path.dirname(__file__))):
    # Skip non-Python files and private/dunder modules (e.g. __init__.py).
    if file.endswith(".py") and not file.startswith("_"):
        model_name = file[: file.find(".py")]
        importlib.import_module("pytorch_translate.models." + model_name)
| StarcoderdataPython |
3286186 | from abc import abstractmethod
import numpy as np
from rlgym.utils import math
from rlgym.utils.common_values import BLUE_TEAM, ORANGE_GOAL_CENTER, BLUE_GOAL_CENTER, ORANGE_TEAM
from rlgym.utils.gamestates import GameState, PlayerData
from rlgym.utils.reward_functions import RewardFunction
class EventReward(RewardFunction):
    """Sparse reward paid when discrete match events occur (goals, touches,
    shots, saves, demos), each weighted by the constructor parameters."""
    def __init__(self, goal=0., team_goal=0., concede=-0., touch=0., shot=0., save=0., demo=0.):
        """
        :param goal: reward for goal scored by player.
        :param team_goal: reward for goal scored by player's team.
        :param concede: reward for goal scored by opponents. Should be negative if used as punishment.
        :param touch: reward for touching the ball.
        :param shot: reward for shooting the ball (as detected by Rocket League).
        :param save: reward for saving the ball (as detected by Rocket League).
        :param demo: reward for demolishing a player.
        """
        super().__init__()
        self.weights = np.array([goal, team_goal, concede, touch, shot, save, demo])
        # Need to keep track of last registered value to detect changes
        self.last_registered_values = {}
    @staticmethod
    def _extract_values(player: PlayerData, state: GameState):
        # Build the per-player counter vector in the same order as self.weights:
        # [goals, team goals, conceded, ball touched, shots, saves, demos].
        if player.team_num == BLUE_TEAM:
            team, opponent = state.blue_score, state.orange_score
        else:
            team, opponent = state.orange_score, state.blue_score
        return np.array([player.match_goals, team, opponent, player.ball_touched, player.match_shots,
                         player.match_saves, player.match_demolishes])
    def reset(self, initial_state: GameState, optional_data=None):
        # Update every reset since rocket league may crash and be restarted with clean values
        self.last_registered_values = {}
        for player in initial_state.players:
            self.last_registered_values[player.car_id] = self._extract_values(player, initial_state)
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray, optional_data=None):
        # Reward is the weighted sum of counters that increased since the last call.
        old_values = self.last_registered_values[player.car_id]
        new_values = self._extract_values(player, state)
        diff_values = new_values - old_values
        diff_values[diff_values < 0] = 0  # We only care about increasing values
        reward = np.dot(self.weights, diff_values)
        self.last_registered_values[player.car_id] = new_values
        return reward
class DistanceBallToGoalReward(RewardFunction):
    """Dense reward that grows as the ball gets closer to the target goal."""
    def __init__(self, own_goal=False):
        super().__init__()
        self.own_goal = own_goal
    def reset(self, initial_state: GameState):
        pass
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        # Blue attacks the orange goal (and vice versa); own_goal flips the target.
        attacking_orange = (player.team_num == BLUE_TEAM) != self.own_goal
        target = np.array(ORANGE_GOAL_CENTER if attacking_orange else BLUE_GOAL_CENTER)
        target[1] *= 6000 / 5120  # Use back of net
        # 786 compensates for moving the objective to the back of the net.
        dist = np.linalg.norm(state.ball.position - target) - 786
        return np.exp(-0.5 * dist / 100)  # From https://arxiv.org/abs/2105.12196
class DistancePlayerToBallReward(RewardFunction):
    """Dense reward that grows as the player approaches the ball."""
    def reset(self, initial_state: GameState):
        pass
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        separation = player.car_data.position - state.ball.position
        # Subtract the ball radius (94 uu) so touching the ball gives distance ~0.
        dist = np.linalg.norm(separation) - 94
        return np.exp(-0.5 * dist / 100)  # From https://arxiv.org/abs/2105.12196
class VelocityPlayerToBallReward(RewardFunction):
    """Rewards the component of the player's velocity directed at the ball.

    With use_scalar_projection the reward is v.d/|d| (used to guide the agent
    toward the ball); otherwise it is the component velocity in m/s.
    """
    def __init__(self, use_scalar_projection=True):
        super().__init__()
        self.use_scalar_projection = use_scalar_projection
    def reset(self, initial_state: GameState):
        pass
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        vel = player.car_data.linear_velocity
        pos_diff = state.ball.position - player.car_data.position
        if self.use_scalar_projection:
            # Vector version of v=d/t <=> t=d/v <=> 1/t=v/d
            # Max value should be max_speed / ball_radius = 2300 / 94 = 24.5
            # Used to guide the agent towards the ball
            inv_t = math.scalar_projection(vel, pos_diff)
            return inv_t
        else:
            # Regular component velocity
            norm_pos_diff = pos_diff / np.linalg.norm(pos_diff)
            # BUG FIX: the original did `vel /= 100`, an in-place division that
            # silently rescaled player.car_data.linear_velocity inside the shared
            # game state. Divide out-of-place instead (uu = cm -> m).
            vel = vel / 100
            return float(np.dot(norm_pos_diff, vel))
class VelocityBallToGoalReward(RewardFunction):
    """Rewards ball velocity directed at the opponent (or own) goal."""
    def __init__(self, own_goal=False, use_scalar_projection=True):
        super().__init__()
        self.own_goal = own_goal
        self.use_scalar_projection = use_scalar_projection
    def reset(self, initial_state: GameState):
        pass
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        if player.team_num == BLUE_TEAM and not self.own_goal \
                or player.team_num == ORANGE_TEAM and self.own_goal:
            objective = np.array(ORANGE_GOAL_CENTER)
        else:
            objective = np.array(BLUE_GOAL_CENTER)
        objective[1] *= 6000 / 5120  # Use back of net instead to prevent exploding reward
        vel = state.ball.linear_velocity
        pos_diff = objective - state.ball.position
        if self.use_scalar_projection:
            # Vector version of v=d/t <=> t=d/v <=> 1/t=v/d
            # Max value should be max_speed / ball_radius = 2300 / 94 = 24.5
            inv_t = math.scalar_projection(vel, pos_diff)
            return inv_t
        else:
            # Regular component velocity
            norm_pos_diff = pos_diff / np.linalg.norm(pos_diff)
            # BUG FIX: `vel /= 100` mutated state.ball.linear_velocity in place,
            # corrupting the shared game state. Divide out-of-place instead
            # (uu/s = cm/s -> m/s).
            vel = vel / 100
            return float(np.dot(norm_pos_diff, vel))
class VelocityReward(RewardFunction):
    """Rewards raw car speed in m/s; a simple sanity-check reward.

    With negative=True the speed is penalised instead of rewarded.
    """
    def __init__(self, negative=False):
        super().__init__()
        self.negative = negative
    def reset(self, initial_state: GameState):
        pass
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        speed = np.linalg.norm(player.car_data.linear_velocity) / 100  # uu/s -> m/s
        return -speed if self.negative else speed
class SaveBoostReward(RewardFunction):
    """Rewards conserving boost; the sqrt emphasises low-boost differences."""
    def reset(self, initial_state: GameState):
        pass
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        # 1 reward for each frame with 100 boost, sqrt because 0->20 makes bigger difference than 80->100
        return np.sqrt(player.boost_amount)
class ConstantReward(RewardFunction):
    """Returns 1 on every step, independent of the game state."""
    def reset(self, initial_state: GameState):
        pass
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        return 1
class BallYCoordinateReward(RewardFunction):
    """Rewards ball progress along the y axis toward the opponent goal.

    Cubing keeps the sign while emphasising positions deep in either half.
    """
    def reset(self, initial_state: GameState):
        pass
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        # Normalise by 5120 + 94 (goal line plus ball radius, per the constants
        # used elsewhere in this module). Orange reads the inverted state so the
        # same expression works for both teams.
        if player.team_num == BLUE_TEAM:
            return (state.ball.position[1] / (5120 + 94)) ** 3
        else:
            return (state.inverted_ball.position[1] / (5120 + 94)) ** 3
class FaceBallReward(RewardFunction):
    """Rewards how directly the car's nose points at the ball (cosine in [-1, 1])."""
    def reset(self, initial_state: GameState):
        pass
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        to_ball = state.ball.position - player.car_data.position
        to_ball_unit = to_ball / np.linalg.norm(to_ball)
        # Dot of the car's forward unit vector with the unit vector to the ball.
        return float(np.dot(player.car_data.forward(), to_ball_unit))
class ConditionalRewardFunction(RewardFunction):
    """Wraps another reward and pays it out only while a subclass-defined
    condition holds; otherwise the reward is 0."""
    def __init__(self, reward_func: RewardFunction):
        super().__init__()
        self.reward_func = reward_func
    @abstractmethod
    def condition(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> bool:
        # Subclasses decide when the wrapped reward applies.
        raise NotImplementedError
    def reset(self, initial_state: GameState):
        pass
    def get_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        if self.condition(player, state, previous_action):
            return self.reward_func.get_reward(player, state, previous_action)
        return 0
    def get_final_reward(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> float:
        if self.condition(player, state, previous_action):
            return self.reward_func.get_final_reward(player, state, previous_action)
        return 0
class RewardIfClosestToBall(ConditionalRewardFunction):
    """Pays the wrapped reward only when this player is nearest to the ball.

    With team_only=True only teammates are compared; otherwise all players.
    """
    def __init__(self, reward_func: RewardFunction, team_only=True):
        super().__init__(reward_func)
        self.team_only = team_only
    def condition(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> bool:
        ball_pos = state.ball.position
        own_dist = np.linalg.norm(player.car_data.position - ball_pos)
        candidates = (p for p in state.players
                      if not self.team_only or p.team_num == player.team_num)
        # Strictly-closer check keeps the original tie behaviour: a player tied
        # with someone else still counts as closest.
        return all(np.linalg.norm(p.car_data.position - ball_pos) >= own_dist
                   for p in candidates)
class RewardIfTouchedLast(ConditionalRewardFunction):
    """Pays the wrapped reward only to the player who last touched the ball."""
    def condition(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> bool:
        return state.last_touch == player.car_id
class RewardIfBehindBall(ConditionalRewardFunction):
    """Pays the wrapped reward only while the player is on the defensive side
    of the ball (blue behind in -y, orange behind in +y)."""
    def condition(self, player: PlayerData, state: GameState, previous_action: np.ndarray) -> bool:
        return player.team_num == BLUE_TEAM and player.car_data.position[1] < state.ball.position[1] \
               or player.team_num == ORANGE_TEAM and player.car_data.position[1] > state.ball.position[1]
| StarcoderdataPython |
3287069 | """
Test that using a non-existent architecture name does not crash LLDB.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class NoSuchArchTestCase(TestBase):
    """Verify that requesting a non-existent architecture fails gracefully
    both from the command line and from the SB API."""

    mydir = TestBase.compute_mydir(__file__)

    def test(self):
        self.build()
        exe = os.path.join(os.getcwd(), "a.out")

        # Check that passing an invalid arch via the command-line fails but
        # doesn't crash.
        # BUG FIX: the command was misspelled "target crete", so the test
        # passed because the *command* was unknown rather than the
        # architecture being rejected.
        self.expect(
            "target create --arch nothingtoseehere %s" %
            (exe), error=True)

        # Check that passing an invalid arch via the SB API fails but doesn't
        # crash
        target = self.dbg.CreateTargetWithFileAndArch(exe, "nothingtoseehere")
        self.assertFalse(target.IsValid(), "This target should not be valid")

        # Now just create the target with the default arch and check it's fine
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target.IsValid(), "This target should now be valid")
| StarcoderdataPython |
3340432 | <filename>kay/tests/jinja2_test.py
#:coding=utf-8:
from kay.utils.test import Client
from kay.utils import url_for
from kay.app import get_application
from kay.conf import LazySettings
from kay.ext.testutils.gae_test_base import GAETestBase
class Jinja2TestCase(GAETestBase):
  """Checks that the application's Jinja2 environment is created lazily."""
  def setUp(self):
    # Build an application from the Google-specific test settings.
    s = LazySettings(settings_module='kay.tests.google_settings')
    self.app = get_application(settings=s)
  def test_lazy_jinja2(self):
    # The cached attribute must not exist before first access.
    # BUG FIX: corrected the assertion message typo "to early" -> "too early".
    self.assertFalse(hasattr(self.app.app, '_jinja2_env'),
                     "Jinja2 environment is loaded too early.")
    # Accessing the property forces creation of the environment...
    self.assertTrue(self.app.app.jinja2_env)
    # ...after which the cached attribute is present.
    self.assertTrue(hasattr(self.app.app, '_jinja2_env'),
                    "Jinja2 environment is not loaded")
1642695 | <gh_stars>10-100
# Generated by Django 2.2.16 on 2020-11-04 19:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add Product.layout, a choice between the
    'Default' and 'Card' page layouts."""
    dependencies = [
        ("api", "0101_scratch_org_nullable_email"),
    ]
    operations = [
        migrations.AddField(
            model_name="product",
            name="layout",
            field=models.CharField(
                choices=[("Default", "Default"), ("Card", "Card")],
                default="Default",
                max_length=64,
            ),
        ),
    ]
| StarcoderdataPython |
1607043 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the file-like object implementation using pyvshadow."""
import os
import unittest
from dfvfs.file_io import vshadow_file_io
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from tests import test_lib as shared_test_lib
class VShadowFileTest(shared_test_lib.BaseTestCase):
  """The unit test for the Volume Shadow Snapshots (VSS) file-like object."""
  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['vss.raw'])
    self._SkipIfPathNotExists(test_path)
    # Layer a RAW storage media path spec on top of the OS file.
    self._os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=self._os_path_spec)
  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._resolver_context.Empty()
  def testOpenClose(self):
    """Test the open and close functionality."""
    # Valid store selected by index (store_index is 0-based).
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VSHADOW, parent=self._raw_path_spec,
        store_index=1)
    file_object = vshadow_file_io.VShadowFile(self._resolver_context, path_spec)
    file_object.Open()
    self.assertEqual(file_object.get_size(), 82771968)
    # Out-of-range store index must raise.
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VSHADOW, parent=self._raw_path_spec,
        store_index=13)
    file_object = vshadow_file_io.VShadowFile(self._resolver_context, path_spec)
    with self.assertRaises(errors.PathSpecError):
      file_object.Open()
    # Valid store selected by location (locations are 1-based: /vss1).
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VSHADOW, location='/vss1',
        parent=self._raw_path_spec)
    file_object = vshadow_file_io.VShadowFile(self._resolver_context, path_spec)
    file_object.Open()
    self.assertEqual(file_object.get_size(), 82771968)
    # /vss0 and /vss13 are outside the valid location range.
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VSHADOW, location='/vss0',
        parent=self._raw_path_spec)
    file_object = vshadow_file_io.VShadowFile(self._resolver_context, path_spec)
    with self.assertRaises(errors.PathSpecError):
      file_object.Open()
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VSHADOW, location='/vss13',
        parent=self._raw_path_spec)
    file_object = vshadow_file_io.VShadowFile(self._resolver_context, path_spec)
    with self.assertRaises(errors.PathSpecError):
      file_object.Open()
  def testSeek(self):
    """Test the seek functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VSHADOW, parent=self._raw_path_spec,
        store_index=1)
    file_object = vshadow_file_io.VShadowFile(self._resolver_context, path_spec)
    file_object.Open()
    self.assertEqual(file_object.get_size(), 82771968)
    # Absolute, end-relative and current-relative seeks with known content.
    file_object.seek(0x1c9)
    self.assertEqual(file_object.get_offset(), 0x1c9)
    self.assertEqual(file_object.read(16), b'rl+Alt+Del to re')
    self.assertEqual(file_object.get_offset(), 473)
    file_object.seek(-40, os.SEEK_END)
    self.assertEqual(file_object.get_offset(), 82771928)
    self.assertEqual(file_object.read(8), b'estart\r\n')
    self.assertEqual(file_object.get_offset(), 82771936)
    file_object.seek(3, os.SEEK_CUR)
    self.assertEqual(file_object.get_offset(), 82771939)
    self.assertEqual(file_object.read(7), b'\x00\x00\x00\x00\x00\x00\x00')
    self.assertEqual(file_object.get_offset(), 82771946)
    # Conforming to the POSIX seek the offset can exceed the file size
    # but reading will result in no data being returned.
    expected_offset = 82771968 + 100
    file_object.seek(expected_offset, os.SEEK_SET)
    self.assertEqual(file_object.get_offset(), expected_offset)
    self.assertEqual(file_object.read(20), b'')
    with self.assertRaises(IOError):
      file_object.seek(-10, os.SEEK_SET)
    # On error the offset should not change.
    self.assertEqual(file_object.get_offset(), expected_offset)
    with self.assertRaises(IOError):
      file_object.seek(10, 5)
    # On error the offset should not change.
    self.assertEqual(file_object.get_offset(), expected_offset)
  def testRead(self):
    """Test the read functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VSHADOW, parent=self._raw_path_spec,
        store_index=1)
    file_object = vshadow_file_io.VShadowFile(self._resolver_context, path_spec)
    file_object.Open()
    self.assertEqual(file_object.get_size(), 82771968)
    # Read known content at a fixed offset inside the snapshot.
    file_object.seek(0x18e)
    expected_data = b'disk read error occurred\x00\r\nBOOTMGR is compresse'
    self.assertEqual(file_object.read(47), expected_data)
if __name__ == '__main__':
  # Allow running this test module standalone.
  unittest.main()
| StarcoderdataPython |
67142 | <filename>katacomb/katacomb/aips_parser.py
import logging
import InfoList
import ParserUtil
from katacomb import obit_err, handle_obit_err
from katacomb.obit_types import OBIT_TYPE_ENUM
log = logging.getLogger('katacomb')
def parse_aips_config(aips_cfg_file):
    """
    Parses an AIPS config file into a
    dictionary with schema
    :code:`{ option: [type, dimensions, value]}`
    :code:`type_` is an enum. Look at ObitTypes.h
    to figure it out.
    :code:`dims` indicate dimensionality of input
    For scalar types :code:`[64,1,1,1,1]` indicates
    a 1D array of length 64 floats.
    String dims need to be handled slightly differently
    First dimension indicates string length so for e.g.
    :code:`["obit", " ", "abcd"]` has dims :code:`[4,3,1,1,1]`
    So a :code:`[4,1,1,1,1]` implies one string of length 4
    :code:`value` will always be a list and is probably
    nested if :code:`dims` is setup appropriately.
    Parameters
    ----------
    aips_cfg_file : str
        AIPS configuration file
    Returns
    -------
    dict
        A dictionary of AIPS configuration options
    """
    err = obit_err()
    info_list = InfoList.InfoList()
    # Parse the file into an Obit InfoList; any Obit-side failure recorded in
    # `err` is converted into a Python exception by handle_obit_err.
    ParserUtil.PParse(aips_cfg_file, info_list, err)
    handle_obit_err("Error parsing Obit configuration file '{}'"
                    .format(aips_cfg_file), err)
    return InfoList.PGetDict(info_list)
def obit_config_from_aips(aips_cfg_file):
    """
    Extract key-values from AIPS configuration file
    into a { option: value } dictionary.
    Processes the configuration so that the values are
    suitable to apply to ObitTask objects, converting
    singleton list to objects, for example.
    Parameters
    ----------
    aips_cfg_file : str
        AIPS configuration file
    Returns
    -------
    dict
        Configuration dictionary
    """
    def _massage(option):
        """
        Massage values into structures suitable for
        setting on Obit Task objects
        """
        # Split into type, dimensions and value
        type_, dims, value = option
        assert isinstance(value, list)
        enum = OBIT_TYPE_ENUM[type_]
        is_str = enum.name == "string"
        # Coerce values into their python equivalents; strings are padded to
        # the declared string length (first dimension).
        if is_str:
            value = [enum.coerce(v).ljust(dims[0], ' ') for v in value]
        else:
            value = [enum.coerce(v) for v in value]
        # Check second dimension to test singletons
        # if we're handling strings else the first dim
        # (for strings, dims[0] is the string length, not the element count).
        check_dim = 1 if is_str else 0
        # Return first element from singleton lists
        if dims[check_dim] == 1 and len(value) == 1:
            return value[0]
        return value
    return {k: _massage(o) for k, o
            in parse_aips_config(aips_cfg_file).items()}
| StarcoderdataPython |
3270800 | import pathlib
import asyncio
import argparse
from typing import List
FPS = 60  # target frame rate; NOTE(review): unused -- FFMPEG_CMD hard-codes "-r 60"
# NOTE(review): input() runs at import time, before argparse -- the script
# blocks on this prompt even when invoked with --help.
SPEED_MULTIPLIER = float(input("Video speed multiplier: "))
# setpts PTS scale factor is the inverse of the speed multiplier.
FRAME_TIME = round(1 / SPEED_MULTIPLIER, 4)
FFMPEG_CMD = "ffmpeg -y -i {} -r 60 -filter:v \"setpts={}*PTS\" {}"
async def main():
    """Re-encode each input video at SPEED_MULTIPLIER x speed via ffmpeg,
    streaming ffmpeg's combined output to stdout."""
    import shlex  # local import: only needed here for safe shell quoting

    file_list: List[pathlib.Path] = args.VIDEO
    # Output next to the input, e.g. clip.mp4 -> clip_x2.0.mp4
    new_file_list = [file.with_name(file.stem + f"_x{SPEED_MULTIPLIER}" + file.suffix) for file in file_list]
    for file, new_file in zip(file_list, new_file_list):
        # BUG FIX: paths were interpolated into the shell command unquoted, so
        # filenames with spaces or shell metacharacters broke the command (and
        # permitted shell injection). Quote every path.
        formatted = FFMPEG_CMD.format(shlex.quote(file.as_posix()),
                                      FRAME_TIME,
                                      shlex.quote(new_file.as_posix()))
        proc = await asyncio.create_subprocess_shell(formatted, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT)
        while line := await proc.stdout.readline():
            print(line.decode(), end="")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # One or more video files to re-encode at the chosen speed.
    parser.add_argument("VIDEO", metavar="VID", type=pathlib.Path, nargs="+")
    args = parser.parse_args()
    asyncio.run(main())
| StarcoderdataPython |
1721699 | <filename>sppas/documentation/scripting_solutions/ex16_annotations_dur_filter.py
#!/usr/bin python
"""
:author: <NAME>
:date: 2018-07-09
:contact: <EMAIL>
:license: GPL, v3
:copyright: Copyright (C) 2018 <NAME>, Laboratoire Parole et Langage
:summary: Open an annotated file and filter depending on the duration/time.
Use of this software is governed by the GNU Public License, version 3.
This is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os.path
sys.path.append(os.path.join("..", ".."))
from sppas.src.analysis import sppasTierFilters
from sppas.src.utils.makeunicode import u
from .ex15_annotations_label_filter import get_tier
# ----------------------------------------------------------------------------
# Variables
# ----------------------------------------------------------------------------
# Annotated input file and the tier to be filtered.
filename = 'F_F_B003-P9-merge.TextGrid'
tier_name = "PhonAlign"
# NOTE(review): output_filename is defined but never used below.
output_filename = filename.replace('.TextGrid', '.csv')
verbose = True
# ----------------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    tier = get_tier(filename, tier_name, verbose)
    f = sppasTierFilters(tier)
    # Apply a filter: Extract phonemes 'a' or 'e' during more than 100ms
    # ------------------------------------------------------------------
    phon_set = f.dur(gt=0.1) & (f.tag(exact=u("e")) | f.tag(exact=u("a")))
    if verbose:
        print("{:s} has the following {:d} 'e' or 'a' during more than 100ms:"
              "".format(tier.get_name(), len(phon_set)))
    # NOTE(review): the annotations are printed even when verbose is False
    # (only the header above is guarded) -- confirm this is intended.
    for ann in phon_set:
        print(' - {}: {}'.format(ann.get_location().get_best(), phon_set.get_value(ann)))
| StarcoderdataPython |
3278567 | <filename>analyzer/darwin/lib/common/config.py
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import ConfigParser
class Config:
    """Parses an analyzer configuration file and exposes every option as an
    instance attribute (values coerced bool -> int -> raw string, except
    file_name which is kept verbatim). Python 2 codebase (ConfigParser)."""
    def __init__(self, cfg):
        """@param cfg: configuration file."""
        config = ConfigParser.ConfigParser(allow_no_value=True)
        config.read(cfg)
        for section in config.sections():
            for name, raw_value in config.items(section):
                if name == "file_name":
                    # File names stay verbatim so e.g. "1" is not coerced to int.
                    value = config.get(section, name)
                else:
                    try:
                        value = config.getboolean(section, name)
                    except ValueError:
                        try:
                            value = config.getint(section, name)
                        except ValueError:
                            value = config.get(section, name)
                setattr(self, name, value)
    def get_options(self):
        """Get analysis options.
        @return: options dict.
        """
        # The analysis package can be provided with some options in the
        # following format:
        # option1=value1,option2=value2,option3=value3
        #
        # Here we parse such options and provide a dictionary that will be made
        # accessible to the analysis package.
        options = {}
        if hasattr(self, "options") and len(self.options) > 0:
            try:
                # Split the options by comma.
                # NOTE(review): str.split never raises ValueError, so this
                # try/except can never trigger.
                fields = self.options.split(",")
            except ValueError:
                pass
            else:
                for field in fields:
                    # Split the name and the value of the option.
                    try:
                        # Sometimes, we have a key without a value (i.e. it's a
                        # command line argument), so we can't use the
                        # `key, value = field.split("=", 1)` style here
                        parts = field.split("=", 1)
                    except ValueError:
                        pass
                    else:
                        key = parts[0].strip()
                        arg_prefix = "arg-"
                        if not key.startswith(arg_prefix):
                            # If the parsing went good, we add the option to the
                            # dictionary.
                            value = parts[1].strip()
                            options[key] = value
                        elif len(key) > len(arg_prefix):
                            # Remove "arg-" prefix from the key.
                            # NOTE(review): the literal 4 relies on
                            # len("arg-") == 4; key[len(arg_prefix):] would be safer.
                            key = key[4:]; parts[0] = key
                            # Add this key (with a value maybe) to the args
                            if "args" not in options: options["args"] = []
                            options["args"] += parts
        return options
| StarcoderdataPython |
1607818 | <reponame>arita37/ptl2r.github.io
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by <NAME> | 26/09/2018 | https://y-research.github.io
"""Description
"""
import torch
from org.archive.eval.metric import tor_nDCG_at_k, tor_nDCG_at_ks, EMD_at_k
from org.archive.l2r_global import L2R_GLOBAL
gpu, device = L2R_GLOBAL.global_gpu, L2R_GLOBAL.global_device
def idcg_std(sorted_labels):
    """Compute the ideal DCG for each row of descending-sorted relevance labels.

    Torch port of the numpy version:
        nums = np.power(2, sorted_labels) - 1.0
        denoms = np.log2(np.arange(len(sorted_labels)) + 2)
        idcgs = np.sum(nums/denoms, axis=1)
    """
    gains = torch.pow(2.0, sorted_labels) - 1.0
    positions = torch.arange(sorted_labels.size(1), dtype=torch.float)
    if gpu:
        positions = positions.to(device)
    discounts = torch.log2(2.0 + positions)
    return torch.sum(gains / discounts, dim=1)
def tor_ndcg_at_k(ranker=None, test_Qs=None, k=10, multi_level_rele=True, query_aware=False, dict_query_cnts=None):
    '''
    Average nDCG@k of `ranker` over the queries in `test_Qs`.
    Queries with fewer than k documents are skipped.
    There is no check based on the assumption (say light_filtering() is called)
    that each test instance Q includes at least k documents, and at least one relevant document.
    Or there will be errors.
    '''
    sum_ndcg_at_k = torch.zeros(1)
    cnt = torch.zeros(1)
    for entry in test_Qs:
        tor_test_ranking, tor_test_std_label_vec, qid = entry[0], torch.squeeze(entry[1], dim=0), entry[2][0] # remove the size 1 of dim=0 from loader itself
        if tor_test_std_label_vec.size(0) < k: continue # skip the query if the number of associated documents is smaller than k
        # Predict relevance scores; on GPU the batch is moved to the device and
        # predictions are brought back to CPU for the metric computation.
        if gpu:
            if query_aware:
                tor_rele_pred = ranker.predict(tor_test_ranking.to(device), query_context=dict_query_cnts[qid])
            else:
                tor_rele_pred = ranker.predict(tor_test_ranking.to(device))
            tor_rele_pred = torch.squeeze(tor_rele_pred)
            tor_rele_pred = tor_rele_pred.cpu()
        else:
            if query_aware:
                tor_rele_pred = ranker.predict(tor_test_ranking, query_context=dict_query_cnts[qid])
            else:
                tor_rele_pred = ranker.predict(tor_test_ranking)
            tor_rele_pred = torch.squeeze(tor_rele_pred)
        # Sort the ground-truth labels by predicted score (system ranking) and
        # by label (ideal ranking), then evaluate nDCG@k.
        _, tor_sorted_inds = torch.sort(tor_rele_pred, descending=True)
        sys_sorted_labels = tor_test_std_label_vec[tor_sorted_inds]
        ideal_sorted_labels, _ = torch.sort(tor_test_std_label_vec, descending=True)
        ndcg_at_k = tor_nDCG_at_k(sys_sorted_labels=sys_sorted_labels, ideal_sorted_labels=ideal_sorted_labels, k=k, multi_level_rele=multi_level_rele)
        sum_ndcg_at_k += ndcg_at_k
        cnt += 1
    avg_ndcg_at_k = sum_ndcg_at_k/cnt
    return avg_ndcg_at_k
def tor_ndcg_at_ks(ranker=None, test_Qs=None, ks=[1, 5, 10], multi_level_rele=True, query_aware=False, dict_query_cnts=None):
    '''
    Average nDCG at each cutoff in `ks` over the queries in `test_Qs`.
    NOTE(review): unlike tor_ndcg_at_k, queries shorter than max(ks) are NOT
    skipped here -- confirm the assumption below is actually enforced upstream.
    There is no check based on the assumption (say light_filtering() is called)
    that each test instance Q includes at least k(k=max(ks)) documents, and at least one relevant document.
    Or there will be errors.
    '''
    sum_ndcg_at_ks = torch.zeros(len(ks))
    cnt = torch.zeros(1)
    for entry in test_Qs:
        tor_test_ranking, tor_test_std_label_vec, qid = entry[0], torch.squeeze(entry[1], dim=0), entry[2][0] # remove the size 1 of dim=0 from loader itself
        # Predict relevance scores (GPU path moves data to the device and the
        # predictions back to CPU for metric computation).
        if gpu:
            if query_aware:
                tor_rele_pred = ranker.predict(tor_test_ranking.to(device), query_context=dict_query_cnts[qid])
            else:
                tor_rele_pred = ranker.predict(tor_test_ranking.to(device))
            tor_rele_pred = torch.squeeze(tor_rele_pred)
            tor_rele_pred = tor_rele_pred.cpu()
        else:
            if query_aware:
                tor_rele_pred = ranker.predict(tor_test_ranking, query_context=dict_query_cnts[qid])
            else:
                tor_rele_pred = ranker.predict(tor_test_ranking)
            tor_rele_pred = torch.squeeze(tor_rele_pred)
        # System ranking (labels ordered by predicted score) vs ideal ranking.
        _, tor_sorted_inds = torch.sort(tor_rele_pred, descending=True)
        sys_sorted_labels = tor_test_std_label_vec[tor_sorted_inds]
        ideal_sorted_labels, _ = torch.sort(tor_test_std_label_vec, descending=True)
        ndcg_at_ks = tor_nDCG_at_ks(sys_sorted_labels=sys_sorted_labels, ideal_sorted_labels=ideal_sorted_labels, ks=ks, multi_level_rele=multi_level_rele)
        sum_ndcg_at_ks = torch.add(sum_ndcg_at_ks, ndcg_at_ks)
        cnt += 1
    avg_ndcg_at_ks = sum_ndcg_at_ks/cnt
    return avg_ndcg_at_ks
def emd_at_k(ranker=None, test_Qs=None, k=10, TL_AF=None, multi_level_rele=True):
    '''
    Average Earth Mover's Distance at cutoff k between the ideal label
    distribution and the system's (rescaled) predicted scores.
    Requires a sigmoid-style output activation ('S' or 'ST') so predictions
    lie in [0, 1] before rescaling by the maximum relevance level.
    There is no check based on the assumption (say light_filtering() is called)
    that each test instance Q includes at least k(k=max(ks)) documents, and at least one relevant document.
    Or there will be errors.
    '''
    assert 'S'==TL_AF or 'ST'==TL_AF
    sum_emd = 0.0
    cnt = 0
    for entry in test_Qs:
        tor_test_ranking, tor_test_std_label_vec = torch.squeeze(entry[0], dim=0), torch.squeeze(entry[1], dim=0) # remove the size 1 of dim=0 from loader itself
        if tor_test_std_label_vec.size(0) < k:
            continue
        # Predict relevance scores (GPU path mirrors tor_ndcg_at_k).
        if gpu:
            tor_test_ranking = tor_test_ranking.to(device)
            tor_rele_pred = ranker(tor_test_ranking)
            tor_rele_pred = torch.squeeze(tor_rele_pred)
            tor_rele_pred = tor_rele_pred.cpu()
        else:
            tor_rele_pred = ranker(tor_test_ranking)
            tor_rele_pred = torch.squeeze(tor_rele_pred)
        # Align predicted scores with the ideal (descending-label) order and
        # rescale [0,1] predictions to the label range before computing EMD.
        ideal_desc_labels, ideal_sorted_inds = torch.sort(tor_test_std_label_vec, descending=True)
        sys_corresponding_scores = tor_rele_pred[ideal_sorted_inds]
        tor_max_rele_level = torch.max(ideal_desc_labels)
        sys_corresponding_scores = sys_corresponding_scores * tor_max_rele_level
        emd_v = EMD_at_k(k=k, ideal_desc_labels=ideal_desc_labels[0:k].numpy(), sys_corresponding_scores=sys_corresponding_scores[0:k].numpy())
        sum_emd += emd_v
        cnt += 1
    avg_emd = sum_emd/cnt
return avg_emd # averaged value | StarcoderdataPython |
13893 | <filename>servermn/core/__init__.py
def init():
    """Initialise the core service environment.

    Placeholder for future start-up work: setting the locale environment,
    loading configuration, dropping to the configured user and group, and
    initialising the logger.  Currently a no-op.
    """
    pass
3356356 | <reponame>ContinuumIO/enaml
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from PyQt4.QtCore import Qt, QSize, QPoint, QMargins, QEvent, pyqtSignal
from PyQt4.QtGui import QWidget, QLayout, QPainter, QPainterPath
from .q_single_widget_layout import QSingleWidgetLayout
class QBubbleView(QWidget):
    """ A Bubble popup widget.
    This widget implements a popup style with rounded corners and an
    arrow anchoring it to an underlying widget. Useful for transient
    dialogs.
    """
    #: A signal emitted when the popup is closed
    closed = pyqtSignal()
    #: Enum to specify BubbleView orientation
    AnchorTop = 0
    AnchorBottom = 1
    AnchorLeft = 2
    AnchorRight = 3
    def __init__(self, parent=None):
        # NOTE(review): `parent` is dereferenced unconditionally below
        # (parent.window(), parent.destroyed) — passing None would raise.
        # Confirm all call sites supply an anchor widget.
        super(QBubbleView, self).__init__(parent)
        self._central_widget = None
        # Set up the window flags to get a non-bordered window
        self.setWindowFlags(Qt.ToolTip | Qt.FramelessWindowHint)
        self.setAttribute(Qt.WA_TranslucentBackground)
        layout = QSingleWidgetLayout()
        layout.setSizeConstraint(QLayout.SetMinAndMaxSize)
        self.setLayout(layout)
        # Default anchoring and configuration options
        self.setAnchor(QBubbleView.AnchorBottom)
        self.setRelativePos((0.5, 0.5))
        self.setArrowSize(20)
        self.setRadius(10)
        # track parent window movement
        parent.window().installEventFilter(self)
        parent.destroyed.connect(self.deleteLater)
    def centralWidget(self):
        """ Returns the central widget for the popup.
        Returns
        -------
        result : QWidget or None
            The central widget of the popup, or None if no widget
            was provided.
        """
        return self._central_widget
    def setCentralWidget(self, widget):
        """ Set the central widget for this popup.
        Parameters
        ----------
        widget : QWidget
            The widget to use as the content of the popup.
        """
        self._central_widget = widget
        self.layout().setWidget(widget)
    def setAnchor(self, anchor):
        """ Set the positioning of the popup relative to the parent widget.
        Parameters
        ----------
        anchor : int
            Can be one of AnchorLeft, AnchorRight, AnchorTop, AnchorBottom
        Raises
        ------
        ValueError
            If `anchor` is not one of the four Anchor* constants.
        """
        if anchor not in (QBubbleView.AnchorLeft, QBubbleView.AnchorRight,
                          QBubbleView.AnchorTop, QBubbleView.AnchorBottom):
            err = "Anchor must be one of AnchorLeft, AnchorRight, " \
                  "AnchorTop, AnchorBottom"
            raise ValueError(err)
        self._anchor_type = anchor
        # Re-derive geometry immediately when the popup is already showing.
        if self.isVisible():
            self._rebuild()
    def anchor(self):
        """ Return the relative positioning.
        Returns
        -------
        result : int
            An enum specifying the position relative to the parent widget.
            One of AnchorLeft, AnchorRight, AnchorTop, AnchorBottom
        """
        return self._anchor_type
    def setArrowSize(self, arrow):
        """ Set size of the arrow.
        Parameters
        ----------
        arrow : int
            The size of the arrow (in pixels). A size of zero indicates
            that no arrow is desired
        Raises
        ------
        ValueError
            If `arrow` is negative.
        """
        if arrow < 0:
            raise ValueError("Arrow size must be greater than or equal to 0")
        # Arrow is stored as a QSize: width = height / 1.5.
        # NOTE(review): `arrow / 1.5` produces a float; confirm PyQt4's
        # QSize accepts a non-integer argument here.
        self._arrow = QSize(arrow / 1.5, arrow)
        if self.isVisible():
            self._rebuild()
    def arrowSize(self):
        """ Return the size of the arrow.
        Returns
        -------
        result : int
            The size of the arrow (in pixels)
        """
        return self._arrow.height()
    def setRadius(self, radius):
        """ Set the radius of the popup corners.
        Parameters
        ----------
        radius : int
            The radius of the popup corners (in pixels). Must be greater
            than or equal to 2.
        Raises
        ------
        ValueError
            If `radius` is less than 2.
        """
        if radius < 2:
            raise ValueError("Radius must be greater than or equal to 2")
        self._radius = radius
        if self.isVisible():
            self._rebuild()
    def radius(self):
        """ Return the radius of the corners.
        Returns
        -------
        result : Int
            The radius of the popup corners (in pixels)
        """
        return self._radius
    def setRelativePos(self, pos):
        """ Set the relative position of the popup.
        This method sets the relative position of the anchor point (the
        tip of the arrow) relative the bounds of the parent widget.
        Parameters
        ----------
        pos : tuple
            A 2-tuple of the form ((0,1), (0,1) specifying the position
            of the popup coordindates relative to the parent widget's
            bounds. Each member of the tuple should be between 0 and 1
            inclusive.
        """
        self._relative_pos = pos
        if self.isVisible():
            self._rebuild()
    def relativePos(self):
        """ Return the relative position of the popup.
        Returns
        -------
        result : Tuple
            The relative anchoring of the popup relative to the parent
            widget's bounds
        """
        return self._relative_pos
    def paintEvent(self, event):
        """ Reimplement the paint event
        Draws the cached popup outline (`self._path`) with an antialiased
        pen/brush taken from the widget palette.
        """
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)
        palette = self.palette()
        painter.setPen(palette.dark().color())
        painter.setBrush(palette.window())
        painter.drawPath(self._path)
    def resizeEvent(self, event):
        """ Reimplement the resize event
        Rebuild the popup path used to paint the widget when the size of
        the popup changes.
        """
        self._rebuild()
        super(QBubbleView, self).resizeEvent(event)
    def closeEvent(self, event):
        """ Handle the QCloseEvent from the window system.
        By default, this handler calls the superclass' method to close
        the window and then emits the 'closed' signal.
        """
        super(QBubbleView, self).closeEvent(event)
        self.closed.emit()
    def eventFilter(self, obj, event):
        """ Track parent window move events.
        Shifts the popup by the same delta as the parent window so it
        stays anchored.  Always returns False so the event continues to
        propagate to the parent.
        """
        if event.type() == QEvent.Move:
            self.move(self.pos() + event.pos() - event.oldPos())
        return False
    def _rebuild(self):
        """ Rebuild the path used to draw the outline of the popup.
        Repositions the popup so the arrow tip lands on the configured
        relative point of the parent, reserves margin space for the
        arrow, and regenerates the cached outline path.
        """
        # anchor to center of parent
        anchor = self.parent()
        anchor_size = anchor.size()
        # The arrow tip, in parent-local then global coordinates.
        pt = QPoint(anchor_size.width() * self._relative_pos[0],
                    anchor_size.height() * self._relative_pos[1])
        anchor_pt = anchor.mapToGlobal(pt)
        h = self._arrow.height()
        rect = self.rect()
        margins = QMargins()
        anchor_type = self._anchor_type
        # `adj` maps the anchor point to the correct corner-relative offset
        # of this widget; the matching margin reserves room for the arrow.
        if anchor_type == QBubbleView.AnchorRight:
            adj = QPoint(0, rect.height() / 2)
            margins.setLeft(h)
        elif anchor_type == QBubbleView.AnchorBottom:
            adj = QPoint(rect.width() / 2, 0)
            margins.setTop(h)
        elif anchor_type == QBubbleView.AnchorLeft:
            adj = QPoint(rect.width(), rect.height() / 2)
            margins.setRight(h)
        else:
            adj = QPoint(rect.width() / 2, rect.height())
            margins.setBottom(h)
        self.move(anchor_pt - adj)
        self.setContentsMargins(margins)
        self._path = _generate_popup_path(
            rect, self._radius, self._radius, self._arrow, anchor_type
        )
        self.update()
def _generate_popup_path(rect, xRadius, yRadius, arrowSize, anchor):
    """ Generate the QPainterPath used to draw the outline of the popup.
    Parameters
    ----------
    rect : QRect
        Bounding rect for the popup.
    xRadius, yRadius : int
        x and y radius of the popup.
    arrowSize : QSize
        Width and height of the popup anchor arrow.
    anchor : int
        Positioning of the popup relative to the parent. Determines the
        position of the arrow.
    Returns
    -------
    result : QPainterPath
        Path that can be passed to QPainter.drawPath to render popup.
    """
    awidth, aheight = arrowSize.width(), arrowSize.height()
    draw_arrow = (awidth > 0 and aheight > 0)
    # Shrink the body rect on the anchored side to leave room for the arrow.
    if anchor == QBubbleView.AnchorRight:
        rect.adjust(aheight, 0, 0, 0)
    elif anchor == QBubbleView.AnchorLeft:
        rect.adjust(0, 0, -aheight, 0)
    elif anchor == QBubbleView.AnchorBottom:
        rect.adjust(0, aheight, 0, 0)
    else:
        rect.adjust(0, 0, 0, -aheight)
    r = rect.normalized()
    if r.isNull():
        # NOTE(review): returns None here although the docstring promises a
        # QPainterPath; confirm callers can never hit this with a null rect.
        return
    hw = r.width() / 2
    hh = r.height() / 2
    # Clamp the radii to half the rect and convert to percentages, matching
    # the percentage-based rounded-rect convention used by the arc math below.
    xRadius = 100 * min(xRadius, hw) / hw
    yRadius = 100 * min(yRadius, hh) / hh
    # The starting point of the path is the top left corner
    x = r.x()
    y = r.y()
    w = r.width()
    h = r.height()
    # Full diameters of the corner ellipses, derived from the percentages.
    rxx2 = w * xRadius / 100
    ryy2 = h * yRadius / 100
    center = r.center()
    # Walk the outline clockwise from the top-left corner, inserting the
    # arrow triangle on whichever edge faces the parent widget.
    path = QPainterPath()
    path.arcMoveTo(x, y, rxx2, ryy2, 180)
    path.arcTo(x, y, rxx2, ryy2, 180, -90)
    if anchor == QBubbleView.AnchorBottom and draw_arrow:
        path.lineTo(center.x() - awidth, y)
        path.lineTo(center.x(), y - aheight)
        path.lineTo(center.x() + awidth, y)
    path.arcTo(x + w - rxx2, y, rxx2, ryy2, 90, -90)
    if anchor == QBubbleView.AnchorLeft and draw_arrow:
        path.lineTo(x + w, center.y() - awidth)
        path.lineTo(x + w + aheight, center.y())
        path.lineTo(x + w, center.y() + awidth)
    path.arcTo(x + w - rxx2, y + h - ryy2, rxx2, ryy2, 0, -90)
    if anchor == QBubbleView.AnchorTop and draw_arrow:
        path.lineTo(center.x() + awidth, y + h)
        path.lineTo(center.x(), y + h + aheight)
        path.lineTo(center.x() - awidth, y + h)
    path.arcTo(x, y + h - ryy2, rxx2, ryy2, 270, -90)
    if anchor == QBubbleView.AnchorRight and draw_arrow:
        path.lineTo(x, center.y() + awidth)
        path.lineTo(x - aheight, center.y())
        path.lineTo(x, center.y() - awidth)
    path.closeSubpath()
    return path
| StarcoderdataPython |
1697311 | <filename>agagd/agagd_core/tables/all_chapters_table.py
import agagd_core.models as agagd_models
import django_tables2 as tables
from django.utils.html import format_html
# Base Bootstrap Column Header Attributes
# Shared `attrs` dict for django-tables2 Meta classes: Bootstrap "table"
# class on the element, dark table header, and scoped <th> column cells.
default_bootstrap_header_column_attrs = {
    "class": "table",
    "thead": {"class": "thead-dark"},
    "th": {"scope": "col"},
}
class AllChaptersTable(tables.Table):
    """django-tables2 table listing all chapters.

    The chapter name is rendered as a link to the ``chapter_detail`` route
    and the ``url`` column as an external, protocol-relative hyperlink.
    """
    # Link each chapter name to chapter_detail, passing the row's member_id.
    name = tables.Column(linkify=("chapter_detail", [tables.A("member_id")]))
    url = tables.Column()
    def render_url(self, value):
        # Render the stored host as a protocol-relative external link.
        return format_html("<a href='//{}' rel='external'>{}</a>", value, value)
    class Meta:
        attrs = default_bootstrap_header_column_attrs
        fields = ("name", "contact", "meeting_text", "url")
        model = agagd_models.Chapters
        orderable = False
        sequence = fields
        template_name = "django_tables2/bootstrap4.html"
| StarcoderdataPython |
3387436 | <reponame>krishotte/web_sperky<gh_stars>0
"""A DashboardController Module."""
from masonite.request import Request
from masonite.view import View
from masonite.controllers import Controller
from .PortfolioController import get_user
from .auth.LoginController import get_caller_path
from app.Product import Product
from app.Order import Order
from .EditPortfolioController import add_image_path
from app.User import User
from app.Address import Address
from app.Shipping import Shipping
from app.OrderState import OrderState
from masonite import env
import pendulum
import json
from app.Variant import Variant
from .PortfolioController import get_settings
from masonite import Mail
from app.mailable.AdminsNewOrderMailable import AdminsNewOrderMailable
from threading import Thread
import time
class DashboardController(Controller):
    """DashboardController Controller Class.

    Masonite controller for the customer dashboard: profile and address
    management, the shopping cart (held in the session), and the
    multi-step order flow (address -> shipping -> review -> make_order).
    """
    def __init__(self, request: Request):
        """DashboardController Initializer
        Arguments:
            request {masonite.request.Request} -- The Masonite Request class.
        """
        self.request = request
    def show(self, request: Request, view: View):
        """Render the default dashboard page (the profile view)."""
        user = get_user(request)
        user_ = User.where('email', '=', user['email']).first()
        user_.addresses()
        return view.render('dash/profile', {
            'user': user,
            'user_': user_,
            'settings': get_settings(),
        })
    def show_profile(self, request: Request, view: View):
        """Render the profile page with the user's addresses."""
        user = get_user(request)
        user_ = User.where('email', '=', user['email']).first()
        user_.addresses()
        if user_.verified_at is not None:
            print(f' user verified')
        # print(f' environ: {request.environ}')
        print(f' APP_URL: {request.header("APP_URL")}')
        # print(f' env: {env("APP_URL")}')
        return view.render('dash/profile', {
            'user': user,
            'user_': user_,
            'settings': get_settings(),
        })
    def show_orders(self, request: Request, view: View):
        """List the logged-in user's orders, newest first."""
        user = get_user(request)
        orders = request.user().orders().order_by('id', 'desc').get()
        orders.load('order_state')
        for order in orders:
            print(f' datetime: {order.created_at.strftime("%Y-%m-%d")}')
        print(f' your orders: {orders.serialize()}')
        return view.render('dash/orders', {
            'user': user,
            'orders': orders.serialize(),
            'settings': get_settings(),
        })
    def show_single_order(self, request: Request, view: View):
        """Show one order's detail; redirect if it belongs to another user."""
        user = get_user(request)
        order = Order.find(request.param('order_id'))
        # Touch the relations so they are loaded before serialization.
        order.address
        order.shipping
        order.order_state
        print(f' order to display: {order.serialize()}')
        # serialized_products = add_image_path(order.products.serialize())
        for product in order.products:
            # Only load the variant actually ordered (stored on the pivot row).
            if product.pivot.variant_id:
                product.load({
                    'variants': Variant.query().where('id', '=', product.pivot.variant_id)
                })
        serialized_products = add_image_path(order.products.serialize())
        print(f' products: {serialized_products}') # order.products.serialize()}')
        # Ownership check: only the order's owner may view it.
        if order.user.email == user['email']:
            return view.render('dash/single_order', {
                'user': user,
                'order': order.serialize(),
                'products': serialized_products,
                'settings': get_settings(),
            })
        else:
            print(f' not your order')
            return request.redirect('/dashboard/orders')
    # cart control methods
    def show_cart(self, request: Request, view: View):
        """Render the cart from the session's 'ordered_items' list."""
        user = get_user(request)
        try:
            items = request.session.get('ordered_items')
            print(f' cart contains: {items}')
            unique_items = items_to_unique(items)
            print(f' unique items: {unique_items}')
        except Exception:
            raise
            # NOTE(review): the two lines below are unreachable because of the
            # `raise` above — presumably they were meant as an empty-cart
            # fallback instead of re-raising; confirm the intended behaviour.
            unique_items = []
        total_price = 0
        serialized_products, total_price = evaluate_cart(unique_items, total_price)
        request.session.set('total_price', total_price)
        # print(f' products: {products}')
        return view.render('dash/cart', {
            'user': user,
            'ordered_items': unique_items,
            'products': serialized_products,
            'total_price': total_price,
            'settings': get_settings(),
        })
    def add_to_cart(self, request: Request):
        """
        obsolete - not used
        items to order are held in cookie as list
        items can be in list multiple times
        """
        caller = get_caller_path(request)
        # request.session.reset()
        if request.session.has('ordered_items'):
            items = request.session.get('ordered_items')
            items.append(int(request.param('product_id')))
            request.session.set('ordered_items', items)
        else:
            request.session.set('ordered_items', [int(request.param('product_id'))])
        request.session.flash('success', 'Produkt bol pridaný do košíka')
        print(f' session : {request.session.all()}')
        return request.redirect(caller)
    def add_to_cart2(self, request: Request):
        """Add a product (with optional variant) to the session cart.

        Products that define variants require a variant_id in the request;
        otherwise the user is flashed a warning and nothing is added.
        """
        caller = get_caller_path(request)
        product_id = int(request.input('product_id'))
        variant_id = int(request.input('variant_id'))
        print(f' request: {request.all()}')
        product = Product.find(product_id)
        # Touch the relation so `product.variants` is loaded.
        product.variants
        if len(product.variants) > 0:
            if request.has('variant_id'):
                # order product with variant
                print(f' variant required, variant selected')
                if request.session.has('ordered_items'):
                    items = request.session.get('ordered_items')
                    items.append({
                        'product_id': product_id,
                        'variant_id': variant_id,
                    })
                    request.session.set('ordered_items', json.dumps(items))
                else:
                    request.session.set('ordered_items', json.dumps([{
                        'product_id': product_id,
                        'variant_id': variant_id,
                    }]))
                request.session.flash('success', 'Produkt bol pridaný do košíka')
            else:
                # order not possible
                print(f' variant required, but not found')
                request.session.flash('warning', 'Prosím vyberte si variant produktu')
        else:
            # order product without variant
            if request.session.has('ordered_items'):
                items = request.session.get('ordered_items')
                print(f' items: {items}')
                items.append({
                    'product_id': product_id,
                })
                request.session.set('ordered_items', json.dumps(items))
            else:
                request.session.set('ordered_items', json.dumps([{
                    'product_id': product_id,
                }]))
            request.session.flash('success', 'Produkt bol pridaný do košíka')
        return request.redirect(caller)
    def remove_from_cart(self, request: Request, view: View):
        """
        remove one item from cart
        """
        caller = get_caller_path(request)
        # item_to_remove = int(request.input('item_to_remove'))
        ordered_items = request.session.get('ordered_items')
        print(f' ordered items before del: {ordered_items}')
        # Rebuild the dict exactly as stored so list.index() can match it.
        if request.has('variant_id'):
            item_to_remove = {
                'product_id': int(request.input('item_to_remove')),
                'variant_id': int(request.input('variant_id')),
            }
        else:
            item_to_remove = {'product_id': int(request.input('item_to_remove'))}
        index_of_item = ordered_items.index(item_to_remove)
        ordered_items.pop(index_of_item)
        print(f' ordered items after del: {ordered_items}')
        request.session.set('ordered_items', json.dumps(ordered_items))
        return request.redirect(caller)
    # order control methods
    def order_show_user_details(self, request: Request, view: View):
        """
        first step of order
        user select address to send order to
        """
        user = get_user(request)
        user_ = User.where('email', '=', user['email']).first()
        user_.addresses()
        print(f' user addresses: {user_.addresses.serialize()}')
        return view.render('dash/order/user_data', {
            'user': user,
            'user_': user_,
            'settings': get_settings(),
        })
    def order_set_user_address(self, request: Request):
        """
        sets order address to cookie
        redirects to shipping
        """
        address_id = int(request.input('address_id'))
        address = Address.find(address_id)
        print(f' address to use: {address.serialize()}')
        request.session.set('address', address.id)
        return request.redirect('/order-shipping')
    def order_show_shipping(self, request: Request, view: View):
        """
        allows to go back from order review
        """
        user = get_user(request)
        user_ = User.where('email', '=', user['email']).first()
        user_.addresses()
        shippings = Shipping.all()
        payments = [
            {'name': 'V hotovosti pri prebratí tovaru'},
            {'name': 'Bankovým prevodom'},
        ]
        return view.render('dash/order/shipping', {
            'user': user,
            'user_': user_,
            'shippings': shippings,
            'payments': payments,
            'settings': get_settings(),
        })
    def order_set_shipping(self, request: Request):
        """
        saves shipping to session, redirects to order review
        """
        request.session.set('shipping', int(request.input('shipping_id')))
        return request.redirect('/order-review')
    def order_back_to_shipping(self, request: Request):
        """
        saves note to session, redirects to order_show_shipping
        """
        note = request.input('note')
        print(f' saving note to session: {note}')
        request.session.set('note', note)
        return request.redirect('/order-shipping')
    def order_review(self, request: Request, view: View):
        """
        shows order review
        """
        user = get_user(request)
        user_ = User.where('email', '=', user['email']).first()
        shipping = Shipping.find(int(request.session.get('shipping')))
        address = Address.find(int(request.session.get('address')))
        items = request.session.get('ordered_items')
        unique_items = items_to_unique(items)
        note = request.session.get('note')
        # Shipping price is the starting total; evaluate_cart adds the items.
        total_price = shipping.price
        serialized_products, total_price = evaluate_cart(unique_items, total_price)
        request.session.set('total_price', total_price)
        return view.render('dash/order/review_order', {
            'user': user,
            # NOTE(review): 'user_' is set to `user` (the dict), not the
            # `user_` model loaded above — looks unintentional; confirm
            # what the template expects.
            'user_': user,
            'ordered_items': unique_items,
            'products': serialized_products,
            'total_price': total_price,
            'shipping': shipping,
            'address': address,
            'note': note,
            'settings': get_settings(),
        })
    def make_order(self, request: Request, mail: Mail):
        """Persist the order from the session cart and notify the admins.

        Creates the Order row, attaches products (with per-row count,
        unit price and optional variant), spawns a background thread to
        e-mail all admins, then clears the session.
        """
        print(f' session: {request.session.all()}')
        shipping = Shipping.find(int(request.session.get('shipping')))
        address = Address.find(int(request.session.get('address')))
        items = request.session.get('ordered_items')
        unique_items = items_to_unique(items)
        note = request.input('note')
        total_price = float(request.session.get('total_price'))
        products = []
        try:
            for index, each in enumerate(unique_items):
                product = Product.find(each['product_id'])
                if 'variant_id' in each:
                    product.load({
                        'variants': Variant.query().where('id', '=', each['variant_id'])
                    })
                products.append(product)
        except Exception:
            # NOTE(review): silently drops the remaining items on any lookup
            # failure — the order is then created with a partial product
            # list; confirm this best-effort behaviour is intended.
            pass
        print(f' products1: {products}')
        # let's make an order
        order = Order(total_price=total_price, note=note)
        order.user().associate(request.user())
        order_state = OrderState.where('phase', '=', 1).first()
        order.order_state().associate(order_state)
        order.shipping().associate(shipping)
        order.address().associate(address)
        # save to get an id
        order.save()
        # Human-readable order number: year + zero-padded id.
        order.name = f"{pendulum.now().format('%Y')}{str(order.id).zfill(4)}"
        order.save()
        print(f' order saved')
        for index, product in enumerate(products):
            if len(product.variants) > 0:
                # Variant price overrides the base product price when set.
                if product.variants[0].price:
                    product_price = product.variants[0].price
                else:
                    product_price = product.price
                order.products().attach(product, {
                    'product_count': unique_items[index]['count'],
                    'unit_price': product_price,
                    'variant_id': unique_items[index]['variant_id'],
                })
            else:
                order.products().attach(product, {
                    'product_count': unique_items[index]['count'],
                    'unit_price': product.price,
                })
        # send notification to admins
        admins = User.where('role_id', '=', 1).get()
        emails = []
        for admin in admins:
            emails.append(AdminsNewOrderMailable(admin.email, order))
        thr1 = Thread(target=admin_send_order_notification, args=[mail, emails])
        thr1.start()
        # clear session
        request.session.reset()
        return request.redirect('/dashboard/orders')
    # user address control methods
    def show_new_address(self, request: Request, view: View):
        """
        shows form for new user address
        """
        user = get_user(request)
        print(f' logged in user: {user}')
        return view.render('dash/new_address', {
            'user': user,
            'settings': get_settings(),
        })
    def store_new_address(self, request: Request):
        """Create a new address from the form and attach it to the user."""
        user = get_user(request)
        print(f' logged in user: {user}')
        user_ = User.where('email', '=', user['email']).first_or_fail()
        address1 = Address(
            street=request.input('street'),
            zip_code=request.input('zip_code'),
            city=request.input('city'),
            name=request.input('name'),
            phone=request.input('phone'),
        )
        print(f' address to store: {address1}')
        user_.addresses().save(address1)
        return request.redirect('/dashboard/profile')
    def show_existing_address(self, request: Request, view: View):
        """Show the edit form for an address owned by the logged-in user."""
        user = get_user(request)
        print(f' logged in user: {user}')
        address_id = request.param('address_id')
        address_ = Address.find(address_id)
        # Ownership check before exposing the address.
        if address_.user.email == user['email']:
            print(f' your address')
            # return request.redirect('/dashboard/profile')
            return view.render('dash/existing_address', {
                'user': user,
                'address': address_,
                'settings': get_settings(),
            })
        else:
            print(f' not your address')
            return request.redirect('/dashboard')
    def store_existing_address(self, request: Request):
        """Update an existing address with the submitted form fields.

        NOTE(review): unlike show_existing_address/delete_address this does
        not verify that the address identified by request.input('id')
        belongs to the logged-in user — any authenticated user could
        overwrite another user's address; confirm and add the check.
        """
        user = get_user(request)
        print(f' logged in user: {user}')
        user_ = User.where('email', '=', user['email']).first_or_fail()
        address1 = Address.find(request.input('id'))
        address1.street = request.input('street')
        address1.zip_code = request.input('zip_code')
        address1.city = request.input('city')
        address1.name = request.input('name')
        address1.phone = request.input('phone')
        print(f' address to store: {address1.serialize()}')
        address1.save()
        return request.redirect('/dashboard/profile')
    def delete_address(self, request: Request):
        """Delete an address if (and only if) the logged-in user owns it."""
        user = get_user(request)
        print(f' logged in user: {user}')
        address_id = request.param('address_id')
        address_ = Address.find(address_id)
        if address_.user.email == user['email']:
            print(f' your address, deleting ...')
            # return request.redirect('/dashboard/profile')
            address_.delete()
            return request.redirect('/dashboard/profile')
        else:
            print(f' not your address')
            return request.redirect('/dashboard')
def items_to_unique(items):
    """
    builds list of unique items with counts
    :param items: list of cart item dicts (may repeat); the dicts are
        mutated in place to carry a 'count' key
    :return: list of unique item dicts, each with a 'count' key; an empty
        list if anything goes wrong (e.g. items is None)
    """
    unique = []
    tallies = []
    result = []
    try:
        # First pass: collect each distinct item and how often it occurs.
        for entry in items:
            occurrences = items.count(entry)
            if entry not in unique:
                unique.append(entry)
                tallies.append(occurrences)
        # Second pass: stamp the count onto each unique dict.
        for position, entry in enumerate(unique):
            entry['count'] = tallies[position]
            result.append(entry)
    except Exception:
        pass
    return result
def evaluate_cart(unique_items, total_price):
    """
    prepare products with variants for view

    :param unique_items: list of dicts with 'product_id', 'count' and an
        optional 'variant_id' (as produced by items_to_unique)
    :param total_price: starting total (e.g. 0 or the shipping price)
    :return: (serialized_products, total_price) — serialized product dicts
        with image paths, and the accumulated total.
        NOTE(review): on any exception the products list is returned empty
        while total_price keeps whatever was accumulated up to the failing
        item — confirm callers tolerate this partial total.
    """
    total_price_ = total_price
    products = []
    try:
        for each in unique_items:
            product = Product.find(each['product_id'])
            if 'variant_id' in each:
                # load variant if selected
                product.load({
                    'variants': Variant.query().where('id', '=', each['variant_id'])
                })
                if product.variants[0].price:
                    # count in variant price if exists
                    total_price_ += product.variants[0].price * each['count']
                else:
                    total_price_ += product.price * each['count']
            else:
                total_price_ += product.price * each['count']
            products.append(product.serialize())
        print(f' products: {products}')
        print(f' total price: {total_price_}')
        serialized_products = add_image_path(products)
    except Exception:
        serialized_products = []
    return serialized_products, total_price_
def admin_send_order_notification(mail, emails):
    """Send the new-order mailables to all admins, throttled.

    Intended to run in a background thread; sleeps 2 seconds between
    sends so the mail backend is not hammered.

    :param mail: the Masonite Mail manager
    :param emails: iterable of prepared mailable instances
    """
    print(f' sending order notification to admins from another thread')
    for mailable in emails:
        mail.mailable(mailable).send()
        time.sleep(2)
| StarcoderdataPython |
1724007 | <reponame>forksnd/arbytmap
# Prefer setuptools; fall back to distutils on very old environments.
try:
    from setuptools import setup, Extension, Command
except ImportError:
    from distutils.core import setup, Extension, Command
import arbytmap
long_desc = ""
# Best-effort read of the readme for the long description.
try:
    long_desc = open("README.MD").read()
except Exception:
    print("Couldn't read readme.")
setup(
    name="arbytmap",
    description='A texture manipulation module for python 3.',
    long_description=long_desc,
    long_description_content_type='text/markdown',
    # Version is sourced from the package itself to avoid drift.
    version='%s.%s.%s' % arbytmap.__version__,
    url='https://github.com/Sigmmma/arbytmap',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=[
        'arbytmap',
        'arbytmap.ext',
    ],
    # C accelerator modules; pure-python fallbacks live in arbytmap.ext.
    ext_modules = [
        Extension("arbytmap.ext.arbytmap_ext", ["arbytmap/src/arbytmap_ext.c"]),
        Extension("arbytmap.ext.bitmap_io_ext", ["arbytmap/src/bitmap_io_ext.c"]),
        Extension("arbytmap.ext.dds_defs_ext", ["arbytmap/src/dds_defs_ext.c"]),
        Extension("arbytmap.ext.raw_packer_ext", ["arbytmap/src/raw_packer_ext.c"]),
        Extension("arbytmap.ext.raw_unpacker_ext", ["arbytmap/src/raw_unpacker_ext.c"]),
        Extension("arbytmap.ext.swizzler_ext", ["arbytmap/src/swizzler_ext.c"])
    ],
    package_data={
        'arbytmap': ["src/*", '*.[tT][xX][tT]', '*.[mM][dD]'],
    },
    platforms=["POSIX", "Windows"],
    keywords="arbytmap, texture, bitmap, converter, image, editing",
    install_requires=[],
    requires=[],
    provides=['arbytmap'],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3 :: Only",
        "Topic :: Multimedia :: Graphics :: Graphics Conversion",
        "Programming Language :: C",
    ],
    zip_safe=False,
)
| StarcoderdataPython |
3385397 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# Package version; bump here for releases.
version = '1.0.1'
setup(
    name='fabplugins',
    version=version,
    description="Fabric plugins",
    long_description="Fabric plugins",
    keywords='fabplugins',
    author='Time Home',
    author_email='<EMAIL>',
    url='',
    license='MIT',
    classifiers=[],
    packages=['fabplugins'],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        "fabric"
    ]
)
| StarcoderdataPython |
1702843 | <filename>ApiManager/utils/utils.py
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import json
import os
import requests
import xmind
import logging
import time
from .parser import xmind_to_testsuites
from ApiManager.models import XmindCase, UserInfo
# from django.utils import timezone
def get_absolute_path(path):
    """
    Return the absolute path of a file
    If path contains a start point (eg Unix '/') then use the specified start point
    instead of the current working directory. The starting point of the file path is
    allowed to begin with a tilde "~", which will be replaced with the user's home directory.
    """
    directory = os.path.dirname(path)
    filename = os.path.basename(path)
    if not directory:
        # No directory component: anchor at the current working directory.
        directory = os.getcwd()
    directory = os.path.abspath(os.path.expanduser(directory))
    return os.path.join(directory, filename)
def get_xmind_testsuites(xmind_file):
    """Load the XMind file and parse to `xmind2testcase.metadata.TestSuite` list"""
    xmind_file = get_absolute_path(xmind_file)
    content_dict = xmind.load(xmind_file).getData()
    logging.debug("loading XMind file(%s) dict data: %s", xmind_file, content_dict)
    if not content_dict:
        # Nothing to parse: the workbook produced no content.
        logging.error('Invalid XMind file(%s): it is empty!', xmind_file)
        return []
    return xmind_to_testsuites(content_dict)
def get_xmind_testsuite_list(xmind_file):
    """Load the XMind file and get all testsuite in it

    :param xmind_file: the target XMind file
    :return: a list of testsuite data, each annotated with per-suite and
        per-product execution statistics
    """
    xmind_file = get_absolute_path(xmind_file)
    logging.info('Start converting XMind file(%s) to testsuite data list...', xmind_file)
    # Map the numeric case result onto its statistics bucket.
    result_buckets = {0: 'non_execution', 1: 'pass', 2: 'failed', 3: 'blocked', 4: 'skipped'}
    suite_data_list = []
    for testsuite in get_xmind_testsuites(xmind_file):
        product_statistics = {'case_num': 0, 'non_execution': 0, 'pass': 0,
                              'failed': 0, 'blocked': 0, 'skipped': 0}
        for sub_suite in testsuite.sub_suites:
            suite_statistics = {'case_num': len(sub_suite.testcase_list), 'non_execution': 0,
                                'pass': 0, 'failed': 0, 'blocked': 0, 'skipped': 0}
            for case in sub_suite.testcase_list:
                bucket = result_buckets.get(case.result)
                if bucket is None:
                    logging.warning('This testcase result is abnormal: %s, please check it: %s',
                                    case.result, case.to_dict())
                else:
                    suite_statistics[bucket] += 1
            sub_suite.statistics = suite_statistics
            # Roll the suite numbers up into the product totals.
            for item in product_statistics:
                product_statistics[item] += suite_statistics[item]
        testsuite.statistics = product_statistics
        suite_data_list.append(testsuite.to_dict())
    logging.info('Convert XMind file(%s) to testsuite data list successfully!', xmind_file)
    return suite_data_list
def get_xmind_testcase_list(xmind_file):
    """Load the XMind file and get all testcase in it

    :param xmind_file: the target XMind file
    :return: a list of testcase data dicts, each tagged with its product
        (root suite) and suite name
    """
    xmind_file = get_absolute_path(xmind_file)
    logging.info('Start converting XMind file(%s) to testcases dict data...', xmind_file)
    testcases = []
    # Flatten product -> suite -> case into one list of dicts.
    for testsuite in get_xmind_testsuites(xmind_file):
        for suite in testsuite.sub_suites:
            for case in suite.testcase_list:
                case_data = case.to_dict()
                case_data['product'] = testsuite.name
                case_data['suite'] = suite.name
                testcases.append(case_data)
    logging.info('Convert XMind file(%s) to testcases dict data successfully!', xmind_file)
    return testcases
def xmind_testsuite_to_json_file(xmind_file):
    """Convert XMind file to a testsuite json file.

    :param xmind_file: the target XMind file
    :return: path of the generated (or already existing) testsuite json file
    """
    xmind_file = get_absolute_path(xmind_file)
    logging.info('Start converting XMind file(%s) to testsuites json file...', xmind_file)
    # assumes the file name ends with '.xmind' (6 chars) — TODO confirm
    testsuite_json_file = xmind_file[:-6] + '_testsuite.json'
    # Check for an existing output file *before* parsing: the original code
    # parsed the whole XMind workbook first and then discarded the result
    # whenever the json file was already present.
    if os.path.exists(testsuite_json_file):
        logging.info('The testsuite json file already exists, return it directly: %s', testsuite_json_file)
        return testsuite_json_file
    testsuites = get_xmind_testsuite_list(xmind_file)
    with open(testsuite_json_file, 'w', encoding='utf8') as f:
        f.write(json.dumps(testsuites, indent=4, separators=(',', ': ')))
    logging.info('Convert XMind file(%s) to a testsuite json file(%s) successfully!',
                 xmind_file, testsuite_json_file)
    return testsuite_json_file
def xmind_testcase_to_json_file(xmind_file):
    """Convert XMind file to a testcase json file"""
    xmind_file = get_absolute_path(xmind_file)
    logging.info('Start converting XMind file(%s) to testcases json file...', xmind_file)
    testcases = get_xmind_testcase_list(xmind_file)
    target_file = xmind_file[:-6] + '.json'
    if os.path.exists(target_file):
        # Keep the existing conversion result instead of overwriting it.
        logging.info('The testcase json file already exists, return it directly: %s', target_file)
        return target_file
    with open(target_file, 'w', encoding='utf8') as handle:
        handle.write(json.dumps(testcases, indent=4, separators=(',', ': ')))
    logging.info('Convert XMind file(%s) to a testcase json file(%s) successfully!', xmind_file, target_file)
    return target_file
def handle_upload(file, folder, user):
    """Save an uploaded file under *folder* with a unique, user-prefixed name.

    :param file: uploaded file object exposing ``name`` and ``chunks()``
    :param folder: destination directory
    :param user: username used as the filename prefix
    :return: the generated file name, ``user-YYYY-MM-DD(timestamp)-original``
    """
    current = time.strftime("%Y-%m-%d", time.localtime())
    timestamp = str(int(time.time()))
    format_name = user + '-' + current + '(' + timestamp + ')' + '-'
    # os.path.join keeps this working on non-Windows hosts (the original
    # hard-coded a backslash separator).
    with open(os.path.join(folder, format_name + file.name), 'wb+') as destination:
        for chunk in file.chunks():
            destination.write(chunk)
    return format_name + file.name
def case_to_db(testcases, user, xmind_file, xlsx_file, filename):
    """Persist parsed testcases as XmindCase rows.

    :param testcases: list of testcase dicts (from get_xmind_testcase_list)
    :param user: username; resolved to a UserInfo id for the author field
    :param xmind_file: stored path of the uploaded .xmind file
    :param xlsx_file: stored path of the exported .xlsx file
    :param filename: display name of the upload
    :return: None

    Table structure (translated): project / module / sub-module / title /
    steps+expected / author / file path / attributes = priority & case type.
    """
    user_id = UserInfo.objects.get(username=user).id
    for case in testcases:
        xmindcase = XmindCase()
        xmindcase.belong_project = case['product']
        xmindcase.suite = case['suite']
        xmindcase.belong_module = case['module']
        xmindcase.steps = case['steps']
        xmindcase.name = case['name']
        xmindcase.author = user_id
        # Priority and case type are stored together as a dict.
        xmindcase.attributes = {"importance": case['importance'], "execution_type": case['execution_type']}
        # Assign the FileField names directly — no file copy happens here.
        xmindcase.xmind_file.name = xmind_file
        xmindcase.xlsx_file.name = xlsx_file
        xmindcase.file_name = filename
        xmindcase.save()
def get_recent_records():
    """Return recent upload records, newest first.

    One record is built per distinct xmind_file value:
    ``{"name", "time", "xmind_file", "xlsx_file"}`` where the file names
    are reduced to their basename (split on '\\' — assumes paths were
    stored with Windows separators) and the time is the create_time of
    the first matching row, formatted '%Y-%m-%d %H:%M:%S'.
    """
    records = []
    files = XmindCase.objects.values('xmind_file').distinct()
    for file in files:
        # Take the create_time of the first row belonging to this upload.
        timequery = XmindCase.objects.filter(xmind_file=file['xmind_file']).values('create_time')[:1]
        filetime = list(timequery)[0]['create_time'].strftime("%Y-%m-%d %H:%M:%S")
        filequery = XmindCase.objects.filter(xmind_file=file['xmind_file']).values('xlsx_file', 'xmind_file').distinct()
        xmindfile = filequery[0]['xmind_file'].split('\\')[-1]
        xlsx = filequery[0]['xlsx_file'].split('\\')[-1]
        records.append({"name": xmindfile, "time": filetime, "xmind_file": xmindfile, "xlsx_file": xlsx})
    # Newest uploads first (relies on the queryset's insertion order).
    records.reverse()
    return records
def get_case_from_db(file):
    """Load the testcases stored for *file* and rebuild the parser-style dicts.

    The 'steps' and 'attributes' columns were written with Python
    repr-style single quotes, so they are normalised to JSON before
    parsing.  Rows whose stored JSON cannot be decoded are skipped (and
    logged) instead of aborting the whole export.

    :param file: value of the xmind_file column identifying the upload.
    :return: list of case dicts (module/name/execution_type/importance/
        steps/product/suite).
    """
    test_cases = []
    query = XmindCase.objects.filter(xmind_file=file).values('suite', 'belong_project', 'steps', 'belong_module',
                                                             'name', 'attributes')
    for case in query:
        suite = case['suite']
        product = case['belong_project']
        dbstep = case['steps']
        try:
            # Escape embedded double quotes, then turn the repr-style
            # single quotes into JSON double quotes.
            steps = json.loads(dbstep.replace(r'"', r'\"').replace('\'', '"'))
            module = case['belong_module']
            name = case['name']
            attr = case['attributes'].replace('\'', '"')
            attributes = json.loads(attr)
            importance = attributes['importance']
            execution_type = attributes['execution_type']
            casedict = {"module": module, "name": name, "execution_type": execution_type, "importance": importance,
                        "steps": steps, "product": product, "suite": suite}
            test_cases.append(casedict)
        except Exception:
            # Log (instead of the old debug print) and continue: one
            # corrupt row should not break the export.
            logging.exception('Failed to decode stored case %r', case.get('name'))
    return test_cases
def get_metadata(env, message_class):
    """
    Stream metadata records from the subscribe-change endpoint.

    :param env: dict with 'url' and 'token'.  A falsy value (None/{}/[])
        falls back to the built-in development environment — which is
        what the previous unconditional hard-coded override effectively
        did, except that callers could never supply their own server.
    :param message_class: 'channel' or 'api-key'.
    :return: for 'channel' a list of channel names; for 'api-key' a list
        of dicts: {'channel', 'key', 'uuid', 'deviceTypes'}.
    """
    # Fixed: `env` used to be overwritten unconditionally; keep the old
    # value only as a fallback so the parameter is no longer ignored.
    if not env:
        env = {"url": "https://10.101.12.4:17998", "token": "063f2acb8048a8af15074f0387aeda1b"}
    url = env['url'] + '/ciimc-fe-api/meta/subscribe-change'
    params = {"token": env['token'], "message_class": message_class}
    meta = []
    with requests.get(url=url, params=params, stream=True, verify=False) as response:
        for chunk in response.iter_lines(chunk_size=1):
            chunk = chunk.decode('utf-8')
            if not chunk:
                continue
            if chunk == 'change':
                # Sentinel line from the server: metadata dump complete.
                break
            data = json.loads(chunk)
            if message_class == 'channel':
                meta.append(data['record']['name'])
            elif message_class == 'api-key':
                record = data['record']
                meta.append({"channel": record['channel'], "key": record['key'],
                             "uuid": record['uuid'], "deviceTypes": record['deviceTypes']})
    return meta
def get_token(env, channel):
    """Return an auth token for *channel* from the v2 auth endpoint.

    :param env: dict with 'url'; a falsy value falls back to the built-in
        development endpoint (what the old hard-coded override did).
    :param channel: channel name whose api-key is looked up via get_metadata.
    :raises ValueError: if no api-key is registered for *channel* (the
        old code crashed with an unrelated NameError in that case).
    """
    # Fixed: `env` used to be overwritten unconditionally.
    if not env:
        env = {"url": "http://10.101.12.4:10099"}
    url = env['url'] + '/v2/auth'
    channels = get_metadata(env=[], message_class='api-key')
    key = None
    for chan in channels:
        if channel == chan['channel']:
            key = chan['key']
            break
    if key is None:
        raise ValueError('No api-key registered for channel %r' % channel)
    params = {"key": key}
    with requests.get(url=url, params=params) as response:
        token = response.json()['token']
    return token
| StarcoderdataPython |
1784441 | <reponame>a000b/zderzacz-BTC<gh_stars>0
## Program generuje dowolną ilość kluczy prywatnych oraz przekształca je w opcjonalnie w adresy Legacy bądź SegWit.
## Następnie odpytuje blockstream.info i oblicza saldo danego konta.
## To tylko zabawa, szansa na to że trafi się na tzw kolizję jest praktycznie zerowa, jak 1 do 2^256.
##
## Kod jest zlepkiem kilku rozwiązań.
##
## Linki do źródeł.
## (https://github.com/sipa/bech32/tree/master/ref/python)
## (https://github.com/zeltsi/segwit_tutorial/tree/master/addresses)
## (https://www.youtube.com/channel/UCi9Mf3veSDDIMdGGtPmPu1g)
## (https://www.reddit.com/r/Bitcoin/comments/7tzq3w/generate_your_own_private_key_5_lines_of_python/)
## (https://github.com/blockstream/esplora/blob/master/API.md)
##
## tested in python 3.6.8
## potrzebne dodatkowe moduły ecdsa, base58, requests
## Done by Atari_XE ( wypok AD 2019)
import random, ecdsa, hashlib, base58, binascii, requests, time
##--------------BECH32-------------------------------------------------
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
def bech32_polymod(values):
    """Internal function that computes the Bech32 checksum.

    Implements the BCH-code polynomial-mod step over GF(32): the running
    checksum is shifted 5 bits per input value and the tapped generator
    constants are XOR-ed in for every high bit that falls off the top.
    """
    GEN = (0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3)
    chk = 1
    for value in values:
        top = chk >> 25
        chk = ((chk & 0x1ffffff) << 5) ^ value
        for i, g in enumerate(GEN):
            if (top >> i) & 1:
                chk ^= g
    return chk
def bech32_hrp_expand(hrp):
    """Expand the HRP into values for checksum computation.

    Layout: the high 3 bits of every character, a 0 separator, then the
    low 5 bits of every character.
    """
    high = [ord(c) >> 5 for c in hrp]
    low = [ord(c) & 31 for c in hrp]
    return high + [0] + low
def bech32_verify_checksum(hrp, data):
    """Verify a checksum given HRP and converted data characters."""
    combined = bech32_hrp_expand(hrp) + data
    return bech32_polymod(combined) == 1
def bech32_create_checksum(hrp, data):
    """Compute the checksum values given HRP and data."""
    # Six zero placeholders stand in for the checksum being computed.
    values = bech32_hrp_expand(hrp) + data + [0, 0, 0, 0, 0, 0]
    polymod = bech32_polymod(values) ^ 1
    return [(polymod >> (5 * (5 - i))) & 31 for i in range(6)]
def bech32_encode(hrp, data):
    """Compute a Bech32 string given HRP and data values."""
    combined = data + bech32_create_checksum(hrp, data)
    encoded = ''.join(CHARSET[d] for d in combined)
    return hrp + '1' + encoded
def bech32_decode(bech):
    """Validate a Bech32 string, and determine HRP and data.

    Returns ``(hrp, data)`` on success, or ``(None, None)`` for any
    failure: non-printable characters, mixed case, missing '1'
    separator, over-long string, invalid data characters, or checksum
    mismatch.  The six checksum values are stripped from the result.
    """
    # Only printable ASCII, and case must be all-lower or all-upper.
    if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
            (bech.lower() != bech and bech.upper() != bech)):
        return (None, None)
    bech = bech.lower()
    # The HRP/data separator is the LAST '1' in the string.
    pos = bech.rfind('1')
    if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
        return (None, None)
    if not all(x in CHARSET for x in bech[pos+1:]):
        return (None, None)
    hrp = bech[:pos]
    data = [CHARSET.find(x) for x in bech[pos+1:]]
    if not bech32_verify_checksum(hrp, data):
        return (None, None)
    # Drop the trailing 6 checksum values.
    return (hrp, data[:-6])
def convertbits(data, frombits, tobits, pad=True):
    """General power-of-2 base conversion.

    Regroups the bit-stream of *data* (values of *frombits* bits each)
    into values of *tobits* bits.  With ``pad=True`` a final partial
    group is zero-padded; with ``pad=False`` any leftover bits must be
    pure zero padding, otherwise the input is rejected.  Returns the new
    value list, or None on invalid input.
    """
    acc = 0          # bit accumulator
    bits = 0         # number of valid bits currently in acc
    ret = []
    maxv = (1 << tobits) - 1
    # Cap the accumulator so it never grows beyond one extra group.
    max_acc = (1 << (frombits + tobits - 1)) - 1
    for value in data:
        if value < 0 or (value >> frombits):
            # Value out of range for the declared source width.
            return None
        acc = ((acc << frombits) | value) & max_acc
        bits += frombits
        while bits >= tobits:
            bits -= tobits
            ret.append((acc >> bits) & maxv)
    if pad:
        if bits:
            ret.append((acc << (tobits - bits)) & maxv)
    elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
        # Strict mode: leftover bits must be a short run of zeros.
        return None
    return ret
def decode(hrp, addr):
    """Decode a segwit address.

    Returns ``(witness_version, witness_program_bytes)`` or
    ``(None, None)`` when Bech32 or segwit-specific validation fails
    (wrong HRP, bad program length, version > 16, or a v0 program that
    is not 20/32 bytes).
    """
    hrpgot, data = bech32_decode(addr)
    if hrpgot != hrp:
        return (None, None)
    # data[0] is the witness version; the rest is the 5-bit program.
    decoded = convertbits(data[1:], 5, 8, False)
    if decoded is None or len(decoded) < 2 or len(decoded) > 40:
        return (None, None)
    if data[0] > 16:
        return (None, None)
    # Version-0 programs must be exactly 20 (P2WPKH) or 32 (P2WSH) bytes.
    if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:
        return (None, None)
    return (data[0], decoded)
def encode(hrp, witver, witprog):
    """Encode a segwit address."""
    address = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5))
    # Sanity check: the freshly produced address must round-trip.
    if decode(hrp, address) == (None, None):
        return None
    return address
##---------------------GENERATOR-SEGWIT----------------------------------------
def generator_segwit(a):
    """Generate *a* random keys, derive their P2SH-wrapped and native
    (bech32) SegWit addresses, and query blockstream.info for each
    balance.

    Prints one line per address; if a non-zero balance is ever found the
    private key / WIF are printed and the loop stops early.  Finally
    prints timing statistics.
    """
    start = time.time()
    number = a
    for n in range(number):
        d = privkey_generator()
        private_key = d["pkey"]
        WIF = d["Wk"]
        signing_key = d["sk"]
        verifying_key = d["vk"]
        # Split the uncompressed public point into x / y coordinates.
        x_cor = bytes.fromhex(verifying_key.to_string().hex())[:32]
        y_cor = bytes.fromhex(verifying_key.to_string().hex())[32:]
        # Compressed public key: 0x02 prefix for even y, 0x03 for odd.
        if int.from_bytes(y_cor, byteorder="big", signed=True) % 2 == 0:
            public_key = bytes.fromhex(f'02{x_cor.hex()}')
        else:
            public_key = bytes.fromhex(f'03{x_cor.hex()}')
        # HASH160 (SHA256 then RIPEMD160) of the compressed key.
        sha256_key = hashlib.sha256(public_key)
        ripemd160_key = ripemd160(sha256_key.digest())
        keyhash = ripemd160_key.digest()
        # P2SH-nested P2WPKH: hash the v0 witness script, wrap with 0x05.
        P2WPKH_V0 = bytes.fromhex(f'0014{keyhash.hex()}')
        sha256_P2WPKH_V0 = hashlib.sha256(P2WPKH_V0)
        ripemd160_P2WPKH_V0 = ripemd160(sha256_P2WPKH_V0.digest())
        scripthash = ripemd160_P2WPKH_V0.digest()
        P2SH_P2WPKH_V0 = bytes.fromhex(f'a9{scripthash.hex()}87')
        flagged_scripthash = bytes.fromhex(f'05{scripthash.hex()}')
        # Base58Check: double-SHA256 checksum appended before encoding.
        checksum = hashlib.sha256(hashlib.sha256(flagged_scripthash).digest()).digest()[:4]
        bin_addr = flagged_scripthash + checksum
        nested_address = base58.b58encode(bin_addr)
        # Native SegWit (bech32) address from the same key hash.
        bech32 = encode('bc', 0, keyhash)
        i = n + 1
        stradress = str(nested_address.decode())
        balance = sprawdz_balance_blockstream(stradress)
        if balance == 0:
            print("{:25} | {:35} | {:46} | {:20}".format("Bitcoin Address " + str(i), str(nested_address.decode()), str(bech32), str(balance) + " BTC"))
        else:
            # Jackpot: reveal the key material and stop scanning.
            print("{:25} | {:35} | {:46} | {:20}".format("Bitcoin Address " + str(i), str(nested_address.decode()), str(bech32), str(balance) + " BTC"))
            print("Private Key", str(i) + ": " + private_key.hex())
            print("Private Key WIF", str(i) + ": " + WIF.decode())
            break
    calculate_speed(start, time.time(), number)
##---------------------GENERATOR-LEGACY----------------------------------------
def generator_legacy(a):
    """Generate *a* random keys, derive their legacy (P2PKH, '1...')
    addresses, and query blockstream.info for each balance.

    Prints one line per address; if a non-zero balance is found the WIF
    key is printed and the loop stops early.  Finally prints timing
    statistics.
    """
    start = time.time()
    number = a
    for n in range(number):
        d = privkey_generator()
        private_key = d["pkey"]
        WIF = d["Wk"]
        signing_key = d["sk"]
        verifying_key = d["vk"]
        # Uncompressed public key (0x04 || x || y), then HASH160.
        publ_key = '04' + binascii.hexlify(verifying_key.to_string()).decode()
        hash160 = ripemd160(hashlib.sha256(binascii.unhexlify(publ_key)).digest()).digest()
        # 0x00 version byte + double-SHA256 checksum -> Base58Check.
        publ_addr_a = b"\x00" + hash160
        checksum = hashlib.sha256(hashlib.sha256(publ_addr_a).digest()).digest()[:4]
        publ_addr_b = base58.b58encode(publ_addr_a + checksum)
        i = n + 1
        stradress = str(publ_addr_b.decode())
        balance = sprawdz_balance_blockstream(stradress)
        if balance == 0:
            print("{:25} | {:35} | {:20}".format("Bitcoin Address " + str(i), publ_addr_b.decode(), str(balance) + " BTC"))
        else:
            # Jackpot: reveal the key material and stop scanning.
            print("{:25} | {:35} | {:20}".format("Bitcoin Address " + str(i), publ_addr_b.decode(), str(balance) + " BTC"))
            print('Private Key ', str(i) + ": " + WIF.decode())
            break
    calculate_speed(start, time.time(), number)
##---------------------POBIERANIE-DANYCH-ONLINE----------------------------------------
def sprawdz_balance_blockstream(a):
    """Return the confirmed BTC balance of address *a* via blockstream.info.

    Balance = funded - spent (satoshi), converted to BTC.

    :raises RuntimeError: on a non-200 API response.  (The old code only
        printed the status code and then crashed with a NameError on the
        unbound result variable.)
    """
    addr = a
    response = requests.get('https://blockstream.info/api/address/' + addr)
    if response.status_code != 200:
        raise RuntimeError('blockstream.info returned HTTP %s for %s'
                           % (response.status_code, addr))
    content = response.json()
    return (int(content['chain_stats']['funded_txo_sum'])
            - int(content['chain_stats']['spent_txo_sum'])) / 10**8
def check_price():
    """Return the last BTC/USD price from blockchain.info as '<price> USD'.

    :raises RuntimeError: on a non-200 response.  (The old code printed
        the status and then crashed with NameError on the unbound result.)
    """
    response = requests.get('https://blockchain.info/ticker')
    if response.status_code != 200:
        raise RuntimeError('blockchain.info returned HTTP %s' % response.status_code)
    content = response.json()
    return str(content['USD']['last']) + " USD"
##---------------------FUNKCJE-DODATKOWE----------------------------------------
def calculate_speed(tstart, tend, ilosc):
    """Print the elapsed time and the average key-generation speed.

    :param tstart: start timestamp (seconds, e.g. from time.time()).
    :param tend: end timestamp (seconds).
    :param ilosc: number of keys processed in that window.
    """
    elapsed = tend - tstart
    keys_per_second = ilosc / elapsed
    print("\nProcess time: ", str(elapsed), " sec")
    print("Calculated average speed: ", str(keys_per_second), " key/sec")
def privkey_generator():
    """Generate one random private key and derived objects.

    Returns a dict with: 'pkey' (raw 32 key bytes), 'Wk' (Base58Check
    WIF encoding), 'sk'/'vk' (ecdsa SECP256k1 signing/verifying keys).

    NOTE(review): uses ``random.getrandbits``, which is NOT
    cryptographically secure — acceptable for this lottery toy, never
    for real wallets (use the ``secrets`` module instead).
    """
    d ={}
    private_key = (random.getrandbits(256)).to_bytes(32, byteorder="little", signed=False)
    # WIF: 0x80 mainnet prefix + key, double-SHA256 checksum, Base58.
    fullkey = '80' + binascii.hexlify(private_key).decode()
    sha256a = hashlib.sha256(binascii.unhexlify(fullkey)).hexdigest()
    sha256b = hashlib.sha256(binascii.unhexlify(sha256a)).hexdigest()
    WIF = base58.b58encode(binascii.unhexlify(fullkey+sha256b[:8]))
    signing_key = ecdsa.SigningKey.from_string(private_key, curve=ecdsa.SECP256k1)
    verifying_key = signing_key.get_verifying_key()
    d["pkey"] = private_key
    d["Wk"] = WIF
    d["sk"] = signing_key
    d["vk"] = verifying_key
    return d
def ripemd160(x):
    """Return a RIPEMD-160 hash object updated with bytes *x*.

    NOTE(review): relies on ``hashlib.new('ripemd160')``, which is only
    available when the underlying OpenSSL build still ships the legacy
    ripemd160 algorithm — confirm on the deployment target.
    """
    d = hashlib.new('ripemd160')
    d.update(x)
    return d
##---------------------PROGRAM----------------------------------------
def main():
    """Interactive entry point: ask for the address type and key count,
    run the chosen generator, then print the current BTC price."""
    wybor = int(input("Jeżeli chcesz generować adresy Legacy wciśnij 1, jeżeli SegWit wciśnij 2 :"))
    if wybor == 1:
        ilosc = int(input("Podaj ilość kluczy:"))
        generator_legacy(ilosc)
    elif wybor == 2:
        ilosc = int(input("Podaj ilość kluczy:"))
        generator_segwit(ilosc)
    else:
        print("Nie ma takiej opcji")
    print("BTC last price /Blockchain.info/ : ", check_price())
    print("Koniec")


# Guard so importing this module (e.g. to reuse the bech32 helpers) no
# longer starts the interactive prompt as a side effect.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1747614 | <reponame>alexcfaber/katka-core<filename>katka/migrations/0005_scmservice.py
# Generated by Django 2.1.5 on 2019-02-14 08:04
import uuid
from django.db import migrations, models
import katka.fields
class Migration(migrations.Migration):
    """Auto-generated migration: create the SCMService table.

    SCMService rows describe a source-control-management service: audit
    columns (created/modified timestamps plus auto-captured usernames),
    an active/inactive status, a UUID primary key, a free-form service
    type and the server URL.
    """

    dependencies = [
        ("katka", "0004_credential_secret"),
    ]

    operations = [
        migrations.CreateModel(
            name="SCMService",
            fields=[
                ("created", models.DateTimeField(auto_now_add=True)),
                ("created_username", katka.fields.AutoUsernameField(max_length=50)),
                ("modified", models.DateTimeField(auto_now=True)),
                ("modified_username", katka.fields.AutoUsernameField(max_length=50)),
                (
                    "status",
                    models.CharField(
                        choices=[("active", "active"), ("inactive", "inactive")], default="active", max_length=50
                    ),
                ),
                (
                    "public_identifier",
                    models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
                ),
                ("type", models.CharField(max_length=48)),
                ("server_url", models.URLField()),
            ],
            options={"abstract": False,},
        ),
    ]
| StarcoderdataPython |
3347996 | from dj_rest_auth.registration.views import RegisterView
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, UpdateModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from .serializers import UserSerializer, CustomRegisterSerializer
User = get_user_model()
class UserViewSet(RetrieveModelMixin, ListModelMixin, UpdateModelMixin, GenericViewSet):
    """Read/update endpoints for users, looked up by username.

    Listing and retrieval expose all users; updates are restricted to
    the requesting user's own record (see ``update``).
    """
    serializer_class = UserSerializer
    queryset = User.objects.all()
    lookup_field = "username"

    def get_queryset(self, *args, **kwargs):
        # NOTE(review): `assert` is stripped under `python -O`, so this
        # authenticated-user guard silently disappears in optimized
        # runs — consider an explicit check or permission class.
        assert isinstance(self.request.user.id, int)
        return self.queryset

    def update(self, request, *args, **kwargs):
        # Narrow the queryset so a user can only update their own record.
        self.queryset = self.queryset.filter(id=self.request.user.id)
        return super(UserViewSet, self).update(request, *args, **kwargs)

    @action(detail=False)
    def me(self, request):
        """Return the serialized profile of the requesting user."""
        serializer = UserSerializer(request.user, context={"request": request})
        return Response(status=status.HTTP_200_OK, data=serializer.data)
class CustomRegisterView(RegisterView):
    """dj_rest_auth registration endpoint wired to the project's extended serializer."""
    serializer_class = CustomRegisterSerializer
| StarcoderdataPython |
1784200 | import math
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import backend as K
class PositionLayer(tf.keras.layers.Layer):
    """Sinusoidal positional-encoding layer (Transformer style).

    Produces a (batch, seq_len, embedding_size) tensor of interleaved
    sin/cos position encodings for the input's sequence length; the
    input values themselves are only used for their shape.
    """
    def __init__(self, embedding_size, **kwargs):
        self.embedding_size = embedding_size
        super(PositionLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        super(PositionLayer, self).build(input_shape)

    def call(self, x):
        # Positions 0..seq_len as a float column vector; position 0 is
        # dropped below (the [1:, :] slices) so encodings start at 1.
        mask = K.expand_dims(K.cast(K.arange(start=0, stop=K.shape(x)[1] + 1), 'float32'), axis=-1)
        # Per-dimension frequencies 10000^(-2i/d) for even indices 2i.
        bins = K.expand_dims(K.cast(K.arange(self.embedding_size // 2) * 2, 'float32'), axis=0)
        evens = K.dot(mask, 1.0 / K.pow(10000.0, bins / self.embedding_size))
        odds = tf.identity(evens)
        evens = K.sin(evens)[1:, :]
        odds = K.cos(odds)[1:, :]
        # Interleave sin (even dims) and cos (odd dims) via stack+reshape.
        pos = K.reshape(K.stack([evens, odds], axis=2), (-1, K.shape(x)[1], self.embedding_size))
        return pos

    def compute_output_shape(self, input_shape):
        return input_shape + (self.embedding_size,)
class MaskLayerLeft(tf.keras.layers.Layer):
    """Expand a (batch, length) 0/1 padding mask into an attention mask.

    Output is (batch, length, length): every query row sees the same
    key-padding pattern (entry [b, i, j] is the mask value at position j).
    """

    def __init__(self, **kwargs):
        super(MaskLayerLeft, self).__init__(**kwargs)

    def build(self, input_shape):
        super(MaskLayerLeft, self).build(input_shape)

    def call(self, x):
        length = K.shape(x)[1]
        # Outer product of the mask with a row of ones replicates it
        # along a new axis; the transpose puts keys on the last axis.
        rank = tf.ones(shape=(1, length), dtype='float32')
        y = K.expand_dims(x, axis=-1)
        mask = K.dot(y, rank)
        return tf.transpose(mask, (0, 2, 1))

    def compute_output_shape(self, input_shape):
        # Fixed: the original returned `input_shape + (input_shape[1])`,
        # i.e. tuple + int, which raises TypeError when Keras calls it.
        return input_shape + (input_shape[1],)
class MaskLayerRight(tf.keras.layers.Layer):
    """Cross-attention mask: replicate the *left* (source) padding mask
    for every position of the *right* (target) sequence.

    inputs: [right, left]; output shape (batch, len_right, len_left).
    """

    def __init__(self, **kwargs):
        super(MaskLayerRight, self).__init__(**kwargs)

    def build(self, input_shape):
        super(MaskLayerRight, self).build(input_shape)

    def call(self, x):
        right = x[0]
        left = x[1]
        length = K.shape(right)[1]
        # Outer product replicates the source mask across target rows.
        rank = tf.ones(shape=(1, length), dtype='float32')
        y = K.expand_dims(left, axis=-1)
        mask = K.dot(y, rank)
        return tf.transpose(mask, (0, 2, 1))

    def compute_output_shape(self, input_shape):
        # Fixed: the original returned `input_shape + (input_shape[1])`,
        # list + int, which raises TypeError.  With two inputs,
        # input_shape is [shape_right, shape_left]; call() produces
        # (batch, len_right, len_left).
        return input_shape[0] + (input_shape[1][1],)
class MaskLayerTriangular(tf.keras.layers.Layer):
    """Causal (lower-triangular) decoder mask combined with padding.

    Input: (batch, length) 0/1 padding mask.  Output: (batch, length,
    length) where entry [b, i, j] is 1 iff j <= i AND position j is not
    padding.
    """

    def __init__(self, **kwargs):
        super(MaskLayerTriangular, self).__init__(**kwargs)

    def build(self, input_shape):
        super(MaskLayerTriangular, self).build(input_shape)

    def call(self, x):
        t = tf.ones(shape=(K.shape(x)[0], K.shape(x)[1], K.shape(x)[1]))
        # Lower-triangular matrix of ones (the causal part).
        # Fixed: tf.matrix_band_part was removed in TF 2.x; the
        # tf.linalg.band_part alias exists in both 1.x and 2.x.
        tri = tf.linalg.band_part(t, -1, 0)
        # Replicate the padding mask across rows and combine.
        rank = tf.ones(shape=(1, K.shape(x)[1]), dtype='float32')
        y = K.expand_dims(x, axis=-1)
        mask = K.dot(y, rank)
        return tri * tf.transpose(mask, (0, 2, 1))

    def compute_output_shape(self, input_shape):
        return input_shape + (input_shape[1],)
class LayerNormalization(tf.keras.layers.Layer):
    """Layer normalization with learned scale (gamma) and shift (beta).

    Normalizes over the last axis: gamma * (x - mean) / (std + eps) + beta.
    """
    def __init__(self, eps=1e-6, **kwargs):
        # eps: numerical-stability floor added to the std denominator.
        self.eps = eps
        super(LayerNormalization, self).__init__(**kwargs)

    def build(self, input_shape):
        # One gamma/beta value per feature (last-axis) dimension.
        self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],
                                     initializer=tf.keras.initializers.Ones(), trainable=True)
        self.beta = self.add_weight(name='beta', shape=input_shape[-1:],
                                    initializer=tf.keras.initializers.Zeros(), trainable=True)
        super(LayerNormalization, self).build(input_shape)

    def call(self, x):
        mean = K.mean(x, axis=-1, keepdims=True)
        std = K.std(x, axis=-1, keepdims=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta

    def compute_output_shape(self, input_shape):
        return input_shape
class SelfLayer(tf.keras.layers.Layer):
    """Single scaled-dot-product attention head with learned Q/K/V maps.

    inputs: [queries, keys, values, mask]; mask is multiplied into the
    exponentiated scores before normalization, so masked positions get
    zero weight.
    """
    def __init__(self, embedding_size, key_size, **kwargs):
        self.embedding_size = embedding_size
        self.key_size = key_size
        # NOTE(review): scales by sqrt(embedding_size); the Transformer
        # paper scales by sqrt(key_size) — confirm this is intentional.
        self.denom = math.sqrt(embedding_size)
        super(SelfLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.K = self.add_weight(shape=(self.embedding_size, self.key_size),
                                 name="K", trainable=True,
                                 initializer='glorot_uniform')
        self.V = self.add_weight(shape=(self.embedding_size, self.key_size),
                                 name="V", trainable=True,
                                 initializer='glorot_uniform')
        self.Q = self.add_weight(shape=(self.embedding_size, self.key_size),
                                 name="Q", trainable=True,
                                 initializer='glorot_uniform')
        super(SelfLayer, self).build(input_shape)

    def call(self, inputs):
        # Project queries/keys/values into the key_size space.
        Q = tf.tensordot(inputs[0], self.Q, axes=[[2], [0]])
        K = tf.tensordot(inputs[1], self.K, axes=[[2], [0]])
        V = tf.tensordot(inputs[2], self.V, axes=[[2], [0]])
        A = tf.keras.backend.batch_dot(Q, tf.transpose(K, (0, 2, 1)))
        A = A / self.denom
        # Masked softmax done manually: exp, zero out masked entries,
        # then renormalize each row.
        A = tf.exp(A) * inputs[3]
        A = A / tf.reshape(tf.reduce_sum(A, axis=2), (-1, tf.shape(inputs[0])[1], 1))
        A = layers.Dropout(rate=0.1)(A)
        return tf.keras.backend.batch_dot(A, V)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1], self.key_size)
def GetPosEncodingMatrix(max_len, d_emb):
    """Return the (max_len, d_emb) sinusoidal position-encoding table.

    Row 0 (the padding position) is left untouched (all zeros); every
    other row p holds sin on even columns and cos on odd columns of
    p / 10000^(2*(j//2)/d_emb), as in "Attention Is All You Need".
    """
    positions = np.arange(max_len)[:, np.newaxis]
    dims = np.arange(d_emb)[np.newaxis, :]
    angle_rates = np.power(10000, 2 * (dims // 2) / d_emb)
    pos_enc = positions / angle_rates
    pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2])
    pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2])
    return pos_enc
| StarcoderdataPython |
1706448 | <filename>A/A 230 Dragons.py
# https://codeforces.com/problemset/problem/230/A
# Greedy: fight dragons in ascending strength order; if Kirito can beat
# every one he meets (strictly greater strength), the answer is YES.
s, n = map(int, input().split())
dragons = [tuple(map(int, input().split())) for _ in range(n)]
dragons.sort(key=lambda dragon: dragon[0])
for strength, bonus in dragons:
    if s <= strength:
        print('NO')
        break
    s += bonus
else:
    print('YES')
1662282 | <filename>fulltext/services/extractor/extractor.py
"""Integration with Docker to perform plain text extraction."""
import os
import shutil
from datetime import datetime
from typing import Tuple, Optional, Any
import docker
from docker import DockerClient
from docker.errors import ContainerError, APIError
from requests.exceptions import ConnectionError
from flask import current_app
from arxiv.base import logging
logger = logging.getLogger(__name__)
class NoContentError(RuntimeError):
    """No content was extracted from the PDF (missing or empty output file)."""
class Extractor:
    """
    Integrates with Docker to perform plain text extraction.

    This class groups together related methods for the sake of clarity. It is
    completely stateless, and should stay that way unless an explicit decision
    is made otherwise.
    """

    def is_available(self, **kwargs: Any) -> bool:
        """Make sure that we can connect to the Docker API."""
        try:
            self._new_client().info()
        except (APIError, ConnectionError) as e:
            logger.error('Error when connecting to Docker API: %s', e)
            return False
        return True

    def _new_client(self) -> DockerClient:
        """Make a new Docker client."""
        return DockerClient(current_app.config['DOCKER_HOST'])

    @property
    def image(self) -> Tuple[str, str, str]:
        """Get the name of the image used for extraction.

        Returns a ``(qualified, name, tag)`` triple, e.g.
        ``('repo/img:1.0', 'repo/img', '1.0')``.
        """
        image_name = current_app.config['EXTRACTOR_IMAGE']
        image_tag = current_app.config['EXTRACTOR_VERSION']
        return f'{image_name}:{image_tag}', image_name, image_tag

    def _pull_image(self, client: Optional[DockerClient] = None) -> None:
        """Tell the Docker API to pull our extraction image."""
        if client is None:
            client = self._new_client()
        _, name, tag = self.image
        client.images.pull(name, tag)

    def _cleanup(self, outpath: str) -> None:
        """Remove the extractor's intermediate (.pdf2txt) and final (.txt) files."""
        os.remove(outpath.replace('.txt', '.pdf2txt'))
        os.remove(outpath)  # Cleanup.

    def __call__(self, filename: str, cleanup: bool = False,
                 image: Optional[str] = None) -> str:
        """
        Extract fulltext from the PDF represented by ``filename``.

        Parameters
        ----------
        filename : str
            Path (inside the shared WORKDIR volume) of the PDF.
        cleanup : bool
            Unused; retained for interface compatibility.
        image : str or None
            Docker image to use; defaults to the configured extractor image.

        Returns
        -------
        str
            The extracted plain-text content.

        Raises
        ------
        RuntimeError
            If the extractor container fails.
        NoContentError
            If no output file is produced, or it is empty.
        """
        logger.info('Attempting text extraction for %s', filename)
        start_time = datetime.now()
        # This is the path in this container/env where PDFs are stored.
        workdir = current_app.config['WORKDIR']
        # This is the path on the Docker host that should be mapped into the
        # extractor container at /pdf. This is the same volume that should be
        # mounted at ``workdir`` in this container/env.
        mountdir = current_app.config['MOUNTDIR']
        # The result is something like:
        #
        #                       | <-- {workdir} (worker)
        #  [working volume] <---  |
        #                       | <-- {mountdir} (dind) <-- /pdfs (extractor)
        #
        if image is None:
            image, _, _ = self.image
        client = self._new_client()
        # Get the name of the file so that we know how to refer to it within
        # the container running the extractor.
        name = filename.split(workdir, 1)[1].strip('/')
        stub, _ = os.path.splitext(name)
        try:    # Pull and run the extractor image.
            self._pull_image(client)
            volumes = {mountdir: {'bind': '/pdfs', 'mode': 'rw'}}
            client.containers.run(image, f'/pdfs/{name}', volumes=volumes)
        except (ContainerError, APIError) as e:
            raise RuntimeError('Fulltext failed: %s' % filename) from e
        # Grab the extracted plain text content from a .txt file in the working
        # volume.
        outpath = os.path.join(workdir, '{}.txt'.format(stub))
        if not os.path.exists(outpath):
            raise NoContentError('%s not found, expected output' % outpath)
        with open(outpath, 'rb') as f:
            content = f.read().decode('utf-8')
        # Cleanup any left-over files.
        self._cleanup(outpath)
        # Fixed: the original computed (start_time - now).microseconds,
        # which is both negated and only the sub-second component.
        # Measure the real elapsed time in milliseconds instead.
        duration = (datetime.now() - start_time).total_seconds() * 1000
        logger.info('Finished extraction for %s in %s ms', filename, duration)
        if not content:
            # Fixed: the old message always said "(unknown)".
            raise NoContentError(f'No content extracted from {filename}')
        return content
# Module-level callable singleton; importers invoke ``do_extraction(filename)``.
do_extraction = Extractor()
| StarcoderdataPython |
import os


def follow_card(instructions, deck_size, card):
    """Return the final index of *card* after applying the shuffle.

    Instead of materializing and repeatedly copying the whole deck (the
    original approach), only the tracked card's position is updated —
    O(1) memory and O(#instructions) time.  The three shuffle moves map
    a position p as follows:
      * "deal into new stack":   p -> deck_size - 1 - p
      * "cut k" (k may be < 0):  p -> (p - k) mod deck_size
      * "deal with increment k": p -> (p * k) mod deck_size
    """
    pos = card  # in a fresh factory-order deck, card i starts at index i
    for line in instructions:
        if "stack" in line:
            pos = deck_size - 1 - pos
        elif "cut" in line:
            amount = int(line.split(" ").pop())
            pos = (pos - amount) % deck_size
        elif "increment" in line:
            amount = int(line.split(" ").pop())
            pos = (pos * amount) % deck_size
    return pos


if __name__ == "__main__":
    with open(os.path.join(os.path.dirname(__file__), "input.txt"), "r") as file:
        ins = [l.strip() for l in file.readlines()]
    print("Part 1: ", follow_card(ins, 10007, 2019))
| StarcoderdataPython |
58807 | # -*- coding: utf-8 -*-
import asyncio
from config import (CHECK_SERVER_INTERVAL, CHECK_SERVER_INTERVAL_MAX,
CRON_LOOP_INTERVAL)
from discord import Activity, ActivityType
from discord.errors import Forbidden, NotFound
from discord.ext import commands, tasks
from modules.db import Servers
from modules.logging import logger
from modules.utils import embed_generator, get_server_info, stop_server
class ServersCron(commands.Cog):
    """Background cog that periodically refreshes game-server status embeds.

    Every CRON_LOOP_INTERVAL seconds it walks all channels with active
    servers, queries each server, and edits the bot's status message in
    that channel.  Channels or messages the bot can no longer reach get
    their servers deactivated and the owner notified.
    """

    def __init__(self, bot: commands.Bot):
        self.bot = bot
        self.loop = asyncio.get_event_loop()
        # Start the discord.ext.tasks loop immediately on cog load.
        self.crontab.start()
        logger.info("Cron started")

    @tasks.loop(seconds=CRON_LOOP_INTERVAL, reconnect=True)
    async def crontab(self):
        """One cron tick: fan out per-channel refresh tasks, update presence."""
        await self.bot.wait_until_ready()
        channels = await Servers.filter(worked=True).group_by("channel").values_list("channel", flat=True)
        for channel_id in channels:
            # Channels are refreshed concurrently; servers within a
            # channel are refreshed sequentially (see for_channels).
            self.loop.create_task(self.for_channels(channel_id))
        servers_count = await Servers.filter(worked=True).count()
        await self.bot.change_presence(activity=Activity(type=ActivityType.watching,
                                                         name=f"Use !help | {servers_count} game servers"))

    async def for_channels(self, channel_id):
        """Refresh every active server registered in one channel."""
        servers_ids = await Servers.filter(channel=channel_id, worked=True).values_list("id", flat=True)
        channel = self.bot.get_channel(channel_id)
        if channel is None:
            # Channel vanished (bot kicked / channel deleted): deactivate.
            await Servers.filter(channel=channel_id).update(worked=False)
            return
        # Busy channels (>3 servers) get a longer per-server delay to
        # stay within rate limits.
        sleep = CHECK_SERVER_INTERVAL_MAX if len(servers_ids) > 3 else CHECK_SERVER_INTERVAL
        for id in servers_ids:
            await self.for_id(channel, id)
            await asyncio.sleep(sleep)

    async def for_id(self, channel, id):
        """Query one server and edit its status embed message."""
        instance = await Servers.filter(id=id).first()
        server_info, players = await get_server_info(instance.ip, instance.port)
        if server_info:
            await Servers.filter(id=id).update(name=server_info.server_name, game=server_info.game)
        try:
            msg = await channel.fetch_message(instance.message)
            embed = await embed_generator(server_info, players, instance)
            await msg.edit(embed=embed)
        except (NotFound, Forbidden) as e:
            # The status message is gone or un-editable: tell the owner
            # and stop tracking this server.
            user = await self.bot.fetch_user(instance.author)
            if isinstance(e, Forbidden):
                await user.send(f"I don't have permission to edit {instance.ip}:{instance.port} in #{channel.name}\n"
                                "Please give me permission to edit messages in this channel.\n"
                                "To resume the server, react with 🔄")
            elif isinstance(e, NotFound):
                await user.send(f"Server {instance.ip}:{instance.port} in #{channel.name} has been deleted")
            #await user.send(f"Server {instance.ip}:{instance.port} in channel <#{instance.channel}> is off if you not delete bot message, check bot permissions")
            await stop_server(instance.message)
def setup(bot: commands.Bot):
    """discord.py extension entry point: register the ServersCron cog."""
    bot.add_cog(ServersCron(bot))
| StarcoderdataPython |
1769239 | <filename>spikeextractors/extractors/mdaextractors/mdaextractors.py
from spikeextractors import RecordingExtractor
from spikeextractors import SortingExtractor
import json
import numpy as np
from pathlib import Path
from .mdaio import DiskReadMda, readmda, writemda32, writemda64
import os
class MdaRecordingExtractor(RecordingExtractor):
    """RecordingExtractor for MountainSort 'mda' dataset directories.

    A dataset directory is expected to contain:
      * raw.mda     -- the (channels x timepoints) timeseries
      * geom.csv    -- one row of electrode coordinates per channel
      * params.json -- at least a 'samplerate' entry
    """
    extractor_name = 'MdaRecordingExtractor'
    has_default_locations = True
    installed = True  # check at class level if installed or not
    is_writable = True
    mode = 'dir'
    _gui_params = [
        {'name': 'dir_path', 'type': 'dir', 'title': "Path to directory"},
    ]
    installation_mesg = ""  # error message when not installed

    def __init__(self, dir_path):
        dataset_directory = Path(dir_path)
        self._dataset_directory = dataset_directory
        timeseries0 = dataset_directory / 'raw.mda'
        self._dataset_params = read_dataset_params(str(dataset_directory))
        self._samplerate = self._dataset_params['samplerate'] * 1.0
        self._timeseries_path = os.path.abspath(timeseries0)
        geom0 = os.path.join(dataset_directory, 'geom.csv')
        self._geom_fname = geom0
        self._geom = np.genfromtxt(self._geom_fname, delimiter=',')
        X = DiskReadMda(self._timeseries_path)
        # geom.csv must have exactly one row per recorded channel.
        if self._geom.shape[0] != X.N1():
            raise Exception(
                'Incompatible dimensions between geom.csv and timeseries file {} <> {}'.format(self._geom.shape[0],
                                                                                               X.N1()))
        self._num_channels = X.N1()
        self._num_timepoints = X.N2()
        RecordingExtractor.__init__(self)
        for m in range(self._num_channels):
            self.set_channel_property(m, 'location', self._geom[m, :])

    def get_channel_ids(self):
        return list(range(self._num_channels))

    def get_num_frames(self):
        return self._num_timepoints

    def get_sampling_frequency(self):
        return self._samplerate

    def get_traces(self, channel_ids=None, start_frame=None, end_frame=None):
        """Read a (channels x frames) chunk; defaults to everything."""
        if start_frame is None:
            start_frame = 0
        if end_frame is None:
            end_frame = self.get_num_frames()
        if channel_ids is None:
            channel_ids = self.get_channel_ids()
        X = DiskReadMda(self._timeseries_path)
        # readChunk's i2/N2 are the start offset and length along time.
        recordings = X.readChunk(i1=0, i2=start_frame, N1=X.N1(), N2=end_frame - start_frame)
        recordings = recordings[channel_ids, :]
        return recordings

    @staticmethod
    def write_recording(recording, save_path, params=None):
        """Write *recording* out as an mda dataset directory at *save_path*.

        :param params: optional dict of extra params.json entries.
            Fixed: the default used to be ``params=dict()``, a mutable
            default that this method then mutated (writing 'samplerate'
            into the shared dict, leaking state across calls).
        """
        if params is None:
            params = dict()
        save_path = Path(save_path)
        if not save_path.exists():
            if not save_path.is_dir():
                os.makedirs(str(save_path))
        save_file_path = str(save_path / 'raw.mda')
        parent_dir = save_path
        channel_ids = recording.get_channel_ids()
        M = len(channel_ids)
        raw = recording.get_traces()
        # Geometry: one row per channel, dimensionality taken from the
        # first channel's 'location' property.
        location0 = recording.get_channel_property(channel_ids[0], 'location')
        nd = len(location0)
        geom = np.zeros((M, nd))
        for ii in range(len(channel_ids)):
            location_ii = recording.get_channel_property(channel_ids[ii], 'location')
            geom[ii, :] = list(location_ii)
        if not os.path.isdir(save_path):
            os.mkdir(save_path)
        writemda32(raw, save_file_path)
        params["samplerate"] = recording.get_sampling_frequency()
        with (parent_dir / 'params.json').open('w') as f:
            json.dump(params, f)
        np.savetxt(str(parent_dir / 'geom.csv'), geom, delimiter=',')
class MdaSortingExtractor(SortingExtractor):
    """SortingExtractor reading MountainSort 'firings.mda' files.

    The firings array has one column per detected event and three rows:
    [0] primary (max-amplitude) channel, [1] sample time, [2] unit label.
    """
    extractor_name = 'MdaSortingExtractor'
    installed = True  # check at class level if installed or not
    is_writable = True
    mode = 'file'
    installation_mesg = ""  # error message when not installed

    def __init__(self, file_path, sampling_frequency=None):
        SortingExtractor.__init__(self)
        self._firings_path = file_path
        self._firings = readmda(self._firings_path)
        self._max_channels = self._firings[0, :]
        self._times = self._firings[1, :]
        self._labels = self._firings[2, :]
        self._unit_ids = np.unique(self._labels).astype(int)
        self._sampling_frequency = sampling_frequency
        for unit_id in self._unit_ids:
            inds = np.where(self._labels == unit_id)
            max_channels = self._max_channels[inds].astype(int)
            # Use the unit's first event channel as its 'max_channel'.
            self.set_unit_property(unit_id, 'max_channel', max_channels[0])

    def get_unit_ids(self):
        return list(self._unit_ids)

    def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
        # Defaults: the whole recording ([0, inf)); end is exclusive.
        if start_frame is None:
            start_frame = 0
        if end_frame is None:
            end_frame = np.Inf
        inds = np.where((self._labels == unit_id) & (start_frame <= self._times) & (self._times < end_frame))
        return np.rint(self._times[inds]).astype(int)

    @staticmethod
    def write_sorting(sorting, save_path, write_primary_channels=False):
        """Write *sorting* as a firings.mda-style file at *save_path*.

        With ``write_primary_channels=True`` each unit must carry a
        'max_channel' property; otherwise row 0 is written as zeros.
        """
        unit_ids = sorting.get_unit_ids()
        times_list = []
        labels_list = []
        primary_channels_list = []
        for unit_id in unit_ids:
            times = sorting.get_unit_spike_train(unit_id=unit_id)
            times_list.append(times)
            labels_list.append(np.ones(times.shape) * unit_id)
            if write_primary_channels:
                if 'max_channel' in sorting.get_unit_property_names(unit_id):
                    primary_channels_list.append([sorting.get_unit_property(unit_id, 'max_channel')]*times.shape[0])
                else:
                    raise ValueError(
                        "Unable to write primary channels because 'max_channel' spike feature not set in unit " + str(
                            unit_id))
            else:
                primary_channels_list.append(np.zeros(times.shape))
        all_times = _concatenate(times_list)
        all_labels = _concatenate(labels_list)
        all_primary_channels = _concatenate(primary_channels_list)
        # Events must be globally time-sorted in the output file.
        sort_inds = np.argsort(all_times)
        all_times = all_times[sort_inds]
        all_labels = all_labels[sort_inds]
        all_primary_channels = all_primary_channels[sort_inds]
        L = len(all_times)
        firings = np.zeros((3, L))
        firings[0, :] = all_primary_channels
        firings[1, :] = all_times
        firings[2, :] = all_labels
        writemda64(firings, save_path)
def _concatenate(list):
if len(list) == 0:
return np.array([])
return np.concatenate(list)
def read_dataset_params(dsdir):
    """Load and return params.json from the dataset directory *dsdir*."""
    params_path = os.path.join(dsdir, 'params.json')
    if not os.path.exists(params_path):
        raise Exception('Dataset parameter file does not exist: ' + params_path)
    with open(params_path) as f:
        return json.load(f)
| StarcoderdataPython |
148360 | <filename>src/InstPyr/MyDevices/helpers.py
from dataclasses import dataclass,fields
@dataclass
class Sensor:
    """Placeholder base dataclass; concrete sensors subclass it and declare fields."""
    pass
129787 | import random
import time
import pygame
import ppb.events as events
import ppb.flags as flags
default_resolution = 800, 600
class System(events.EventMixin):
    """Base class for engine systems: an event-receiving no-op context manager."""
    def __init__(self, **_):
        # Accepts and ignores arbitrary config kwargs so all systems
        # share a uniform constructor signature.
        pass
    def __enter__(self):
        # Subclasses acquire their resources here.
        pass
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Subclasses release their resources here.
        pass
from ppb.systems.pg import EventPoller as PygameEventPoller # To not break old imports.
class Renderer(System):
    """Pygame-backed rendering system.

    Opens the window on __enter__, and on each Idle event accumulates time
    until the target frame rate interval has elapsed, then signals
    PreRender/Render and blits every renderable object in the scene.
    Scaled surfaces are cached with a two-generation scheme so entries used
    last frame are reused and stale ones are dropped.
    """
    def __init__(self, resolution=default_resolution, window_title: str="PursuedPyBear", target_frame_rate: int=30, **kwargs):
        self.resolution = resolution
        self.resources = {}
        self.window = None
        self.window_title = window_title
        self.pixel_ratio = None
        # Two-generation cache of resized surfaces (see resize_image).
        self.resized_images = {}
        self.old_resized_images = {}
        self.render_clock = 0
        self.target_frame_rate = target_frame_rate
        # Seconds that must accumulate before a new frame is rendered.
        self.target_count = 1 / self.target_frame_rate
    def __enter__(self):
        # Create the window lazily so construction has no pygame side effects.
        pygame.init()
        self.window = pygame.display.set_mode(self.resolution)
        pygame.display.set_caption(self.window_title)
    def __exit__(self, exc_type, exc_val, exc_tb):
        pygame.quit()
    def on_idle(self, idle_event: events.Idle, signal):
        """Throttle rendering: only emit PreRender/Render at the target rate."""
        self.render_clock += idle_event.time_delta
        if self.render_clock > self.target_count:
            signal(events.PreRender())
            signal(events.Render())
            self.render_clock = 0
    def on_render(self, render_event, signal):
        """Draw the background, then every renderable object, then flip."""
        self.render_background(render_event.scene)
        camera = render_event.scene.main_camera
        camera.viewport_width, camera.viewport_height = self.resolution
        self.pixel_ratio = camera.pixel_ratio
        # Rotate the resize cache generations for this frame.
        self.old_resized_images = self.resized_images
        self.resized_images = {}
        for game_object in render_event.scene:
            resource = self.prepare_resource(game_object)
            if resource is None:
                continue
            rectangle = self.prepare_rectangle(resource, game_object, camera)
            self.window.blit(resource, rectangle)
        pygame.display.update()
    def render_background(self, scene):
        self.window.fill(scene.background_color)
    def prepare_resource(self, game_object):
        """Return the scaled+rotated surface for an object, or None if hidden."""
        image_name = game_object.__image__()
        if image_name is flags.DoNotRender:
            return None
        image_name = str(image_name)
        if image_name not in self.resources:
            self.register_renderable(game_object)
        source_image = self.resources[image_name]
        resized_image = self.resize_image(source_image, game_object.size)
        rotated_image = self.rotate_image(resized_image, game_object.rotation)
        return rotated_image
    def prepare_rectangle(self, resource, game_object, camera):
        """Return a pygame rect centered at the object's viewport position."""
        rect = resource.get_rect()
        rect.center = camera.translate_to_viewport(game_object.position)
        return rect
    def register(self, resource_path, name=None):
        """Load an image into the resource cache, with a colored-square fallback."""
        try:
            resource = pygame.image.load(str(resource_path)).convert_alpha(self.window)
        except pygame.error:
            # Image didn't load, so either the name is bad or the file doesn't
            # exist. Instead, we'll render a square with a random color.
            resource = pygame.Surface((70, 70))
            # Seed from the path so the fallback color is stable per resource.
            random.seed(str(resource_path))
            r = random.randint(65, 255)
            g = random.randint(65, 255)
            b = random.randint(65, 255)
            resource.fill((r, g, b))
        name = name or resource_path
        self.resources[name] = resource
    def register_renderable(self, renderable):
        image_name = str(renderable.__image__())
        source_path = renderable.__resource_path__()
        self.register(source_path / image_name, image_name)
    def resize_image(self, image, game_unit_size):
        """Scale an image to the camera's pixel ratio, using the frame cache."""
        # TODO: Pygame specific code To be abstracted somehow.
        key = (image, game_unit_size)
        resized_image = self.old_resized_images.get(key)
        if resized_image is None:
            height = image.get_height()
            width = image.get_width()
            target_resolution = self.target_resolution(width,
                                                       height,
                                                       game_unit_size)
            resized_image = pygame.transform.smoothscale(image,
                                                         target_resolution)
        self.resized_images[key] = resized_image
        return resized_image
    def rotate_image(self, image, rotation):
        """Rotates image clockwise {rotation} degrees."""
        return pygame.transform.rotate(image, rotation)
    def target_resolution(self, width, height, game_unit_size):
        """Return (w, h) scaled so the shorter side spans game_unit_size units."""
        values = [width, height]
        # bool index: 0 selects width when width <= height, else 1 (height).
        short_side_index = width > height
        target = self.pixel_ratio * game_unit_size
        ratio = values[short_side_index] / target
        return tuple(round(value / ratio) for value in values)
class Updater(System):
    """Fixed-timestep update system.

    Accumulates real (monotonic) time between Idle events and emits one
    Update event per elapsed `time_step` interval, so simulation updates
    run at a fixed rate regardless of how often Idle fires.
    """
    def __init__(self, time_step=0.016, **kwargs):
        self.accumulated_time = 0
        self.last_tick = None
        self.start_time = None
        self.time_step = time_step
    def __enter__(self):
        self.start_time = time.monotonic()
    def on_idle(self, idle_event: events.Idle, signal):
        """Convert accumulated wall time into zero or more fixed Update events."""
        if self.last_tick is None:
            self.last_tick = time.monotonic()
        this_tick = time.monotonic()
        self.accumulated_time += this_tick - self.last_tick
        self.last_tick = this_tick
        while self.accumulated_time >= self.time_step:
            # This might need to change for the Idle event system to signal _only_ once per idle event.
            self.accumulated_time -= self.time_step
            signal(events.Update(self.time_step))
| StarcoderdataPython |
1684568 | <filename>tests/unit/test_api.py
from eth_wallet.api import(
WalletAPI,
)
from tests.conftest import (
prepare_conf,
)
from web3 import (
Web3,
)
from eth_utils import (
decode_hex,
)
def test_account(tmp_path):
    """Creating a wallet writes config + keystore and yields a valid address/key."""
    config = prepare_conf(tmp_path)
    WalletAPI.new_wallet(config, 'my-password')
    # one config.yaml and one keystore
    assert len(list(tmp_path.iterdir())) == 2
    address, pub_key = WalletAPI.get_wallet(config)
    raw_public_key = decode_hex(pub_key)
    # Uncompressed secp256k1 public key without the 0x04 prefix is 64 bytes.
    assert len(raw_public_key) == 64
    assert Web3.isAddress(address)
    assert Web3.isChecksumAddress(address)
| StarcoderdataPython |
3345793 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specif
# a stand alone script to read rows and columns in a given SSTable
import struct
from datetime import datetime
import uuid
debug = 0
class Buffer:
    """Sequential big-endian binary reader over an in-memory byte string.

    Python 2 code. Maintains a cursor (`offset`) into `buf`; each unpack_*
    method reads a value with `struct` and advances the cursor. `rebuffer`
    is a stub hook: running past the end raises NotImplementedError rather
    than refilling.
    """
    def __init__(self, buf):
        self.buf = buf
        self.offset = 0
        self.buflen = len(buf)
        if (debug):
            print "buflen: %d" % (self.buflen)
    def readbytes(self, count):
        """Ensure `count` bytes remain; otherwise delegate to rebuffer()."""
        if self.remaining() >= count:
            return
        if (debug):
            print "count: ",count
        self.rebuffer()
    def rebuffer(self):
        """Refill hook; not implemented for an in-memory buffer."""
        if (debug):
            print "offset: ", self.offset
        raise NotImplementedError("Not Implemented")
    def unpack_int(self):
        """Read a big-endian signed 32-bit int."""
        int_size = struct.calcsize('i')
        self.readbytes(int_size)
        value = struct.unpack('>i', self.buf[self.offset:self.offset+int_size])[0]
        self.offset += int_size
        return value
    def unpack_short(self):
        """Read a big-endian signed 16-bit int."""
        short_size = struct.calcsize('h')
        self.readbytes(short_size)
        value = struct.unpack('>h', self.buf[self.offset:self.offset+short_size])[0]
        self.offset += short_size
        return value
    def unpack_byte(self):
        """Read an unsigned byte."""
        byte_size = struct.calcsize('B')
        self.readbytes(byte_size)
        value = struct.unpack('>B', self.buf[self.offset:self.offset+byte_size])[0]
        self.offset += byte_size
        return value
    def unpack_signed_byte(self):
        """Read a signed byte."""
        byte_size = struct.calcsize('b')
        self.readbytes(byte_size)
        value = struct.unpack('>b', self.buf[self.offset:self.offset+byte_size])[0]
        self.offset += byte_size
        return value
    def unpack_utf_string(self):
        """Read a short length prefix followed by that many raw bytes."""
        length = self.unpack_short()
        if length == 0:
            return ""
        if (debug):
            print "length: %d" % (length)
        self.readbytes(length)
        format = '%ds' % length
        value = struct.unpack(format, self.buf[self.offset:self.offset+length])[0]
        self.offset += length
        return value
    def unpack_longlong(self):
        """Read a big-endian unsigned 64-bit int."""
        longlong_size = struct.calcsize('Q')
        self.readbytes(longlong_size)
        value = struct.unpack('>Q', self.buf[self.offset:self.offset+longlong_size])[0]
        self.offset += longlong_size
        return value
    def unpack_float(self):
        """Read a big-endian 32-bit float."""
        float_size = struct.calcsize('f')
        self.readbytes(float_size)
        value = struct.unpack('>f', self.buf[self.offset:self.offset+float_size])[0]
        self.offset += float_size
        return value
    def unpack_double(self):
        """Read a big-endian 64-bit float."""
        double_size = struct.calcsize('d')
        self.readbytes(double_size)
        value = struct.unpack('>d', self.buf[self.offset:self.offset+double_size])[0]
        self.offset += double_size
        return value
    def unpack_data(self):
        """Read an int length prefix then that many bytes; None if length <= 0."""
        length = self.unpack_int()
        if length > 0:
            self.readbytes(length)
            format = '%ds' % length
            value = struct.unpack(format, self.buf[self.offset:self.offset+length])[0]
            self.offset += length
            return value
        return None
    def unpack_bytes(self, length):
        """Read exactly `length` raw bytes; None if length <= 0."""
        if length > 0:
            self.readbytes(length)
            format = '%ds' % length
            value = struct.unpack(format, self.buf[self.offset:self.offset+length])[0]
            self.offset += length
            return value
        return None
    def unpack_date(self):
        """Read a length-prefixed millisecond timestamp; format with escaped colons."""
        length = self.unpack_short()
        if length == 0:
            return ""
        ts = self.unpack_longlong()
        date = datetime.fromtimestamp(ts/1000)
        s = date.strftime("%Y-%m-%d %H:%M")
        # Escape colons for the downstream consumer's delimiter rules.
        r = s.replace(":", '\\\\:')
        return r
    def unpack_uuid(self):
        """Read a length-prefixed UUID and return its string form."""
        length = self.unpack_short()
        if length == 0:
            return ""
        self.readbytes(length)
        format = '%ds' % length
        value = struct.unpack(format, self.buf[self.offset:self.offset+length])[0]
        self.offset += length
        x = uuid.UUID(bytes=value)
        return str(x)
    def unpack_boolean(self):
        """Read a length-prefixed boolean byte as "true"/"false"."""
        length = self.unpack_short()
        if length == 0:
            return ""
        self.readbytes(length)
        byte = self.unpack_byte()
        if byte == 0:
            return "false"
        return "true"
    def unpack_vint(self):
        """Decode a variable-length integer.

        The count of leading 1-bits in the first byte gives the number of
        extra bytes; the remaining low bits of the first byte plus the extra
        bytes form the value (big-endian).
        """
        byte = self.unpack_signed_byte()
        # MSB bit test
        if byte & 0x80 != 0x80:
            return byte
        # Get number of extra bytes to read
        mask = 0x80
        extrabytes = 0
        while byte & mask != 0:
            extrabytes = extrabytes + 1
            mask = mask >> 1
        # Build a mask covering the length-prefix bits of the first byte.
        mask = 0x80
        i = 0
        while i < extrabytes - 1:
            mask = mask >> 1
            mask = mask | 0x80
            i = i + 1
        mask = (~mask & 0xff)
        val = (byte & mask)
        i = 0
        while i < extrabytes:
            val = val << 8
            byte = self.unpack_signed_byte()
            val = val | (byte & 0xff)
            i = i + 1
        return val
    def unpack_vintlendata(self):
        """Read a vint length prefix then that many bytes; None if length <= 0."""
        length = self.unpack_vint()
        if length > 0:
            self.readbytes(length)
            format = '%ds' % length
            value = struct.unpack(format, self.buf[self.offset:self.offset+length])[0]
            self.offset += length
            return value
        return None
    def skip_data(self):
        """Skip an int-length-prefixed data field without decoding it."""
        length = self.unpack_int()
        if length > 0:
            self.offset += length
    def skip_bytes(self, length):
        """Advance the cursor by `length` bytes."""
        if length > 0:
            self.offset += length
    def get_remaining(self):
        """Return the unread tail of the buffer."""
        return self.buf[self.offset:]
    def remaining(self):
        """Return the number of unread bytes."""
        return self.buflen - self.offset
    def available(self):
        """Return True while unread bytes remain."""
        return (self.remaining() > 0)
    def seek(self, off):
        """Move the cursor to an absolute offset."""
        self.offset = off
| StarcoderdataPython |
3221286 | <gh_stars>0
import copy
import itertools

from .slct import some_keeping_order,some
from .index import uniform_index
def fcp(ol):
    """Return a shallow copy of the list via full-range slicing."""
    duplicate = ol[:]
    return(duplicate)
def max_length(ol):
    """Return the length of the longest element in ol."""
    longest = max(len(row) for row in ol)
    return(longest)
def entries(ol):
    """Return [[index, value], ...] pairs for every element of ol.

    Rewritten with enumerate instead of the range(len(...)) anti-pattern;
    output is unchanged (a list of two-element lists, not tuples).
    """
    return([[i, v] for i, v in enumerate(ol)])
def includes(ol,value):
    """Return True when value is an element of ol."""
    return value in ol
def to_str(ol):
    """Return the str() form of the list."""
    return str(ol)
def to_src(ol):
    """Return the repr() (source-like) form of the list."""
    return repr(ol)
def every(ol,test_func,*args,**kwargs):
    """Return True when test_func(element, *args) is truthy for all elements.

    Rewritten with the builtin all() instead of a manual loop; kwargs are
    accepted but unused, matching the original signature.
    """
    return(all(test_func(element, *args) for element in ol))
def any(ol,test_func,*args,**kwargs):
    """Return True when test_func(element, *args) is truthy for at least one
    element; False otherwise. kwargs accepted but unused.

    NOTE: this deliberately shadows the builtin any() at module level, so the
    body uses an explicit early-return loop instead of the builtin.
    """
    for element in ol:
        if test_func(element, *args):
            return(True)
    return(False)
def uniqualize(l,**kwargs):
    """Remove duplicate elements, keeping the first occurrence of each.

    mode='deepcopy' (default) returns a new list; any other mode mutates l
    in place and returns it. Elements must be hashable (used as dict keys).
    `some(pt, *seqs)` presumably selects the elements of pt at the given
    indices — defined in .slct; verify there.
    """
    if('mode' in kwargs):
        mode = kwargs['mode']
    else:
        mode = 'deepcopy'
    pt = copy.deepcopy(l)
    seqs =[]
    freq = {}
    # seqs collects the index of each value's first occurrence.
    for i in range(0,pt.__len__()):
        v = pt[i]
        if(v in freq):
            freq[v] = freq[v] + 1
        else:
            freq[v] = 0
            seqs.append(i)
    ##### The section below is the performance hot spot; append is especially slow
    #npt = some_keeping_order(pt,*seqs)
    npt = some(pt,*seqs)
    ########################
    pt = npt
    if(mode == 'deepcopy'):
        return(npt)
    else:
        # In-place mode: replace l's contents with the deduplicated result.
        l.clear()
        l.extend(npt)
        return(l)
def combinations(arr,*args):
    """Return all combinations of arr's elements for a range of sizes.

    With no args: sizes 1 .. len(arr). With one arg: from that size up to
    len(arr). With two args: from the first size up to (excluding) the second.
    Results are tuples, concatenated in increasing size order.

    Fix: itertools was used here without being imported anywhere in this
    module (NameError at runtime); the import is now added at module level.
    NOTE(review): uniform_index is called with lngth == len(args) (1 or 2)
    as the size argument — this looks like it should be len(arr); confirm
    against .index.uniform_index's contract before changing.
    """
    args = list(args)
    lngth = len(args)
    if(lngth == 0):
        start = 1
        end = len(arr) + 1
    elif(lngth == 1):
        start = uniform_index(args[0],lngth)
        end = len(arr) + 1
    else:
        start = uniform_index(args[0],lngth)
        end = uniform_index(args[1],lngth)
    rslt = []
    for i in range(start,end):
        tmp = list(itertools.combinations(arr,i))
        rslt.extend(tmp)
    return(rslt)
| StarcoderdataPython |
1673374 | <gh_stars>0
import os
import sys
from typing import List, Tuple, TypeVar
try:
import importlib.resources as pkg_resources
except:
import importlib_resources as pkg_resources
T = TypeVar("T")
def var_to_grid(array_var: List[T], size: Tuple[int, int]) -> List[List[T]]:
    """convert ownership/policy to grid format such that grid[y][x] is for move with coords x,y"""
    width, height = size
    grid: List[List[T]] = [[]] * height
    cursor = 0
    # Rows arrive top-to-bottom in the flat array, but grid index 0 is the
    # bottom row, so fill y from the top down.
    for y in reversed(range(height)):
        grid[y] = array_var[cursor:cursor + width]
        cursor += width
    return grid
def evaluation_class(points_lost: float, eval_thresholds: List[float]):
    """Return the index of the first (descending) threshold that points_lost
    does not fall below, capped at the last index."""
    idx = 0
    last = len(eval_thresholds) - 1
    while idx < last and points_lost < eval_thresholds[idx]:
        idx += 1
    return idx
def find_package_resource(path, silent_errors=False):
    """Resolve a resource path to an absolute filesystem path.

    Paths starting with "katrain" are looked up as package resources via
    importlib.resources; other paths are expanded (~) and made absolute.
    On a failed package lookup: returns None when silent_errors, otherwise
    prints a warning and returns a "FILENOTFOUND::<path>" marker string.
    """
    if path.startswith("katrain"):
        # Treat the path as <package>/.../<resource> inside the install.
        parts = path.replace("\\", "/").split("/")
        try:
            with pkg_resources.path(".".join(parts[:-1]), parts[-1]) as path_obj:
                return str(path_obj)  # this will clean up if egg etc, but these don't work anyway
        except (ModuleNotFoundError, FileNotFoundError, ValueError) as e:
            if silent_errors:
                return None
            print(f"File {path} not found, installation possibly broken", file=sys.stderr)
            return f"FILENOTFOUND::{path}"
    else:
        return os.path.abspath(os.path.expanduser(path))  # absolute path
| StarcoderdataPython |
1612626 | from django.contrib.gis.db import models
from users.models import CustomUser as User
class Occurrence(models.Model):
    """A user-reported, geolocated road occurrence (GeoDjango model).

    Each occurrence has a category, a free-text description, a moderation
    state (starts as TO_BE_VALIDATED), a point location, and an owning user.
    """
    # Category codes stored in the DB (left) with display labels (right).
    CON = 'CONSTRUCTION'
    SPE = 'SPECIAL_EVENT'
    INC = 'INCIDENT'
    WCD = 'WEATHER_CONDITION'
    RCD = 'ROAD_CONDITION'
    CATEGORY_CHOICES=[
        (CON, 'Construction'),
        (SPE, 'Special Event'),
        (INC, 'Incident'),
        (WCD, 'Weather Condition'),
        (RCD, 'Road Condition')
    ]
    # max_length=17 fits the longest code, 'WEATHER_CONDITION'.
    category = models.CharField(max_length=17, choices=CATEGORY_CHOICES)
    description = models.TextField(null=False, blank=False)
    # Moderation lifecycle: TO_BE_VALIDATED -> VALIDATED -> RESOLVED.
    TBV = 'TO_BE_VALIDATED'
    VAL = 'VALIDATED'
    RES = 'RESOLVED'
    STATE_CHOICES = [
        (TBV, 'To be validated'),
        (VAL, 'Validated'),
        (RES, 'Resolved')
    ]
    state = models.CharField(max_length=15, choices=STATE_CHOICES, default=TBV)
    location = models.PointField()
    # Deleting the user deletes their occurrences.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return '%s %s' % (self.category, self.location)
| StarcoderdataPython |
# Demo script: renders a sequence of noisy tween animations (lines, Bezier
# curves, a polar spiral) with gizeh and writes them out as an MP4 via moviepy.
if __name__ == "__main__":
    import gizeh
    import moviepy.editor as mpy
    from vectortween.PointAnimation import PointAnimation
    from vectortween.SequentialAnimation import SequentialAnimation
    from vectortween.BezierCurveAnimation import BezierCurveAnimation
    from vectortween.PolarAnimation import PolarAnimation
    import noise
    # Canvas size and clip timing.
    H = 250
    W = 500
    duration = 30
    fps = 25
    def my_noise(x, t):
        # Time-independent simplex noise (t ignored), small amplitude.
        return 2 * noise.snoise2(x, 0)
    def my_noise2(x, t):
        # Time-varying simplex noise, large amplitude.
        #print("x = {}, t = {}".format(x, t))
        return 30 * noise.snoise2(x, t)
    # Seven animations played back-to-back: plain/noisy point tweens,
    # noisy Bezier sweeps, and a noisy polar spiral.
    a1 = PointAnimation(frm=(0, 0), to=(500, 250), tween=["easeOutQuad"])
    a2 = PointAnimation(to=(0, 0), frm=(500, 250), tween=["easeOutQuad"], noise_fn=my_noise, y_noise_fn=my_noise)
    a3 = PointAnimation(frm=(0, 0), to=(500, 250), tween=["easeOutQuad"], noise_fn=my_noise, y_noise_fn=None)
    a4 = PointAnimation(to=(0, 0), frm=(500, 250), tween=["easeOutQuad"], noise_fn=None, y_noise_fn=my_noise)
    a5 = BezierCurveAnimation([[0, 0], [500, 250]], tween=["easeOutQuad"], noise_fn=my_noise2, y_noise_fn=my_noise2)
    a6 = BezierCurveAnimation([[500, 250], [0, 0]], tween=["easeOutQuad"], noise_fn=my_noise2, y_noise_fn=my_noise2)
    a7 = PolarAnimation(equation="6*theta*sin(6*theta)", offset=[250, 125], scale=[5, 5], noise_fn=my_noise2,
                        y_noise_fn=my_noise2)
    s = SequentialAnimation([a1, a2, a3, a4, a5, a6, a7])
    def make_frame(t):
        # Per-frame render callback for moviepy: draws a 1.5s trailing curve
        # (color shifting over the clip) and the current animation point.
        surface = gizeh.Surface(W, H)
        xy = s.make_frame(t, 0, 0, duration, duration, noiseframe=t)
        trail = s.curve_points(t - 1.5, t, 0.01, 0, 0, duration, duration, noiseframe=t)
        if trail and None not in trail:
            gizeh.polyline(trail, stroke=(t / duration, 1 - t / duration, t / duration), stroke_width=5,
                           fill=None).draw(surface)
        if xy is not None and None not in xy:
            gizeh.circle(r=5, xy=xy, stroke=(0, 1, 0), stroke_width=2, fill=None).draw(surface)
        return surface.get_npimage()
    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_videofile("example_noisy_lines.mp4", fps=fps, codec="libx264")
| StarcoderdataPython |
3375662 | <reponame>Livin21/LinuxDrop
import sys
from sender import send
from receiver import receive
def run():
    """CLI entry point: dispatch on sys.argv[1].

    -send <file>  starts the sender server for the given file.
    -receive      starts the receiver.
    Any other/missing arguments print the usage message.

    Fix: the usage string was duplicated in two branches; it is now defined
    once (output is byte-identical).
    """
    usage = "Usage:\n\n\t\tlidrop -send file\n\t\tlidrop -receive"
    try:
        option = sys.argv[1]
        if option == "-send":
            f_name = sys.argv[2]
            send.start_server(f_name)
        elif option == "-receive":
            receive.receive()
        else:
            print (usage)
    except IndexError:
        # No option (or missing file argument for -send) supplied.
        print (usage)
# Standard script entry point: dispatch to run() when executed directly.
if __name__ == "__main__":
    run()
| StarcoderdataPython |
3302076 | #!/usr/bin/env python
# Goal of this script is to take a vulners url and give you the short description of the CVE
# Ideally, this will be run against the vulners nmap output, which will append the description
# to each of the findings.
# Probably a lot of unnecessary code, but got incredibly frustrated debugging due to using a bad file...
# works now. I may revisit later.
import requests
import sys
import os
# Python 2 script body. One required argument: either a vulners.com URL
# (prints that page's meta description) or a file of nmap vulners output
# (appends each CVE's description in place, via a /tmp working copy).
if len(sys.argv) != 2:
    print "Usage: getVulnersDescriptions.py <FileOrURL>"
    sys.exit(0)
if not os.path.isfile(sys.argv[1]):
    # Not a file: treat the argument as a single URL to describe.
    if "http" in sys.argv[1]:
        userUrl = sys.argv[1]
        req = requests.get(userUrl)
        # Scrape the <meta name="description"> tag out of the raw HTML.
        description = req.text.split('<meta name="description" content=')[1].split('/>')[0]
        print "Description: %s" % description
else:
    # File mode: rewrite the nmap output with descriptions appended.
    userFile = sys.argv[1]
    userUrl = open(userFile,'r')
    copyFile = open("/tmp/VulnersDescription",'w')
    for line in userUrl:
        if "vulners.com" in line:
            workingLine = ""
            workingLine = line.rstrip()
            #this will parse output from nmap scripts
            #ie: | CVE-2010-4344 9.3 https://vulners.com/cve/CVE-2010-4344
            actualURL = workingLine.split("\t")[5].split(" ")[0]
            actualURL = actualURL.replace("\n","")
            req = requests.get(actualURL)
            if req.status_code != 200:
                # Skip lines whose vulners page can't be fetched.
                continue
            req = req.text.encode("ascii","ignore")
            description = req.split('<meta name="description" content=')[1].split('/>')[0]
            description = description.rstrip()
            replace = "%s: %s" % (actualURL,description)
            replace = replace.rstrip()
            copyFile.write(workingLine + ": " + description + "\n")
        else:
            # Strip the vulners page-title boilerplate from non-CVE lines.
            if "Vulners - Vulnerability Data Base" in line:
                line = line.replace(':"Vulners - Vulnerability Data Base" ',"")
                if "\n" not in line:
                    copyFile.write(line + "\n")
                    line = ""
                else:
                    copyFile.write(line)
                    line = ""
    userUrl.close()
    copyFile.close()
    # Replace the original file with the annotated copy.
    os.rename("/tmp/VulnersDescription",os.path.abspath(sys.argv[1]))
| StarcoderdataPython |
# Module-level prime list and sign/multiplicative-value table indexed by n.
# NOTE(review): primes() below returns a fresh local list and does NOT fill
# these globals; callers presumably do `P = primes()` and seed f elsewhere.
P = []
f = []
def primes():
    """Sieve of Eratosthenes: return all primes up to 1,000,000 (Python 2).

    NOTE(review): this rebinds a *local* P and returns it; the module-level
    P is untouched here.
    """
    P = []
    mark = [0]*1000001
    for i in xrange(2, 1000001):
        if mark[i]:
            continue
        P.append(i)
        # Mark all further multiples of i as composite.
        for j in xrange(i + i, 1000001, i):
            mark[j] = 1
    return P
def calc():
    """Propagate f multiplicatively to composites, then scan prefix sums.

    Assumes the globals P and f are already populated (f seeded at primes).
    Returns (prefix_sum, index) at the first index where the running sum of
    f[1..i] exceeds 20 in absolute value, or True if the whole range passes.
    """
    for p in P:
        # f at a multiple of p is derived from f at the cofactor and f[p].
        for i in xrange(p + p, 1000001, p):
            f[i] = f[i / p] * f[p]
    pre = 0
    for i in xrange(1, 1000001):
        pre += f[i]
        if abs(pre) > 20:
            print pre, i
            return (pre, i)
    print 'OK'
    return True
def search(x, d):
    """Return (and print) the largest prime p <= x with f[p] == d.

    Iterates P from the largest prime downward; returns None if no prime
    qualifies.
    """
    for p in reversed(P):
        if f[p] == d and p <= x:
            print p
            return p
def fuck():
    """One search-adjustment iteration.

    Runs calc(); when a prefix sum escapes the +/-20 bound, flips the f
    value of the largest qualifying prime at or before the violation index
    (to the sign opposing the overshoot) and returns True so the caller can
    loop. Returns False once calc() reports success.
    """
    dat = calc()
    if dat == True:
        print 'OK'
        return False
    else:
        print dat
        # Choose the correction sign opposing the overshoot direction.
        d = 0
        if dat[0] > 0:
            d = -1
        else:
            d = 1
        # Find the last prime (<= violation index) currently set to -d.
        p = search(dat[1], -d)
        f[p] = d
        print 'f[' + str(p) + '] = ' + str(d)
        return True
| StarcoderdataPython |
118085 | <reponame>GodQ/notest
import pycurl
import os
import sys
import copy
from io import BytesIO
from .http_auth_type import HttpAuthType
from .http_response import HttpResponse
# Location of the bundled CA certificate file (needed by libcurl on Windows).
base_dir = os.path.abspath(os.path.dirname(__file__))
libcurl_crt_file = os.path.join(base_dir, "..", "..", "tools", "curl-ca-bundle.crt")
DEFAULT_TIMEOUT = 10  # Seconds
# Map HTTP method names to curl methods
# Kind of obnoxious that it works this way...
HTTP_METHODS = {'GET': pycurl.HTTPGET,
                'PUT': pycurl.UPLOAD,
                'PATCH': pycurl.POSTFIELDS,
                'POST': pycurl.POST,
                'HEAD': "",
                'DELETE': 'DELETE'}
# Translate this project's auth-type enum to pycurl's auth constants.
HttpAuthType_Map = {
    HttpAuthType.HTTP_AUTH_BASIC: pycurl.HTTPAUTH_BASIC
}
class PyCurlClient:
    """Thin HTTP client around a pycurl.Curl handle.

    send_request() configures the handle from a test object (URL, method,
    body, headers, auth) and returns an HttpResponse. Handles may be reused
    across calls to keep libcurl's DNS cache and connection pool warm.

    Fix: the basic-auth password encode line contained a redaction
    placeholder ("<PASSWORD>") in the source; restored to
    ``auth_password.encode()`` (the value is used on the next line).
    """
    def __init__(self, handler=None):
        # Accept an existing Curl handle for reuse; otherwise create one.
        if not handler or not isinstance(handler, pycurl.Curl):
            self.handler = pycurl.Curl()
        else:
            self.handler = handler
        self.response = None
    def get_handler(self):
        """Return the underlying pycurl handle."""
        return self.handler
    @staticmethod
    def close_handler(handler):
        """Close an arbitrary pycurl handle, ignoring None."""
        if handler:
            handler.close()
    def close(self):
        """Close this client's own handle."""
        if self.handler:
            self.handler.close()
    def send_request(self, test_obj, timeout=DEFAULT_TIMEOUT, context=None,
                     handler=None, ssl_insecure=True, verbose=False):
        """ Create and mostly configure a curl object for test, reusing existing if possible """
        if handler:
            curl = handler
            try:  # Check the curl handle isn't closed, and reuse it if possible
                curl.getinfo(curl.HTTP_CODE)
                # Below clears the cookies & curl options for clean run
                # But retains the DNS cache and connection pool
                curl.reset()
                curl.setopt(curl.COOKIELIST, "ALL")
            except pycurl.error:
                curl = pycurl.Curl()
        else:
            curl = self.handler
        curl.setopt(curl.URL, str(test_obj.url))
        curl.setopt(curl.TIMEOUT, timeout)
        is_unicoded = False
        _body = test_obj.body
        if isinstance(_body, str):  # Encode unicode
            _body = _body.encode('UTF-8')
            is_unicoded = True
        # Set read function for post/put bodies
        if _body and len(_body) > 0:
            curl.setopt(curl.READFUNCTION, BytesIO(_body).read)
        if test_obj.auth_username and test_obj.auth_password:
            # Basic-auth credentials must be bytes before joining with b':'.
            auth_username = test_obj.auth_username
            auth_password = test_obj.auth_password
            if isinstance(auth_username, str):
                auth_username = auth_username.encode()
            if isinstance(auth_password, str):
                auth_password = auth_password.encode()
            curl.setopt(pycurl.USERPWD, auth_username + b':' + auth_password)
        if test_obj.auth_type:
            auth_type = HttpAuthType_Map[test_obj.auth_type]
            curl.setopt(pycurl.HTTPAUTH, auth_type)
        if test_obj.method == u'POST':
            curl.setopt(pycurl.POST, 1)
            # Required for some servers
            if _body is not None:
                curl.setopt(pycurl.POSTFIELDSIZE, len(_body))
            else:
                curl.setopt(pycurl.POSTFIELDSIZE, 0)
        elif test_obj.method == u'PUT':
            curl.setopt(pycurl.UPLOAD, 1)
            # Required for some servers
            if _body is not None:
                curl.setopt(pycurl.INFILESIZE, len(_body))
            else:
                curl.setopt(pycurl.INFILESIZE, 0)
        elif test_obj.method == u'PATCH':
            curl.setopt(curl.POSTFIELDS, _body)
            curl.setopt(curl.CUSTOMREQUEST, 'PATCH')
            # Required for some servers
            # I wonder: how compatible will this be?  It worked with Django but feels iffy.
            if _body is not None:
                curl.setopt(pycurl.INFILESIZE, len(_body))
            else:
                curl.setopt(pycurl.INFILESIZE, 0)
        elif test_obj.method == u'DELETE':
            curl.setopt(curl.CUSTOMREQUEST, 'DELETE')
            if _body is not None:
                curl.setopt(pycurl.POSTFIELDS, _body)
                curl.setopt(pycurl.POSTFIELDSIZE, len(_body))
        elif test_obj.method == u'HEAD':
            curl.setopt(curl.NOBODY, 1)
            curl.setopt(curl.CUSTOMREQUEST, 'HEAD')
        elif test_obj.method and test_obj.method.upper() != 'GET':  # Alternate HTTP methods
            curl.setopt(curl.CUSTOMREQUEST, test_obj.method.upper())
            if _body is not None:
                curl.setopt(pycurl.POSTFIELDS, _body)
                curl.setopt(pycurl.POSTFIELDSIZE, len(_body))
        # Template headers as needed and convert headers dictionary to list of header entries
        head = test_obj.get_headers(context=context)
        head = copy.copy(head)  # We're going to mutate it, need to copy
        # Set charset if doing unicode conversion and not set explicitly
        # TESTME
        if is_unicoded and u'content-type' in head.keys():
            content = head[u'content-type']
            if u'charset' not in content:
                head[u'content-type'] = content + u' ; charset=UTF-8'
        if head:
            headers = [str(headername) + ':' + str(headervalue)
                       for headername, headervalue in head.items()]
        else:
            headers = list()
        # Fix for expecting 100-continue from server, which not all servers
        # will send!
        headers.append("Expect:")
        headers.append("Connection: close")
        curl.setopt(curl.HTTPHEADER, headers)
        # reset the body, it holds values from previous runs otherwise
        headers = BytesIO()
        body = BytesIO()
        if sys.platform.find("win") > -1:
            # libcurl on Windows needs an explicit CA bundle for TLS.
            curl.setopt(pycurl.CAINFO, libcurl_crt_file)
        curl.setopt(pycurl.WRITEFUNCTION, body.write)
        curl.setopt(pycurl.HEADERFUNCTION, headers.write)
        if verbose:
            curl.setopt(pycurl.VERBOSE, True)
        if ssl_insecure is True:
            curl.setopt(pycurl.SSL_VERIFYPEER, 0)
            curl.setopt(pycurl.SSL_VERIFYHOST, 0)
        curl.perform()  # Run the actual call
        response_body = body.getvalue()
        body.close()
        response_headers = headers.getvalue()
        headers.close()
        response_code = curl.getinfo(pycurl.RESPONSE_CODE)
        response = HttpResponse(
            body=response_body,
            headers=response_headers,
            status_code=response_code
        )
        self.response = response
        return response
| StarcoderdataPython |
3301969 | <filename>samples/tutorial-2-wg.py
from pyalgotrade import strategy
from pyalgotrade.barfeed import quandlfeed
from pyalgotrade.technical import ma
def safe_round(value, digits):
    """Round value to `digits` decimal places, passing None through unchanged."""
    return None if value is None else round(value, digits)
class MyStrategy(strategy.BacktestingStrategy):
    """Tutorial backtest strategy: prints close price and 15/50/200-bar SMAs.

    Places no orders; onBars just logs one CSV-like line per bar, with SMA
    values None-safely rounded (None until enough bars have accumulated).
    """
    def __init__(self, feed, instrument):
        super(MyStrategy, self).__init__(feed)
        # We want a 15 period SMA over the closing prices.
        self.__sma15 = ma.SMA(feed[instrument].getCloseDataSeries(), 15)
        self.__sma50 = ma.SMA(feed[instrument].getCloseDataSeries(), 50)
        self.__sma200 = ma.SMA(feed[instrument].getCloseDataSeries(), 200)
        self.__instrument = instrument
    def onBars(self, bars):
        """Per-bar callback: print datetime, close, and the three SMAs."""
        bar = bars[self.__instrument]
        # self.info("%s, %s, %s, %s" % (bar.getClose()
        print("%s, %s, %s, %s, %s" % (
            bar.getDateTime()
            , bar.getClose()
            , safe_round(self.__sma15[-1], 2)
            , safe_round(self.__sma50[-1], 2)
            , safe_round(self.__sma200[-1], 2))
        )
# Load the bar feed from the CSV file
feed = quandlfeed.Feed()
# feed.addBarsFromCSV("orcl", "WIKI-ORCL-2000-quandl.csv")
feed.addBarsFromCSV("orcl", "ORCL.csv")  # expects ORCL.csv next to this script
# Evaluate the strategy with the feed's bars.
myStrategy = MyStrategy(feed, "orcl")
myStrategy.run()
| StarcoderdataPython |
1697791 | <reponame>RealTimeWeb/wikisite
# -*- coding: utf-8 -*-
"""
MoinMoin - MoinMoin.caching Tests
@copyright: 2007 by MoinMoin:ThomasWaldmann
@license: GNU GPL, see COPYING for details.
"""
import py
import time
from MoinMoin import caching
from MoinMoin.PageEditor import PageEditor
class TestCaching(object):
    """ Tests the caching module """
    # self.request is injected by the MoinMoin test framework before each test.
    def test_persistence_simple(self):
        """ test if cache persists (on disk) """
        test_data = '12345abcde'
        cache = caching.CacheEntry(self.request, 'test_arena', 'test_key', 'wiki')
        cache.update(test_data)
        # Recreate the entry to prove the data survived on disk.
        del cache
        cache = caching.CacheEntry(self.request, 'test_arena', 'test_key', 'wiki')
        assert test_data == cache.content()
    def test_persistence_pickle(self):
        """ test if cache persists (on disk), use pickle """
        test_data = {1: 2, 2: 3, 3: [4, 5, ], }
        cache = caching.CacheEntry(self.request, 'test_arena', 'test_key', 'wiki', use_pickle=True)
        cache.update(test_data)
        del cache
        cache = caching.CacheEntry(self.request, 'test_arena', 'test_key', 'wiki', use_pickle=True)
        assert test_data == cache.content()
    def test_persistence_encode(self):
        """ test if cache persists (on disk), use encoded string """
        test_data = u"üöäÜÖÄß"
        cache = caching.CacheEntry(self.request, 'test_arena', 'test_key', 'wiki', use_encode=True)
        cache.update(test_data)
        del cache
        cache = caching.CacheEntry(self.request, 'test_arena', 'test_key', 'wiki', use_encode=True)
        cache_data = cache.content()
        # Round trip must preserve both the unicode type and the value.
        assert type(cache_data) == type(test_data)
        assert cache_data == test_data
    def test_mtime(self):
        """ test if cache mtime yields correct values """
        test_data = '12345abcde'
        now = time.time()
        cache = caching.CacheEntry(self.request, 'test_arena', 'test_key', 'wiki')
        cache.update(test_data)
        # Allow 2s slack for filesystem timestamp granularity.
        assert now - 2 <= cache.mtime() <= now + 2
    def test_remove(self):
        """ test if cache file removal works """
        cache = caching.CacheEntry(self.request, 'test_arena', 'test_key', 'wiki')
        assert cache.exists()
        cache.remove()
        assert not cache.exists()
    def test_update_needed(self):
        """ test update check) """
        test_data1 = u'does not matter'
        test_data2 = u'something else'
        page_name = u'Caching_TestPage'
        page = PageEditor(self.request, page_name)
        page._write_file(test_data1)
        cache = caching.CacheEntry(self.request, page, 'test_key', 'item')
        cache.update(test_data1)
        assert not cache.needsUpdate(page._text_filename())
        time.sleep(3) # XXX fails without, due to mtime granularity
        page = PageEditor(self.request, page_name)
        page._write_file(test_data2)
        assert cache.needsUpdate(page._text_filename())
    def test_filelike_readwrite(self):
        """ test the file-like open/write/read API of a cache entry """
        request = self.request
        key = 'nooneknowsit'
        arena = 'somethingfunny'
        data = "dontcare"
        cacheentry = caching.CacheEntry(request, arena, key, scope='wiki', do_locking=True,
                                        use_pickle=False, use_encode=True)
        cacheentry.open(mode='w')
        cacheentry.write(data)
        cacheentry.close()
        assert cacheentry.exists()
        cacheentry.open(mode='r')
        rdata = cacheentry.read()
        cacheentry.close()
        assert data == rdata
# Modules measured for coverage when this test module is run.
coverage_modules = ['MoinMoin.caching']
| StarcoderdataPython |
1764967 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import re
from datetime import datetime
from datetime import timedelta
from babel import Locale
from babel.core import UnknownLocaleError
from googleapiclient import discovery
from googleapiclient.http import build_http
from googleapiclient.errors import HttpError
from oauth2client import client, file, tools
# Local files: GMB API discovery doc, OAuth client secrets, cached user
# credentials, BigQuery table schemas, and the sentiment-run timestamp marker.
GMB_DISCOVERY_FILE = "gmb_discovery.json"
CLIENT_SECRETS_FILE = "client_secrets.json"
CREDENTIALS_STORAGE = "credentials.dat"
SCHEMAS_FILE = "schemas.json"
SENTIMENTS_LASTRUN_FILE = "sentiments_lastrun"
# OAuth scopes: Google My Business, BigQuery, and the Natural Language API.
SCOPES = [
    "https://www.googleapis.com/auth/business.manage",
    "https://www.googleapis.com/auth/bigquery",
    "https://www.googleapis.com/auth/cloud-language"
]
DATASET_ID = "alligator"
MAX_RETRIES = 10
MIN_TOKENS = 20
# Lookback windows for insights/call data pulls.
INSIGHTS_DAYS_BACK = 540
CALLS_DAYS_BACK = 7
DIRECTIONS_NUM_DAYS = "SEVEN"
# BigQuery paging / insert batching limits.
BQ_JOBS_QUERY_MAXRESULTS_PER_PAGE = 1000
BQ_TABLEDATA_INSERTALL_BATCHSIZE = 5000
# Silence the noisy discovery-cache warning from googleapiclient.
logging.getLogger("googleapiclient.discovery_cache").setLevel(logging.CRITICAL)
class API(object):
    def __init__(self, project_id, language):
        """Run the OAuth installed-app flow (reusing cached credentials when
        valid) and build the GMB, BigQuery and Natural Language services.

        Args:
            project_id: GCP project used for all BigQuery operations.
            language: language hint, stored for later NLP calls.
        """
        client_secrets = os.path.join(
            os.path.dirname(__file__), CLIENT_SECRETS_FILE)
        flow = client.flow_from_clientsecrets(
            client_secrets,
            SCOPES,
            message=tools.message_if_missing(client_secrets))
        storage = file.Storage(CREDENTIALS_STORAGE)
        credentials = storage.get()
        if credentials is None or credentials.invalid:
            # No (valid) cached credentials: run the console OAuth flow.
            flags = argparse.Namespace(
                noauth_local_webserver=True,
                logging_level=logging.getLevelName(
                    logging.getLogger().getEffectiveLevel()))
            credentials = tools.run_flow(flow, storage, flags=flags)
        http = credentials.authorize(http=build_http())
        # The GMB API is built from a bundled discovery document.
        with open(GMB_DISCOVERY_FILE) as gmb_discovery_file:
            self.gmb_service = discovery.build_from_document(
                gmb_discovery_file.read(),
                base="https://www.googleapis.com/",
                http=http)
        self.PROJECT_ID = project_id
        # Lazy caches used by ensure_dataset_exists / ensure_table_exists.
        self.dataset_exists = False
        self.existing_tables = {}
        self.language = language
        with open(SCHEMAS_FILE) as schemas_file:
            self.schemas = json.load(schemas_file)
        self.bq_service = discovery.build("bigquery", "v2", http=http)
        self.nlp_service = discovery.build("language", "v1", http=http)
def accounts(self):
data = []
page_token = None
while True:
response_json = self.gmb_service.accounts().list(
pageToken=page_token).execute(num_retries=MAX_RETRIES)
data = data + (response_json.get("accounts") or [])
page_token = response_json.get("nextPageToken")
if not page_token:
break
logging.info(json.dumps(data, indent=2))
self.to_bigquery(table_name="accounts", data=data)
return data
def locations(self, account_id, location_id=None):
data = []
page_token = None
if not location_id:
while True:
response_json = self.gmb_service.accounts().locations().list(
parent=account_id,
pageToken=page_token).execute(num_retries=MAX_RETRIES)
data = data + (response_json.get("locations") or [])
page_token = response_json.get("nextPageToken")
if not page_token:
break
else:
response_json = self.gmb_service.accounts().locations().get(
name=location_id).execute(num_retries=MAX_RETRIES)
data = data + (response_json.get("locations") or [])
logging.info(json.dumps(data, indent=2))
self.to_bigquery(table_name="locations", data=data)
return data
def reviews(self, location_id):
page_token = None
while True:
response_json = self.gmb_service.accounts().locations().reviews().list(
parent=location_id,
pageToken=page_token).execute(num_retries=MAX_RETRIES)
data = response_json.get("reviews") or []
logging.info(json.dumps(data, indent=2))
self.to_bigquery(table_name="reviews", data=data)
page_token = response_json.get("nextPageToken")
if not page_token:
break
  def sentiments(self):
    """Run sentiment analysis on reviews added since the last run.

    Selects unprocessed review comments from the BigQuery `reviews` table
    (partitions newer than the last-run marker, plus rows still in the
    streaming buffer with NULL partition time), annotates them via
    process_sentiments(), pages through the query job results, and finally
    bumps the last-run marker file.
    """
    page_token = None
    lastrun = self.get_sentiments_lastrun()
    self.ensure_dataset_exists()
    self.ensure_table_exists(table_name="reviews")
    # Legacy-SQL syntax ([project:dataset.table]) with a partition-time
    # filter against the date of the previous run.
    query = {
        "query": """
            SELECT
              comment,
              name,
              reviewId
            FROM
              [{projectId}:{datasetId}.reviews]
            WHERE
              comment IS NOT NULL
              AND (
                DATE(_PARTITIONTIME) > "{lastrun}"
                OR
                _PARTITIONTIME IS NULL)
          """.format(
              projectId=self.PROJECT_ID,
              datasetId=DATASET_ID,
              lastrun=lastrun),
        "maxResults": BQ_JOBS_QUERY_MAXRESULTS_PER_PAGE
    }
    response_json = self.bq_service.jobs().query(
        projectId=self.PROJECT_ID,
        body=query).execute(num_retries=MAX_RETRIES)
    rows = response_json.get("rows") or []
    self.process_sentiments(rows)
    page_token = response_json.get("pageToken")
    # If the query job has more pages, drain them via getQueryResults.
    if page_token:
      job_id = response_json.get("jobReference").get("jobId")
      while True:
        response_json_job = self.bq_service.jobs().getQueryResults(
            projectId=self.PROJECT_ID,
            jobId=job_id,
            maxResults=BQ_JOBS_QUERY_MAXRESULTS_PER_PAGE,
            pageToken=page_token).execute(num_retries=MAX_RETRIES)
        rows_job = response_json_job.get("rows") or []
        self.process_sentiments(rows_job)
        page_token = response_json_job.get("pageToken")
        if not page_token:
          break
    self.set_sentiments_lastrun()
def get_sentiments_lastrun(self):
lastrun_file_path = os.path.join(
os.path.dirname(__file__), SENTIMENTS_LASTRUN_FILE)
lastrun = datetime(year=1970, month=1, day=1).date()
try:
lastrun = datetime.fromtimestamp(os.path.getmtime(lastrun_file_path)).date()
except OSError:
logging.info("No previous run for sentiment analysis found. " +
"Performing sentiment analysis on all available reviews.")
return lastrun
def process_sentiments(self, rows):
sentiments = []
for row in rows:
sentiment = {}
comment = row.get("f")[0].get("v")
sentiment["comment"] = comment
sentiment["name"] = row.get("f")[1].get("v")
sentiment["reviewId"] = row.get("f")[2].get("v")
sentiment["annotation"] = self.annotate_text(comment)
sentiments.append(sentiment)
logging.info(json.dumps(sentiments, indent=2))
self.to_bigquery(table_name="sentiments", data=sentiments)
def set_sentiments_lastrun(self):
lastrun_file_path = os.path.join(
os.path.dirname(__file__), SENTIMENTS_LASTRUN_FILE)
current_time = datetime.now().timestamp()
if os.path.isfile(lastrun_file_path):
os.utime(lastrun_file_path, (current_time, current_time))
else:
os.open(lastrun_file_path, os.O_CREAT)
def annotate_text(self, content):
if not content:
return
body = {
"document": {
"type": "PLAIN_TEXT",
"content": content
},
"features": {
"extractSyntax": True,
"extractEntities": True,
"extractDocumentSentiment": True,
"extractEntitySentiment": True,
"classifyText": len(content.split()) > MIN_TOKENS
},
"encodingType": "UTF8"
}
if self.language:
body['document']['language'] = self.language
try:
return self.nlp_service.documents().annotateText(body=body).execute(
num_retries=MAX_RETRIES)
except HttpError as err:
raise
  def insights(self, location_id):
    """Fetch INSIGHTS_DAYS_BACK days of daily-aggregated metrics for a
    location and mirror them to the BigQuery `insights` table.

    The window ends 5 days before today — presumably because the most
    recent insight data is not yet final; TODO confirm.
    Returns the list of locationMetrics lines (empty when the API
    reported none).
    """
    end_time = (datetime.now() - timedelta(days=5)).replace(
        hour=0, minute=0, second=0, microsecond=0)
    start_time = end_time - timedelta(days=INSIGHTS_DAYS_BACK)
    query = {
        "locationNames": [location_id],
        "basicRequest": {
            "metricRequests": {
                "metric": "ALL",
                "options": ["AGGREGATED_DAILY"]
            },
            "timeRange": {
                "startTime": start_time.strftime("%Y-%m-%dT%H:%M:%SZ"),
                "endTime": end_time.strftime("%Y-%m-%dT%H:%M:%SZ")
            }
        },
    }
    data = []
    # reportInsights is addressed by account, so extract the
    # "accounts/<id>" prefix from the full location name.
    account_id = re.search("(accounts/[0-9]+)/locations/[0-9]+", location_id,
                           re.IGNORECASE).group(1)
    response_json = self.gmb_service.accounts().locations().reportInsights(
        name=account_id, body=query).execute(num_retries=MAX_RETRIES)
    if "locationMetrics" in response_json:
      for line in response_json.get("locationMetrics"):
        # "name" doubles as the BigQuery insertId (see to_bigquery).
        line["name"] = line.get("locationName")
        data.append(line)
      logging.info(json.dumps(data, indent=2))
      self.to_bigquery(table_name="insights", data=data)
    else:
      logging.warning("No insights reported for %s", location_id)
    return data
  def directions(self, location_id):
    """Fetch DIRECTIONS_NUM_DAYS days of driving-direction insights for a
    location and mirror them to the BigQuery `directions` table.

    Returns the list of locationDrivingDirectionMetrics lines (empty when
    the API reported none).
    """
    query = {
        "locationNames": [location_id],
        "drivingDirectionsRequest": {
            "numDays": DIRECTIONS_NUM_DAYS
        }
    }
    if self.language:
      lang = "en_US"
      try:
        lang = Locale.parse(f'und_{self.language}')
      except UnknownLocaleError:
        logging.warning("Error parsing language code, falling back to en_US.")
      # NOTE(review): on success `lang` is a Locale object, on failure a
      # plain string — the request body gets either; confirm the API
      # accepts both representations.
      query['drivingDirectionsRequest']['languageCode'] = lang
    data = []
    # reportInsights is addressed by account; extract "accounts/<id>".
    account_id = re.search("(accounts/[0-9]+)/locations/[0-9]+", location_id,
                           re.IGNORECASE).group(1)
    response_json = self.gmb_service.accounts().locations().reportInsights(
        name=account_id, body=query).execute(num_retries=MAX_RETRIES)
    if "locationDrivingDirectionMetrics" in response_json:
      for line in response_json.get("locationDrivingDirectionMetrics"):
        # "name" doubles as the BigQuery insertId (see to_bigquery).
        line["name"] = line.get("locationName")
        data.append(line)
      logging.info(json.dumps(data, indent=2))
      self.to_bigquery(table_name="directions", data=data)
    return data
  def hourly_calls(self, location_id):
    """Fetch phone-call counts broken down by hour of day for a location.

    Issues one reportInsights call per day over the last CALLS_DAYS_BACK
    days (the window ends 5 days before today), accumulates all daily
    results and writes them to the BigQuery `hourly_calls` table in one go.
    Returns the accumulated list of locationMetrics lines.
    """
    query = {
        "locationNames": [location_id],
        "basicRequest": {
            "metricRequests": [{
                "metric": "ACTIONS_PHONE",
                "options": ["BREAKDOWN_HOUR_OF_DAY"]
            }],
            "timeRange": {}
        },
    }
    # reportInsights is addressed by account; extract "accounts/<id>".
    account_id = re.search("(accounts/[0-9]+)/locations/[0-9]+", location_id,
                           re.IGNORECASE).group(1)
    limit_end_time = (datetime.now() - timedelta(days=5)).replace(
        hour=0, minute=0, second=0, microsecond=0)
    start_time = limit_end_time - timedelta(days=CALLS_DAYS_BACK)
    data = []
    while start_time < limit_end_time:
      end_time = start_time + timedelta(days=1)
      start_time_string = start_time.strftime("%Y-%m-%dT%H:%M:%SZ")
      end_time_string = end_time.strftime("%Y-%m-%dT%H:%M:%SZ")
      # One-day request window for this iteration.
      query["basicRequest"]["timeRange"] = {
          "startTime": start_time_string,
          "endTime": end_time_string
      }
      response_json = self.gmb_service.accounts().locations().reportInsights(
          name=account_id, body=query).execute(num_retries=MAX_RETRIES)
      if "locationMetrics" in response_json:
        for line in response_json.get("locationMetrics"):
          # insertId must be unique per location *and* day, hence the
          # "<locationName>/<start time>" composite.
          line["name"] = "{}/{}".format(
              line.get("locationName"), start_time_string)
          if "metricValues" in line:
            for metric_values in line.get("metricValues"):
              if "dimensionalValues" in metric_values:
                for values in metric_values.get("dimensionalValues"):
                  # Stamp every hourly value with the day it belongs to.
                  values["timeDimension"]["timeRange"] = {
                      "startTime": start_time_string
                  }
          data.append(line)
      start_time = start_time + timedelta(days=1)
    if data:
      logging.info(json.dumps(data, indent=2))
      self.to_bigquery(table_name="hourly_calls", data=data)
    return data
def ensure_dataset_exists(self):
if self.dataset_exists:
return
try:
self.bq_service.datasets().get(
projectId=self.PROJECT_ID,
datasetId=DATASET_ID).execute(num_retries=MAX_RETRIES)
logging.info(u"Dataset {}:{} already exists.".format(
self.PROJECT_ID, DATASET_ID))
self.dataset_exists = True
return
except HttpError as err:
if err.resp.status != 404:
raise
dataset = {
"datasetReference": {
"projectId": self.PROJECT_ID,
"datasetId": DATASET_ID
}
}
self.bq_service.datasets().insert(
projectId=self.PROJECT_ID,
body=dataset).execute(num_retries=MAX_RETRIES)
self.dataset_exists = True
def ensure_table_exists(self, table_name):
if self.existing_tables.get(table_name):
return
try:
self.bq_service.tables().get(
projectId=self.PROJECT_ID, datasetId=DATASET_ID,
tableId=table_name).execute(num_retries=MAX_RETRIES)
logging.info(u"Table {}:{}.{} already exists.".format(
self.PROJECT_ID, DATASET_ID, table_name))
self.existing_tables[table_name] = True
return
except HttpError as err:
if err.resp.status != 404:
raise
table = {
"schema": {
"fields": self.schemas.get(table_name)
},
"tableReference": {
"projectId": self.PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": table_name
},
"timePartitioning": {
"type": 'DAY'
}
}
self.bq_service.tables().insert(
projectId=self.PROJECT_ID, datasetId=DATASET_ID,
body=table).execute(num_retries=MAX_RETRIES)
self.existing_tables[table_name] = True
def to_bigquery(self, table_name, data=[]):
if not data:
return
self.ensure_dataset_exists()
self.ensure_table_exists(table_name)
rows = [{"json": line, "insertId": line.get("name")} for line in data]
chunk_size = BQ_TABLEDATA_INSERTALL_BATCHSIZE
chunked_rows = [
rows[i * chunk_size:(i + 1) * chunk_size]
for i in range((len(rows) + chunk_size - 1) // chunk_size)
]
for chunk in chunked_rows:
logging.info(u"Inserting {} rows into table {}:{}.{}.".format(
len(chunk), self.PROJECT_ID, DATASET_ID, table_name))
data_chunk = {"rows": chunk, "ignoreUnknownValues": True}
self.bq_service.tabledata().insertAll(
projectId=self.PROJECT_ID,
datasetId=DATASET_ID,
tableId=table_name,
body=data_chunk).execute(num_retries=MAX_RETRIES)
| StarcoderdataPython |
1691673 | import tensorflow as tf
class ConfusionMatrix(tf.keras.metrics.Metric):
    """Streaming confusion matrix accumulated across batches.

    `update_state` expects integer labels in `y_true` and per-class scores
    in `y_pred`; `result` returns the raw (num_classes x num_classes)
    matrix, `result_detailed` derives per-class precision/recall/F1.
    """
    def __init__(self, num_classes, **kwargs):
        super(ConfusionMatrix, self).__init__(name='confusion_matrix', **kwargs)  # handles base args (e.g., dtype)
        self.num_classes = num_classes
        self.total_cm = self.add_weight("total", shape=(num_classes, num_classes), initializer="zeros")
    def reset_states(self):
        # Zero the accumulator between epochs/evaluations.
        for s in self.variables:
            s.assign(tf.zeros(shape=s.shape))
    def update_state(self, y_true, y_pred, sample_weight=None):
        # Note: sample_weight is accepted for API compatibility but unused.
        self.total_cm.assign_add(self.confusion_matrix(y_true, y_pred))
        return self.total_cm
    def result_detailed(self):
        return self.process_confusion_matrix()
    def result(self):
        return self.total_cm
    def confusion_matrix(self, y_true, y_pred):
        """Make a confusion matrix for one batch."""
        # Bug fix: the class count was hard-coded to 12 here even though
        # the metric is parameterized by num_classes; use self.num_classes.
        y_pred = tf.reshape(y_pred, [-1, self.num_classes])
        y_pred = tf.argmax(y_pred, 1)
        y_true = tf.reshape(y_true, [-1])
        cm = tf.math.confusion_matrix(y_true, y_pred, dtype=tf.float32, num_classes=self.num_classes)
        return cm
    def process_confusion_matrix(self):
        """returns precision, recall and f1 along with overall accuracy"""
        cm = self.total_cm
        diag_part = tf.linalg.diag_part(cm)
        # Precision = TP/(TP+FP); the 1e-15 terms guard against 0/0.
        precision = diag_part / (tf.reduce_sum(cm, 0) + tf.constant(1e-15))
        # Recall = Sensitivity = TP/(TP+FN)
        recall = diag_part / (tf.reduce_sum(cm, 1) + tf.constant(1e-15))
        # F1 Score = 2*Precision*Recall/(Precision+Recall)
        f1 = 2 * precision * recall / (precision + recall + tf.constant(1e-15))
        return precision, recall, f1
3294577 | <reponame>kevinlib/IOHMM
'''
The Wrapper of statsmodels one parameter exponential family distributions used by GLM,
with the added functionality for log likelihood per sample. Loglikelihood per sample is
going to be used in IOHMM to estimate emission probability.
'''
from __future__ import division
from past.utils import old_div
from builtins import object
import numpy as np
from scipy import special
from statsmodels.genmod.families.family import (Poisson,
Gaussian,
Gamma,
Binomial,
InverseGaussian,
NegativeBinomial)
import statsmodels.genmod.families.links as L
EPS = np.finfo(float).eps
class FamilyWrapper(object):
    """Abstract base for wrappers around statsmodels one-parameter
    exponential families, adding a per-sample log-likelihood.

    Per-sample log-likelihoods are used by IOHMM to estimate emission
    probabilities. Concrete subclasses build ``self.family`` (the wrapped
    statsmodels family object) in ``__init__`` and implement
    ``loglike_per_sample``.

    Parameters
    ----------
    link : a link function instance
        The linear transformation function; see the subclasses for the
        links each family supports.
    variance : a variance function
        Variance as a function of the mean; see the subclasses for the
        family-specific defaults.
    Attributes
    ----------
    family : the wrapped statsmodels family object
    """
    def __init__(self, link, variance):
        # Abstract — concrete families construct self.family here.
        raise NotImplementedError
    def loglike_per_sample(self, endog, mu, scale=1.):
        """Return the log-likelihood of every sample, shape (n,).

        Parameters
        ----------
        endog : array-like, shape (n,)
            Endogenous response variable; for the binomial family it may
            also be (n, k) with k classes.
        mu : array-like, shape (n,)
            Fitted mean response variable.
        scale : float, optional
            Scale parameter, defaults to 1.

        Returns
        -------
        log_p : array-like, shape (n,)
        """
        raise NotImplementedError
class PoissonWrapper(FamilyWrapper):
    r"""Wrapper for the Poisson exponential family.

    Parameters
    ----------
    link : a link instance, optional
        Defaults to the log link; log, identity and sqrt are available
        (see statsmodels.family.links).
    Attributes
    ----------
    family : a statsmodels Poisson family object
    """
    def __init__(self, link=L.log):
        # statsmodels 0.8.0 still takes a link as an argument; follow any
        # upstream change whenever it happens.
        self.family = Poisson(link=link)
    def loglike_per_sample(self, endog, mu, scale=1.):
        r"""Per-sample Poisson log-likelihood, shape (n,).

        .. math::
            log\_p_i = y_i \log(\mu_i) - \mu_i - \ln \Gamma(y_i + 1)

        `scale` is not used for the Poisson family.
        """
        log_pmf = endog * np.log(mu) - mu - special.gammaln(endog + 1)
        return log_pmf.reshape(-1,)
class GaussianWrapper(FamilyWrapper):
    """Wrapper for the Gaussian exponential family with a per-sample
    log-likelihood.

    Parameters
    ----------
    link : a link instance, optional
        Defaults to the identity link; log, identity and inverse are
        available (see statsmodels.family.links).
    Attributes
    ----------
    family : a statsmodels Gaussian family object
    """
    def __init__(self, link=L.identity):
        self.family = Gaussian(link=link)
    def loglike_per_sample(self, endog, mu, scale=1.):
        r"""Per-sample Gaussian log-likelihood, shape (n,).

        log_p_i = -((y_i - mu_i)^2 / scale + log(2*pi*scale)) / 2

        A degenerate scale (<= EPS) collapses the density to a point mass:
        0 where endog matches mu, -inf elsewhere.
        """
        if scale <= EPS:
            log_p = np.zeros(endog.shape[0])
            log_p[~np.isclose(endog, mu)] = - np.Infinity
            return log_p
        quad = old_div((endog * mu - old_div(mu**2, 2.)), scale)
        quad = quad - old_div(endog**2, (2 * scale))
        return (quad - .5 * np.log(2 * np.pi * scale)).reshape(-1,)
class GammaWrapper(FamilyWrapper):
    """
    The wrapper of Gamma exponential family distribution,
    with function for per sample probability.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the Gamma family is the inverse power link.
        See statsmodels.family.links for more information.
    Attributes
    ----------
    family : a statsmodel Gamma family object
    --------
    """
    def __init__(self, link=L.inverse_power):
        self.family = Gamma(link=link)
    def loglike_per_sample(self, endog, mu, scale=1.):
        """
        The function to calculate log-likelihood per sample
        in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like of shape (n, )
            Endogenous response variable
        mu : array-like of shape (n, )
            Fitted mean response variable
        scale : float, optional
            The default is 1.
        Returns
        -------
        log_p : array-like of shape (n, )
            The value of the loglikelihood function evaluated per sample
            (endog,mu,scale) as defined below.
        Notes
        --------
        log_p_{i} = -1 / scale * (Y_i / \\mu_i+ \\log(\\mu_i)+
                    (scale -1) * \\log(Y) + \\log(scale) + scale *
                    \\ln \\Gamma(1 / scale))
        For a degenerate scale (<= EPS) the density collapses to a point
        mass: 0 where endog matches mu, -inf elsewhere.
        """
        if scale > EPS:
            # family._clean guards the endog/mu ratio so np.log stays
            # finite (statsmodels helper) — presumably clipping near zero.
            endog_mu = self.family._clean(old_div(endog, mu))
            return (old_div(-(endog_mu - np.log(endog_mu) + scale *
                              np.log(endog) + np.log(scale) + scale *
                              special.gammaln(old_div(1., scale))), scale)).reshape(-1,)
        else:
            # Degenerate scale: point mass at mu.
            log_p = np.zeros(endog.shape[0])
            log_p[~np.isclose(endog, mu)] = - np.Infinity
            return log_p
class BinomialWrapper(FamilyWrapper):
    """
    The wrapper of Binomial exponential family distribution,
    with function for per sample probability.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the Binomial family is the logit link.
        Available links are logit, probit, cauchy, log, and cloglog.
        See statsmodels.family.links for more information.
    Attributes
    ----------
    family : a statsmodel Binomial family object
    --------
    """
    def __init__(self, link=L.logit):  # , n=1.
        # TODO: it *should* work for a constant n>1 actually, if data_weights
        # is equal to n
        self.family = Binomial(link=link)
    def loglike_per_sample(self, endog, mu, scale=1.):
        """
        The function to calculate log-likelihood per sample
        in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like of shape (n, k) or (n, )
            Endogenous response variable
        mu : array-like of shape (n, )
            Fitted mean response variable
        scale : float, optional
            Not used for the Binomial GLM.
        Returns
        -------
        log_p : array-like of shape (n, )
            The value of the loglikelihood function evaluated per sample
            (endog,mu,freq_weights,scale) as defined below.
        Notes
        --------
        If the endogenous variable is binary:
        .. math::
         log_p_{i} = (y_i * \\log(\\mu_i/(1-\\mu_i)) + \\log(1-\\mu_i))
        If the endogenous variable is binomial:
        .. math::
           log_p_{i} = (\\ln \\Gamma(n+1) -
                 \\ln \\Gamma(y_i + 1) - \\ln \\Gamma(n_i - y_i +1) + y_i *
                 \\log(\\mu_i / (n_i - \\mu_i)) + n * \\log(1 - \\mu_i/n_i))
        where :math:`y_i = Y_i * n_i` with :math:`Y_i` and :math:`n_i` as
        defined in Binomial initialize.  This simply makes :math:`y_i` the
        original number of successes.
        """
        # special setup: Binomial.initialize normalizes endog and sets
        # self.family.n (trials per observation) as a side effect.
        # see _Setup_binomial(self) in generalized_linear_model.py
        tmp = self.family.initialize(endog, 1)
        endog = tmp[0]
        if np.shape(self.family.n) == () and self.family.n == 1:
            # Bernoulli case; 1e-200 guards log() against a zero odds ratio.
            return scale * (endog * np.log(old_div(mu, (1 - mu)) + 1e-200) +
                            np.log(1 - mu)).reshape(-1,)
        else:
            y = endog * self.family.n  # convert back to successes
            return scale * (special.gammaln(self.family.n + 1) -
                            special.gammaln(y + 1) -
                            special.gammaln(self.family.n - y + 1) + y *
                            np.log(old_div(mu, (1 - mu))) + self.family.n *
                            np.log(1 - mu)).reshape(-1,)
class InverseGaussianWrapper(FamilyWrapper):
    """Wrapper for the inverse Gaussian exponential family with a
    per-sample log-likelihood.

    Parameters
    ----------
    link : a link instance, optional
        Defaults to the inverse squared link; inverse_squared, inverse,
        log and identity are available (see statsmodels.family.links).
    Attributes
    ----------
    family : a statsmodels InverseGaussian family object
    """
    def __init__(self, link=L.inverse_squared):
        self.family = InverseGaussian(link=link)
    def loglike_per_sample(self, endog, mu, scale=1.):
        r"""Per-sample inverse Gaussian log-likelihood, shape (n,).

        log_p_i = -((y_i - mu_i)^2 / (y_i mu_i^2 scale)
                    + log(scale y_i^3) + log(2 pi)) / 2

        A degenerate scale (<= EPS) collapses the density to a point mass:
        0 where endog matches mu, -inf elsewhere.
        """
        if scale <= EPS:
            log_p = np.zeros(endog.shape[0])
            log_p[~np.isclose(endog, mu)] = - np.Infinity
            return log_p
        terms = old_div((endog - mu)**2, (endog * mu**2 * scale))
        terms = terms + np.log(scale * endog**3) + np.log(2 * np.pi)
        return (-.5 * terms).reshape(-1,)
class NegativeBinomialWrapper(FamilyWrapper):
    """
    The wrapper of NegativeBinomial exponential family distribution,
    with function for per sample probability.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the NegativeBinomial family is the log link.
        Available links are log, cloglog, identity, nbinom and power.
        See statsmodels.family.links for more information.
    alpha : float, optional
        Ancillary (dispersion) parameter, defaults to 1.
    Attributes
    ----------
    family : a statsmodel NegativeBinomial family object
    --------
    """
    def __init__(self, link=L.log, alpha=1.):
        # make it at least float
        self.family = NegativeBinomial(link=link, alpha=alpha)
    def loglike_per_sample(self, endog, mu, scale):
        """
        The function to calculate log-likelihood per sample
        in terms of the fitted mean response.
        NOTE(review): unlike the sibling wrappers, `scale` has no default
        here — callers must pass it explicitly; confirm whether the
        asymmetry is intentional.
        Parameters
        ----------
        endog : array-like of shape (n, )
            Endogenous response variable
        mu : array-like of shape (n, )
            The fitted mean response values
        scale : float
            The scale parameter
        Returns
        -------
        log_p : array-like of shape (n, )
            The value of the loglikelihood function evaluated per sample
            (endog,mu,freq_weights,scale) as defined below.
        Notes
        -----
        Defined as:
        .. math::
           log_p_{i} = (Y_i * \\log{(\\alpha * \\mu_i /
                 (1 + \\alpha * \\mu_i))} - \\log{(1 + \\alpha * \\mu_i)}/
                 \\alpha + Constant)
        where :math:`Constant` is defined as:
        .. math::
           Constant = \\ln \\Gamma{(Y_i + 1/ \\alpha )} - \\ln \\Gamma(Y_i + 1) -
                      \\ln \\Gamma{(1/ \\alpha )}
        For a degenerate scale (<= EPS) the density collapses to a point
        mass: 0 where endog matches mu, -inf elsewhere.
        """
        if scale > EPS:
            # The link maps mu to the linear predictor; exp() recovers the
            # mean used in the closed-form expression below.
            lin_pred = self.family._link(mu)
            constant = (special.gammaln(endog + old_div(1, self.family.alpha)) -
                        special.gammaln(endog + 1) - special.gammaln(old_div(1, self.family.alpha)))
            exp_lin_pred = np.exp(lin_pred)
            return (endog * np.log(self.family.alpha * exp_lin_pred /
                                   (1 + self.family.alpha * exp_lin_pred)) -
                    old_div(np.log(1 + self.family.alpha * exp_lin_pred),
                            self.family.alpha) + constant).reshape(-1,)
        else:
            log_p = np.zeros(endog.shape[0])
            log_p[~np.isclose(endog, mu)] = - np.Infinity
            return log_p
| StarcoderdataPython |
1614392 | import random
import pytest
import redis
from RLTest import Env
from test_helper_classes import _get_ts_info
def test_ooo(self):
    """Out-of-order inserts must yield the same series as ordered inserts.

    NOTE(review): defined at module level but takes an unused `self`
    parameter — looks lifted from a test class; confirm the test runner
    still discovers it before renaming.
    """
    with Env().getClusterConnectionIfNeeded() as r:
        quantity = 50001
        type_list = ['', 'UNCOMPRESSED']
        for chunk_type in type_list:
            # 'no_ooo' receives strictly increasing timestamps; 'ooo'
            # receives the same set, but interleaved out of order.
            r.execute_command('ts.create', 'no_ooo', chunk_type, 'CHUNK_SIZE', 100, 'DUPLICATE_POLICY', 'BLOCK')
            r.execute_command('ts.create', 'ooo', chunk_type, 'CHUNK_SIZE', 100, 'DUPLICATE_POLICY', 'LAST')
            for i in range(0, quantity, 5):
                r.execute_command('ts.add', 'no_ooo', i, i)
            for i in range(0, quantity, 10):
                r.execute_command('ts.add', 'ooo', i, i)
            for i in range(5, quantity, 10):  # the out-of-order half
                r.execute_command('ts.add', 'ooo', i, i)
            # Both series must now be identical.
            ooo_res = r.execute_command('ts.range', 'ooo', '-', '+')
            no_ooo_res = r.execute_command('ts.range', 'no_ooo', '-', '+')
            assert len(ooo_res) == len(no_ooo_res)
            for i in range(len(ooo_res)):
                assert ooo_res[i] == no_ooo_res[i]
            ooo_res = r.execute_command('ts.range', 'ooo', 1000, 1000)
            assert ooo_res[0] == [1000, b'1000']
            # DUPLICATE_POLICY LAST: re-adding an existing timestamp must
            # overwrite the value, both mid-series and at the last sample.
            last_sample = r.execute_command('ts.get', 'ooo')
            r.execute_command('ts.add', 'ooo', 1000, 42)
            ooo_res = r.execute_command('ts.range', 'ooo', 1000, 1000)
            assert ooo_res[0] == [1000, b'42']
            assert last_sample == r.execute_command('ts.get', 'ooo')
            r.execute_command('ts.add', 'ooo', last_sample[0], 42)
            assert [last_sample[0], b'42'] == r.execute_command('ts.get', 'ooo')
            r.execute_command('DEL', 'no_ooo')
            r.execute_command('DEL', 'ooo')
def test_ooo_with_retention(self):
    """Retention must also reject out-of-order samples past the window."""
    with Env().getClusterConnectionIfNeeded() as r:
        retention = 13
        batch = 100
        r.execute_command('ts.create', 'ooo', 'CHUNK_SIZE', 10, 'RETENTION', retention, 'DUPLICATE_POLICY', 'LAST')
        for i in range(batch):
            assert r.execute_command('ts.add', 'ooo', i, i) == i
        # Everything older than the retention window must be gone.
        assert r.execute_command('ts.range', 'ooo' ,0, batch - retention - 2) == []
        assert len(r.execute_command('ts.range', 'ooo', '-', '+')) == retention + 1
        # Adding a sample older than the retention window must fail.
        with pytest.raises(redis.ResponseError) as excinfo:
            assert r.execute_command('ts.add', 'ooo', 70, 70)
        # A second batch slides the window forward; same invariants hold.
        for i in range(batch, batch * 2):
            assert r.execute_command('ts.add', 'ooo', i, i) == i
        assert r.execute_command('ts.range', 'ooo', 0, batch * 2 - retention - 2) == []
        assert len(r.execute_command('ts.range', 'ooo', '-', '+')) == retention + 1
        # test for retention larger than timestamp
        r.execute_command('ts.create', 'large', 'RETENTION', 1000000, 'DUPLICATE_POLICY', 'LAST')
        assert r.execute_command('ts.add', 'large', 100, 0) == 100
        assert r.execute_command('ts.add', 'large', 101, 0) == 101
        assert r.execute_command('ts.add', 'large', 100, 0) == 100
def test_ooo_split(self):
    """Out-of-order inserts must split chunks without corrupting the data."""
    with Env().getClusterConnectionIfNeeded() as r:
        quantity = 5000
        for chunk_type in ['', 'UNCOMPRESSED']:
            r.execute_command('ts.create', 'split', chunk_type)
            # Insert the newest sample first so every following add lands
            # out of order and forces chunk splits.
            r.execute_command('ts.add', 'split', quantity, 42)
            for ts in range(quantity):
                r.execute_command('ts.add', 'split', ts, ts * 1.01)
            assert _get_ts_info(r, 'split').chunk_count in [13, 32]
            samples = r.execute_command('ts.range', 'split', '-', '+')
            # Timestamps must be consecutive and values must grow by 1.01.
            for idx in range(quantity - 1):
                assert samples[idx][0] + 1 == samples[idx + 1][0]
                assert round(float(samples[idx][1]) + 1.01, 2) == \
                    round(float(samples[idx + 1][1]), 2)
            r.execute_command('DEL', 'split')
def test_rand_oom(self):
    """Fuzz test: withhold a random contiguous slice of samples and insert
    it after the rest; the stored series must match the sorted input."""
    random.seed(20)  # deterministic fuzzing
    start_ts = 1592917924000
    current_ts = int(start_ts)
    data = []
    ooo_data = []
    # Carve a random window [start_ooo, start_ooo + amount] out of the
    # stream; those samples are inserted last (out of order).
    start_ooo = random.randrange(500, 9000)
    amount = random.randrange(250, 1000)
    for i in range(10000):
        val = '%.5f' % random.gauss(50, 10.5)
        if i < start_ooo or i > start_ooo + amount:
            data.append([current_ts, val])
        else:
            ooo_data.append([current_ts, val])
        current_ts += random.randrange(20, 1000)
    with Env().getClusterConnectionIfNeeded() as r:
        r.execute_command('ts.create', 'tester')
        for sample in data:
            r.execute_command('ts.add', 'tester', sample[0], sample[1])
        for sample in ooo_data:
            r.execute_command('ts.add', 'tester', sample[0], sample[1])
        # Stored series must equal the merged, time-sorted input.
        all_data = sorted(data + ooo_data, key=lambda x: x[0])
        res = r.execute_command('ts.range', 'tester', '-', '+')
        assert len(res) == len(all_data)
        for i in range(len(all_data)):
            assert all_data[i][0] == res[i][0]
            assert float(all_data[i][1]) == float(res[i][1])
| StarcoderdataPython |
1741619 | <reponame>phigre/cobi
import torch
import numpy as np
import rasterio
import salem
from oggm import entity_task, cfg
from combine2d.core.sia2d_adapted import Upstream2D
import logging
# -------------------------------
# Further initialization / extended import tasks
# Module logger
log = logging.getLogger(__name__)
# @entity_task(log, writes=['dem_spinup', 'ice_thickness_spinup'])
@torch.no_grad()
def spinup(gdir, case, yr_spinup_end, mb=None):
    """
    Performs a forward run on the DEM in gdir for given number of years.
    Writes the found spinup surface to 'spinup_dem' and the derived ice
    thickness to 'spinup_ice_thickness' in the glacier directory.
    Parameters
    ----------
    gdir
        glacier directory providing the input DEM and output file paths
    case
        test case supplying dx and (if mb is None) the mass-balance model
    yr_spinup_end
        years to run the model forward
    mb
        optional mass-balance model; derived from `case` when None
    Returns
    -------
    (spinup_surf, spinup_it) : surface heights and ice thickness [m]
    """
    with rasterio.open(gdir.get_filepath('dem')) as src:
        bed = src.read(1)
        profile = src.profile
    spinup_surf = run_forward(gdir, case, yr_spinup_end, bed, mb=mb)
    profile['dtype'] = 'float32'
    with rasterio.open(gdir.get_filepath('spinup_dem'),
                       'w', **profile) as dst:
        dst.write(spinup_surf, 1)
    # Re-read the surface through salem — presumably so the returned array
    # round-trips through the written float32 file; TODO confirm.
    spinup_surf = salem.GeoTiff(gdir.get_filepath('spinup_dem')).get_vardata()
    spinup_it = spinup_surf - bed
    np.save(gdir.get_filepath('spinup_ice_thickness'), spinup_it)
    return spinup_surf, spinup_it
@torch.no_grad()
def run_forward(gdir, case, yrs, bed, mb=None, init_ice_thick=None):
    """Run the 2D SIA model forward for `yrs` years and return the surface.

    Wrapper around run_forward_core: resolves the mass-balance model from
    `case` when `mb` is None, and accepts the bed either directly as an
    ndarray/tensor, as a filename in `gdir` (e.g. 'dem'), or as a
    (filename, filesuffix) pair of strings.

    Parameters
    ----------
    gdir : NonRGIGlacierDirectory
        The GlacierDirectory containing the data
    case : TestCase
        Case to be run, giving dx and mb (if not specified)
    yrs : float
        yrs to run forward
    bed : FloatTensor, ndarray, string or list/tuple of strings
        bed heights (unit: [m]) or filename (+filesuffix) in gdir
    mb :
        Mass-balance model (optional)
    init_ice_thick : ndarray
        optional initial ice thickness for the model run

    Returns
    -------
    ndarray of surface height (unit: [m])
    """
    mb_model = case.get_mb_model() if mb is None else mb
    if isinstance(bed, (np.ndarray, torch.Tensor)):
        bed_arr = bed
    elif isinstance(bed, str):
        bed_arr = salem.GeoTiff(gdir.get_filepath(bed)).get_vardata()
    elif (isinstance(bed, (list, tuple)) and len(bed) == 2
          and all(isinstance(s, str) for s in bed)):
        bed_arr = salem.GeoTiff(
            gdir.get_filepath(bed[0], filesuffix=bed[1])).get_vardata()
    else:
        raise TypeError('Unexpected Type of argument "bed" in "run_forward"')
    bed_h = torch.tensor(bed_arr, dtype=torch.float, requires_grad=False)
    ice_thick = init_ice_thick
    if ice_thick is not None:
        ice_thick = torch.tensor(ice_thick, dtype=torch.float,
                                 requires_grad=False)
    return run_forward_core(yrs, bed_h, case.dx, mb_model, ice_thick).numpy()
def run_forward_core(yrs, bed, dx, mb_model, init_ice_thick):
    """
    Run the Upstream2D SIA model forward for `yrs` years and return the
    resulting surface heights.
    Parameters
    ----------
    yrs: float
        yrs to run forward
    bed: FloatTensor
        tensor of bed height (unit: [m])
    dx: float
        model resolution (unit: [m])
    mb_model: MassBalanceModel
        Mass-balance-model used in the model
    init_ice_thick: ndarray
        initial ice thickness, if None it will be set to 0 for the whole
        domain (unit: [m])
    Returns
    -------
    FloatTensor of surface height (unit: [m])
    """
    # Glen's flow-law parameter is taken from the global OGGM config.
    model = Upstream2D(bed, dx=dx, mb_model=mb_model, y0=0,
                       glen_a=cfg.PARAMS['glen_a'], ice_thick_filter=None,
                       init_ice_thick=init_ice_thick)
    model.run_until(yrs)
    return model.surface_h
# @entity_task(log, writes=[''])
@torch.no_grad()
def create_glacier(gdir, run_spinup=True):
    """
    Creates a DEM-file for a glacier surface by running a forward model
    for spin-up to a first state and based on this state further on to a
    next state. The reference surface is written to 'ref_dem'; the
    derived ice thickness and ice mask are saved as numpy arrays.
    Parameters
    ----------
    gdir: NonRGIGlacierDirectory
        GlacierDirectory possibly containing spinup-state and used for
        saving the final reference state
    run_spinup: bool
        whether to run spin-up or rely on existing state
    Returns
    -------
    None
    """
    inv_settings = gdir.inversion_settings
    if run_spinup:
        spinup(gdir, inv_settings['case'],
               inv_settings['yrs_spinup'],
               mb=inv_settings['mb_spinup'])
    # Load the spin-up state written by spinup() (possibly from an
    # earlier call when run_spinup is False).
    spinup_it = np.load(gdir.get_filepath('spinup_ice_thickness'))
    spinup_surf = salem.GeoTiff(gdir.get_filepath('spinup_dem')).get_vardata()
    with rasterio.open(gdir.get_filepath('dem')) as src:
        bed = src.read(1)
        profile = src.profile
    # Run forward from the spin-up state to produce the reference surface.
    ref_surf = run_forward(gdir, inv_settings['case'],
                           inv_settings['yrs_forward_run'], bed,
                           mb=inv_settings['mb_forward_run'],
                           init_ice_thick=spinup_it)
    profile['dtype'] = 'float32'
    with rasterio.open(gdir.get_filepath('ref_dem'),
                       'w', **profile) as dst:
        dst.write(ref_surf, 1)
    # Re-read through the written file, then derive thickness and mask.
    ref_surf = salem.GeoTiff(gdir.get_filepath('ref_dem')).get_vardata()
    ref_it = ref_surf - bed
    ref_ice_mask = ref_it > 0
    np.save(gdir.get_filepath('ref_ice_thickness'), ref_it)
    np.save(gdir.get_filepath('ref_ice_mask'), ref_ice_mask)
| StarcoderdataPython |
3384628 | <reponame>mann-brinson/LA_Apartments_Scraper
#!/usr/bin/env python
# coding: utf-8
import argparse
import sys
import sqlite3
import pandas as pd
import os
import shutil
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import seaborn as sns
#GOAL: Run queries on la_apartments.db to return simple metrics about the database
def check_db():
    '''
    Verify that la_apartments.db contains the required tables.

    Connects to the SQLite database and checks that both the neighborhood
    and apartment tables exist, exiting the program with an explanatory
    message if either is missing. On success, returns the open cursor
    (the underlying connection stays open for the caller's queries).
    '''
    # Connect to the database
    conn = sqlite3.connect('la_apartments.db')
    cur = conn.cursor()
    # Check both required tables with one parameterized query each,
    # instead of duplicating the lookup code per table.
    for table in ('neighborhood', 'apartment'):
        cur.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
            (table,))
        if len(cur.fetchall()) == 0:
            print("The {} table doesn't exist. You must first source the data remotely.".format(table))
            sys.exit(0)
    return cur
def query1():
    """Return a label plus the row count of the neighborhood table."""
    cur = check_db()
    cur.execute('SELECT COUNT(*) FROM neighborhood ')
    row_count = cur.fetchall()[0][0]
    return 'neighborhood table rows:', row_count
def query2():
    """Return a label plus the row count of the apartment table."""
    cur = check_db()
    cur.execute('SELECT COUNT(*) FROM apartment ')
    row_count = cur.fetchall()[0][0]
    return 'apartment table rows:', row_count
def query3():
    """Print price, bedrooms and bathrooms of the 5 apartments with the lowest square footage."""
    cur = check_db()
    cur.execute('SELECT craigslist_id, sq_feet, price, bedrooms, bathrooms '
                'FROM '
                '(SELECT * '
                'FROM apartment '
                'ORDER BY sq_feet asc) apartment '
                'LIMIT 5 ')
    df = pd.DataFrame(cur.fetchall(),
                      columns=['craigslist_id', 'sq_feet', 'price', 'bedrooms', 'bathrooms'])
    print('Top 5 Smallest Apartments:')
    print(df)
def query4():
    """Print the 5 LA census tracts (and neighborhoods) with the highest 2015 average rent."""
    cur = check_db()
    cur.execute('SELECT fips_hood, neighborhood, year, avg_rent, sqmi, population '
                'FROM '
                '(SELECT * '
                'FROM neighborhood '
                'WHERE year = 2015 '
                'ORDER BY avg_rent desc) neighborhood '
                'LIMIT 5 ')
    df = pd.DataFrame(cur.fetchall(),
                      columns=['tract_id', 'neighborhood', 'year', 'avg_rent', 'sqmi', 'population'])
    print('Top 5 Most Expensive Census Tracts:')
    print(df)
def query5():
    """Print the 5 neighborhood tracts with the most homeless persons in 2018."""
    cur = check_db()
    cur.execute('SELECT year, fips_hood, neighborhood, homeless_persons '
                'FROM neighborhood '
                'WHERE neighborhood.year = 2018 '
                'ORDER BY homeless_persons desc '
                'LIMIT 5')
    df = pd.DataFrame(cur.fetchall(),
                      columns=['year', 'tract', 'neighborhood', 'homeless_persons'])
    print('Top 5 tracts with the most homeless persons:')
    print(df)
def query6():
    """Print the 5 apartments with the lowest price per square foot and their neighborhoods.

    Price is stored like '$1234', so substr(price,2) strips the currency sign
    before the CAST used in the per-square-foot division.
    """
    cur = check_db()
    cur.execute('SELECT neighborhood, price_per_sqfoot, price, sq_feet, bedrooms, bathrooms, url '
                'FROM '
                '(SELECT tract_id, bedrooms, bathrooms, substr(price,2) price, sq_feet, round((CAST(substr(price,2) AS FLOAT)/sq_feet), 2) price_per_sqfoot, url '
                'FROM apartment '
                'ORDER BY price_per_sqfoot asc) price_per_sqfoot '
                'JOIN neighborhood on price_per_sqfoot.tract_id = neighborhood.fips_hood '
                'WHERE year = 2016 '
                'ORDER BY price_per_sqfoot asc '
                'LIMIT 5')
    df = pd.DataFrame(cur.fetchall(),
                      columns=['neighborhood', 'price_per_sqfoot', 'price', 'sq_feet', 'bedrooms', 'bathrooms', 'url'])
    print('Top 5 Best Value Per Square Foot Apartments:')
    print(df)
def query7():
    """Print 5 apartments located in the 2018 tracts with the fewest homeless persons."""
    cur = check_db()
    cur.execute('SELECT apartment.id, apartment.bedrooms, apartment.price, neighborhood.fips_hood, neighborhood.neighborhood, neighborhood.year, neighborhood.homeless_persons '
                'FROM apartment '
                'LEFT JOIN neighborhood on apartment.tract_id = neighborhood.fips_hood '
                'WHERE neighborhood.year = 2018 '
                'ORDER BY neighborhood.homeless_persons asc '
                'LIMIT 5')
    df = pd.DataFrame(cur.fetchall(),
                      columns=['apt_id', 'bedrooms', 'price', 'tract', 'neighborhood', 'year', 'homeless_persons'])
    print('Top 5 Apartments with the Least Homeless Persons Nearby:')
    print(df)
def query8():
    """Print 5 apartments located in the 2018 tracts with the most homeless persons."""
    cur = check_db()
    cur.execute('SELECT apartment.id, apartment.bedrooms, apartment.price, neighborhood.fips_hood, neighborhood.neighborhood, neighborhood.year, neighborhood.homeless_persons '
                'FROM apartment '
                'LEFT JOIN neighborhood on apartment.tract_id = neighborhood.fips_hood '
                'WHERE neighborhood.year = 2018 '
                'ORDER BY neighborhood.homeless_persons desc '
                'LIMIT 5')
    df = pd.DataFrame(cur.fetchall(),
                      columns=['apt_id', 'bedrooms', 'price', 'tract', 'neighborhood', 'year', 'homeless_persons'])
    print('Apartments with the Most Homeless Persons Nearby:')
    print(df)
def query9():
    """Print the Hollywood tracts with the fewest homeless persons per square mile (2018)."""
    cur = check_db()
    # sqmi is stored as text in some rows, hence the sqmi != "None" guard.
    cur.execute('SELECT fips_hood, neighborhood, year, homeless_persons, sqmi, round((homeless_persons/sqmi),0) homeless_per_sqmi '
                'FROM neighborhood '
                'WHERE year = 2018 AND neighborhood = "Hollywood" AND homeless_persons > 0 AND sqmi != "None" '
                'ORDER BY homeless_per_sqmi asc '
                'LIMIT 5')
    df = pd.DataFrame(cur.fetchall(),
                      columns=['tract', 'neighborhood', 'year', 'homeless_persons', 'sqmi', 'homeless_per_sqmi'])
    print('Tracts in Hollywood with the least homeless per square mile:')
    print(df)
def query10():
    """Print 5 apartments in the tracts with the fewest homeless per square mile (2018)."""
    cur = check_db()
    cur.execute('SELECT apartment.id, apartment.bedrooms, apartment.price, neighborhood.fips_hood, neighborhood.neighborhood, neighborhood.year, round((neighborhood.homeless_persons/neighborhood.sqmi),0) homeless_per_sqmi '
                'FROM apartment '
                'LEFT JOIN neighborhood on apartment.tract_id = neighborhood.fips_hood '
                'WHERE neighborhood.year = 2018 AND homeless_persons > 0 AND sqmi != "None" '
                'ORDER BY homeless_per_sqmi asc '
                'LIMIT 5')
    df = pd.DataFrame(cur.fetchall(),
                      columns=['apt_id', 'bedrooms', 'price', 'tract', 'neighborhood', 'year', 'homeless_per_sqmi'])
    print('Apartments in tracts with the least homeless per square mile:')
    print(df)
137047 | from keeks.binary_strategies.base import BaseStrategy
__author__ = 'willmcginnis'
class NaiveStrategy(BaseStrategy):
    """All-or-nothing bettor.

    Stakes the full bankroll (+1.0 or -1.0) on whichever side of the bet has
    a positive expected value net of transaction costs; otherwise sits out
    (0.0).
    """

    def __init__(self, payoff, loss, transaction_cost):
        """
        :param payoff: amount won when the bet succeeds
        :param loss: amount lost when the bet fails
        :param transaction_cost: fixed cost paid per bet placed
        """
        self.payoff = payoff
        self.loss = loss
        self.transaction_cost = transaction_cost

    def evaluate(self, probability):
        """Return +1.0, -1.0 or 0.0 given the probability the event occurs."""
        win = probability
        lose = 1 - probability
        # Expected value of backing the event ...
        expected_for = self.payoff * win - self.loss * lose - self.transaction_cost
        # ... and of backing against it.
        expected_against = self.payoff * lose - self.loss * win - self.transaction_cost
        if expected_for > 0:
            return 1.0
        if expected_against > 0:
            return -1.0
        return 0.0
| StarcoderdataPython |
3247162 | import heapq
class KthLargest(object):
    """Streaming kth-largest tracker backed by a size-k min-heap.

    ``add`` returns None until k values have been seen; afterwards it returns
    the current kth largest value (the heap root) after every insertion.
    """

    def __init__(self, k, nums):
        """
        :type k: int
        :type nums: List[int]
        """
        self.heap = []
        # self.k counts down the remaining free slots until the heap is full.
        self.k = k
        for num in nums:
            self.add(num)

    def add(self, val):
        """
        :type val: int
        :rtype: int
        """
        if self.k == 0:
            # Heap already holds the k largest seen; only values beating the
            # current kth largest displace the root.
            if self.heap[0] <= val:
                heapq.heapreplace(self.heap, val)
            return self.heap[0]
        # Still filling the heap.
        self.k = self.k - 1
        heapq.heappush(self.heap, val)
        return self.heap[0] if self.k == 0 else None
# Quick manual check of KthLargest (LeetCode 703).
k = 3
arr = [4, 5, 8, 2]
c = KthLargest(k, arr)  # heap now holds the 3 largest seen: [4, 5, 8]
c.add(3)   # kth largest stays 4 (3 does not beat the heap root)
c.add(5)   # kth largest becomes 5
c.add(10)  # kth largest stays 5
| StarcoderdataPython |
def old_test_single_annot_distinctiveness_params(ibs, aid):
    r"""
    Grid-search distinctiveness/coverage parameters for one annotation and
    score every candidate coverage mask against a hand-painted ground-truth
    distinctiveness mask, showing the results interactively.

    CommandLine:
        python -m ibeis.model.hots.distinctiveness_normalizer --test-old_test_single_annot_distinctiveness_params --show
        python -m ibeis.model.hots.distinctiveness_normalizer --test-old_test_single_annot_distinctiveness_params --show --db GZ_ALL

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.model.hots.distinctiveness_normalizer import *  # NOQA
        >>> import plottool as pt
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb(ut.get_argval('--db', type_=str, default='PZ_MTEST'))
        >>> aid = ut.get_argval('--aid', type_=int, default=1)
        >>> # execute function
        >>> old_test_single_annot_distinctiveness_params(ibs, aid)
        >>> pt.show_if_requested()
    """
    ####
    # TODO: Also paramatarize the downweighting based on the keypoint size
    ####
    # HACK IN ABILITY TO SET CONFIG
    from ibeis.dev.main_commands import postload_commands
    postload_commands(ibs, None)
    from vtool import coverage_image
    import plottool as pt
    from plottool import interact_impaint
    #cfglbl_list = cfgdict_list
    #ut.all_dict_combinations_lbls(varied_dict)
    # Get info to find distinctivness of
    species_text = ibs.get_annot_species(aid)
    vecs = ibs.get_annot_vecs(aid)
    kpts = ibs.get_annot_kpts(aid)
    print(kpts)
    chip = ibs.get_annot_chips(aid)
    chipsize = ibs.get_annot_chipsizes(aid)
    # Paramater space to search
    # TODO: use slicing to control the params being varied
    # Use GridSearch class to modify paramaters as you go.
    gauss_patch_varydict = {
        'gauss_shape': [(7, 7), (19, 19), (41, 41), (5, 5), (3, 3)],
        'gauss_sigma_frac': [.2, .5, .7, .95],
    }
    cov_blur_varydict = {
        'cov_blur_on': [True, False],
        'cov_blur_ksize': [(5, 5,), (7, 7), (17, 17)],
        'cov_blur_sigma': [5.0, 1.2],
    }
    dstncvs_varydict = {
        'dcvs_power': [.01, .1, .5, 1.0],
        'dcvs_clip_max': [.05, .1, .2, .5],
        'dcvs_K': [2, 3, 5],
    }
    size_penalty_varydict = {
        'remove_affine_information': [False, True],
        'constant_scaling': [False, True],
        'size_penalty_on': [True, False],
        'size_penalty_power': [.5, .1, 1.0],
        'size_penalty_scale': [.1, 1.0],
    }
    keyval_iter = ut.iflatten([
        dstncvs_varydict.items(),
        gauss_patch_varydict.items(),
        cov_blur_varydict.items(),
        size_penalty_varydict.items(),
    ])
    # Dont vary most paramaters, specify how much of their list can be used
    # NOTE(review): 'dcvs_clip_max' appears twice; the second entry wins.
    param_slice_dict = {
        'dcvs_power': slice(0, 2),
        'dcvs_K': slice(0, 2),
        'dcvs_clip_max': slice(0, 2),
        'dcvs_clip_max': slice(0, 2),
        #'gauss_shape': slice(0, 3),
        'gauss_sigma_frac': slice(0, 2),
        'remove_affine_information': slice(0, 2),
        'constant_scaling': slice(0, 2),
        'size_penalty_on': slice(0, 2),
        #'cov_blur_on': slice(0, 2),
        #'cov_blur_ksize': slice(0, 2),
        #'cov_blur_sigma': slice(0, 1),
        #'size_penalty_power': slice(0, 2),
        #'size_penalty_scale': slice(0, 2),
    }
    # Parameters absent from param_slice_dict keep only their first value.
    varied_dict = {
        key: val[param_slice_dict.get(key, slice(0, 1))]
        for key, val in keyval_iter
    }
    def consmonitor_config(cfg):
        """ encode what makes a configuration feasible """
        if cfg['cov_blur_on'] is False:
            cfg['cov_blur_ksize'] = None
            cfg['cov_blur_sigma'] = None
        if cfg['constant_scaling'] is True:
            cfg['remove_affine_information'] = True
            cfg['size_penalty_on'] = False
        if cfg['remove_affine_information'] is True:
            cfg['gauss_shape'] = (41, 41)
        if cfg['size_penalty_on'] is False:
            cfg['size_penalty_power'] = None
            cfg['size_penalty_scale'] = None
    print('Varied Dict: ')
    print(ut.dict_str(varied_dict))
    cfgdict_list, cfglbl_list = ut.make_constrained_cfg_and_lbl_list(varied_dict, consmonitor_config)
    # Get groundtruthish distinctivness map
    # for objective function
    # Paint-label encoding used by the interactive ground-truth mask.
    GT_IS_DSTNCVS = 255
    GT_NOT_DSTNCVS = 100
    GT_UNKNOWN = 0
    label_colors = [GT_IS_DSTNCVS, GT_NOT_DSTNCVS, GT_UNKNOWN]
    gtmask = interact_impaint.cached_impaint(chip, 'dstncvnss',
                                             label_colors=label_colors,
                                             aug=True, refine=ut.get_argflag('--refine'))
    true_dstncvs_mask = gtmask == GT_IS_DSTNCVS
    false_dstncvs_mask = gtmask == GT_NOT_DSTNCVS
    true_dstncvs_mask_sum = true_dstncvs_mask.sum()
    false_dstncvs_mask_sum = false_dstncvs_mask.sum()
    def distinctiveness_objective_function(dstncvs_mask):
        # Reward overlap with painted-distinctive pixels, penalize overlap
        # with painted-non-distinctive pixels.
        true_mask = true_dstncvs_mask * dstncvs_mask
        false_mask = false_dstncvs_mask * dstncvs_mask
        true_score = true_mask.sum() / true_dstncvs_mask_sum
        false_score = false_mask.sum() / false_dstncvs_mask_sum
        score = true_score * (1 - false_score)
        return score
    # Load distinctivness normalizer
    with ut.Timer('Loading Distinctivness Normalizer for %s' % (species_text)):
        dstcvnss_normer = request_species_distinctiveness_normalizer(species_text)
    # Get distinctivness over all params
    dstncvs_list = [dstcvnss_normer.get_distinctiveness(vecs, **cfgdict)
                    for cfgdict in ut.ProgressIter(cfgdict_list, lbl='get dstcvns')]
    # Then compute the distinctinvess coverage map
    #gauss_shape = kwargs.get('gauss_shape', (19, 19))
    #sigma_frac = kwargs.get('sigma_frac', .3)
    dstncvs_mask_list = [
        coverage_image.make_coverage_mask(
            kpts, chipsize, fx2_score=dstncvs, mode='max', return_patch=False, **cfg)
        for cfg, dstncvs in ut.ProgressIter(zip(cfgdict_list, dstncvs_list), lbl='Warping Image')
    ]
    score_list = [distinctiveness_objective_function(dstncvs_mask) for dstncvs_mask in dstncvs_mask_list]
    fnum = 1
    def show_covimg_result(img, fnum=None, pnum=None):
        pt.imshow(255 * img, fnum=fnum, pnum=pnum)
    ut.interact_gridsearch_result_images(
        show_covimg_result, cfgdict_list, cfglbl_list, dstncvs_mask_list,
        score_list=score_list, fnum=fnum, figtitle='dstncvs gridsearch')
    # Show subcomponents of grid search
    gauss_patch_cfgdict_list, gauss_patch_cfglbl_list = ut.get_cfgdict_lbl_list_subset(cfgdict_list, gauss_patch_varydict)
    patch_list = [coverage_image.get_gaussian_weight_patch(**cfgdict)
                  for cfgdict in ut.ProgressIter(gauss_patch_cfgdict_list, lbl='patch cfg')]
    ut.interact_gridsearch_result_images(
        show_covimg_result, gauss_patch_cfgdict_list, gauss_patch_cfglbl_list,
        patch_list, fnum=fnum + 1, figtitle='gaussian patches')
    patch = patch_list[0]
    # Show the first mask in more depth
    dstncvs = dstncvs_list[0]
    dstncvs_mask = dstncvs_mask_list[0]
    coverage_image.show_coverage_map(chip, dstncvs_mask, patch, kpts, fnum=fnum + 2, ell_alpha=.2, show_mask_kpts=False)
    pt.imshow(gtmask, fnum=fnum + 3, pnum=(1, 2, 1), title='ground truth distinctiveness')
    pt.imshow(chip, fnum=fnum + 3, pnum=(1, 2, 2))
    pt.present()
#pt.iup()
#ut.print_resource_usage()
#pt.set_figtitle(mode)
#pass
#def test_example():
# import scipy.linalg as spl
# M = np.array([
# [1.0, 0.6, 0. , 0. , 0. ],
# [0.6, 1.0, 0.5, 0.2, 0. ],
# [0. , 0.5, 1.0, 0. , 0. ],
# [0. , 0.2, 0. , 1.0, 0.8],
# [0. , 0. , 0. , 0.8, 1.0],
# ])
# M_ = M / M.sum(axis=0)[:, None]
# #eigvals, eigvecs = np.linalg.eigh(M_)
# #, left=True, right=False)
# eigvals, eigvecs = spl.eig(M_, left=True, right=False)
# index = np.where(np.isclose(eigvals, 1))[0]
# pi = stationary_vector = eigvecs.T[index]
# pi_test = pi.dot(M_)
# pi / pi.sum()
# print(pi / np.linalg.norm(pi))
# print(pi_test / np.linalg.norm(pi_test))
# M = np.array([
# [1.0, 0.6],
# [0.6, 1.0],
# ])
# M_ = M / M.sum(axis=0)[:, None]
# #eigvals, eigvecs = np.linalg.eigh(M_)
# #, left=True, right=False)
# eigvals, eigvecs = spl.eig(M_, left=True, right=False)
# index = np.where(np.isclose(eigvals, 1))[0]
# pi = stationary_vector = eigvecs.T[index]
# pi_test = pi.dot(M_)
# pi / pi.sum()
# print(pi / np.linalg.norm(pi))
# print(pi_test / np.linalg.norm(pi_test))
# #pi = pi / pi.sum()
| StarcoderdataPython |
class ExportError(Exception):
    """Raised when an export operation cannot be completed."""
| StarcoderdataPython |
4821698 | <reponame>uninassau-2020-2/proj-grupo5
from app.models.DAO import DAOFornecedorPJ
import pymysql
from app import app
from config import mysql
from flask import jsonify
from flask import flash, request
from app.models.classes_basicas.FornecedorPJ import FornecedorPJ
def add_fornecedorpj(f):
    """Persist a new FornecedorPJ via the DAO; prints the error and returns None on failure."""
    try:
        created = DAOFornecedorPJ.add_fornecedorpj(f)
    except Exception as err:
        print(err)
    else:
        return created
def listarFornecedorespj():
    """List every FornecedorPJ via the DAO; prints the error and returns None on failure."""
    try:
        fornecedores = DAOFornecedorPJ.listarFornecedorespj()
    except Exception as err:
        print(err)
    else:
        return fornecedores
def getFornecedorPJById(id):
    """Fetch one FornecedorPJ by id via the DAO; prints the error and returns None on failure."""
    try:
        fornecedor = DAOFornecedorPJ.getFornecedorPJById(id)
    except Exception as err:
        print(err)
    else:
        return fornecedor
def update_fornecedorpj(f):
    """Update an existing FornecedorPJ via the DAO; prints the error and returns None on failure."""
    try:
        updated = DAOFornecedorPJ.update_fornecedorpj(f)
    except Exception as err:
        print(err)
    else:
        return updated
def delete_fornecedorpj(id):
    """Delete a FornecedorPJ by id via the DAO; prints the error and returns None on failure."""
    try:
        deleted = DAOFornecedorPJ.delete_fornecedorpj(id)
    except Exception as err:
        print(err)
    else:
        return deleted
| StarcoderdataPython |
3222466 | <reponame>DogukanKundum/Classification-tweets<filename>test10.py<gh_stars>1-10
import sys
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
pd.options.mode.chained_assignment = None
import numpy as np
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
import gensim
from gensim.models.word2vec import Word2Vec # the word2vec model gensim class
LabeledSentence = gensim.models.doc2vec.LabeledSentence # we'll talk about this down below
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
from nltk.tokenize import TweetTokenizer # a tweet tokenizer from nltk.
tokenizer = TweetTokenizer()
# sklearn
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
# zemberek: start a JVM via JPype and load the Zemberek Turkish NLP library
import jpype
jpype.startJVM(jpype.getDefaultJVMPath(),
               "-Djava.class.path=zemberek_jar/zemberek-tum-2.0.jar", "-ea")
# Prepare the class needed to analyse text as Turkey Turkish
Tr = jpype.JClass("net.zemberek.tr.yapi.TurkiyeTurkcesi")
# Instantiate the language object
tr = Tr()
# Load the Zemberek class
Zemberek = jpype.JClass("net.zemberek.erisim.Zemberek")
# Instantiate the Zemberek morphological analyser
zemberek = Zemberek(tr)
def ingest():
    """Load the labelled tweet CSV into a DataFrame.

    Drops rows with a missing TWEETS column and coerces the RESULT label to
    int. The file uses '•' as the field separator and no quoting.
    """
    data = pd.read_csv("data/tweets_turktelekom_result.csv", header=0,
                       delimiter="•", quoting=3, encoding='utf8')
    # Single notnull filter (the original applied the same filter twice).
    data = data[data.TWEETS.notnull()]
    data['RESULT'] = data['RESULT'].map(int)
    print('dataset loaded with shape', data.shape)
    return data
data = ingest()  # load the labelled tweet dataframe once at module import
def tokenize(tweet):
    """Lower-case and tokenize a tweet, dropping mentions, hashtags, URLs and
    bare punctuation tokens.

    Returns the list of kept tokens, or the sentinel string 'NC' (not
    classified) when tokenization fails for any reason (e.g. a non-string
    tweet).
    """
    # Tokens beginning with any of these prefixes are discarded entirely.
    drop_prefixes = ('@', '#', 'http', '.', '(', ')')
    # Stand-alone punctuation tokens carrying no signal.
    drop_tokens = {',', '?', '!', ':', ';', '"', "'"}
    try:
        tokens = tokenizer.tokenize(tweet.lower())
        # Single pass instead of nine sequential list rebuilds.
        return [x for x in tokens
                if not x.startswith(drop_prefixes) and x not in drop_tokens]
    except:
        print("Unexpected error:", sys.exc_info()[0])
        return 'NC'
def tokenizeWithZemberek(tweet):
    """Tokenize like tokenize() and additionally stem each token with Zemberek.

    Tokens Zemberek can analyse are replaced by their root (kok); tokens it
    cannot analyse are kept verbatim; whitespace-only tokens are dropped.
    Returns 'NC' when anything fails.
    """
    drop_prefixes = ('@', '#', 'http', '.', '(', ')')
    drop_tokens = {',', '?', '!', ':', ';', '"', "'"}
    try:
        # Single filtering pass instead of nine sequential list rebuilds.
        tokens = [x for x in tokenizer.tokenize(tweet.lower())
                  if not x.startswith(drop_prefixes) and x not in drop_tokens]
        zemberekTokens = []
        for token in tokens:
            if token.strip():  # original compared strip() > '' -- same test
                analyses = zemberek.kelimeCozumle(token)
                if analyses:
                    # First analysis wins; take its root form.
                    zemberekTokens.append(analyses[0].kok().icerik())
                else:
                    zemberekTokens.append(token)
        return zemberekTokens
    except:
        print("Unexpected error:", sys.exc_info()[0])
        return 'NC'
def postprocess(data, n=1000000):
    """Tokenize (plain and Zemberek) the first *n* tweets and drop failures.

    Adds 'tokens' and 'zemberekTokens' columns, removes rows whose plain
    tokenization failed ('NC'), and resets the index.
    """
    subset = data.head(n)
    # progress_map is tqdm's map variant with a progress bar.
    subset['tokens'] = subset['TWEETS'].progress_map(tokenize)
    subset['zemberekTokens'] = subset['TWEETS'].progress_map(tokenizeWithZemberek)
    subset = subset[subset.tokens != 'NC']
    subset.reset_index(inplace=True)
    subset.drop('index', inplace=True, axis=1)
    return subset
data = postprocess(data)
# Two independent random train/test splits: one over the raw token lists and
# one over the Zemberek-stemmed token lists.
# NOTE(review): because the splits are drawn independently, y_test and
# y_test_zemberek index different rows -- verify before mixing them.
x_train, x_test, y_train, y_test = train_test_split(np.array(data.tokens),
                                                    np.array(data.RESULT), test_size=0.2)
x_train_zemberek, x_test_zemberek, y_train_zemberek, y_test_zemberek = train_test_split(np.array(data.zemberekTokens),
                                                                                        np.array(data.RESULT),
                                                                                        test_size=0.2)
def labelizeTweets(tweets, label_type):
    """Wrap every tweet in a gensim LabeledSentence tagged '<label_type>_<i>'."""
    return [LabeledSentence(words, ['%s_%s' % (label_type, idx)])
            for idx, words in tqdm(enumerate(tweets))]
# Attach LabeledSentence tags (TRAIN_i / TEST_i) to every tweet set.
x_train = labelizeTweets(x_train, 'TRAIN')
x_test = labelizeTweets(x_test, 'TEST')
x_train_zemberek = labelizeTweets(x_train_zemberek, 'TRAIN')
x_test_zemberek = labelizeTweets(x_test_zemberek, 'TEST')
# Candidate embedding sizes (only n_dim = 75 is actually used below).
n_dims = [50, 75, 100, 125, 150, 175, 200, 225, 250, 275, 300]
n_dim = 75

def _train_w2v(corpus, use_skipgram):
    """Build and train one Word2Vec model (CBOW when sg=0, skip-gram when sg=1)
    on the given labelized corpus. Replaces four copy-pasted training stanzas."""
    model = Word2Vec(size=n_dim, min_count=3, hs=1, window=7, iter=75, sg=use_skipgram)
    sentences = [x.words for x in tqdm(corpus)]
    model.build_vocab(sentences)
    model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
    return model

tweet_w2v = _train_w2v(x_train, 0)
tweet_w2v_zemberek = _train_w2v(x_train_zemberek, 0)
tweet_w2v_sg = _train_w2v(x_train, 1)
tweet_w2v_zemberek_sg = _train_w2v(x_train_zemberek, 1)

print('building tf-idf matrix ...')
# analyzer is identity because the corpus is already tokenized.
vectorizer = TfidfVectorizer(analyzer=lambda x: x, min_df=10)
matrix = vectorizer.fit_transform([x.words for x in x_train])
tfidf = dict(zip(vectorizer.get_feature_names(), vectorizer.idf_))
print('vocab size :', len(tfidf))
# Corpus-wide and per-document token frequency dictionaries used as
# "helmotz" weights, for the plain and the Zemberek-stemmed token streams.
helmotzDic = {}
helmotzZemberekDic = {}
helmotzDocumentDics = []
helmotzZemberekDocumentDics = []

def _count_tokens(tokens, corpus_counts):
    """Count tokens of one document, folding them into *corpus_counts*;
    return the per-document counts. Replaces four copy-pasted count loops."""
    doc_counts = {}
    for token in tokens:
        doc_counts[token] = doc_counts.get(token, 0) + 1
        corpus_counts[token] = corpus_counts.get(token, 0) + 1
    return doc_counts

for index, row in data.iterrows():
    helmotzDocumentDics.append(_count_tokens(row['tokens'], helmotzDic))
    helmotzZemberekDocumentDics.append(_count_tokens(row['zemberekTokens'], helmotzZemberekDic))
# In[11]:
print(helmotzDic)
def buildWordVector(tokens, size, multiplyBy, isZemberek, isSg):
    """Average the word vectors of *tokens* into a (1, size) array.

    Model selection: isZemberek picks the Zemberek-trained models, isSg picks
    the skip-gram variant. When multiplyBy == "helmotz" each vector is scaled
    by the token's corpus frequency from the matching helmotz dictionary.
    Tokens missing from the model or weight dictionary are skipped (KeyError).

    BUGFIX vs the original:
      * the non-helmotz / non-zemberek branch indexed tweet_w2v_zemberek_sg
        and tweet_w2v_sg instead of tweet_w2v_sg and tweet_w2v;
      * the helmotz + zemberek branch weighted by tfidf (built from the
        non-stemmed corpus) instead of helmotzZemberekDic, which was built
        for exactly this purpose and never used.
    """
    if isZemberek:
        model = tweet_w2v_zemberek_sg if isSg else tweet_w2v_zemberek
        weight_dic = helmotzZemberekDic
    else:
        model = tweet_w2v_sg if isSg else tweet_w2v
        weight_dic = helmotzDic
    vec = np.zeros(size).reshape((1, size))
    count = 0.
    for word in tokens:
        try:
            if multiplyBy == "helmotz":
                vec += model[word].reshape((1, size)) * weight_dic[word]
            else:
                vec += model[word].reshape((1, size))
            count += 1.
        except KeyError:  # token absent from model/weights: skip it
            continue
    if count != 0:
        vec /= count
    return vec
# In[28]:
def _embed_set(labeled_tweets, weighting, use_zemberek, use_sg):
    """Average-embed every tweet of a set with buildWordVector and
    standard-scale the resulting matrix. Replaces 16 copy-pasted stanzas."""
    vecs = np.concatenate([buildWordVector(z, n_dim, weighting, use_zemberek, use_sg)
                           for z in tqdm(map(lambda x: x.words, labeled_tweets))])
    return scale(vecs)

# without helmotz
train_vecs_w2v = _embed_set(x_train, "none", False, False)
test_vecs_w2v = _embed_set(x_test, "none", False, False)
# without helmotz but with zemberek
train_vecs_w2v_zemberek = _embed_set(x_train_zemberek, "none", True, False)
test_vecs_w2v_zemberek = _embed_set(x_test_zemberek, "none", True, False)
# with helmotz
train_vecs_w2v_helmotz = _embed_set(x_train, "helmotz", False, False)
test_vecs_w2v_helmotz = _embed_set(x_test, "helmotz", False, False)
# with helmotz and zemberek
train_vecs_w2v_zemberek_helmotz = _embed_set(x_train_zemberek, "helmotz", True, False)
test_vecs_w2v_zemberek_helmotz = _embed_set(x_test_zemberek, "helmotz", True, False)
# without helmotz, skip-gram
train_vecs_w2v_sg = _embed_set(x_train, "none", False, True)
test_vecs_w2v_sg = _embed_set(x_test, "none", False, True)
# without helmotz but with zemberek, skip-gram
train_vecs_w2v_zemberek_sg = _embed_set(x_train_zemberek, "none", True, True)
test_vecs_w2v_zemberek_sg = _embed_set(x_test_zemberek, "none", True, True)
# with helmotz, skip-gram
train_vecs_w2v_helmotz_sg = _embed_set(x_train, "helmotz", False, True)
test_vecs_w2v_helmotz_sg = _embed_set(x_test, "helmotz", False, True)
# with helmotz, zemberek and skip-gram
train_vecs_w2v_zemberek_helmotz_sg = _embed_set(x_train_zemberek, "helmotz", True, True)
test_vecs_w2v_zemberek_helmotz_sg = _embed_set(x_test_zemberek, "helmotz", True, True)
# In[29]:
# Candidate classifiers and the three binary experiment axes.
classifiers = ["SVM", "NaiveBayes", "DecisionTree", "MLP", "KNN"]
helmotz = [0, 1]  # without weighting, helmotz
zembereks = [0, 1]
sgs = [0, 1]
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

_classifier_factories = {
    "SVM": svm.SVC,
    "NaiveBayes": GaussianNB,
    "DecisionTree": DecisionTreeClassifier,
    "MLP": MLPClassifier,
    "KNN": KNeighborsClassifier,
}
# (sg, helmotz, zemberek) -> (train vectors, test vectors, train labels, test labels).
# BUGFIX: the zemberek experiments previously scored predictions against
# y_test, which belongs to the *non*-zemberek train/test split; they now use
# y_test_zemberek, the labels matching the zemberek test vectors.
_vector_sets = {
    (0, 0, 0): (train_vecs_w2v, test_vecs_w2v, y_train, y_test),
    (0, 0, 1): (train_vecs_w2v_zemberek, test_vecs_w2v_zemberek, y_train_zemberek, y_test_zemberek),
    (0, 1, 0): (train_vecs_w2v_helmotz, test_vecs_w2v_helmotz, y_train, y_test),
    (0, 1, 1): (train_vecs_w2v_zemberek_helmotz, test_vecs_w2v_zemberek_helmotz, y_train_zemberek, y_test_zemberek),
    (1, 0, 0): (train_vecs_w2v_sg, test_vecs_w2v_sg, y_train, y_test),
    (1, 0, 1): (train_vecs_w2v_zemberek_sg, test_vecs_w2v_zemberek_sg, y_train_zemberek, y_test_zemberek),
    (1, 1, 0): (train_vecs_w2v_helmotz_sg, test_vecs_w2v_helmotz_sg, y_train, y_test),
    (1, 1, 1): (train_vecs_w2v_zemberek_helmotz_sg, test_vecs_w2v_zemberek_helmotz_sg, y_train_zemberek, y_test_zemberek),
}
for sg in sgs:
    for hm in helmotz:
        for zemberek in zembereks:
            # One table lookup replaces the 8-way duplicated if/elif ladder.
            train_X, test_X, train_y, test_y = _vector_sets[(sg, hm, zemberek)]
            for classifier in classifiers:
                cla = _classifier_factories[classifier]()
                cla.fit(train_X, train_y.astype('int'))
                y_pred = cla.predict(test_X)
                score = accuracy_score(test_y.astype('int'), y_pred)
                reports = classification_report(test_y.astype('int'), y_pred)
                matrix = confusion_matrix(test_y.astype('int'), y_pred)
                print(classifier, " (helmotz=", hm, ") ( sg =", sg, " ) ( zemberek=", zemberek, "):", score)
                print(classifier, " (helmotz=", hm, ") ( sg =", sg, " ) ( zemberek=", zemberek, "):", reports)
                print(classifier, " (helmotz=", hm, ") ( sg =", sg, " ) ( zemberek=", zemberek, "):", matrix)
                print("--------------------------")
| StarcoderdataPython |
1603722 | #!/usr/bin/env python3
##########################################
## EKF source switch ##
##########################################
from mycelium.components import Base, Connector
class Switch(Base):
    """Common base for MAVLink-driven switches: resolves connection settings,
    falling back to the configured defaults when none are given."""

    def __init__(self,
                 connection_string=None,    # Port set in mavproxy/mavlink to send MAVLINK messages
                 connection_baudrate=None):
        super().__init__()
        if connection_string is None:
            connection_string = self.cfg.mavlink_msg_direct
        if connection_baudrate is None:
            connection_baudrate = self.cfg.connection_baudrate
        self.connection_string = connection_string
        self.connection_baudrate = connection_baudrate
class EKFSwitch(Switch):
    """Switches the EKF3 position source between GPS-only, vision-only (vicon)
    and fused operation, via an EK3_SRC_OPTIONS parameter write plus an RC
    channel PWM override for the single-source modes."""

    EKF_GPS_ONLY = 1
    EKF_VICON_ONLY = 2
    EKF_FUSE_SOURCES = 3
    sources = [EKF_GPS_ONLY, EKF_VICON_ONLY, EKF_FUSE_SOURCES]

    # PWM driven onto the source-select RC channel for each single source.
    rc_pwm = {
        EKF_GPS_ONLY: 1000,
        EKF_VICON_ONLY: 1500
    }

    def __init__(self,
                 connection_string=None,   # Port set in mavproxy/mavlink to send MAVLINK messages
                 connection_baudrate=None,
                 rc_channel_id=None):
        # rc_channel_id: RC channel for the PWM source-select input,
        # see https://github.com/ArduPilot/ardupilot/pull/14803
        super().__init__(connection_string, connection_baudrate)
        if rc_channel_id is None:
            rc_channel_id = self.cfg.rc_channel_id
        self.rc_channel_id = int(rc_channel_id)

    def set_ekf_source(self, source, timeout=10):
        """Select the EKF source, retrying the RC override up to *timeout* times."""
        connector = Connector(self.connection_string, self.connection_baudrate, 1, 0)
        if source == self.EKF_FUSE_SOURCES:
            # Fusing all sources is a pure parameter change -- no RC override.
            connector.set_param('EK3_SRC_OPTIONS', 1)
            self.logger.log_info("Set to fuse EKF sources")
            connector.disconnect()
            return
        pwm = self.rc_pwm[source]
        connector.set_param('EK3_SRC_OPTIONS', 0)
        for _ in range(timeout):
            connector.set_rc_channel_pwm(self.rc_channel_id, pwm)
            connector.send_heartbeat()
            reply = connector.get_callbacks(['RC_CHANNELS'], 3)
            if reply is not None and reply.chan9_raw == pwm:
                self.logger.log_debug("RC9 PWM set to %s" % str(pwm))
                self.logger.log_info("EKF source set")
                break
        else:
            # Loop exhausted without confirmation.
            self.logger.log_debug("RC9 PWM not set or no response")
            self.logger.log_info("No response, source may not be set correctly")
        connector.disconnect()
class RelaySwitch(Switch):
    """Drives a flight-controller relay pin on/off over MAVLink."""

    def __init__(self,
                 connection_string=None,   # Port set in mavproxy/mavlink to send MAVLINK messages
                 connection_baudrate=None,
                 relay_pin=None):
        super().__init__(connection_string, connection_baudrate)
        if relay_pin is None:
            relay_pin = self.cfg.relay_pin['led']
        self.relay_pin = relay_pin

    def _set(self, state):
        """Open a short-lived connection and drive the relay to *state*."""
        connector = Connector(self.connection_string, self.connection_baudrate, 1, 0)
        connector.set_relay(self.relay_pin, state)
        connector.disconnect()

    def on(self):
        self._set(True)

    def off(self):
        self._set(False)
class InitialModeSwitch(Switch):
    """Set the autopilot's INITIAL_MODE parameter (the mode used on boot).

    The numeric values are presumably ArduPilot Rover mode numbers --
    TODO confirm against the target firmware's mode table.
    """
    MANUAL = 0
    HOLD = 4
    LOITER = 5
    AUTO = 10
    modes = [MANUAL, HOLD, LOITER, AUTO]

    def __init__(self,
                 connection_string=None,  # Port set in mavproxy/mavlink to send MAVLINK messages
                 connection_baudrate=None):
        """Set up the switch; unset arguments fall back to config values."""
        super().__init__(connection_string, connection_baudrate)

    def set_mode(self, mode):
        """Write ``mode`` to the INITIAL_MODE parameter.

        Parameters
        ----------
        mode : int
            One of ``MANUAL``, ``HOLD``, ``LOITER`` or ``AUTO``.

        Raises
        ------
        ValueError
            If ``mode`` is not one of the supported modes.
        """
        if mode not in self.modes:
            # ValueError is more precise than a bare Exception and is still
            # caught by any existing ``except Exception`` handlers.
            raise ValueError("Invalid mission mode")
        connector = Connector(self.connection_string, self.connection_baudrate, 1, 0)
        connector.set_param('INITIAL_MODE', mode)
        connector.disconnect()
| StarcoderdataPython |
import sys
# Restrict module lookup to the current directory; this must run before the
# game imports below so they resolve against the local tree.
# NOTE(review): this REPLACES sys.path instead of prepending to it, which
# drops the standard library search locations -- confirm this is intended.
sys.path = [
    '']
import Phase1  # presumably performs engine/phase setup on import -- TODO confirm
from pirates.launcher.PiratesQuickLauncher import PiratesQuickLauncher
# Instantiate the launcher and record that startup reached the end of the script.
launcher = PiratesQuickLauncher()
launcher.notify.info('Reached end of StartPiratesLauncher.py.')
| StarcoderdataPython |
# filename: 02.keras_MNIST_linear.py
from keras.models import Sequential
from keras.datasets import mnist
from keras.layers import Dense
from keras.utils import np_utils  # For transformation of one-hot-encoding.

## Settings in training phase.
batch_size = 1024  # Change batch size based on the capability of your resource.
num_classes = 10  # MNIST has ten digit classes (0-9).
epochs = 12

## Load MNIST dataset.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

## Flatten the 28x28 images into 784-vectors and scale pixels to [0, 1].
x_train = x_train.reshape(60000, 28*28).astype('float32')
x_test = x_test.reshape(10000, 28*28).astype('float32')
x_train = x_train/255
x_test = x_test/255

## Initialize model.
model = Sequential()

## Build model: two ReLU hidden layers followed by a softmax classifier.
model.add(Dense(units=512, input_dim=784, kernel_initializer='normal', activation='relu'))
# BUG FIX: the original also passed input_dim=512 to this layer; Keras only
# honors input_dim on the first layer, so it was misleading dead weight.
model.add(Dense(units=128, kernel_initializer='normal', activation='relu'))
model.add(Dense(units=10, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

## Show model summary
print(model.summary())

## Transfer ground truth to one-hot format. (e.g. 7 => 0000000100)
y_train = np_utils.to_categorical(y_train, num_classes=num_classes)
y_test = np_utils.to_categorical(y_test, num_classes=num_classes)

## Start training this model.
train_history = model.fit(x=x_train, y=y_train, epochs=epochs, batch_size=batch_size, verbose=2, validation_data=(x_test, y_test))

## Show results.
scores = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])

## Save configurations and weights of this model.
with open('02.MNIST_model_linear.config', 'w') as text_file:
    text_file.write(model.to_json())
model.save_weights('02.MNIST_model_linear.weights')
import requests
def ais() -> dict:
    """Fetch the SJTU login page and build request headers with its cookies.

    Returns a headers dict whose ``Cookie`` field holds the session cookies
    from the login page; returns ``None`` (implicitly) if the request fails.
    """
    url = 'https://i.sjtu.edu.cn/xtgl/login_slogin.html'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.3 Safari/605.1.15',
        'Content-Type': 'text/html;charset=utf-8', 'Cookie': ''
    }
    try:
        response = requests.get(url)
        # Serialize the response cookies into a single Cookie header value.
        cookie_pairs = response.cookies.items()
        headers['Cookie'] = ''.join(k + '=' + v + ';' for k, v in cookie_pairs)
        print(cookie_pairs)  # debug output, as in the original
        return headers
    except Exception as error:
        print('[ERROR/AIS]', error)
| StarcoderdataPython |
#!/usr/bin/env python
# encoding: utf-8
"""
Library of built-in probability functions.
These probability functions use `scipy.stats` at their core, but also
encapsulate *limits* so that they can return a ln prob of -infinity when
a sample is called outside those limits. This is useful for emcee sampling.
Note this is a recent refactoring from the old style, which used function
factories. Now we just create objects that, when called, give the
probability, and that have a sample() method to take random samples
from the finite probability domain.
"""
import numpy as np
import scipy.stats
from sedbot.photconv import sb_to_mass
class RandomVariable(object):
    """Base class for random variables wrapping a `scipy.stats` distribution.

    All superclasses must provide

    - ``self._rv``: the `scipy.stats` frozen random-variable instance.
    - ``self._limits``: (optional) ``(lower, upper)`` tuple bounding the
      values the variable can take; calls outside return ``-inf``.
    """

    def __init__(self):
        super(RandomVariable, self).__init__()
        self._limits = None

    @property
    def lower_limit(self):
        """Lower bound of the allowed domain."""
        return self._limits[0]

    @property
    def upper_limit(self):
        """Upper bound of the allowed domain."""
        return self._limits[1]

    def __call__(self, x):
        """Return the ln-probability of ``x`` (``-inf`` outside the limits)."""
        if not self._limits:
            return self._rv.logpdf(x)
        lo, hi = self._limits
        if lo <= x <= hi:
            return self._rv.logpdf(x)
        return -np.inf

    def sample(self, shape=None):
        """Draw random sample(s); ``shape`` requests an array of draws."""
        return self._rv.rvs() if not shape else self._rv.rvs(shape)
class LnUniform(RandomVariable):
    r"""Log of uniform probability.

    .. math::
        \ln p(x|x_1, x_2) = \ln \frac{1}{x_2 - x_1}

    Parameters
    ----------
    lower : float
        Lower bound of the uniform probability distribution.
    upper : float
        Upper bound of the uniform probability distribution.
    """

    def __init__(self, lower, upper):
        super(LnUniform, self).__init__()
        self._limits = (lower, upper)
        # scipy parameterizes the uniform by its left edge and width.
        width = upper - lower
        self._rv = scipy.stats.uniform(loc=lower, scale=width)
class LnUniformMass(LnUniform):
    """Log of uniform probability intended to be used as an uninformative
    prior on the log-mass given a range of log M/L.

    Parameters
    ----------
    logml_min : float
        Minimum log M/L value.
    logml_max : float
        Maximum log M/L value.
    sb : float
        Surface brightness, mag / arcsec^2.
    D_pc : float
        Distance in parsecs.
    area : float
        Area of pixel, in square arcsecs.
    msun : float
        Solar magnitude. With python-fsps this can be obtained using
        ``fsps.get_filter(band_name).msun_ab``.
    """
    def __init__(self, logml_min, logml_max, sb, D_pc, area, msun):
        # Convert the two M/L endpoints into mass bounds for the uniform prior.
        self._low_mass = sb_to_mass(sb, msun, logml_min, area, D_pc)
        self._high_mass = sb_to_mass(sb, msun, logml_max, area, D_pc)
        super(LnUniformMass, self).__init__(self._low_mass, self._high_mass)
    @property
    def lower_limit(self):
        # Overrides the base property (which reads self._limits); the cached
        # mass bound holds the same value passed to the superclass above.
        return self._low_mass
    @property
    def upper_limit(self):
        # See lower_limit: returns the cached upper mass bound.
        return self._high_mass
class LnNormal(RandomVariable):
    r"""Log of normal prior probability factory.

    .. math::
        \ln p(x|\mu, \sigma) = \ln \frac{1}{\sqrt{2 \pi \sigma^2}}
        e^{- \left( \frac{x - \mu}{2 \pi \sigma^2} \right)}

    Parameters
    ----------
    mu : float
        Mean of the Gaussian.
    sigma : float
        Standard deviation of the Gaussian.
    limits : (2,) tuple (optional)
        Hard lower and upper boundaries on the random variable.
    """

    def __init__(self, mu, sigma, limits=None):
        super(LnNormal, self).__init__()
        if limits is not None:
            self._limits = limits
        self._rv = scipy.stats.norm(loc=mu, scale=sigma)
def ln_uniform_factory(lower, upper):
    """Log of uniform prior probability factory (deprecated).

    Deprecated shim kept for backwards compatibility; construct
    :class:`LnUniform` directly instead.
    """
    return LnUniform(lower, upper)
def ln_normal_factory(mu, sigma, limits=None):
    """Log of normal prior probability factory (deprecated).

    Deprecated shim kept for backwards compatibility; construct
    :class:`LnNormal` directly instead.
    """
    return LnNormal(mu, sigma, limits=limits)
def ln_loguniform_factory(lower, upper):
    r"""Log of log-uniform prior probability factory.

    .. math::
        \ln p(x|x_1, x_2) = \ln \frac{1}{x \ln \left( x_1 / x_2 \right)}

    Parameters
    ----------
    lower : float
        Lower bound of the log-uniform probability distribution.
    upper : float
        Upper bound of the log-uniform probability distribution.

    Returns
    -------
    func : function
        A function that accepts a random variable and returns the log of the
        log-uniform probability of that value.
        Returns `-numpy.inf` if the RV is outside bounds.

    Raises
    ------
    ValueError
        If the normalization is not finite (e.g. ``lower == upper`` or
        bounds whose ratio is non-positive).
    """
    factor = 1. / np.log(upper / lower)
    # An ``assert`` here would be stripped under ``python -O``; raise an
    # explicit exception so invalid bounds always fail loudly.
    if not np.isfinite(factor):
        raise ValueError("log-uniform prior not finite")

    def func(x):
        """Log of log-uniform prior probability."""
        if lower <= x <= upper:
            return np.log(factor / x)
        return -np.inf
    return func
| StarcoderdataPython |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-activitylog
------------
Tests for `django-activitylog` models module.
"""
from django.test import TestCase
from django.utils import timezone
from activitylog.models import ActivityLog
class ActivityLogModelTests(TestCase):
    """Unit tests for the ActivityLog model."""

    def test_str(self):
        # str method formats the timestamp and truncates long log
        # messages to 100 characters.
        entry = ActivityLog.objects.create(
            log="This is a long log message with many many many many many "
                "many characters. 126 in total, in fact. It will be "
                "truncated to 100."
        )
        expected_truncation = (
            'This is a long log message with many many many '
            'many many many characters. 126 in total, in fact. '
            'It'
        )
        self.assertEqual(entry.log[:100], expected_truncation)
        self.assertEqual(len(expected_truncation), 100)
        timestamp = timezone.now().strftime('%Y-%m-%d %H:%M %Z')
        self.assertEqual(
            str(entry),
            '{} - {}'.format(timestamp, expected_truncation)
        )
| StarcoderdataPython |
# repository: bpow/CNVpytor
from setuptools import setup

# Long description for PyPI is taken from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Defines __version__ without importing the package (which would require
# its runtime dependencies at install time).
exec(open('cnvpytor/version.py').read())

setup(
    name='CNVpytor',
    version=__version__,
    author='<NAME>, <NAME>, <NAME>',
    author_email='<EMAIL>',
    packages=['cnvpytor'],
    package_dir={'cnvpytor': 'cnvpytor'},
    package_data={'cnvpytor': ['imgs/*.png', 'data/*']},
    description='Python extension of CNVnator',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/abyzovlab/CNVpytor',
    install_requires=[
        'gnureadline',
        # BUG FIX: a missing trailing comma previously fused this entry with
        # the next into the bogus requirement 'pathlib>=1.0requests>=2.0',
        # silently dropping requests; 'pathlib' was also listed twice.
        'pathlib>=1.0',
        'requests>=2.0',
        'pysam>=0.15',
        'numpy>=1.16',
        'scipy>=1.1',
        'matplotlib>=2.2',
        'h5py>=2.9',
        'xlsxwriter>=1.3',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent'
    ],
    entry_points={
        'console_scripts': [
            'cnvpytor = cnvpytor.__main__:main'
        ]
    })
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.