id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3296684 | <reponame>Croydon/pt-recap
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .requester import requester
from .calculation import calculation
from .templater import templater
def main(args):
    """Program entry point: fetch daily user counts for November 2018,
    compute summary statistics, and render them through the template.

    :param args: User arguments (currently unused).
    """
    start_date = "20181101"
    end_date = "20181130"

    daily_users = requester.get_daily_users(start_date=start_date, end_date=end_date)
    user_counts = requester.get_users_numbers_only(daily_users)

    # get statistics
    statistics = {
        "start_date": start_date,
        "end_date": end_date,
        "average": calculation.average(user_counts),
        "min": calculation.min(user_counts),
        "max": calculation.max(user_counts),
        "biggest_increase": calculation.biggest_increase(user_counts),
        "biggest_decrease": calculation.biggest_decrease(user_counts),
        "total_change": calculation.total_change(user_counts),
    }

    report = templater()
    report.load()
    report.fill(namespace=statistics)
    report.save()
| StarcoderdataPython |
3307881 | <filename>bbpyp/common/service/queue_service.py
from bbpyp.common.model.queue_type import QueueType
class QueueService:
    """Facade over a registry of named queues.

    Queues are created through an injected factory, stored in a named-item
    service, and every mutating operation refreshes a queue-length metric.
    """

    def __init__(self, queue_factory, named_item_service, metric_service):
        # queue_factory(queue_type) builds a fresh queue instance.
        self._queue_factory = queue_factory
        # Registry mapping queue names to queue instances.
        self._named_queue = named_item_service
        self._metric_service = metric_service

    def has_queue(self, queue_name):
        """True when a queue is already registered under queue_name."""
        return self._named_queue.has(queue_name)

    def create_queue(self, queue_name, queue_type=QueueType.FIFO):
        """Register a new queue under queue_name; no-op if it already exists."""
        if not self.has_queue(queue_name):
            self._named_queue.set(queue_name, self._queue_factory(queue_type))

    def push(self, queue_name, item):
        """Append item to the named queue and update the length metric."""
        queue = self._get_named_queue(queue_name, f"Unable to queue the item: {item}")
        queue.push(item)
        self._update_named_queue_length_metric(queue_name)

    def pop(self, queue_name):
        """Remove and return the next item; updates the length metric."""
        queue = self._get_named_queue(queue_name, f"Unable to pop the non existent queue.")
        popped = queue.pop()
        self._update_named_queue_length_metric(queue_name)
        return popped

    def peek(self, queue_name):
        """Return the next item without removing it."""
        queue = self._get_named_queue(queue_name, f"Unable to peek the non existent queue.")
        return queue.peek()

    def remove(self, queue_name, item):
        """Remove a specific item from the queue; updates the length metric."""
        failure = f"Unable to remove an item from the non existent queue."
        queue = self._get_named_queue(queue_name, failure)
        removed = queue.remove(item)
        self._update_named_queue_length_metric(queue_name)
        return removed

    def length(self, queue_name):
        """Current number of items in the named queue."""
        failure = f"Unable to get the length of the non existent queue."
        return self._get_named_queue(queue_name, failure).length

    @property
    def queue_names(self):
        """Names of all registered queues."""
        return self._named_queue.names

    def is_empty(self, queue_name):
        """True when the named queue holds no items."""
        failure = f"Unable to check if non existent queue is empty."
        return self._get_named_queue(queue_name, failure).is_empty

    def _get_named_queue(self, queue_name, failure_message):
        # Delegates existence validation to the named-item service.
        return self._named_queue.get_with_validation(
            queue_name, f"Named queue not found. {failure_message}")

    def _update_named_queue_length_metric(self, queue_name):
        # Record the post-operation queue length under "<name>.queue.length".
        self._metric_service.record_numeric_value(
            f"{queue_name}.queue.length", self.length(queue_name))
| StarcoderdataPython |
3200261 |
## Time Series Filters
from __future__ import print_function
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm

# Quarterly US macroeconomic data; re-indexed below as 1959Q1-2009Q3.
dta = sm.datasets.macrodata.load_pandas().data
index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))
print(index)
dta.index = index
# year/quarter are now encoded in the index, so drop the columns.
del dta['year']
del dta['quarter']
print(sm.datasets.macrodata.NOTE)
print(dta.head(10))
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
dta.realgdp.plot(ax=ax);
legend = ax.legend(loc = 'upper left');
# NOTE(review): `legend.prop.set_size` relies on an older matplotlib API —
# confirm it still works against the pinned matplotlib version.
legend.prop.set_size(20);
#### Hodrick-Prescott Filter
# The Hodrick-Prescott filter separates a time-series $y_t$ into a trend $\tau_t$ and a cyclical component $\zeta_t$
#
# $$y_t = \tau_t + \zeta_t$$
#
# The components are determined by minimizing the following quadratic loss function
#
# $$\min_{\\{ \tau_{t}\\} }\sum_{t}^{T}\zeta_{t}^{2}+\lambda\sum_{t=1}^{T}\left[\left(\tau_{t}-\tau_{t-1}\right)-\left(\tau_{t-1}-\tau_{t-2}\right)\right]^{2}$$
# hpfilter returns the pair (cycle, trend).
gdp_cycle, gdp_trend = sm.tsa.filters.hpfilter(dta.realgdp)
gdp_decomp = dta[['realgdp']]
gdp_decomp["cycle"] = gdp_cycle
gdp_decomp["trend"] = gdp_trend
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
gdp_decomp[["realgdp", "trend"]]["2000-03-31":].plot(ax=ax, fontsize=16);
legend = ax.get_legend()
legend.prop.set_size(20);
#### Baxter-King approximate band-pass filter: Inflation and Unemployment
##### Explore the hypothesis that inflation and unemployment are counter-cyclical.
# The Baxter-King filter is intended to explictly deal with the periodicty of the business cycle. By applying their band-pass filter to a series, they produce a new series that does not contain fluctuations at higher or lower than those of the business cycle. Specifically, the BK filter takes the form of a symmetric moving average
#
# $$y_{t}^{*}=\sum_{k=-K}^{k=K}a_ky_{t-k}$$
#
# where $a_{-k}=a_k$ and $\sum_{k=-k}^{K}a_k=0$ to eliminate any trend in the series and render it stationary if the series is I(1) or I(2).
#
# For completeness, the filter weights are determined as follows
#
# $$a_{j} = B_{j}+\theta\text{ for }j=0,\pm1,\pm2,\dots,\pm K$$
#
# $$B_{0} = \frac{\left(\omega_{2}-\omega_{1}\right)}{\pi}$$
# $$B_{j} = \frac{1}{\pi j}\left(\sin\left(\omega_{2}j\right)-\sin\left(\omega_{1}j\right)\right)\text{ for }j=0,\pm1,\pm2,\dots,\pm K$$
#
# where $\theta$ is a normalizing constant such that the weights sum to zero.
#
# $$\theta=\frac{-\sum_{j=-K^{K}b_{j}}}{2K+1}$$
#
# $$\omega_{1}=\frac{2\pi}{P_{H}}$$
#
# $$\omega_{2}=\frac{2\pi}{P_{L}}$$
#
# $P_L$ and $P_H$ are the periodicity of the low and high cut-off frequencies. Following Burns and Mitchell's work on US business cycles which suggests cycles last from 1.5 to 8 years, we use $P_L=6$ and $P_H=32$ by default.
bk_cycles = sm.tsa.filters.bkfilter(dta[["infl","unemp"]])
# * We lose K observations on both ends. It is suggested to use K=12 for quarterly data.
fig = plt.figure(figsize=(14,10))
ax = fig.add_subplot(111)
bk_cycles.plot(ax=ax, style=['r--', 'b-']);
#### Christiano-Fitzgerald approximate band-pass filter: Inflation and Unemployment
# The Christiano-Fitzgerald filter is a generalization of BK and can thus also be seen as weighted moving average. However, the CF filter is asymmetric about $t$ as well as using the entire series. The implementation of their filter involves the
# calculations of the weights in
#
# $$y_{t}^{*}=B_{0}y_{t}+B_{1}y_{t+1}+\dots+B_{T-1-t}y_{T-1}+\tilde B_{T-t}y_{T}+B_{1}y_{t-1}+\dots+B_{t-2}y_{2}+\tilde B_{t-1}y_{1}$$
#
# for $t=3,4,...,T-2$, where
#
# $$B_{j} = \frac{\sin(jb)-\sin(ja)}{\pi j},j\geq1$$
#
# $$B_{0} = \frac{b-a}{\pi},a=\frac{2\pi}{P_{u}},b=\frac{2\pi}{P_{L}}$$
#
# $\tilde B_{T-t}$ and $\tilde B_{t-1}$ are linear functions of the $B_{j}$'s, and the values for $t=1,2,T-1,$ and $T$ are also calculated in much the same way. $P_{U}$ and $P_{L}$ are as described above with the same interpretation.
# The CF filter is appropriate for series that may follow a random walk.
# ADF unit-root tests: first three returned values are (statistic, p-value, usedlag).
print(sm.tsa.stattools.adfuller(dta['unemp'])[:3])
print(sm.tsa.stattools.adfuller(dta['infl'])[:3])
cf_cycles, cf_trend = sm.tsa.filters.cffilter(dta[["infl","unemp"]])
print(cf_cycles.head(10))
fig = plt.figure(figsize=(14,10))
ax = fig.add_subplot(111)
cf_cycles.plot(ax=ax, style=['r--','b-']);
# Filtering assumes *a priori* that business cycles exist. Due to this assumption, many macroeconomic models seek to create models that match the shape of impulse response functions rather than replicating properties of filtered series. See VAR notebook.
| StarcoderdataPython |
1794286 | from django.apps import AppConfig
class SetkaEditorConfig(AppConfig):
    """Django application configuration for the ``setka_editor`` app."""

    # Full Python path to the application module.
    name = 'setka_editor'
| StarcoderdataPython |
3351362 | <reponame>nvloff/song_status
import sys
import os
import json
import codecs
# Script metadata constants read by the host chatbot application
# (presumably Streamlabs Chatbot — the Parent/Init/Parse hooks below match
# that API; confirm).
ScriptName = "Google Play Current Song"
Website = "https://github.com/nvloff/song_status"
Description = "Provides variables to show the currently playing song from Google Desktop Player"
Creator = "<NAME><<EMAIL>>"
Version = "1.0.0.0"
# Placeholder token replaced with the current song status in Parse().
m_SongStatusVar = "$song_status"
class Messages:
    """Central place for every user-facing / log message.

    Fixed templates live as class attributes; the configurable messages
    are looked up on the injected settings object at call time.
    """

    IO_ERROR = "I/O error({0}): {1} reading file {2}"
    JSON_PARSE_ERROR = "Failed to parse JSON file at %s"
    UNKNOWN_ERROR = "Unknown exception %s"
    PLAYER_ERROR = "Error loading song."

    def __init__(self, settings):
        # Object exposing the *_message()/status_format() accessors.
        self._config = settings

    def not_available(self):
        """Fallback used when artist/title are unknown."""
        return self._config.not_available_message()

    def status_format(self):
        """Template with {artist} and {title} placeholders."""
        return self._config.status_format()

    def offline(self):
        """Message shown while the stream is not live."""
        return self._config.offline_message()

    def stopped(self):
        return self._config.stopped_message()

    def paused(self):
        return self._config.paused_message()
class JsonDataParser:
    """Reads and parses the player's JSON playback file.

    A missing file is not treated as an error (the player is probably
    stopped); I/O and parse failures are captured in ``error_message``
    instead of being raised.
    """

    def __init__(self, path):
        self.path = path
        # Parsed payload; stays {} when the file is absent or unreadable.
        self.data = {}
        self.error_message = None
        self.load_error = False

    def load(self):
        """Populate ``self.data`` from the file; returns self for chaining."""
        # if file is not present the player is probably stopped
        if not os.path.isfile(self.path):
            return self
        try:
            with open(self.path, "r") as f:
                self.data = json.load(f)
        except IOError as e:
            # BUG FIX: `except IOError as (errno, strerror)` is Python-2-only
            # tuple unpacking and a SyntaxError on Python 3; use attribute
            # access, which works on both.
            self.set_load_error(Messages.IO_ERROR.format(e.errno, e.strerror, self.path))
        except ValueError:
            # json.load raises ValueError (JSONDecodeError) on malformed input.
            self.set_load_error(Messages.JSON_PARSE_ERROR % self.path)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            self.set_load_error(Messages.UNKNOWN_ERROR % sys.exc_info()[0])
        return self

    def is_success(self):
        """True when no load error has been recorded."""
        return self.load_error is not True

    def is_error(self):
        return not self.is_success()

    def error(self):
        """The recorded error message, or None on success."""
        return self.error_message

    def set_load_error(self, message):
        # Record the failure; callers inspect it via is_error()/error().
        self.load_error = True
        self.error_message = message
class SongStatus:
    """Interprets the raw playback dictionary reported by the player."""

    def __init__(self, data):
        # A None / empty payload means the player is stopped.
        self.data = data if data else {}

    def is_stopped(self):
        """True when no playback data is available at all."""
        return not self.data

    def is_playing(self):
        """True only when the payload explicitly says playing is True."""
        if self.is_stopped():
            return False
        return self.data['playing'] is True

    def is_paused(self):
        """Data is present but the player is not actively playing."""
        if self.is_stopped():
            return False
        return not self.is_playing()

    def song(self):
        """The nested song record, or an empty dict when absent."""
        return self.data.get('song', {})

    def artist(self):
        """Current artist, or None unless actively playing."""
        return self.song()['artist'] if self.is_playing() else None

    def title(self):
        """Current title, or None unless actively playing."""
        return self.song()['title'] if self.is_playing() else None
class DisplayStatus:
    """Combines the parsed playback file with configured messages to
    produce user-facing artist/title/status strings."""

    def __init__(self, path, messages):
        self._messages = messages
        self._path = path
        self._song_status = None
        self._parser = None
        # Eagerly parse so accessors can be called immediately.
        self.load()

    def artist(self):
        """Current artist, or the configured fallback when unknown."""
        artist = self._song_status.artist()
        return artist if artist else self._messages.not_available()

    def title(self):
        """Current title, or the configured fallback when unknown."""
        title = self._song_status.title()
        return title if title else self._messages.not_available()

    def error_message(self):
        """Parser error message, or None when parsing succeeded."""
        return self._parser.error()

    def song_status(self):
        """One-line, human-readable description of the player state."""
        if self._parser.is_error():
            return Messages.PLAYER_ERROR
        if self._song_status.is_stopped():
            return self._messages.stopped()
        if self._song_status.is_paused():
            return self._messages.paused()
        return self._messages.status_format().format(
            artist=self._song_status.artist(),
            title=self._song_status.title()
        )

    def load(self):
        """(Re)parse the playback file; returns self for chaining."""
        self.load_parser()
        self.load_song_status()
        return self

    def load_parser(self):
        # JsonDataParser.load() returns self, so the chain is safe.
        self._parser = JsonDataParser(self._path).load()

    def load_song_status(self):
        self._song_status = SongStatus(self._parser.data)
class Settings:
    """Script settings with built-in defaults, optionally overridden from
    a JSON string or a UTF-8 (BOM-tolerant) JSON file."""

    DEFAULTS = {
        "debug": False,
        "not_available_message": "N/A",
        "paused_message": "Player paused.",
        "stopped_message": "Player stopped.",
        "status_format": "{artist} - {title}",
        "offline_message": "We offline :(",
        "data_path": "%APPDATA%\Google Play Music Desktop Player\json_store\playback.json"
    }

    def __init__(self):
        # BUG FIX: copy the defaults. The original assigned the class-level
        # dict itself, so any in-place mutation of instance settings would
        # have corrupted DEFAULTS for every other Settings instance.
        self._settings = dict(self.__class__.DEFAULTS)
        self._loaded = False

    def load(self, json_data):
        """Merge settings parsed from a JSON string over the defaults.

        Returns self for chaining. Keys absent from the JSON keep their
        default values; a repeated load() starts from the defaults again.
        """
        merged = self.__class__.DEFAULTS.copy()
        merged.update(json.loads(json_data))
        self._settings = merged
        self._loaded = True
        return self

    def load_from_file(self, path):
        """Load settings from a JSON file; utf-8-sig tolerates a BOM."""
        with codecs.open(path, encoding='utf-8-sig', mode='r') as f:
            self.load(f.read())
        return self

    def data_path(self):
        # Expand %APPDATA%-style environment variables at access time.
        return os.path.expandvars(self._settings["data_path"])

    def is_debug(self):
        return self._settings["debug"]

    def not_available_message(self):
        return self._settings["not_available_message"]

    def paused_message(self):
        return self._settings["paused_message"]

    def stopped_message(self):
        return self._settings["stopped_message"]

    def status_format(self):
        return self._settings["status_format"]

    def offline_message(self):
        return self._settings["offline_message"]
# Module-level settings shared by the chatbot callbacks below.
RuntimeSettings = Settings()


def load_settings(json_data):
    """Replace the runtime settings with values parsed from a JSON string."""
    RuntimeSettings.load(json_data)


def Init():
    """Chatbot entry point: load settings.json located next to this script."""
    config_file = "settings.json"
    path = os.path.dirname(__file__)
    try:
        RuntimeSettings.load_from_file(os.path.join(path, config_file))
    except Exception:
        # BUG FIX: Parent.Log was given three arguments while every other
        # call in this file passes two — the "%s" was never interpolated.
        # Format the message before logging. Also narrowed the bare except.
        Parent.Log(ScriptName, "Failed to load settings from %s" % config_file)


def ReloadSettings(jsonData):
    """Chatbot callback invoked when the user saves new settings."""
    RuntimeSettings.load(jsonData)
def Parse(ParseString, user, target, message):
    """Chatbot parse hook: substitute $song_status with the current
    song status (or the offline message when the stream is not live)."""
    messages = Messages(RuntimeSettings)

    # Nothing to substitute -> hand the string back untouched.
    if m_SongStatusVar not in ParseString:
        return ParseString

    # Offline (and not debugging): report the configured offline message.
    if not (Parent.IsLive() or RuntimeSettings.is_debug()):
        Parent.Log(ScriptName, "Stream is not live")
        return ParseString.replace(m_SongStatusVar, messages.offline())

    status = DisplayStatus(RuntimeSettings.data_path(), messages)
    if status.error_message():
        Parent.Log(ScriptName, status.error_message())
    return ParseString.replace(m_SongStatusVar, status.song_status())
| StarcoderdataPython |
114841 | import asyncio
import uvloop
from rich import traceback
from command import Command
if __name__ == '__main__':
    # Install rich's pretty tracebacks and uvloop's faster event-loop
    # policy before running the top-level Command coroutine.
    traceback.install()
    uvloop.install()
    asyncio.run(Command.execute())
| StarcoderdataPython |
3388974 | <gh_stars>0
from r2base.index.index import Index
from r2base.mappings import BasicMapping
import pytest
import time
WORK_DIR = "."


def test_basic_crud():
    """End-to-end CRUD test against a real Index: create with a typed
    mapping, then add / read / update / delete documents.

    NOTE(review): the time.sleep calls suggest the backing store indexes
    asynchronously (eventual consistency) — confirm before removing them.
    """
    mappings = {'f1': {'type': 'keyword'},
                'f2': {'type': 'integer'},
                'f3': {'type': 'float'},
                'f4': {'type': 'datetime'}}
    i = Index(WORK_DIR, 'test_crud_index')
    # Start from a clean slate in case a previous run left the index behind.
    i.delete_index()
    i.create_index(mappings)
    # get mapping
    dump_mapping = i.get_mappings()
    # The index adds an internal _uid field on top of the user mapping.
    for k in ['f1', 'f2', 'f3', 'f4', '_uid']:
        assert k in dump_mapping
    assert i.size() == 0
    # add, read, delete, update
    docs = [{'f1': "haha", "f2": 10, "f4": '2019-06-28'},
            {'f1': "lala", "f3": 10.3},
            {'f2': 12, "f3": 3.3},
            {'f2': 0, "f3": 5.3, '_uid': 'ddeeff'},
            {'f2': 22, "f3": 1.1, '_uid': 'aabbcc'}
            ]
    # add docs
    doc_ids = i.add_docs(docs, batch_size=2)
    time.sleep(2)
    # Caller-supplied _uid values must be preserved as document ids.
    assert doc_ids[3] == 'ddeeff'
    assert doc_ids[4] == 'aabbcc'
    assert i.size() == 5
    # read docs
    docs = i.read_docs(['ddeeff', 'aabbcc'])
    assert len(docs) == 2
    docs = i.read_docs('ddeeff')
    assert len(docs) == 1
    assert docs[0]['_uid'] == 'ddeeff'
    # update docs
    doc_ids = i.update_docs({'f2': 44, "f3": 1.1, '_uid': 'aabbcc'}, batch_size=2)
    time.sleep(1)
    assert len(doc_ids) == 1
    assert doc_ids[0] == 'aabbcc'
    docs = i.read_docs('aabbcc')
    assert docs[0]['f2'] == 44
    # delete docs
    res = i.delete_docs('aabbcc')
    time.sleep(1)
    docs = i.read_docs('aabbcc')
    assert len(docs) == 0
    i.delete_index()
| StarcoderdataPython |
154096 |
# coding: utf-8
# # Broadcasting a spectrum - Two spectral Components model
# In[ ]:
from astropy.io import fits
import numpy as np
import scipy as sp
from scipy.interpolate import interp1d
from scipy.stats import chisquare
from PyAstronomy.pyasl import dopplerShift
import matplotlib.pyplot as plt
# NOTE(review): get_ipython() only exists inside an IPython/Jupyter session;
# running this file as a plain script raises NameError here.
get_ipython().magic('matplotlib')
# In[ ]:
def two_comp_model(wav, model1, model2, alphas, rvs, gammas):
    """Make a two-component spectral model, broadcasting over alpha/rv/gamma.

    Parameters
    ----------
    wav : 1-d array of model wavelengths.
    model1 : host-star flux sampled on `wav`.
    model2 : companion flux sampled on `wav`.
    alphas, rvs, gammas : scalar or 1-d arrays of flux ratios, companion
        radial velocities and systemic velocities (km/s).

    Returns
    -------
    interp1d callable over `wav`; evaluating it at points `x` yields an
    array of shape (len(x), len(alphas), len(rvs), len(gammas)).

    Note: only the companion term is divided by (1 + alpha); the host is
    added un-normalized, matching the original implementation.
    """
    c_kms = 299792.458  # speed of light in km/s

    # Promote scalar (or list) inputs to 1-d arrays so the broadcasting
    # below is uniform.
    alphas = np.atleast_1d(alphas)
    rvs = np.atleast_1d(rvs)
    gammas = np.atleast_1d(gammas)

    am2 = model2[:, np.newaxis] * alphas  # alpha * Model2 (am2)

    # am2rv = am2 with an rv Doppler shift applied along a new axis.
    am2rv = np.empty(am2.shape + (len(rvs),))
    for i, rv in enumerate(rvs):
        # BUG FIX: the shift used the global `c` (a flux array loaded later
        # in the script) as the speed of light; use the km/s constant.
        wav_i = (1 + rv / c_kms) * wav
        am2rv[:, :, i] = interp1d(wav_i, am2, axis=0, bounds_error=False)(wav)

    # Normalize the companion term by 1 / (1 + alpha).
    am2rv = am2rv / (1 + alphas)[np.newaxis, :, np.newaxis]

    # BUG FIX: add the `model1` argument rather than the global `h`.
    am2rvm1 = model1[:, np.newaxis, np.newaxis] + am2rv

    # am2rvm1g = am2rvm1 with a gamma Doppler shift along a new axis.
    am2rvm1g = np.empty(am2rvm1.shape + (len(gammas),))
    for j, gamma in enumerate(gammas):
        wav_j = (1 + gamma / c_kms) * wav
        am2rvm1g[:, :, :, j] = interp1d(wav_j, am2rvm1, axis=0, bounds_error=False)(wav)

    # BUG FIX: interpolate over the `wav` argument, not the global `w`.
    return interp1d(wav, am2rvm1g, axis=0)  # pass it the wavelength values to return
# In[ ]:
wav = "/home/jneal/Phd/data/phoenixmodels/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits"
host = "/home/jneal/Phd/data/phoenixmodels/HD30501-lte05200-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
comp = "/home/jneal/Phd/data/phoenixmodels/HD30501b-lte02500-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
# Load PHOENIX model wavelengths (Angstrom -> nm) and host/companion fluxes.
w = fits.getdata(wav) / 10
h = fits.getdata(host)
c = fits.getdata(comp)
# In[ ]:
# Restrict all arrays to a narrow wavelength window.
mask = (2111 < w) & (w < 2117)
w = w[mask]
h = h[mask]
c = c[mask]
# crude normalization
h = h/np.max(h)
c = c/np.max(c)
# In[ ]:
# Create a simulated spectrum
# Parameters
c_kms = 299792.458  # km/s
s_alpha = np.array([0.1])
s_rv = np.array([1.5])
s_gamma = np.array([0.5])
answers = (s_alpha, s_rv, s_gamma)
# COMPACT SIMULATION
# NOTE: `comp` is rebound here from a filename string to the rv-shifted,
# alpha-scaled companion flux.
comp = interp1d((1 + s_rv / c_kms) * w, s_alpha * c, bounds_error=False)(w)
Sim_func = interp1d((1 + s_gamma / c_kms) * w, (h + comp) / (1 + s_alpha), bounds_error=False, axis=0)
sim_f_orgw = Sim_func(w)
sim_w = np.linspace(2114, 2115, 1024)
sim_f = Sim_func(sim_w)
# In[ ]:
# Compare output to tcm
tcm_sim_f = two_comp_model(w, h, c, s_alpha, s_rv, s_gamma)(sim_w)
# NOTE(review): one_comp_model is not defined anywhere in this file —
# presumably it was defined earlier in the notebook session; confirm,
# otherwise this line raises NameError.
ocm_sim_f = one_comp_model(w, h, s_gamma)(sim_w)
# In[ ]:
plt.close()
plt.plot(w, sim_f_orgw, label="org_w")
plt.plot(sim_w, sim_f, label="sim")
plt.plot(sim_w, np.squeeze(tcm_sim_f), label="tcm sim")
plt.plot(sim_w, np.squeeze(ocm_sim_f), label="ocm sim")
plt.legend()
plt.show()
sim_f.shape
# sim_w, sim_f are the observations to perform chisquared against!
# In[ ]:
# Grid of candidate parameters to search over.
alphas = np.linspace(0.1, 0.3, 40)
rvs = np.arange(1.1, 2, 0.05)
gammas = np.arange(-0.9, 1, 0.015)
print(len(alphas), len(rvs), len(gammas))
# In[ ]:
tcm = two_comp_model(w, h, c, alphas=alphas, rvs=rvs, gammas=gammas)
# In[ ]:
# Two component model
tcm_obs = tcm(sim_w)
tcm_obs.shape
# In[ ]:
# Chi-squared of the simulated observation against every grid model;
# broadcasting the observation over the (alpha, rv, gamma) grid axes.
chi2 = chisquare(sim_f[:, np.newaxis, np.newaxis, np.newaxis], tcm_obs).statistic
print(chi2.shape)
min_indx = np.unravel_index(chi2.argmin(), chi2.shape)
# BUG FIX: `min_rvs[indx[1]]` referenced two undefined names (NameError);
# the rv grid is `rvs`, indexed by `min_indx`.
print("sim results", alphas[min_indx[0]], rvs[min_indx[1]], gammas[min_indx[2]])
print("answer", answers)
# In[ ]:
# Putting resulted sim min values back into tcm model
res = two_comp_model(w, h, c, alphas[min_indx[0]], rvs[min_indx[1]], gammas[min_indx[2]])
res_f = res(sim_w)  # Flux of the min chisquare model evaluated at obs points.
# In[ ]:
# Compare to tcm generated simulation
chi2_tcm = chisquare(tcm_sim_f, tcm_obs).statistic
# NOTE(review): the argmin below is taken over `chi2`, not `chi2_tcm` —
# `chi2_tcm` looks intended; confirm.
min_indx_tcm = np.unravel_index(chi2.argmin(), chi2.shape)
print("tcm results", alphas[min_indx_tcm[0]], rvs[min_indx_tcm[1]], gammas[min_indx_tcm[2]])
print("answer", answers)
# In[ ]:
# Putting resulted tcm sim min values back into tcm model
res_tcm = two_comp_model(w, h, c, alphas[min_indx[0]], rvs[min_indx[1]], gammas[min_indx[2]])
res_tcm_f = res_tcm(sim_w)  # Flux of the min chisquare model evaluated at obs points.
# In[ ]:
plt.plot(sim_w, sim_f, "--", label="org")
plt.plot(sim_w, np.squeeze(res_f), label= "2 comp")
plt.plot(sim_w, np.squeeze(res_tcm_f), label="fit to tcm sim")
plt.title("Comparison to Simulation")
plt.legend()
plt.show()
# In[ ]:
plt.close()
plt.figure()
# In[ ]:
# Quick-look contour slices through the chi2 cube.
plt.figure()
plt.contourf(chi2[:,:,0])
plt.figure()
plt.contourf(chi2[0,:,:])
# In[ ]:
plt.figure()
plt.contourf(chi2[:,1,:])
plt.figure()
# In[ ]:
# Slice arrays to make contour maps
xslice = np.arange(0, chi2.shape[0], 5)
yslice = np.arange(0, chi2.shape[1], 5)
zslice = np.arange(0, chi2.shape[2], 5)
for xs in xslice:
    plt.figure()
    plt.contourf(chi2[xs, :, :])
    plt.colorbar()
    plt.title("x alpha = {}".format(alphas[xs]))
    plt.show()
# In[ ]:
for ys in yslice:
    plt.figure()
    plt.contourf(chi2[:, ys, :])
    plt.colorbar()
    plt.title("y rvs = {}".format(rvs[ys]))
    plt.show()
# In[ ]:
for zs in zslice:
    plt.figure()
    plt.contourf(chi2[:, :, zs])
    plt.colorbar()
    plt.title("z gammas = {}".format(gammas[zs]))
    plt.show()
# In[ ]:
# Close every sliced contour figure opened above.
for xs in np.concatenate([xslice, yslice, zslice]):
    plt.close()
# In[ ]:
# In[ ]:
| StarcoderdataPython |
179944 | import unittest
import hail as hl
import hail.expr.aggregators as agg
from subprocess import DEVNULL, call as syscall
import numpy as np
from struct import unpack
import hail.utils as utils
from hail.linalg import BlockMatrix
from math import sqrt
from .utils import resource, doctest_resource, startTestHailContext, stopTestHailContext
# Hook the shared Hail context into unittest's module-level setup/teardown.
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
class Tests(unittest.TestCase):
_dataset = None
    def get_dataset(self):
        """Return the shared sample dataset, importing the VCF and splitting
        multi-allelic variants on first use (cached at class level)."""
        if Tests._dataset is None:
            Tests._dataset = hl.split_multi_hts(hl.import_vcf(resource('sample.vcf')))
        return Tests._dataset
    def test_ibd(self):
        """Cross-check hl.identity_by_descent against PLINK's --genome output
        (requires the `plink` binary on PATH)."""
        dataset = self.get_dataset()

        def plinkify(ds, min=None, max=None):
            # Export the dataset to VCF and run PLINK's IBD estimation on it.
            vcf = utils.new_temp_file(prefix="plink", suffix="vcf")
            plinkpath = utils.new_temp_file(prefix="plink")
            hl.export_vcf(ds, vcf)
            threshold_string = "{} {}".format("--min {}".format(min) if min else "",
                                              "--max {}".format(max) if max else "")
            plink_command = "plink --double-id --allow-extra-chr --vcf {} --genome full --out {} {}" \
                .format(utils.uri_path(vcf),
                        utils.uri_path(plinkpath),
                        threshold_string)
            result_file = utils.uri_path(plinkpath + ".genome")
            syscall(plink_command, shell=True, stdout=DEVNULL, stderr=DEVNULL)
            ### format of .genome file is:
            # _, fid1, iid1, fid2, iid2, rt, ez, z0, z1, z2, pihat, phe,
            # dst, ppc, ratio, ibs0, ibs1, ibs2, homhom, hethet (+ separated)
            ### format of ibd is:
            # i (iid1), j (iid2), ibd: {Z0, Z1, Z2, PI_HAT}, ibs0, ibs1, ibs2
            results = {}
            with open(result_file) as f:
                f.readline()  # skip the header row
                for line in f:
                    row = line.strip().split()
                    results[(row[1], row[3])] = (list(map(float, row[6:10])),
                                                 list(map(int, row[14:17])))
            return results

        def compare(ds, min=None, max=None):
            # Assert Hail's IBD estimates match PLINK's for every sample pair.
            plink_results = plinkify(ds, min, max)
            hail_results = hl.identity_by_descent(ds, min=min, max=max).collect()
            for row in hail_results:
                key = (row.i, row.j)
                self.assertAlmostEqual(plink_results[key][0][0], row.ibd.Z0, places=4)
                self.assertAlmostEqual(plink_results[key][0][1], row.ibd.Z1, places=4)
                self.assertAlmostEqual(plink_results[key][0][2], row.ibd.Z2, places=4)
                self.assertAlmostEqual(plink_results[key][0][3], row.ibd.PI_HAT, places=4)
                self.assertEqual(plink_results[key][1][0], row.ibs0)
                self.assertEqual(plink_results[key][1][1], row.ibs1)
                self.assertEqual(plink_results[key][1][2], row.ibs2)

        compare(dataset)
        compare(dataset, min=0.0, max=1.0)
        # The MAF expression may be a float64 field or an explicit float32 cast.
        dataset = dataset.annotate_rows(dummy_maf=0.01)
        hl.identity_by_descent(dataset, dataset['dummy_maf'], min=0.0, max=1.0)
        hl.identity_by_descent(dataset, hl.float32(dataset['dummy_maf']), min=0.0, max=1.0)
    def test_impute_sex_same_as_plink(self):
        """Cross-check hl.impute_sex against PLINK's --check-sex output
        (requires the `plink` binary on PATH)."""
        ds = hl.import_vcf(resource('x-chromosome.vcf'))
        sex = hl.impute_sex(ds.GT, include_par=True)
        vcf_file = utils.uri_path(utils.new_temp_file(prefix="plink", suffix="vcf"))
        out_file = utils.uri_path(utils.new_temp_file(prefix="plink"))
        hl.export_vcf(ds, vcf_file)
        utils.run_command(["plink", "--vcf", vcf_file, "--const-fid",
                           "--check-sex", "--silent", "--out", out_file])
        plink_sex = hl.import_table(out_file + '.sexcheck',
                                    delimiter=' +',
                                    types={'SNPSEX': hl.tint32,
                                           'F': hl.tfloat64})
        plink_sex = plink_sex.select('IID', 'SNPSEX', 'F')
        # PLINK encodes sex as 1=male, 2=female, 0=unknown.
        plink_sex = plink_sex.select(
            s=plink_sex.IID,
            is_female=hl.cond(plink_sex.SNPSEX == 2,
                              True,
                              hl.cond(plink_sex.SNPSEX == 1,
                                      False,
                                      hl.null(hl.tbool))),
            f_stat=plink_sex.F).key_by('s')
        sex = sex.select('is_female', 'f_stat')
        self.assertTrue(plink_sex._same(sex.select_globals(), tolerance=1e-3))
        # Supplying a precomputed alt-allele frequency must not change results.
        ds = ds.annotate_rows(aaf=(agg.call_stats(ds.GT, ds.alleles)).AF[1])
        self.assertTrue(hl.impute_sex(ds.GT)._same(hl.impute_sex(ds.GT, aaf='aaf')))
    def test_linreg(self):
        """Equivalent linear_regression invocations (inline expression vs
        cached entry field, scalar y vs single/double-element list y) must
        produce identical p-values."""
        phenos = hl.import_table(resource('regressionLinear.pheno'),
                                 types={'Pheno': hl.tfloat64},
                                 key='Sample')
        covs = hl.import_table(resource('regressionLinear.cov'),
                               types={'Cov1': hl.tfloat64, 'Cov2': hl.tfloat64},
                               key='Sample')
        mt = hl.import_vcf(resource('regressionLinear.vcf'))
        mt = mt.annotate_cols(pheno=phenos[mt.s].Pheno, cov=covs[mt.s])
        mt = mt.annotate_entries(x = mt.GT.n_alt_alleles()).cache()
        # Cov2 + 1 - 1 is algebraically Cov2; results must be identical.
        t1 = hl.linear_regression(
            y=mt.pheno, x=mt.GT.n_alt_alleles(), covariates=[mt.cov.Cov1, mt.cov.Cov2 + 1 - 1]).rows()
        t1 = t1.select(p=t1.linreg.p_value)
        t2 = hl.linear_regression(
            y=mt.pheno, x=mt.x, covariates=[mt.cov.Cov1, mt.cov.Cov2]).rows()
        t2 = t2.select(p=t2.linreg.p_value)
        t3 = hl.linear_regression(
            y=[mt.pheno], x=mt.x, covariates=[mt.cov.Cov1, mt.cov.Cov2]).rows()
        t3 = t3.select(p=t3.linreg.p_value[0])
        t4 = hl.linear_regression(
            y=[mt.pheno, mt.pheno], x=mt.x, covariates=[mt.cov.Cov1, mt.cov.Cov2]).rows()
        t4a = t4.select(p=t4.linreg.p_value[0])
        t4b = t4.select(p=t4.linreg.p_value[1])
        self.assertTrue(t1._same(t2))
        self.assertTrue(t1._same(t3))
        self.assertTrue(t1._same(t4a))
        self.assertTrue(t1._same(t4b))
    def test_linear_regression_with_two_cov(self):
        """Linear regression on genotype dosage with two covariates,
        checked against precomputed expected statistics per locus."""
        covariates = hl.import_table(resource('regressionLinear.cov'),
                                     key='Sample',
                                     types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
        pheno = hl.import_table(resource('regressionLinear.pheno'),
                                key='Sample',
                                missing='0',
                                types={'Pheno': hl.tfloat})
        mt = hl.import_vcf(resource('regressionLinear.vcf'))
        mt = hl.linear_regression(y=pheno[mt.s].Pheno,
                                  x=mt.GT.n_alt_alleles(),
                                  covariates=list(covariates[mt.s].values()))
        # Map locus position -> regression struct for easy lookup.
        results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
        self.assertAlmostEqual(results[1].beta, -0.28589421, places=6)
        self.assertAlmostEqual(results[1].standard_error, 1.2739153, places=6)
        self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
        self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
        self.assertAlmostEqual(results[2].beta, -0.5417647, places=6)
        self.assertAlmostEqual(results[2].standard_error, 0.3350599, places=6)
        self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
        self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
        self.assertAlmostEqual(results[3].beta, 1.07367185, places=6)
        self.assertAlmostEqual(results[3].standard_error, 0.6764348, places=6)
        self.assertAlmostEqual(results[3].t_stat, 1.5872510, places=6)
        self.assertAlmostEqual(results[3].p_value, 0.2533675, places=6)
        # Degenerate loci must yield NaN statistics rather than errors.
        self.assertTrue(np.isnan(results[6].standard_error))
        self.assertTrue(np.isnan(results[6].t_stat))
        self.assertTrue(np.isnan(results[6].p_value))
        self.assertTrue(np.isnan(results[7].standard_error))
        self.assertTrue(np.isnan(results[8].standard_error))
        self.assertTrue(np.isnan(results[9].standard_error))
        self.assertTrue(np.isnan(results[10].standard_error))
    def test_linear_regression_with_two_cov_pl(self):
        """Same regression as the two-covariate test, but with x derived
        from PL (phred-scaled likelihood) dosages instead of allele counts."""
        covariates = hl.import_table(resource('regressionLinear.cov'),
                                     key='Sample',
                                     types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
        pheno = hl.import_table(resource('regressionLinear.pheno'),
                                key='Sample',
                                missing='0',
                                types={'Pheno': hl.tfloat})
        mt = hl.import_vcf(resource('regressionLinear.vcf'))
        mt = hl.linear_regression(y=pheno[mt.s].Pheno,
                                  x=hl.pl_dosage(mt.PL),
                                  covariates=list(covariates[mt.s].values()))
        results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
        self.assertAlmostEqual(results[1].beta, -0.29166985, places=6)
        self.assertAlmostEqual(results[1].standard_error, 1.2996510, places=6)
        self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
        self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
        self.assertAlmostEqual(results[2].beta, -0.5499320, places=6)
        self.assertAlmostEqual(results[2].standard_error, 0.3401110, places=6)
        self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
        self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
        self.assertAlmostEqual(results[3].beta, 1.09536219, places=6)
        self.assertAlmostEqual(results[3].standard_error, 0.6901002, places=6)
        self.assertAlmostEqual(results[3].t_stat, 1.5872510, places=6)
        self.assertAlmostEqual(results[3].p_value, 0.2533675, places=6)
    def test_linear_regression_with_two_cov_dosage(self):
        """Same regression again, importing GEN data and using GP dosages;
        betas/standard errors are only accurate to ~4 places here because
        GEN genotype probabilities are stored with limited precision."""
        covariates = hl.import_table(resource('regressionLinear.cov'),
                                     key='Sample',
                                     types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
        pheno = hl.import_table(resource('regressionLinear.pheno'),
                                key='Sample',
                                missing='0',
                                types={'Pheno': hl.tfloat})
        mt = hl.import_gen(resource('regressionLinear.gen'), sample_file=resource('regressionLinear.sample'))
        mt = hl.linear_regression(y=pheno[mt.s].Pheno,
                                  x=hl.gp_dosage(mt.GP),
                                  covariates=list(covariates[mt.s].values()))
        results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
        self.assertAlmostEqual(results[1].beta, -0.29166985, places=4)
        self.assertAlmostEqual(results[1].standard_error, 1.2996510, places=4)
        self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
        self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
        self.assertAlmostEqual(results[2].beta, -0.5499320, places=4)
        self.assertAlmostEqual(results[2].standard_error, 0.3401110, places=4)
        self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
        self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
        self.assertAlmostEqual(results[3].beta, 1.09536219, places=4)
        self.assertAlmostEqual(results[3].standard_error, 0.6901002, places=4)
        self.assertAlmostEqual(results[3].t_stat, 1.5872510, places=6)
        self.assertAlmostEqual(results[3].p_value, 0.2533675, places=6)
        self.assertTrue(np.isnan(results[6].standard_error))
    def test_linear_regression_with_no_cov(self):
        """Covariate-free linear regression against precomputed statistics."""
        pheno = hl.import_table(resource('regressionLinear.pheno'),
                                key='Sample',
                                missing='0',
                                types={'Pheno': hl.tfloat})
        mt = hl.import_vcf(resource('regressionLinear.vcf'))
        mt = hl.linear_regression(y=pheno[mt.s].Pheno,
                                  x=mt.GT.n_alt_alleles())
        results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
        self.assertAlmostEqual(results[1].beta, -0.25, places=6)
        self.assertAlmostEqual(results[1].standard_error, 0.4841229, places=6)
        self.assertAlmostEqual(results[1].t_stat, -0.5163978, places=6)
        self.assertAlmostEqual(results[1].p_value, 0.63281250, places=6)
        self.assertAlmostEqual(results[2].beta, -0.250000, places=6)
        self.assertAlmostEqual(results[2].standard_error, 0.2602082, places=6)
        self.assertAlmostEqual(results[2].t_stat, -0.9607689, places=6)
        self.assertAlmostEqual(results[2].p_value, 0.391075888, places=6)
        # Degenerate loci must yield NaN statistics rather than errors.
        self.assertTrue(np.isnan(results[6].standard_error))
        self.assertTrue(np.isnan(results[7].standard_error))
        self.assertTrue(np.isnan(results[8].standard_error))
        self.assertTrue(np.isnan(results[9].standard_error))
        self.assertTrue(np.isnan(results[10].standard_error))
    def test_linear_regression_with_import_fam_boolean(self):
        """Regression with a boolean phenotype (case status) loaded from a
        PLINK .fam file; expected statistics match the two-covariate test."""
        covariates = hl.import_table(resource('regressionLinear.cov'),
                                     key='Sample',
                                     types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
        fam = hl.import_fam(resource('regressionLinear.fam'))
        mt = hl.import_vcf(resource('regressionLinear.vcf'))
        mt = hl.linear_regression(y=fam[mt.s].is_case,
                                  x=mt.GT.n_alt_alleles(),
                                  covariates=list(covariates[mt.s].values()))
        results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
        self.assertAlmostEqual(results[1].beta, -0.28589421, places=6)
        self.assertAlmostEqual(results[1].standard_error, 1.2739153, places=6)
        self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
        self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
        self.assertAlmostEqual(results[2].beta, -0.5417647, places=6)
        self.assertAlmostEqual(results[2].standard_error, 0.3350599, places=6)
        self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
        self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
        self.assertTrue(np.isnan(results[6].standard_error))
        self.assertTrue(np.isnan(results[7].standard_error))
        self.assertTrue(np.isnan(results[8].standard_error))
        self.assertTrue(np.isnan(results[9].standard_error))
        self.assertTrue(np.isnan(results[10].standard_error))
def test_linear_regression_with_import_fam_quant(self):
    """Linear regression of a quantitative phenotype loaded via import_fam.

    Same reference values as the boolean-phenotype variant; loci with
    (near-)constant genotypes must produce NaN standard errors.
    """
    cov = hl.import_table(resource('regressionLinear.cov'),
                          key='Sample',
                          types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    fam = hl.import_fam(resource('regressionLinear.fam'),
                        quant_pheno=True,
                        missing='0')
    mt = hl.import_vcf(resource('regressionLinear.vcf'))
    mt = hl.linear_regression(y=fam[mt.s].quant_pheno,
                              x=mt.GT.n_alt_alleles(),
                              covariates=list(cov[mt.s].values()))
    by_position = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
    # position -> (beta, standard_error, t_stat, p_value)
    expected = {
        1: (-0.28589421, 1.2739153, -0.22442167, 0.84327106),
        2: (-0.5417647, 0.3350599, -1.616919, 0.24728705),
    }
    for position, (beta, se, t_stat, p_value) in expected.items():
        stats = by_position[position]
        self.assertAlmostEqual(stats.beta, beta, places=6)
        self.assertAlmostEqual(stats.standard_error, se, places=6)
        self.assertAlmostEqual(stats.t_stat, t_stat, places=6)
        self.assertAlmostEqual(stats.p_value, p_value, places=6)
    for position in (6, 7, 8, 9, 10):
        self.assertTrue(np.isnan(by_position[position].standard_error))
def test_linear_regression_multi_pheno_same(self):
    """Passing the same phenotype twice as a multi-phenotype list must
    reproduce the single-phenotype results in every output field."""
    covariates = hl.import_table(resource('regressionLinear.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLinear.pheno'),
                            key='Sample',
                            missing='0',
                            types={'Pheno': hl.tfloat})
    mt = hl.import_vcf(resource('regressionLinear.vcf'))
    # single-phenotype run, annotated under row field 'single'
    mt = hl.linear_regression(y=pheno[mt.s].Pheno,
                              x=mt.GT.n_alt_alleles(),
                              covariates=list(covariates[mt.s].values()),
                              root='single')
    # identical phenotype duplicated -> multi-phenotype run under 'multi'
    mt = hl.linear_regression(y=[pheno[mt.s].Pheno, pheno[mt.s].Pheno],
                              x=mt.GT.n_alt_alleles(),
                              covariates=list(covariates[mt.s].values()),
                              root='multi')
    def eq(x1, x2):
        # NaN-tolerant approximate equality: NaN matches NaN, else |x1 - x2| < 1e-4
        return (hl.is_nan(x1) & hl.is_nan(x2)) | (hl.abs(x1 - x2) < 1e-4)
    # single-pheno fields must match entry 0 of each multi-pheno array
    self.assertTrue(mt.aggregate_rows(hl.agg.all((eq(mt.single.p_value, mt.multi.p_value[0]) &
                                                  eq(mt.single.standard_error, mt.multi.standard_error[0]) &
                                                  eq(mt.single.t_stat, mt.multi.t_stat[0]) &
                                                  eq(mt.single.beta, mt.multi.beta[0]) &
                                                  eq(mt.single.y_transpose_x, mt.multi.y_transpose_x[0])))))
    # the two (identical) phenotypes must agree with each other as well
    self.assertTrue(mt.aggregate_rows(hl.agg.all(eq(mt.multi.p_value[1], mt.multi.p_value[0]) &
                                                 eq(mt.multi.standard_error[1], mt.multi.standard_error[0]) &
                                                 eq(mt.multi.t_stat[1], mt.multi.t_stat[0]) &
                                                 eq(mt.multi.beta[1], mt.multi.beta[0]) &
                                                 eq(mt.multi.y_transpose_x[1], mt.multi.y_transpose_x[0]))))
def test_logistic_regression_wald_test_two_cov(self):
    """Wald logistic regression with two covariates against reference
    statistics; (near-)constant loci must be degenerate (non-converged
    fit, NaN p-value, or p ~ 1)."""
    cov = hl.import_table(resource('regressionLogistic.cov'),
                          key='Sample',
                          types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLogisticBoolean.pheno'),
                            key='Sample',
                            missing='0',
                            types={'isCase': hl.tbool})
    mt = hl.import_vcf(resource('regressionLogistic.vcf'))
    mt = hl.logistic_regression('wald',
                                y=pheno[mt.s].isCase,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[cov[mt.s].Cov1, cov[mt.s].Cov2])
    by_position = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.logreg))))
    # position -> (beta, standard_error, z_stat, p_value)
    expected = {
        1: (-0.81226793796, 2.1085483421, -0.3852261396, 0.7000698784),
        2: (-0.43659460858, 1.0296902941, -0.4240057531, 0.6715616176),
    }
    for position, (beta, se, z_stat, p_value) in expected.items():
        stats = by_position[position]
        self.assertAlmostEqual(stats.beta, beta, places=6)
        self.assertAlmostEqual(stats.standard_error, se, places=6)
        self.assertAlmostEqual(stats.z_stat, z_stat, places=6)
        self.assertAlmostEqual(stats.p_value, p_value, places=6)
    def looks_constant(r):
        return (not r.fit.converged) or np.isnan(r.p_value) or abs(r.p_value - 1) < 1e-4
    for position in (3, 6, 7, 8, 9, 10):
        self.assertTrue(looks_constant(by_position[position]))
def test_logistic_regression_wald_test_two_cov_pl(self):
    """Wald logistic regression with two covariates, using the PL-derived
    genotype dosage (hl.pl_dosage) as the regressor instead of the
    hard-call allele count."""
    covariates = hl.import_table(resource('regressionLogistic.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLogisticBoolean.pheno'),
                            key='Sample',
                            missing='0',
                            types={'isCase': hl.tbool})
    mt = hl.import_vcf(resource('regressionLogistic.vcf'))
    mt = hl.logistic_regression('wald',
                                y=pheno[mt.s].isCase,
                                x=hl.pl_dosage(mt.PL),
                                covariates=[covariates[mt.s].Cov1, covariates[mt.s].Cov2])
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.logreg))))
    # reference statistics for the two well-conditioned loci
    self.assertAlmostEqual(results[1].beta, -0.8286774, places=6)
    self.assertAlmostEqual(results[1].standard_error, 2.151145, places=6)
    self.assertAlmostEqual(results[1].z_stat, -0.3852261, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.7000699, places=6)
    self.assertAlmostEqual(results[2].beta, -0.4431764, places=6)
    self.assertAlmostEqual(results[2].standard_error, 1.045213, places=6)
    self.assertAlmostEqual(results[2].z_stat, -0.4240058, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.6715616, places=6)
    def is_constant(r):
        # degenerate fit: failed convergence, NaN p-value, or p ~ 1
        return (not r.fit.converged) or np.isnan(r.p_value) or abs(r.p_value - 1) < 1e-4
    self.assertFalse(results[3].fit.converged)
    self.assertTrue(is_constant(results[6]))
    self.assertTrue(is_constant(results[7]))
    self.assertTrue(is_constant(results[8]))
    self.assertTrue(is_constant(results[9]))
    self.assertTrue(is_constant(results[10]))
def test_logistic_regression_wald_two_cov_dosage(self):
    """Wald logistic regression on GEN-imported data with GP-derived
    dosage (hl.gp_dosage) as the regressor; compared at places=4 —
    looser than the PL variant, presumably because GEN probabilities
    are stored at reduced precision (TODO confirm)."""
    covariates = hl.import_table(resource('regressionLogistic.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLogisticBoolean.pheno'),
                            key='Sample',
                            missing='0',
                            types={'isCase': hl.tbool})
    mt = hl.import_gen(resource('regressionLogistic.gen'),
                       sample_file=resource('regressionLogistic.sample'))
    mt = hl.logistic_regression('wald',
                                y=pheno[mt.s].isCase,
                                x=hl.gp_dosage(mt.GP),
                                covariates=[covariates[mt.s].Cov1, covariates[mt.s].Cov2])
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.logreg))))
    # same reference statistics as the PL-dosage test, looser tolerance
    self.assertAlmostEqual(results[1].beta, -0.8286774, places=4)
    self.assertAlmostEqual(results[1].standard_error, 2.151145, places=4)
    self.assertAlmostEqual(results[1].z_stat, -0.3852261, places=4)
    self.assertAlmostEqual(results[1].p_value, 0.7000699, places=4)
    self.assertAlmostEqual(results[2].beta, -0.4431764, places=4)
    self.assertAlmostEqual(results[2].standard_error, 1.045213, places=4)
    self.assertAlmostEqual(results[2].z_stat, -0.4240058, places=4)
    self.assertAlmostEqual(results[2].p_value, 0.6715616, places=4)
    def is_constant(r):
        # degenerate fit: failed convergence, NaN p-value, or p ~ 1
        return (not r.fit.converged) or np.isnan(r.p_value) or abs(r.p_value - 1) < 1e-4
    self.assertFalse(results[3].fit.converged)
    self.assertTrue(is_constant(results[6]))
    self.assertTrue(is_constant(results[7]))
    self.assertTrue(is_constant(results[8]))
    self.assertTrue(is_constant(results[9]))
    self.assertTrue(is_constant(results[10]))
def test_logistic_regression_lrt_two_cov(self):
    """Likelihood-ratio logistic regression with two covariates; the LRT
    output reports beta, a chi-squared statistic, and a p-value."""
    covariates = hl.import_table(resource('regressionLogistic.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLogisticBoolean.pheno'),
                            key='Sample',
                            missing='0',
                            types={'isCase': hl.tbool})
    mt = hl.import_vcf(resource('regressionLogistic.vcf'))
    mt = hl.logistic_regression('lrt',
                                y=pheno[mt.s].isCase,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[covariates[mt.s].Cov1, covariates[mt.s].Cov2])
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.logreg))))
    # reference statistics for the two well-conditioned loci
    self.assertAlmostEqual(results[1].beta, -0.81226793796, places=6)
    self.assertAlmostEqual(results[1].chi_sq_stat, 0.1503349167, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.6982155052, places=6)
    self.assertAlmostEqual(results[2].beta, -0.43659460858, places=6)
    self.assertAlmostEqual(results[2].chi_sq_stat, 0.1813968574, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.6701755415, places=6)
    def is_constant(r):
        # degenerate fit: failed convergence, NaN p-value, or p ~ 1
        return (not r.fit.converged) or np.isnan(r.p_value) or abs(r.p_value - 1) < 1e-4
    self.assertFalse(results[3].fit.converged)
    self.assertTrue(is_constant(results[6]))
    self.assertTrue(is_constant(results[7]))
    self.assertTrue(is_constant(results[8]))
    self.assertTrue(is_constant(results[9]))
    self.assertTrue(is_constant(results[10]))
def test_logistic_regression_score_two_cov(self):
    """Score logistic regression with two covariates; the score test
    reports only a chi-squared statistic and p-value (no fitted beta),
    so constancy is detected via a missing or near-zero statistic."""
    covariates = hl.import_table(resource('regressionLogistic.cov'),
                                 key='Sample',
                                 types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
    pheno = hl.import_table(resource('regressionLogisticBoolean.pheno'),
                            key='Sample',
                            missing='0',
                            types={'isCase': hl.tbool})
    mt = hl.import_vcf(resource('regressionLogistic.vcf'))
    mt = hl.logistic_regression('score',
                                y=pheno[mt.s].isCase,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[covariates[mt.s].Cov1, covariates[mt.s].Cov2])
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.logreg))))
    # reference statistics; unlike wald/lrt, locus 3 produces a result here
    self.assertAlmostEqual(results[1].chi_sq_stat, 0.1502364955, places=6)
    self.assertAlmostEqual(results[1].p_value, 0.6983094571, places=6)
    self.assertAlmostEqual(results[2].chi_sq_stat, 0.1823600965, places=6)
    self.assertAlmostEqual(results[2].p_value, 0.6693528073, places=6)
    self.assertAlmostEqual(results[3].chi_sq_stat, 7.047367694, places=6)
    self.assertAlmostEqual(results[3].p_value, 0.007938182229, places=6)
    def is_constant(r):
        # constant genotype: statistic missing or essentially zero
        return r.chi_sq_stat is None or r.chi_sq_stat < 1e-6
    self.assertTrue(is_constant(results[6]))
    self.assertTrue(is_constant(results[7]))
    self.assertTrue(is_constant(results[8]))
    self.assertTrue(is_constant(results[9]))
    self.assertTrue(is_constant(results[10]))
def test_logistic_regression_epacts(self):
    """Compare all four logistic-regression tests (wald, lrt, score,
    firth) against reference values on 1000 Genomes data, with sex and
    two principal components as covariates.  Each run annotates rows
    under its own root so the four sets of results coexist."""
    covariates = hl.import_table(resource('regressionLogisticEpacts.cov'),
                                 key='IND_ID',
                                 types={'PC1': hl.tfloat, 'PC2': hl.tfloat})
    fam = hl.import_fam(resource('regressionLogisticEpacts.fam'))
    mt = hl.import_vcf(resource('regressionLogisticEpacts.vcf'))
    mt = mt.annotate_cols(**covariates[mt.s], **fam[mt.s])
    mt = hl.logistic_regression('wald',
                                y=mt.is_case,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[mt.is_female, mt.PC1, mt.PC2],
                                root='wald')
    mt = hl.logistic_regression('lrt',
                                y=mt.is_case,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[mt.is_female, mt.PC1, mt.PC2],
                                root='lrt')
    mt = hl.logistic_regression('score',
                                y=mt.is_case,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[mt.is_female, mt.PC1, mt.PC2],
                                root='score')
    mt = hl.logistic_regression('firth',
                                y=mt.is_case,
                                x=mt.GT.n_alt_alleles(),
                                covariates=[mt.is_female, mt.PC1, mt.PC2],
                                root='firth')
    # 2535 samples from 1K Genomes Project
    # Locus("22", 16060511) # MAC 623
    # Locus("22", 16115878) # MAC 370
    # Locus("22", 16115882) # MAC 1207
    # Locus("22", 16117940) # MAC 7
    # Locus("22", 16117953) # MAC 21
    mt = mt.select_rows('wald', 'lrt', 'firth', 'score')
    results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.row))))
    self.assertAlmostEqual(results[16060511].wald.beta, -0.097476, places=4)
    self.assertAlmostEqual(results[16060511].wald.standard_error, 0.087478, places=4)
    self.assertAlmostEqual(results[16060511].wald.z_stat, -1.1143, places=4)
    self.assertAlmostEqual(results[16060511].wald.p_value, 0.26516, places=4)
    self.assertAlmostEqual(results[16060511].lrt.p_value, 0.26475, places=4)
    self.assertAlmostEqual(results[16060511].score.p_value, 0.26499, places=4)
    self.assertAlmostEqual(results[16060511].firth.beta, -0.097079, places=4)
    self.assertAlmostEqual(results[16060511].firth.p_value, 0.26593, places=4)
    self.assertAlmostEqual(results[16115878].wald.beta, -0.052632, places=4)
    self.assertAlmostEqual(results[16115878].wald.standard_error, 0.11272, places=4)
    self.assertAlmostEqual(results[16115878].wald.z_stat, -0.46691, places=4)
    self.assertAlmostEqual(results[16115878].wald.p_value, 0.64056, places=4)
    self.assertAlmostEqual(results[16115878].lrt.p_value, 0.64046, places=4)
    self.assertAlmostEqual(results[16115878].score.p_value, 0.64054, places=4)
    self.assertAlmostEqual(results[16115878].firth.beta, -0.052301, places=4)
    self.assertAlmostEqual(results[16115878].firth.p_value, 0.64197, places=4)
    self.assertAlmostEqual(results[16115882].wald.beta, -0.15598, places=4)
    self.assertAlmostEqual(results[16115882].wald.standard_error, 0.079508, places=4)
    self.assertAlmostEqual(results[16115882].wald.z_stat, -1.9619, places=4)
    self.assertAlmostEqual(results[16115882].wald.p_value, 0.049779, places=4)
    self.assertAlmostEqual(results[16115882].lrt.p_value, 0.049675, places=4)
    self.assertAlmostEqual(results[16115882].score.p_value, 0.049675, places=4)
    self.assertAlmostEqual(results[16115882].firth.beta, -0.15567, places=4)
    self.assertAlmostEqual(results[16115882].firth.p_value, 0.04991, places=4)
    # low-MAC loci below get looser tolerances on some wald fields
    self.assertAlmostEqual(results[16117940].wald.beta, -0.88059, places=4)
    self.assertAlmostEqual(results[16117940].wald.standard_error, 0.83769, places=2)
    self.assertAlmostEqual(results[16117940].wald.z_stat, -1.0512, places=2)
    self.assertAlmostEqual(results[16117940].wald.p_value, 0.29316, places=2)
    self.assertAlmostEqual(results[16117940].lrt.p_value, 0.26984, places=4)
    self.assertAlmostEqual(results[16117940].score.p_value, 0.27828, places=4)
    self.assertAlmostEqual(results[16117940].firth.beta, -0.7524, places=4)
    self.assertAlmostEqual(results[16117940].firth.p_value, 0.30731, places=4)
    self.assertAlmostEqual(results[16117953].wald.beta, 0.54921, places=4)
    self.assertAlmostEqual(results[16117953].wald.standard_error, 0.4517, places=3)
    self.assertAlmostEqual(results[16117953].wald.z_stat, 1.2159, places=3)
    self.assertAlmostEqual(results[16117953].wald.p_value, 0.22403, places=3)
    self.assertAlmostEqual(results[16117953].lrt.p_value, 0.21692, places=4)
    self.assertAlmostEqual(results[16117953].score.p_value, 0.21849, places=4)
    self.assertAlmostEqual(results[16117953].firth.beta, 0.5258, places=4)
    self.assertAlmostEqual(results[16117953].firth.p_value, 0.22562, places=4)
def test_trio_matrix(self):
    """
    This test depends on certain properties of the trio matrix VCF and
    pedigree structure. This test is NOT a valid test if the pedigree
    includes quads: the trio_matrix method will duplicate the parents
    appropriately, but the genotypes_table and samples_table orthogonal
    paths would require another duplication/explode that we haven't written.
    """
    ped = hl.Pedigree.read(resource('triomatrix.fam'))
    ht = hl.import_fam(resource('triomatrix.fam'))
    mt = hl.import_vcf(resource('triomatrix.vcf'))
    mt = mt.annotate_cols(fam=ht[mt.s])
    # build dad/mom marker tables keyed by the parent's own sample id
    dads = ht.filter(hl.is_defined(ht.pat_id))
    dads = dads.select(dads.pat_id, is_dad=True).key_by('pat_id')
    moms = ht.filter(hl.is_defined(ht.mat_id))
    moms = moms.select(moms.mat_id, is_mom=True).key_by('mat_id')
    # reference path: tag each entry with its family role, then group
    # per (locus, alleles, family)
    et = (mt.entries()
          .key_by('s')
          .join(dads, how='left')
          .join(moms, how='left'))
    et = et.annotate(is_dad=hl.is_defined(et.is_dad),
                     is_mom=hl.is_defined(et.is_mom))
    et = (et
          .group_by(et.locus, et.alleles, fam=et.fam.fam_id)
          .aggregate(data=hl.agg.collect(hl.struct(
        role=hl.case().when(et.is_dad, 1).when(et.is_mom, 2).default(0),
        g=hl.struct(GT=et.GT, AD=et.AD, DP=et.DP, GQ=et.GQ, PL=et.PL)))))
    # keep only complete trios (proband + both parents)
    et = et.filter(hl.len(et.data) == 3)
    et = et.select('data').explode('data')
    # path under test: trio_matrix entries reshaped to the same layout
    tt = hl.trio_matrix(mt, ped, complete_trios=True).entries().key_by('locus', 'alleles')
    tt = tt.annotate(fam=tt.proband.fam.fam_id,
                     data=[hl.struct(role=0, g=tt.proband_entry.select('GT', 'AD', 'DP', 'GQ', 'PL')),
                           hl.struct(role=1, g=tt.father_entry.select('GT', 'AD', 'DP', 'GQ', 'PL')),
                           hl.struct(role=2, g=tt.mother_entry.select('GT', 'AD', 'DP', 'GQ', 'PL'))])
    tt = tt.select('fam', 'data').explode('data')
    tt = tt.filter(hl.is_defined(tt.data.g)).key_by('locus', 'alleles', 'fam')
    self.assertEqual(et.key.dtype, tt.key.dtype)
    self.assertEqual(et.row.dtype, tt.row.dtype)
    self.assertTrue(et._same(tt))
    # test annotations
    e_cols = (mt.cols()
              .join(dads, how='left')
              .join(moms, how='left'))
    e_cols = e_cols.annotate(is_dad=hl.is_defined(e_cols.is_dad),
                             is_mom=hl.is_defined(e_cols.is_mom))
    e_cols = (e_cols.group_by(fam=e_cols.fam.fam_id)
              .aggregate(data=hl.agg.collect(hl.struct(role=hl.case()
                                                       .when(e_cols.is_dad, 1).when(e_cols.is_mom, 2).default(0),
                                                       sa=hl.struct(**e_cols.row.select(*mt.col))))))
    e_cols = e_cols.filter(hl.len(e_cols.data) == 3).select('data').explode('data')
    t_cols = hl.trio_matrix(mt, ped, complete_trios=True).cols()
    t_cols = t_cols.annotate(fam=t_cols.proband.fam.fam_id,
                             data=[
                                 hl.struct(role=0, sa=t_cols.proband),
                                 hl.struct(role=1, sa=t_cols.father),
                                 hl.struct(role=2, sa=t_cols.mother)]).key_by('fam').select('data').explode('data')
    t_cols = t_cols.filter(hl.is_defined(t_cols.data.sa))
    self.assertEqual(e_cols.key.dtype, t_cols.key.dtype)
    self.assertEqual(e_cols.row.dtype, t_cols.row.dtype)
    self.assertTrue(e_cols._same(t_cols))
def test_sample_qc(self):
    """Smoke test: hl.sample_qc runs on the shared dataset without raising."""
    hl.sample_qc(self.get_dataset())
def test_variant_qc(self):
    """variant_qc on a hand-built two-variant matrix table: one biallelic
    site with a missing call, and one multiallelic site with full calls.
    Checks allele counts/frequencies, call statistics, HWE fields, and
    DP/GQ summary statistics."""
    data = [
        {'v': '1:1:A:T', 's': '1', 'GT': hl.Call([0, 0]), 'GQ': 10, 'DP': 0},
        {'v': '1:1:A:T', 's': '2', 'GT': hl.Call([1, 1]), 'GQ': 10, 'DP': 5},
        {'v': '1:1:A:T', 's': '3', 'GT': hl.Call([0, 1]), 'GQ': 11, 'DP': 100},
        {'v': '1:1:A:T', 's': '4', 'GT': None, 'GQ': None, 'DP': 100},
        {'v': '1:2:A:T,C', 's': '1', 'GT': hl.Call([1, 2]), 'GQ': 10, 'DP': 5},
        {'v': '1:2:A:T,C', 's': '2', 'GT': hl.Call([2, 2]), 'GQ': 10, 'DP': 5},
        {'v': '1:2:A:T,C', 's': '3', 'GT': hl.Call([0, 1]), 'GQ': 10, 'DP': 5},
        {'v': '1:2:A:T,C', 's': '4', 'GT': hl.Call([1, 1]), 'GQ': 10, 'DP': 5},
    ]
    ht = hl.Table.parallelize(data, hl.dtype('struct{v: str, s: str, GT: call, GQ: int, DP: int}'))
    ht = ht.transmute(**hl.parse_variant(ht.v))
    mt = ht.to_matrix_table(['locus', 'alleles'], ['s'], partition_key=['locus'])
    mt = hl.variant_qc(mt, 'vqc')
    r = mt.rows().collect()
    # r[0]: biallelic site, 3 of 4 samples called
    self.assertEqual(r[0].vqc.AF, [0.5, 0.5])
    self.assertEqual(r[0].vqc.AC, [3, 3])
    self.assertEqual(r[0].vqc.AN, 6)
    self.assertEqual(r[0].vqc.homozygote_count, [1, 1])
    self.assertEqual(r[0].vqc.n_called, 3)
    self.assertEqual(r[0].vqc.n_not_called, 1)
    self.assertEqual(r[0].vqc.call_rate, 0.75)
    self.assertEqual(r[0].vqc.n_het, 1)
    self.assertEqual(r[0].vqc.n_non_ref, 2)
    self.assertEqual(r[0].vqc.r_expected_het_freq, 0.6)
    self.assertEqual(r[0].vqc.p_hwe, 0.7)
    self.assertEqual(r[0].vqc.dp_stats.min, 0)
    self.assertEqual(r[0].vqc.dp_stats.max, 100)
    self.assertEqual(r[0].vqc.dp_stats.mean, 51.25)
    self.assertAlmostEqual(r[0].vqc.dp_stats.stdev, 48.782040752719645)
    self.assertEqual(r[0].vqc.gq_stats.min, 10)
    self.assertEqual(r[0].vqc.gq_stats.max, 11)
    self.assertAlmostEqual(r[0].vqc.gq_stats.mean, 10.333333333333334)
    self.assertAlmostEqual(r[0].vqc.gq_stats.stdev, 0.47140452079103168)
    # r[1]: multiallelic site, all samples called; HWE fields are missing
    # (None) at multiallelic sites
    self.assertEqual(r[1].vqc.AF, [0.125, 0.5, 0.375])
    self.assertEqual(r[1].vqc.AC, [1, 4, 3])
    self.assertEqual(r[1].vqc.AN, 8)
    self.assertEqual(r[1].vqc.homozygote_count, [0, 1, 1])
    self.assertEqual(r[1].vqc.n_called, 4)
    self.assertEqual(r[1].vqc.n_not_called, 0)
    self.assertEqual(r[1].vqc.call_rate, 1.0)
    self.assertEqual(r[1].vqc.n_het, 2)
    self.assertEqual(r[1].vqc.n_non_ref, 4)
    self.assertEqual(r[1].vqc.p_hwe, None)
    self.assertEqual(r[1].vqc.r_expected_het_freq, None)
    self.assertEqual(r[1].vqc.dp_stats.min, 5)
    self.assertEqual(r[1].vqc.dp_stats.max, 5)
    self.assertEqual(r[1].vqc.dp_stats.mean, 5)
    self.assertEqual(r[1].vqc.dp_stats.stdev, 0.0)
    self.assertEqual(r[1].vqc.gq_stats.min, 10)
    self.assertEqual(r[1].vqc.gq_stats.max, 10)
    self.assertEqual(r[1].vqc.gq_stats.mean, 10)
    self.assertEqual(r[1].vqc.gq_stats.stdev, 0)
def test_grm(self):
    """Compare Hail's genetic relatedness matrix exports (rel, gcta-grm,
    gcta-grm-bin) against the same matrices produced by the external
    `plink` binary on the exported dataset.  Requires `plink` on PATH."""
    tolerance = 0.001
    def load_id_file(path):
        # parse a two-column plink .id file, returning the second column
        ids = []
        with hl.hadoop_open(path) as f:
            for l in f:
                r = l.strip().split('\t')
                self.assertEqual(len(r), 2)
                ids.append(r[1])
        return ids
    def load_rel(ns, path):
        # parse a lower-triangular .rel text file into an ns x ns array
        rel = np.zeros((ns, ns))
        with hl.hadoop_open(path) as f:
            for i, l in enumerate(f):
                for j, n in enumerate(map(float, l.strip().split('\t'))):
                    rel[i, j] = n
                self.assertEqual(j, i)
            self.assertEqual(i, ns - 1)
        return rel
    def load_grm(ns, nv, path):
        # parse a GCTA .grm text file (i, j, n_variants, value per line)
        # NOTE(review): uses utils.hadoop_open while the helpers above use
        # hl.hadoop_open — presumably equivalent; confirm and unify.
        m = np.zeros((ns, ns))
        with utils.hadoop_open(path) as f:
            i = 0
            for l in f:
                row = l.strip().split('\t')
                self.assertEqual(int(row[2]), nv)
                m[int(row[0]) - 1, int(row[1]) - 1] = float(row[3])
                i += 1
            self.assertEqual(i, ns * (ns + 1) / 2)
        return m
    def load_bin(ns, path):
        # parse a GCTA binary .grm.bin file: lower triangle of
        # little-endian 4-byte floats, nothing trailing
        m = np.zeros((ns, ns))
        with utils.hadoop_open(path, 'rb') as f:
            for i in range(ns):
                for j in range(i + 1):
                    b = f.read(4)
                    self.assertEqual(len(b), 4)
                    m[i, j] = unpack('<f', bytearray(b))[0]
            left = f.read()
            self.assertEqual(len(left), 0)
        return m
    b_file = utils.new_temp_file(prefix="plink")
    rel_file = utils.new_temp_file(prefix="test", suffix="rel")
    rel_id_file = utils.new_temp_file(prefix="test", suffix="rel.id")
    grm_file = utils.new_temp_file(prefix="test", suffix="grm")
    grm_bin_file = utils.new_temp_file(prefix="test", suffix="grm.bin")
    grm_nbin_file = utils.new_temp_file(prefix="test", suffix="grm.N.bin")
    dataset = self.get_dataset()
    n_samples = dataset.count_cols()
    # keep only polymorphic, fully-called variants so plink and Hail see
    # the same data
    dataset = dataset.annotate_rows(AC=agg.sum(dataset.GT.n_alt_alleles()),
                                    n_called=agg.count_where(hl.is_defined(dataset.GT)))
    dataset = dataset.filter_rows((dataset.AC > 0) & (dataset.AC < 2 * dataset.n_called))
    dataset = dataset.filter_rows(dataset.n_called == n_samples).persist()
    hl.export_plink(dataset, b_file, ind_id=dataset.s)
    sample_ids = [row.s for row in dataset.cols().select().collect()]
    n_variants = dataset.count_rows()
    self.assertGreater(n_variants, 0)
    grm = hl.genetic_relatedness_matrix(dataset.GT)
    grm.export_id_file(rel_id_file)
    ############
    ### rel
    p_file = utils.new_temp_file(prefix="plink")
    syscall('''plink --bfile {} --make-rel --out {}'''
            .format(utils.uri_path(b_file), utils.uri_path(p_file)), shell=True, stdout=DEVNULL, stderr=DEVNULL)
    self.assertEqual(load_id_file(p_file + ".rel.id"), sample_ids)
    grm.export_rel(rel_file)
    self.assertEqual(load_id_file(rel_id_file), sample_ids)
    self.assertTrue(np.allclose(load_rel(n_samples, p_file + ".rel"),
                                load_rel(n_samples, rel_file),
                                atol=tolerance))
    ############
    ### gcta-grm
    p_file = utils.new_temp_file(prefix="plink")
    syscall('''plink --bfile {} --make-grm-gz --out {}'''
            .format(utils.uri_path(b_file), utils.uri_path(p_file)), shell=True, stdout=DEVNULL, stderr=DEVNULL)
    self.assertEqual(load_id_file(p_file + ".grm.id"), sample_ids)
    grm.export_gcta_grm(grm_file)
    self.assertTrue(np.allclose(load_grm(n_samples, n_variants, p_file + ".grm.gz"),
                                load_grm(n_samples, n_variants, grm_file),
                                atol=tolerance))
    ############
    ### gcta-grm-bin
    p_file = utils.new_temp_file(prefix="plink")
    syscall('''plink --bfile {} --make-grm-bin --out {}'''
            .format(utils.uri_path(b_file), utils.uri_path(p_file)), shell=True, stdout=DEVNULL, stderr=DEVNULL)
    self.assertEqual(load_id_file(p_file + ".grm.id"), sample_ids)
    grm.export_gcta_grm_bin(grm_bin_file, grm_nbin_file)
    self.assertTrue(np.allclose(load_bin(n_samples, p_file + ".grm.bin"),
                                load_bin(n_samples, grm_bin_file),
                                atol=tolerance))
    self.assertTrue(np.allclose(load_bin(n_samples, p_file + ".grm.N.bin"),
                                load_bin(n_samples, grm_nbin_file),
                                atol=tolerance))
def test_block_matrix_from_numpy(self):
    """Round-trip a small 3x5 matrix through BlockMatrix at several block
    sizes, including one larger than either dimension."""
    source = np.matrix([[0, 1, 2, 3, 4],
                        [5, 6, 7, 8, 9],
                        [10, 11, 12, 13, 14]], dtype=np.float64)
    for block_size in (1, 2, 5, 1024):
        bm = BlockMatrix.from_numpy(source, block_size)
        assert bm.n_rows == 3
        assert bm.n_cols == 5
        assert (bm.to_numpy() == source).all()
def test_rrm(self):
    """Check hl.realized_relationship_matrix against a direct numpy
    computation (standardize genotype rows, then (1/m) * X^T X) on a
    simulated Balding-Nichols dataset."""
    seed = 0
    n1 = 100
    m1 = 200
    k = 3
    fst = .9
    dataset = hl.balding_nichols_model(k,
                                       n1,
                                       m1,
                                       fst=(k * [fst]),
                                       seed=seed,
                                       n_partitions=4)
    dataset = dataset.annotate_cols(s = hl.str(dataset.sample_idx)).key_cols_by('s')
    def direct_calculation(ds):
        # reference implementation: pull genotypes to numpy and compute
        # the RRM by hand
        ds = BlockMatrix.from_entry_expr(ds['GT'].n_alt_alleles()).to_numpy()
        # filter out constant rows
        isconst = lambda r: any([all([(gt < c + .01) and (gt > c - .01) for gt in r]) for c in range(3)])
        ds = np.array([row for row in ds if not isconst(row)])
        nvariants, nsamples = ds.shape
        sumgt = lambda r: sum([i for i in r if i >= 0])
        sumsq = lambda r: sum([i ** 2 for i in r if i >= 0])
        mean = [sumgt(row) / nsamples for row in ds]
        stddev = [sqrt(sumsq(row) / nsamples - mean[i] ** 2)
                  for i, row in enumerate(ds)]
        # standardize each variant row, then average the outer products
        mat = np.array([[(g - mean[i]) / stddev[i] for g in row] for i, row in enumerate(ds)])
        rrm = (mat.T @ mat) / nvariants
        return rrm
    def hail_calculation(ds):
        # compute via Hail, round-trip through a TSV export
        rrm = hl.realized_relationship_matrix(ds.GT)
        fn = utils.new_temp_file(suffix='.tsv')
        rrm.export_tsv(fn)
        data = []
        with open(utils.uri_path(fn)) as f:
            f.readline()
            for line in f:
                row = line.strip().split()
                data.append(list(map(float, row)))
        return np.array(data)
    manual = direct_calculation(dataset)
    rrm = hail_calculation(dataset)
    self.assertTrue(np.allclose(manual, rrm))
def test_hwe_normalized_pca(self):
    """hwe_normalized_pca yields k eigenvalues, one score row per sample,
    and a loadings table only when compute_loadings=True."""
    mt = hl.balding_nichols_model(3, 100, 50)
    eigenvalues, scores, loadings = hl.hwe_normalized_pca(
        mt.GT, k=2, compute_loadings=True)
    self.assertEqual(2, len(eigenvalues))
    self.assertIsInstance(scores, hl.Table)
    self.assertEqual(100, scores.count())
    self.assertIsInstance(loadings, hl.Table)
    # without compute_loadings, the loadings slot is None
    _, _, no_loadings = hl.hwe_normalized_pca(mt.GT, k=2, compute_loadings=False)
    self.assertIsNone(no_loadings)
def test_pca_against_numpy(self):
    """Compare hl.pca eigenvalues, scores, and loadings against an SVD of
    the equivalently-normalized matrix computed with numpy.  Signs of
    singular vectors are arbitrary, so only absolute values are compared."""
    mt = hl.import_vcf(resource('tiny_m.vcf'))
    mt = mt.filter_rows(hl.len(mt.alleles) == 2)
    mt = mt.annotate_rows(AC = hl.agg.sum(mt.GT.n_alt_alleles()),
                          n_called = hl.agg.count_where(hl.is_defined(mt.GT)))
    mt = mt.filter_rows((mt.AC > 0) & (mt.AC < 2 * mt.n_called)).persist()
    n_rows = mt.count_rows()
    def make_expr(mean):
        # HWE-style normalization; missing genotypes contribute 0
        return hl.cond(hl.is_defined(mt.GT),
                       (mt.GT.n_alt_alleles() - mean) / hl.sqrt(mean * (2 - mean) * n_rows / 2),
                       0)
    eigen, scores, loadings= hl.pca(hl.bind(make_expr, mt.AC / mt.n_called), k=3, compute_loadings=True)
    hail_scores = scores.explode('scores').scores.collect()
    hail_loadings = loadings.explode('loadings').loadings.collect()
    self.assertEqual(len(eigen), 3)
    self.assertEqual(scores.count(), mt.count_cols())
    self.assertEqual(loadings.count(), n_rows)
    # compute PCA with numpy
    def normalize(a):
        ms = np.mean(a, axis = 0, keepdims = True)
        return np.divide(np.subtract(a, ms), np.sqrt(2.0*np.multiply(ms/2.0, 1-ms/2.0)*a.shape[1]))
    # g is the known genotype matrix of tiny_m.vcf after filtering
    # (presumably — confirm against the resource file if it changes)
    g = np.pad(np.diag([1.0, 1, 2]), ((0, 1), (0, 0)), mode='constant')
    g[1, 0] = 1.0 / 3
    n = normalize(g)
    U, s, V = np.linalg.svd(n, full_matrices=0)
    np_scores = U.dot(np.diag(s)).flatten()
    np_loadings = V.transpose().flatten()
    np_eigenvalues = np.multiply(s,s).flatten()
    def check(hail_array, np_array):
        # compare |values| elementwise: singular-vector signs are arbitrary
        self.assertEqual(len(hail_array), len(np_array))
        for i, (left, right) in enumerate(zip(hail_array, np_array)):
            self.assertAlmostEqual(abs(left), abs(right),
                                   msg=f'mismatch at index {i}: hl={left}, np={right}',
                                   places=4)
    check(eigen, np_eigenvalues)
    check(hail_scores, np_scores)
    check(hail_loadings, np_loadings)
def _R_pc_relate(self, mt, maf):
    """Run the reference R implementation of PC-Relate on *mt*.

    Exports *mt* to plink format, invokes runPcRelate.R (requires
    Rscript on PATH), and parses its output into a table keyed by the
    sample-index pair, shaped like hl.pc_relate's output.

    :param mt: matrix table with a string col key to use as sample id
    :param maf: minor-allele-frequency cutoff passed through to R
    :return: table keyed by (i, j) with kin/ibd0/ibd1/ibd2 fields
    """
    plink_file = utils.uri_path(utils.new_temp_file())
    hl.export_plink(mt, plink_file, ind_id=hl.str(mt.col_key[0]))
    utils.run_command(["Rscript",
                       resource("is/hail/methods/runPcRelate.R"),
                       plink_file,
                       str(maf)])
    types = {
        'ID1': hl.tstr,
        'ID2': hl.tstr,
        'nsnp': hl.tfloat64,
        'kin': hl.tfloat64,
        'k0': hl.tfloat64,
        'k1': hl.tfloat64,
        'k2': hl.tfloat64
    }
    # the R script writes whitespace-delimited columns next to the
    # plink files, hence the ' +' delimiter
    plink_kin = hl.import_table(plink_file + '.out',
                                delimiter=' +',
                                types=types)
    return plink_kin.select(i=hl.struct(sample_idx=plink_kin.ID1),
                            j=hl.struct(sample_idx=plink_kin.ID2),
                            kin=plink_kin.kin,
                            ibd0=plink_kin.k0,
                            ibd1=plink_kin.k1,
                            ibd2=plink_kin.k2).key_by('i', 'j')
def test_pc_relate_on_balding_nichols_against_R_pc_relate(self):
    """Compare hl.pc_relate to the reference R implementation on
    simulated Balding-Nichols genotypes, field by field."""
    mt = hl.balding_nichols_model(3, 100, 1000)
    mt = mt.key_cols_by(sample_idx=hl.str(mt.sample_idx))
    hail_kin = hl.pc_relate(mt.GT, 0.00, k=2).cache()
    r_kin = self._R_pc_relate(mt, 0.00).cache()
    # per-field absolute tolerances: kinship tightest, ibd1 loosest
    for field, tol in (("kin", 1e-3), ("ibd0", 1e-2), ("ibd1", 2e-2), ("ibd2", 1e-2)):
        self.assertTrue(r_kin.select(field)._same(hail_kin.select(field),
                                                  tolerance=tol, absolute=True))
def test_pcrelate_paths(self):
    """pc_relate must give the same answer whether PCA scores are
    computed internally (via k=) or supplied via scores_expr, across
    all statistics levels, block sizes, and min_kinship filters."""
    mt = hl.balding_nichols_model(3, 50, 100)
    _, scores2, _ = hl.hwe_normalized_pca(mt.GT, k=2, compute_loadings=False)
    _, scores3, _ = hl.hwe_normalized_pca(mt.GT, k=3, compute_loadings=False)
    kin1 = hl.pc_relate(mt.GT, 0.10, k=2, statistics='kin', block_size=64)
    kin_s1 = hl.pc_relate(mt.GT, 0.10, scores_expr=scores2[mt.col_key].scores,
                          statistics='kin', block_size=32)
    kin2 = hl.pc_relate(mt.GT, 0.05, k=2, min_kinship=0.01, statistics='kin2', block_size=128).cache()
    kin_s2 = hl.pc_relate(mt.GT, 0.05, scores_expr=scores2[mt.col_key].scores, min_kinship=0.01,
                          statistics='kin2', block_size=16)
    kin3 = hl.pc_relate(mt.GT, 0.02, k=3, min_kinship=0.1, statistics='kin20', block_size=64).cache()
    kin_s3 = hl.pc_relate(mt.GT, 0.02, scores_expr=scores3[mt.col_key].scores, min_kinship=0.1,
                          statistics='kin20', block_size=32)
    kin4 = hl.pc_relate(mt.GT, 0.01, k=3, statistics='all', block_size=128)
    kin_s4 = hl.pc_relate(mt.GT, 0.01, scores_expr=scores3[mt.col_key].scores, statistics='all', block_size=16)
    # internal-PCA vs supplied-scores paths must agree
    self.assertTrue(kin1._same(kin_s1, tolerance=1e-4))
    self.assertTrue(kin2._same(kin_s2, tolerance=1e-4))
    self.assertTrue(kin3._same(kin_s3, tolerance=1e-4))
    self.assertTrue(kin4._same(kin_s4, tolerance=1e-4))
    # without min_kinship, every unordered sample pair is present
    self.assertTrue(kin1.count() == 50 * 49 / 2)
    # min_kinship must filter out all pairs below the threshold
    self.assertTrue(kin2.count() > 0)
    self.assertTrue(kin2.filter(kin2.kin < 0.01).count() == 0)
    self.assertTrue(kin3.count() > 0)
    self.assertTrue(kin3.filter(kin3.kin < 0.1).count() == 0)
def test_rename_duplicates(self):
    """After rename_duplicates, all sample ids must be distinct.

    FIXME: the shared dataset has no duplicate ids, so this exercises
    only id preservation, not actual renaming of duplicates.
    """
    dataset = self.get_dataset()  # FIXME - want to rename samples with same id
    renamed_ids = hl.rename_duplicates(dataset).cols().select().collect()
    # BUG FIX: the original `assertTrue(len(set(...)), len(...))` passed
    # its second argument as the failure *message*, so the uniqueness
    # check never ran (it passed whenever the set was non-empty).
    self.assertEqual(len(set(renamed_ids)), len(renamed_ids))
def test_split_multi_hts(self):
    """split_multi_hts output must match a pre-split reference VCF, and
    only the site at position 1180 may lack the was_split flag."""
    split = hl.split_multi_hts(hl.import_vcf(resource('split_test.vcf')))
    expected = hl.import_vcf(resource('split_test_b.vcf'))
    rows = split.rows()
    self.assertTrue(rows.all((rows.locus.position == 1180) | rows.was_split))
    # the reference VCF carries no split annotations, so drop them first
    self.assertTrue(split.drop('was_split', 'a_index')._same(expected))
def test_mendel_errors(self):
    """hl.mendel_errors returns four tables (per-error, per-family,
    per-individual, per-variant); check their schemas, row counts, and
    exact error tallies against known values for mendel.vcf/mendel.fam."""
    mt = hl.import_vcf(resource('mendel.vcf'))
    ped = hl.Pedigree.read(resource('mendel.fam'))
    men, fam, ind, var = hl.mendel_errors(mt['GT'], ped)
    # schema checks on all four tables
    self.assertEqual(men.key.dtype, hl.tstruct(locus=mt.locus.dtype,
                                               alleles=hl.tarray(hl.tstr),
                                               s=hl.tstr))
    self.assertEqual(men.row.dtype, hl.tstruct(locus=mt.locus.dtype,
                                               alleles=hl.tarray(hl.tstr),
                                               s=hl.tstr,
                                               fam_id=hl.tstr,
                                               mendel_code=hl.tint))
    self.assertEqual(fam.key.dtype, hl.tstruct(pat_id=hl.tstr,
                                               mat_id=hl.tstr))
    self.assertEqual(fam.row.dtype, hl.tstruct(pat_id=hl.tstr,
                                               mat_id=hl.tstr,
                                               fam_id=hl.tstr,
                                               children=hl.tint,
                                               errors=hl.tint64,
                                               snp_errors=hl.tint64))
    self.assertEqual(ind.key.dtype, hl.tstruct(s=hl.tstr))
    self.assertEqual(ind.row.dtype, hl.tstruct(s=hl.tstr,
                                               fam_id=hl.tstr,
                                               errors=hl.tint64,
                                               snp_errors=hl.tint64))
    self.assertEqual(var.key.dtype, hl.tstruct(locus=mt.locus.dtype,
                                               alleles=hl.tarray(hl.tstr)))
    self.assertEqual(var.row.dtype, hl.tstruct(locus=mt.locus.dtype,
                                               alleles=hl.tarray(hl.tstr),
                                               errors=hl.tint64))
    # row counts
    self.assertEqual(men.count(), 41)
    self.assertEqual(fam.count(), 2)
    self.assertEqual(ind.count(), 7)
    self.assertEqual(var.count(), mt.count_rows())
    # exact per-family and per-individual error tallies
    self.assertEqual(set(fam.select('errors', 'snp_errors').collect()),
                     {
                         hl.utils.Struct(pat_id='Dad1', mat_id='Mom1', errors=41, snp_errors=39),
                         hl.utils.Struct(pat_id='Dad2', mat_id='Mom2', errors=0, snp_errors=0)
                     })
    self.assertEqual(set(ind.select('errors', 'snp_errors').collect()),
                     {
                         hl.utils.Struct(s='Son1', errors=23, snp_errors=22),
                         hl.utils.Struct(s='Dtr1', errors=18, snp_errors=17),
                         hl.utils.Struct(s='Dad1', errors=19, snp_errors=18),
                         hl.utils.Struct(s='Mom1', errors=22, snp_errors=21),
                         hl.utils.Struct(s='Dad2', errors=0, snp_errors=0),
                         hl.utils.Struct(s='Mom2', errors=0, snp_errors=0),
                         hl.utils.Struct(s='Son2', errors=0, snp_errors=0)
                     })
    # spot-check per-variant errors at autosomal, X, and Y loci
    to_keep = hl.set([
        (hl.Locus("1", 1), ['C', 'CT']),
        (hl.Locus("1", 2), ['C', 'T']),
        (hl.Locus("X", 1), ['C', 'T']),
        (hl.Locus("X", 3), ['C', 'T']),
        (hl.Locus("Y", 1), ['C', 'T']),
        (hl.Locus("Y", 3), ['C', 'T'])
    ])
    self.assertEqual(var.filter(to_keep.contains((var.locus, var.alleles)))
                     .order_by('locus')
                     .select('errors').collect(),
                     [
                         hl.utils.Struct(locus=hl.Locus("1", 1), alleles=['C', 'CT'], errors=2),
                         hl.utils.Struct(locus=hl.Locus("1", 2), alleles=['C', 'T'], errors=1),
                         hl.utils.Struct(locus=hl.Locus("X", 1), alleles=['C', 'T'], errors=2),
                         hl.utils.Struct(locus=hl.Locus("X", 3), alleles=['C', 'T'], errors=1),
                         hl.utils.Struct(locus=hl.Locus("Y", 1), alleles=['C', 'T'], errors=1),
                         hl.utils.Struct(locus=hl.Locus("Y", 3), alleles=['C', 'T'], errors=1),
                     ])
    # missing parental sex must not change a child's error rows
    ped2 = hl.Pedigree.read(resource('mendelWithMissingSex.fam'))
    men2, _, _, _ = hl.mendel_errors(mt['GT'], ped2)
    self.assertTrue(men2.filter(men2.s == 'Dtr1')._same(men.filter(men.s == 'Dtr1')))
def test_export_vcf(self):
        """Round-trip a VCF: export with its metadata, re-import, and check both dataset and metadata survive."""
        dataset = hl.import_vcf(resource('sample.vcf.bgz'))
        vcf_metadata = hl.get_vcf_metadata(resource('sample.vcf.bgz'))
        # NOTE(review): hard-coded /tmp path; a unique temp file (utils.new_temp_file) would avoid
        # collisions between concurrent test runs.
        hl.export_vcf(dataset, '/tmp/sample.vcf', metadata=vcf_metadata)
        dataset_imported = hl.import_vcf('/tmp/sample.vcf')
        self.assertTrue(dataset._same(dataset_imported))
        metadata_imported = hl.get_vcf_metadata('/tmp/sample.vcf')
        self.assertDictEqual(vcf_metadata, metadata_imported)
def test_concordance(self):
        """Concordance of a dataset with itself: all mass sits on the diagonal of the global
        concordance matrix, matching the dataset's own genotype counts, and each per-row /
        per-column concordance matrix sums to the opposite dimension's count.
        """
        dataset = self.get_dataset()
        glob_conc, cols_conc, rows_conc = hl.concordance(dataset, dataset)
        # Total cell count of the 5x5 global matrix equals the number of (row, col) pairs.
        self.assertEqual(sum([sum(glob_conc[i]) for i in range(5)]), dataset.count_rows() * dataset.count_cols())
        counts = dataset.aggregate_entries(hl.Struct(n_het=agg.count(agg.filter(dataset.GT.is_het(), dataset.GT)),
                                                     n_hom_ref=agg.count(agg.filter(dataset.GT.is_hom_ref(), dataset.GT)),
                                                     n_hom_var=agg.count(agg.filter(dataset.GT.is_hom_var(), dataset.GT)),
                                                     nNoCall=agg.count(
                                                         agg.filter(hl.is_missing(dataset.GT), dataset.GT))))
        # Diagonal entries: [0]=no data, [1]=no call, [2]=hom ref, [3]=het, [4]=hom var.
        self.assertEqual(glob_conc[0][0], 0)
        self.assertEqual(glob_conc[1][1], counts.nNoCall)
        self.assertEqual(glob_conc[2][2], counts.n_hom_ref)
        self.assertEqual(glob_conc[3][3], counts.n_het)
        self.assertEqual(glob_conc[4][4], counts.n_hom_var)
        # A dataset is perfectly concordant with itself: every off-diagonal cell is zero.
        [self.assertEqual(glob_conc[i][j], 0) for i in range(5) for j in range(5) if i != j]
        self.assertTrue(cols_conc.all(hl.sum(hl.flatten(cols_conc.concordance)) == dataset.count_rows()))
        self.assertTrue(rows_conc.all(hl.sum(hl.flatten(rows_conc.concordance)) == dataset.count_cols()))
        # Smoke-test the write path for both result tables. Was: both tables written to the
        # same path '/tmp/foo.kt', so the second write clobbered the first; use distinct paths.
        cols_conc.write('/tmp/foo_cols.kt', overwrite=True)
        rows_conc.write('/tmp/foo_rows.kt', overwrite=True)
def test_import_table_force_bgz(self):
        """force_bgz=True lets import_table read a block-gzipped file that carries a plain .gz extension."""
        f = utils.new_temp_file(suffix=".bgz")
        t = utils.range_table(10, 5)
        t.export(f)
        # Copy the bgz output to a .gz name so the codec cannot be inferred from the extension.
        f2 = utils.new_temp_file(suffix=".gz")
        utils.run_command(["cp", utils.uri_path(f), utils.uri_path(f2)])
        t2 = hl.import_table(f2, force_bgz=True, impute=True).key_by('idx')
        self.assertTrue(t._same(t2))
def test_import_locus_intervals(self):
        """import_locus_intervals reads one interval per non-blank line, types the point as a GRCh37
        locus, and round-trips through export/import of the reconstructed intervals."""
        interval_file = resource('annotinterall.interval_list')
        t = hl.import_locus_intervals(interval_file, reference_genome='GRCh37')
        nint = t.count()
        # Count non-blank lines in the raw file as the expected interval count.
        i = 0
        with open(interval_file) as f:
            for line in f:
                if len(line.strip()) != 0:
                    i += 1
        self.assertEqual(nint, i)
        self.assertEqual(t.interval.dtype.point_type, hl.tlocus('GRCh37'))
        # Rebuild each interval from its endpoints (closed on both sides), export, and re-import.
        tmp_file = utils.new_temp_file(prefix="test", suffix="interval_list")
        start = t.interval.start
        end = t.interval.end
        (t
         .key_by(interval=hl.locus_interval(start.contig, start.position, end.position, True, True))
         .select()
         .export(tmp_file, header=False))
        t2 = hl.import_locus_intervals(tmp_file)
        self.assertTrue(t.select()._same(t2))
def test_import_locus_intervals_no_reference_specified(self):
        """With reference_genome=None the interval point type is a plain (contig, position) struct."""
        interval_file = resource('annotinterall.interval_list')
        t = hl.import_locus_intervals(interval_file, reference_genome=None)
        self.assertTrue(t.count() == 2)
        self.assertEqual(t.interval.dtype.point_type, hl.tstruct(contig=hl.tstr, position=hl.tint32))
def test_import_locus_intervals_badly_defined_intervals(self):
        """skip_invalid_intervals drops malformed intervals; one more row survives without a
        reference genome because reference-based validation no longer applies."""
        interval_file = resource('example3.interval_list')
        t = hl.import_locus_intervals(interval_file, reference_genome='GRCh37', skip_invalid_intervals=True)
        self.assertTrue(t.count() == 21)
        t = hl.import_locus_intervals(interval_file, reference_genome=None, skip_invalid_intervals=True)
        self.assertTrue(t.count() == 22)
def test_import_bed(self):
        """import_bed keeps only data records (skipping header/track lines), types loci against
        GRCh37, and exposes a 'target' row field when the BED file has a name column.
        """
        bed_file = resource('example1.bed')
        bed = hl.import_bed(bed_file, reference_genome='GRCh37')
        nbed = bed.count()
        # Expected count: non-blank lines whose first field parses as an integer contig.
        i = 0
        with open(bed_file) as f:
            for line in f:
                if len(line.strip()) != 0:
                    try:
                        int(line.split()[0])
                        i += 1
                    # Was a bare `except:`, which also swallows KeyboardInterrupt/SystemExit;
                    # int() raises ValueError on header/track lines, so catch only that.
                    except ValueError:
                        pass
        self.assertEqual(nbed, i)
        self.assertEqual(bed.interval.dtype.point_type, hl.tlocus('GRCh37'))
        # example2.bed carries a name column, imported as the 'target' field.
        bed_file = resource('example2.bed')
        t = hl.import_bed(bed_file, reference_genome='GRCh37')
        self.assertEqual(t.interval.dtype.point_type, hl.tlocus('GRCh37'))
        self.assertTrue(list(t.key.dtype) == ['interval'])
        self.assertTrue(list(t.row.dtype) == ['interval', 'target'])
def test_import_bed_no_reference_specified(self):
        """With reference_genome=None the BED interval point type is a plain (contig, position) struct."""
        bed_file = resource('example1.bed')
        t = hl.import_bed(bed_file, reference_genome=None)
        self.assertTrue(t.count() == 3)
        self.assertEqual(t.interval.dtype.point_type, hl.tstruct(contig=hl.tstr, position=hl.tint32))
def test_import_bed_badly_defined_intervals(self):
        """skip_invalid_intervals drops malformed BED records; one more row survives without a
        reference genome because reference-based validation no longer applies."""
        bed_file = resource('example4.bed')
        t = hl.import_bed(bed_file, reference_genome='GRCh37', skip_invalid_intervals=True)
        self.assertTrue(t.count() == 3)
        t = hl.import_bed(bed_file, reference_genome=None, skip_invalid_intervals=True)
        self.assertTrue(t.count() == 4)
def test_annotate_intervals(self):
        """Annotating rows by interval lookup: BED and interval_list sources must agree, and
        looked-up targets must match the expected gene by position."""
        ds = self.get_dataset()
        bed1 = hl.import_bed(resource('example1.bed'), reference_genome='GRCh37')
        bed2 = hl.import_bed(resource('example2.bed'), reference_genome='GRCh37')
        bed3 = hl.import_bed(resource('example3.bed'), reference_genome='GRCh37')
        self.assertTrue(list(bed2.key.dtype) == ['interval'])
        self.assertTrue(list(bed2.row.dtype) == ['interval','target'])
        interval_list1 = hl.import_locus_intervals(resource('exampleAnnotation1.interval_list'))
        interval_list2 = hl.import_locus_intervals(resource('exampleAnnotation2.interval_list'))
        self.assertTrue(list(interval_list2.key.dtype) == ['interval'])
        self.assertTrue(list(interval_list2.row.dtype) == ['interval', 'target'])
        # Loci outside the annotated intervals get a missing annotation.
        ann = ds.annotate_rows(in_interval = bed1[ds.locus]).rows()
        self.assertTrue(ann.all((ann.locus.position <= 14000000) |
                                (ann.locus.position >= 17000000) |
                                (hl.is_missing(ann.in_interval))))
        # Both targeted BED files map low positions to gene1, high positions to gene2.
        for bed in [bed2, bed3]:
            ann = ds.annotate_rows(target = bed[ds.locus].target).rows()
            expr = (hl.case()
                    .when(ann.locus.position <= 14000000, ann.target == 'gene1')
                    .when(ann.locus.position >= 17000000, ann.target == 'gene2')
                    .default(ann.target == hl.null(hl.tstr)))
            self.assertTrue(ann.all(expr))
        # interval_list and BED sources describing the same regions must annotate identically.
        self.assertTrue(ds.annotate_rows(in_interval = interval_list1[ds.locus]).rows()
                        ._same(ds.annotate_rows(in_interval = bed1[ds.locus]).rows()))
        self.assertTrue(ds.annotate_rows(target = interval_list2[ds.locus].target).rows()
                        ._same(ds.annotate_rows(target = bed2[ds.locus].target).rows()))
def test_import_fam(self):
        """import_fam yields one row per non-blank line of the .fam file."""
        fam_file = resource('sample.fam')
        nfam = hl.import_fam(fam_file).count()
        # Count non-blank lines in the raw file as the expected row count.
        i = 0
        with open(fam_file) as f:
            for line in f:
                if len(line.strip()) != 0:
                    i += 1
        self.assertEqual(nfam, i)
def test_export_plink(self):
        """Cross-check export_plink against the external `plink` tool: export the same VCF both
        ways, merge in PLINK's genotype-diff mode (--merge-mode 6), and require an empty diff.
        Requires the `plink` binary on PATH."""
        vcf_file = resource('sample.vcf')
        mt = hl.split_multi_hts(hl.import_vcf(vcf_file, min_partitions=10))
        split_vcf_file = utils.uri_path(utils.new_temp_file())
        hl_output = utils.uri_path(utils.new_temp_file())
        plink_output = utils.uri_path(utils.new_temp_file())
        merge_output = utils.uri_path(utils.new_temp_file())
        hl.export_vcf(mt, split_vcf_file)
        hl.export_plink(mt, hl_output)
        utils.run_command(["plink", "--vcf", split_vcf_file,
                           "--make-bed", "--out", plink_output,
                           "--const-fid", "--keep-allele-order"])
        # Rewrite PLINK's .bim variant IDs to Hail's contig:pos:a2:a1 convention so the
        # two filesets key variants identically for the merge.
        data = []
        with open(utils.uri_path(plink_output + ".bim")) as file:
            for line in file:
                row = line.strip().split()
                row[1] = ":".join([row[0], row[3], row[5], row[4]])
                data.append("\t".join(row) + "\n")
        with open(plink_output + ".bim", 'w') as f:
            f.writelines(data)
        utils.run_command(["plink", "--bfile", plink_output,
                           "--bmerge", hl_output, "--merge-mode",
                           "6", "--out", merge_output])
        # The .diff report lists mismatched genotypes; only the header row may be present.
        same = True
        with open(merge_output + ".diff") as f:
            for line in f:
                row = line.strip().split()
                if row != ["SNP", "FID", "IID", "NEW", "OLD"]:
                    same = False
                    break
        self.assertTrue(same)
def test_export_plink_exprs(self):
        """Exercise export_plink's expression arguments: defaults, custom FAM fields, a
        quantitative phenotype, custom BIM fields, a call override, and the whitespace
        validation errors for FAM/varid values."""
        ds = self.get_dataset()
        # Positional .fam / .bim columns renamed to semantic names for the assertions below.
        fam_mapping = {'f0': 'fam_id', 'f1': 'ind_id', 'f2': 'pat_id', 'f3': 'mat_id',
                       'f4': 'is_female', 'f5': 'pheno'}
        bim_mapping = {'f0': 'contig', 'f1': 'varid', 'f2': 'cm_position',
                       'f3': 'position', 'f4': 'a1', 'f5': 'a2'}
        # Test default arguments
        out1 = utils.new_temp_file()
        hl.export_plink(ds, out1)
        fam1 = (hl.import_table(out1 + '.fam', no_header=True, impute=False, missing="")
                .rename(fam_mapping))
        bim1 = (hl.import_table(out1 + '.bim', no_header=True, impute=False)
                .rename(bim_mapping))
        self.assertTrue(fam1.all((fam1.fam_id == "0") & (fam1.pat_id == "0") &
                                 (fam1.mat_id == "0") & (fam1.is_female == "0") &
                                 (fam1.pheno == "NA")))
        self.assertTrue(bim1.all((bim1.varid == bim1.contig + ":" + bim1.position + ":" + bim1.a2 + ":" + bim1.a1) &
                                 (bim1.cm_position == "0.0")))
        # Test non-default FAM arguments
        out2 = utils.new_temp_file()
        hl.export_plink(ds, out2, ind_id=ds.s, fam_id=ds.s, pat_id="nope",
                        mat_id="nada", is_female=True, pheno=False)
        fam2 = (hl.import_table(out2 + '.fam', no_header=True, impute=False, missing="")
                .rename(fam_mapping))
        # PLINK encodes is_female=True as "2" and a False case/control phenotype as "1".
        self.assertTrue(fam2.all((fam2.fam_id == fam2.ind_id) & (fam2.pat_id == "nope") &
                                 (fam2.mat_id == "nada") & (fam2.is_female == "2") &
                                 (fam2.pheno == "1")))
        # Test quantitative phenotype
        out3 = utils.new_temp_file()
        hl.export_plink(ds, out3, ind_id=ds.s, pheno=hl.float64(hl.len(ds.s)))
        fam3 = (hl.import_table(out3 + '.fam', no_header=True, impute=False, missing="")
                .rename(fam_mapping))
        self.assertTrue(fam3.all((fam3.fam_id == "0") & (fam3.pat_id == "0") &
                                 (fam3.mat_id == "0") & (fam3.is_female == "0") &
                                 (fam3.pheno != "0") & (fam3.pheno != "NA")))
        # Test non-default BIM arguments
        out4 = utils.new_temp_file()
        hl.export_plink(ds, out4, varid="hello", cm_position=100)
        bim4 = (hl.import_table(out4 + '.bim', no_header=True, impute=False)
                .rename(bim_mapping))
        self.assertTrue(bim4.all((bim4.varid == "hello") & (bim4.cm_position == "100.0")))
        # Test call expr
        out5 = utils.new_temp_file()
        ds_call = ds.annotate_entries(gt_fake=hl.call(0, 0))
        hl.export_plink(ds_call, out5, call=ds_call.gt_fake)
        ds_all_hom_ref = hl.import_plink(out5 + '.bed', out5 + '.bim', out5 + '.fam')
        nerrors = ds_all_hom_ref.aggregate_entries(agg.count_where(~ds_all_hom_ref.GT.is_hom_ref()))
        self.assertTrue(nerrors == 0)
        # Test white-space in FAM id expr raises error
        with self.assertRaisesRegex(TypeError, "has spaces in the following values:"):
            hl.export_plink(ds, utils.new_temp_file(), mat_id="hello world")
        # Test white-space in varid expr raises error
        with self.assertRaisesRegex(utils.FatalError, "no white space allowed:"):
            hl.export_plink(ds, utils.new_temp_file(), varid="hello world")
def test_export_gen(self):
        """Round-trip a GEN file through export_gen/import_gen; genotype probabilities are
        compared within an absolute tolerance because GEN stores them with limited precision."""
        gen = hl.import_gen(resource('example.gen'),
                            sample_file=resource('example.sample'),
                            contig_recoding={"01": "1"},
                            reference_genome='GRCh37',
                            min_partitions=3)
        # NOTE(review): hard-coded /tmp path; utils.new_temp_file would be safer.
        file = '/tmp/test_export_gen'
        hl.export_gen(gen, file)
        gen2 = hl.import_gen(file + '.gen',
                             sample_file=file + '.sample',
                             reference_genome='GRCh37',
                             min_partitions=3)
        self.assertTrue(gen._same(gen2, tolerance=3E-4, absolute=True))
def test_export_gen_exprs(self):
        """Exercise export_gen's expression arguments (ids, varid/rsid, gp override) by
        exporting with index-derived values and re-importing to verify every field.
        """
        gen = hl.import_gen(resource('example.gen'),
                            sample_file=resource('example.sample'),
                            contig_recoding={"01": "1"},
                            reference_genome='GRCh37',
                            min_partitions=3).add_col_index().add_row_index()
        out1 = utils.new_temp_file()
        hl.export_gen(gen, out1, id1=hl.str(gen.col_idx), id2=hl.str(gen.col_idx), missing=0.5,
                      varid=hl.str(gen.row_idx), rsid=hl.str(gen.row_idx), gp=[0.0, 1.0, 0.0])
        in1 = (hl.import_gen(out1 + '.gen', sample_file=out1 + '.sample', min_partitions=3)
               .add_col_index()
               .add_row_index())
        self.assertTrue(in1.aggregate_entries(agg.fraction(in1.GP == [0.0, 1.0, 0.0])) == 1.0)
        self.assertTrue(in1.aggregate_rows(agg.fraction((in1.varid == hl.str(in1.row_idx)) &
                                                        (in1.rsid == hl.str(in1.row_idx)))) == 1.0)
        # Was missing `== 1.0`: agg.fraction returns a float, so assertTrue passed for any
        # nonzero fraction. Require all sample ids to match, as in the two asserts above.
        self.assertTrue(in1.aggregate_cols(agg.fraction((in1.s == hl.str(in1.col_idx)))) == 1.0)
def test_tdt(self):
        """Compare transmission_disequilibrium_test output against precomputed truth
        (tdt_results.tsv): same row count and matching T/U counts, chi-square, and p-value."""
        pedigree = hl.Pedigree.read(resource('tdt.fam'))
        tdt_tab = (hl.transmission_disequilibrium_test(
            hl.split_multi_hts(hl.import_vcf(resource('tdt.vcf'), min_partitions=4)),
            pedigree))
        truth = hl.import_table(
            resource('tdt_results.tsv'),
            types={'POSITION': hl.tint32, 'T': hl.tint32, 'U': hl.tint32,
                   'Chi2': hl.tfloat64, 'Pval': hl.tfloat64})
        truth = (truth
                 .transmute(locus=hl.locus(truth.CHROM, truth.POSITION),
                            alleles=[truth.REF, truth.ALT])
                 .key_by('locus', 'alleles'))
        if tdt_tab.count() != truth.count():
            self.fail('Result has {} rows but should have {} rows'.format(tdt_tab.count(), truth.count()))
        # Outer-join result and truth on (locus, alleles), excluding NaN p-values on both sides.
        bad = (tdt_tab.filter(hl.is_nan(tdt_tab.p_value), keep=False)
               .join(truth.filter(hl.is_nan(truth.Pval), keep=False), how='outer'))
        # NOTE(review): unconditional describe() looks like leftover debugging output.
        bad.describe()
        # Keep only rows where result and truth disagree; the joined table must then be empty.
        bad = bad.filter(~(
            (bad.t == bad.T) &
            (bad.u == bad.U) &
            (hl.abs(bad.chi2 - bad.Chi2) < 0.001) &
            (hl.abs(bad.p_value - bad.Pval) < 0.001)))
        if bad.count() != 0:
            bad.order_by(hl.asc(bad.v)).show()
            self.fail('Found rows in violation of the predicate (see show output)')
def test_maximal_independent_set(self):
        """On a bipartite graph {i -> i+10}, the tie breaker (prefer lower index) keeps exactly
        nodes 0..9; also verify result schema and the type-mismatch error cases."""
        # prefer to remove nodes with higher index
        t = hl.utils.range_table(10)
        graph = t.select(i=hl.int64(t.idx), j=hl.int64(t.idx + 10), bad_type=hl.float32(t.idx))
        mis_table = hl.maximal_independent_set(graph.i, graph.j, True, lambda l, r: l - r)
        mis = [row['node'] for row in mis_table.collect()]
        self.assertEqual(sorted(mis), list(range(0, 10)))
        self.assertEqual(mis_table.row.dtype, hl.tstruct(node=hl.tint64))
        self.assertEqual(mis_table.key.dtype, hl.tstruct(node=hl.tint64))
        # Mismatched node types, nodes from different tables, and non-expression literals all fail.
        self.assertRaises(ValueError, lambda: hl.maximal_independent_set(graph.i, graph.bad_type, True))
        self.assertRaises(ValueError, lambda: hl.maximal_independent_set(graph.i, hl.utils.range_table(10).idx, True))
        self.assertRaises(ValueError, lambda: hl.maximal_independent_set(hl.literal(1), hl.literal(2), True))
def test_maximal_independent_set2(self):
        """On a cube graph (8 nodes, 12 edges), the result must be one of the known independent
        sets; the algorithm is greedy, so a non-maximal independent set is also accepted."""
        edges = [(0, 4), (0, 1), (0, 2), (1, 5), (1, 3), (2, 3), (2, 6),
                 (3, 7), (4, 5), (4, 6), (5, 7), (6, 7)]
        edges = [{"i": l, "j": r} for l, r in edges]
        t = hl.Table.parallelize(edges, hl.tstruct(i=hl.tint64, j=hl.tint64))
        mis_t = hl.maximal_independent_set(t.i, t.j)
        self.assertTrue(mis_t.row.dtype == hl.tstruct(node=hl.tint64) and
                        mis_t.globals.dtype == hl.tstruct())
        mis = set([row.node for row in mis_t.collect()])
        maximal_indep_sets = [{0, 6, 5, 3}, {1, 4, 7, 2}]
        non_maximal_indep_sets = [{0, 7}, {6, 1}]
        self.assertTrue(mis in non_maximal_indep_sets or mis in maximal_indep_sets)
def test_maximal_independent_set3(self):
        """With struct-typed nodes and a tie breaker that prefers removing controls, every
        surviving node must be a case, and the kept ids must form an expected set."""
        is_case = {"A", "C", "E", "G", "H"}
        edges = [("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")]
        edges = [{"i": {"id": l, "is_case": l in is_case},
                  "j": {"id": r, "is_case": r in is_case}} for l, r in edges]
        t = hl.Table.parallelize(edges, hl.tstruct(i=hl.tstruct(id=hl.tstr, is_case=hl.tbool),
                                                   j=hl.tstruct(id=hl.tstr, is_case=hl.tbool)))
        # Negative return keeps the left node: cases win ties against controls.
        tiebreaker = lambda l, r: (hl.case()
                                   .when(l.is_case & (~r.is_case), -1)
                                   .when(~(l.is_case) & r.is_case, 1)
                                   .default(0))
        mis = hl.maximal_independent_set(t.i, t.j, tie_breaker=tiebreaker)
        # G-H is a case-case edge, so either endpoint may survive.
        expected_sets = [{"A", "C", "E", "G"}, {"A", "C", "E", "H"}]
        self.assertTrue(mis.all(mis.node.is_case))
        self.assertTrue(set([row.id for row in mis.select(mis.node.id).collect()]) in expected_sets)
def test_filter_alleles(self):
        """filter_alleles with a constant-False predicate removes all rows; with constant-True
        it keeps them all. Checked over several VCFs in lieu of property-based generation."""
        # poor man's Gen
        paths = [resource('sample.vcf'),
                 resource('multipleChromosomes.vcf'),
                 resource('sample2.vcf')]
        for path in paths:
            ds = hl.import_vcf(path)
            self.assertEqual(
                hl.filter_alleles(ds, lambda a, i: False).count_rows(), 0)
            self.assertEqual(hl.filter_alleles(ds, lambda a, i: True).count_rows(), ds.count_rows())
def test_filter_alleles_hts(self):
        """filter_alleles_hts on a single A:T,G variant: keeping one alt with subset=True or
        subset=False (downcode) must reproduce the corresponding golden VCFs.
        """
        # 1 variant: A:T,G
        ds = hl.import_vcf(resource('filter_alleles/input.vcf'))
        # subset mode, keep only T
        self.assertTrue(
            hl.filter_alleles_hts(ds, lambda a, i: a == 'T', subset=True)
                .drop('old_alleles', 'old_locus', 'new_to_old', 'old_to_new')
                ._same(hl.import_vcf(resource('filter_alleles/keep_allele1_subset.vcf'))))
        # subset mode, keep only G
        self.assertTrue(
            hl.filter_alleles_hts(ds, lambda a, i: a == 'G', subset=True)
                .drop('old_alleles', 'old_locus', 'new_to_old', 'old_to_new')
                ._same(hl.import_vcf(resource('filter_alleles/keep_allele2_subset.vcf')))
        )
        # downcode mode, drop G (keep T)
        self.assertTrue(
            hl.filter_alleles_hts(ds, lambda a, i: a != 'G', subset=False)
                .drop('old_alleles', 'old_locus', 'new_to_old', 'old_to_new')
                ._same(hl.import_vcf(resource('filter_alleles/keep_allele1_downcode.vcf')))
        )
        # downcode mode, keep G
        # (removed a leftover debug line that recomputed the pipeline and printed
        #  old_to_new via .show() without asserting anything)
        self.assertTrue(
            hl.filter_alleles_hts(ds, lambda a, i: a == 'G', subset=False)
                .drop('old_alleles', 'old_locus', 'new_to_old', 'old_to_new')
                ._same(hl.import_vcf(resource('filter_alleles/keep_allele2_downcode.vcf')))
        )
def test_ld_prune(self):
        """After ld_prune, no pair of distinct kept variants on the same contig within the window
        may have r^2 >= the threshold; verified by recomputing r^2 with a BlockMatrix product."""
        ds = hl.split_multi_hts(hl.import_vcf(resource('sample.vcf')))
        pruned_table = hl.ld_prune(ds.GT, r2=0.2, bp_window_size=1000000)
        filtered_ds = (ds.filter_rows(hl.is_defined(pruned_table[(ds.locus, ds.alleles)])))
        filtered_ds = filtered_ds.annotate_rows(stats=agg.stats(filtered_ds.GT.n_alt_alleles()))
        filtered_ds = filtered_ds.annotate_rows(
            mean=filtered_ds.stats.mean, sd_reciprocal=1 / filtered_ds.stats.stdev)
        # Standardize genotypes (mean-impute missing to 0) so that (X X^T)^2 gives r^2.
        n_samples = filtered_ds.count_cols()
        normalized_mean_imputed_genotype_expr = (
            hl.cond(hl.is_defined(filtered_ds['GT']),
                    (filtered_ds['GT'].n_alt_alleles() - filtered_ds['mean'])
                    * filtered_ds['sd_reciprocal'] * (1 / hl.sqrt(n_samples)), 0))
        block_matrix = BlockMatrix.from_entry_expr(normalized_mean_imputed_genotype_expr)
        entries = ((block_matrix @ block_matrix.T) ** 2).entries()
        index_table = filtered_ds.add_row_index().rows().key_by('row_idx').select('locus')
        entries = entries.annotate(locus_i=index_table[entries.i].locus, locus_j=index_table[entries.j].locus)
        contig_filter = entries.locus_i.contig == entries.locus_j.contig
        window_filter = (hl.abs(entries.locus_i.position - entries.locus_j.position)) <= 1000000
        # NOTE(review): despite the name, this keeps *distinct* pairs (i != j).
        identical_filter = entries.i != entries.j
        self.assertEqual(entries.filter(
            (entries['entry'] >= 0.2) & (contig_filter) & (window_filter) & (identical_filter)).count(), 0)
def test_ld_prune_inputs(self):
        """memory_per_core=0 is an invalid argument and must raise ValueError."""
        ds = hl.split_multi_hts(hl.import_vcf(resource('sample.vcf')))
        self.assertRaises(ValueError, lambda: hl.ld_prune(ds.GT, r2=0.2, bp_window_size=1000000, memory_per_core=0))
def test_ld_prune_no_prune(self):
        """With a zero-width window nothing can be pruned for LD; only monomorphic variants
        (a single distinct genotype) are dropped."""
        ds = hl.balding_nichols_model(n_populations=1, n_samples=10, n_variants=100)
        pruned_table = hl.ld_prune(ds.GT, r2=0.1, bp_window_size=0)
        expected_count = ds.filter_rows(agg.collect_as_set(ds.GT).size() > 1, keep=True).count_rows()
        self.assertEqual(pruned_table.count(), expected_count)
def test_ld_prune_identical_variants(self):
        """Perfectly correlated variants (ldprune2.vcf) collapse to a single kept variant."""
        ds = hl.import_vcf(resource('ldprune2.vcf'), min_partitions=2)
        pruned_table = hl.ld_prune(ds.GT)
        self.assertEqual(pruned_table.count(), 1)
def test_ld_prune_maf(self):
        """With r2=0.0 only one variant survives; it must be the one with the highest minor
        allele frequency."""
        ds = hl.balding_nichols_model(n_populations=1, n_samples=50, n_variants=10, n_partitions=10).cache()
        # Alternate-allele frequency per variant, folded to minor allele frequency.
        ht = ds.select_rows(p=hl.agg.sum(ds.GT.n_alt_alleles()) / (2 * 50)).rows()
        ht = ht.select(maf=hl.cond(ht.p <= 0.5, ht.p, 1.0 - ht.p)).cache()
        pruned_table = hl.ld_prune(ds.GT, 0.0)
        positions = pruned_table.locus.position.collect()
        self.assertEqual(len(positions), 1)
        kept_position = hl.literal(positions[0])
        kept_maf = ht.filter(ht.locus.position == kept_position).maf.collect()[0]
        self.assertEqual(kept_maf, max(ht.maf.collect()))
def test_ld_prune_call_expression(self):
        """ld_prune accepts any call-typed entry expression, not just the GT field."""
        ds = hl.import_vcf(resource("ldprune2.vcf"), min_partitions=2)
        ds = ds.select_entries(foo=ds.GT)
        pruned_table = hl.ld_prune(ds.foo)
        self.assertEqual(pruned_table.count(), 1)
def test_entries(self):
        """BlockMatrix.entries() must produce the full (i, j, entry) table for every block size,
        matching a reference table built from the same ndarray."""
        n_rows, n_cols = 5, 3
        rows = [{'i': i, 'j': j, 'entry': float(i + j)} for i in range(n_rows) for j in range(n_cols)]
        schema = hl.tstruct(i=hl.tint32, j=hl.tint32, entry=hl.tfloat64)
        table = hl.Table.parallelize([hl.struct(i=row['i'], j=row['j'], entry=row['entry']) for row in rows], schema)
        # entries() emits int64 indices, so widen the reference table to match.
        table = table.annotate(i=hl.int64(table.i),
                               j=hl.int64(table.j)).key_by('i', 'j')
        ndarray = np.reshape(list(map(lambda row: row['entry'], rows)), (n_rows, n_cols))
        # Block sizes below, equal to, and above the matrix dimensions.
        for block_size in [1, 2, 1024]:
            block_matrix = BlockMatrix.from_numpy(ndarray, block_size)
            entries_table = block_matrix.entries()
            self.assertEqual(entries_table.count(), n_cols * n_rows)
            self.assertEqual(len(entries_table.row), 3)
            self.assertTrue(table._same(entries_table))
def test_filter_intervals(self):
        """filter_intervals accepts a Python list, a Hail array, and mixed value/expression
        intervals, and covering the same region in one or two pieces yields the same rows."""
        ds = hl.import_vcf(resource('sample.vcf'), min_partitions=20)
        self.assertEqual(
            hl.filter_intervals(ds, [hl.parse_locus_interval('20:10639222-10644705')]).count_rows(), 3)
        intervals = [hl.parse_locus_interval('20:10639222-10644700'),
                     hl.parse_locus_interval('20:10644700-10644705')]
        self.assertEqual(hl.filter_intervals(ds, intervals).count_rows(), 3)
        intervals = hl.array([hl.parse_locus_interval('20:10639222-10644700'),
                              hl.parse_locus_interval('20:10644700-10644705')])
        self.assertEqual(hl.filter_intervals(ds, intervals).count_rows(), 3)
        intervals = hl.array([hl.parse_locus_interval('20:10639222-10644700').value,
                              hl.parse_locus_interval('20:10644700-10644705')])
        self.assertEqual(hl.filter_intervals(ds, intervals).count_rows(), 3)
        intervals = [hl.parse_locus_interval('[20:10019093-10026348]').value,
                     hl.parse_locus_interval('[20:17705793-17716416]').value]
        self.assertEqual(hl.filter_intervals(ds, intervals).count_rows(), 4)
def test_filter_intervals_compound_partition_key(self):
        """filter_intervals also works when rows are keyed by a compound (locus, alleles) key,
        using struct-valued interval endpoints."""
        ds = hl.import_vcf(resource('sample.vcf'), min_partitions=20)
        ds = (ds.annotate_rows(variant=hl.struct(locus=ds.locus, alleles=ds.alleles))
              .key_rows_by('locus', 'alleles'))
        intervals = [hl.Interval(hl.Struct(locus=hl.Locus('20', 10639222), alleles=['A', 'T']),
                                 hl.Struct(locus=hl.Locus('20', 10644700), alleles=['A', 'T']))]
        self.assertEqual(hl.filter_intervals(ds, intervals).count_rows(), 3)
def test_balding_nichols_model(self):
        """balding_nichols_model must honor its dimensions/partitioning arguments and record
        every simulation parameter in the dataset's globals."""
        from hail.stats import TruncatedBetaDist
        ds = hl.balding_nichols_model(2, 20, 25, 3,
                                      pop_dist=[1.0, 2.0],
                                      fst=[.02, .06],
                                      af_dist=TruncatedBetaDist(a=0.01, b=2.0, min=0.05, max=0.95),
                                      seed=1)
        self.assertEqual(ds.count_cols(), 20)
        self.assertEqual(ds.count_rows(), 25)
        self.assertEqual(ds.n_partitions(), 3)
        # Globals echo back the simulation parameters.
        glob = ds.globals
        self.assertEqual(glob.n_populations.value, 2)
        self.assertEqual(glob.n_samples.value, 20)
        self.assertEqual(glob.n_variants.value, 25)
        self.assertEqual(glob.pop_dist.value, [1, 2])
        self.assertEqual(glob.fst.value, [.02, .06])
        self.assertEqual(glob.seed.value, 1)
        self.assertEqual(glob.ancestral_af_dist.value,
                         hl.Struct(type='TruncatedBetaDist', a=0.01, b=2.0, min=0.05, max=0.95))
def test_skat(self):
        """Smoke-test hl.skat in both linear (hard-call genotypes) and logistic (PL dosages)
        modes on annotated sample data; only successful execution is asserted via count()."""
        ds2 = hl.import_vcf(resource('sample2.vcf'))
        covariates = (hl.import_table(resource("skat.cov"), impute=True)
                      .key_by("Sample"))
        phenotypes = (hl.import_table(resource("skat.pheno"),
                                      types={"Pheno": hl.tfloat64},
                                      missing="0")
                      .key_by("Sample"))
        intervals = (hl.import_locus_intervals(resource("skat.interval_list")))
        weights = (hl.import_table(resource("skat.weights"),
                                   types={"locus": hl.tlocus(),
                                          "weight": hl.tfloat64})
                   .key_by("locus"))
        ds = hl.split_multi_hts(ds2)
        ds = ds.annotate_rows(gene=intervals[ds.locus],
                              weight=weights[ds.locus].weight)
        ds = ds.annotate_cols(pheno=phenotypes[ds.s].Pheno,
                              cov=covariates[ds.s])
        # Recode PLINK-style phenotype (1=control, 2=case) to a boolean; other values -> missing.
        ds = ds.annotate_cols(pheno=hl.cond(ds.pheno == 1.0,
                                            False,
                                            hl.cond(ds.pheno == 2.0,
                                                    True,
                                                    hl.null(hl.tbool))))
        hl.skat(key_expr=ds.gene,
                weight_expr=ds.weight,
                y=ds.pheno,
                x=ds.GT.n_alt_alleles(),
                covariates=[ds.cov.Cov1, ds.cov.Cov2],
                logistic=False).count()
        hl.skat(key_expr=ds.gene,
                weight_expr=ds.weight,
                y=ds.pheno,
                x=hl.pl_dosage(ds.PL),
                covariates=[ds.cov.Cov1, ds.cov.Cov2],
                logistic=True).count()
def test_import_gen(self):
        """import_gen applies contig recoding and types loci against the given reference genome."""
        gen = hl.import_gen(resource('example.gen'),
                            sample_file=resource('example.sample'),
                            contig_recoding={"01": "1"},
                            reference_genome = 'GRCh37').rows()
        self.assertTrue(gen.all(gen.locus.contig == "1"))
        self.assertEqual(gen.count(), 199)
        self.assertEqual(gen.locus.dtype, hl.tlocus('GRCh37'))
def test_import_gen_no_reference_specified(self):
        """With reference_genome=None the locus field is a plain (contig, position) struct."""
        gen = hl.import_gen(resource('example.gen'),
                            sample_file=resource('example.sample'),
                            reference_genome=None)
        self.assertTrue(gen.locus.dtype == hl.tstruct(contig=hl.tstr, position=hl.tint32))
        self.assertEqual(gen.count_rows(), 199)
def test_import_bgen(self):
        """import_bgen across BGEN variants (v1.1, 8-bit, 10-bit): entry schema must follow the
        requested entry_fields, with contig recoding and GRCh37 loci applied."""
        hl.index_bgen(resource('example.v11.bgen'))
        bgen_rows = hl.import_bgen(resource('example.v11.bgen'),
                                   entry_fields=['GT', 'GP'],
                                   sample_file=resource('example.sample'),
                                   contig_recoding={'01': '1'},
                                   reference_genome='GRCh37').rows()
        self.assertTrue(bgen_rows.all(bgen_rows.locus.contig == '1'))
        self.assertEqual(bgen_rows.count(), 199)
        hl.index_bgen(resource('example.8bits.bgen'))
        # dosage only
        bgen = hl.import_bgen(resource('example.8bits.bgen'),
                              entry_fields=['dosage'],
                              contig_recoding={'01': '1'},
                              reference_genome='GRCh37')
        self.assertEqual(bgen.entry.dtype, hl.tstruct(dosage=hl.tfloat64))
        # GT + GP
        bgen = hl.import_bgen(resource('example.8bits.bgen'),
                              entry_fields=['GT', 'GP'],
                              sample_file=resource('example.sample'),
                              contig_recoding={'01': '1'},
                              reference_genome='GRCh37')
        self.assertEqual(bgen.entry.dtype, hl.tstruct(GT=hl.tcall, GP=hl.tarray(hl.tfloat64)))
        self.assertEqual(bgen.count_rows(), 199)
        hl.index_bgen(resource('example.10bits.bgen'))
        # all three entry fields
        bgen = hl.import_bgen(resource('example.10bits.bgen'),
                              entry_fields=['GT', 'GP', 'dosage'],
                              contig_recoding={'01': '1'},
                              reference_genome='GRCh37')
        self.assertEqual(bgen.entry.dtype, hl.tstruct(GT=hl.tcall, GP=hl.tarray(hl.tfloat64), dosage=hl.tfloat64))
        self.assertEqual(bgen.locus.dtype, hl.tlocus('GRCh37'))
def test_import_bgen_no_entry_fields(self):
        """import_bgen with an empty entry_fields list still yields a dataset that typechecks."""
        hl.index_bgen(resource('example.v11.bgen'))
        bgen = hl.import_bgen(resource('example.v11.bgen'),
                              entry_fields=[],
                              sample_file=resource('example.sample'),
                              contig_recoding={'01': '1'},
                              reference_genome='GRCh37')
        bgen._jvds.typecheck()
def test_import_bgen_no_reference_specified(self):
        """With reference_genome=None the locus field is a plain (contig, position) struct."""
        bgen = hl.import_bgen(resource('example.10bits.bgen'),
                              entry_fields=['GT', 'GP', 'dosage'],
                              contig_recoding={'01': '1'},
                              reference_genome=None)
        self.assertTrue(bgen.locus.dtype == hl.tstruct(contig=hl.tstr, position=hl.tint32))
        self.assertEqual(bgen.count_rows(), 199)
def test_import_vcf(self):
        """Import a VCF against GRCh38 with contig recoding: all loci land on chr22 and the
        locus type carries the GRCh38 reference.
        """
        vcf = hl.split_multi_hts(
            hl.import_vcf(resource('sample2.vcf'),
                          reference_genome=hl.get_reference('GRCh38'),
                          contig_recoding={"22": "chr22"}))
        vcf_table = vcf.rows()
        self.assertTrue(vcf_table.all(vcf_table.locus.contig == "chr22"))
        # Was: self.assertTrue(vcf.locus.dtype, hl.tlocus('GRCh37')) — the two-argument form
        # treats the second argument as a failure message and passes for any truthy dtype,
        # and the expected genome is GRCh38 (the import above uses GRCh38).
        self.assertEqual(vcf.locus.dtype, hl.tlocus('GRCh38'))
def test_import_vcf_no_reference_specified(self):
        """With reference_genome=None the locus field is a plain (contig, position) struct."""
        vcf = hl.import_vcf(resource('sample2.vcf'),
                            reference_genome=None)
        self.assertTrue(vcf.locus.dtype == hl.tstruct(contig=hl.tstr, position=hl.tint32))
        self.assertEqual(vcf.count_rows(), 735)
def test_import_vcf_bad_reference_allele(self):
        """A VCF row with an invalid reference base is still imported (one row survives)."""
        vcf = hl.import_vcf(resource('invalid_base.vcf'))
        self.assertEqual(vcf.count_rows(), 1)
def test_import_vcf_flags_are_defined(self):
        """Regression test for issue 3277: INFO flag fields must import as defined booleans
        (present/absent), never as missing."""
        # issue 3277
        t = hl.import_vcf(resource('sample.vcf')).rows()
        self.assertTrue(t.all(hl.is_defined(t.info.NEGATIVE_TRAIN_SITE) &
                              hl.is_defined(t.info.POSITIVE_TRAIN_SITE) &
                              hl.is_defined(t.info.DB) &
                              hl.is_defined(t.info.DS)))
def test_import_vcf_can_import_float_array_format(self):
        """FORMAT fields declared as float arrays import with their values intact."""
        mt = hl.import_vcf(resource('floating_point_array.vcf'))
        self.assertTrue(mt.aggregate_entries(hl.agg.all(mt.numeric_array == [1.5, 2.5])))
def test_import_vcf_can_import_negative_numbers(self):
        """Negative scalar and array FORMAT values (int and float) import correctly."""
        mt = hl.import_vcf(resource('negative_format_fields.vcf'))
        self.assertTrue(mt.aggregate_entries(hl.agg.all(mt.negative_int == -1) &
                                             hl.agg.all(mt.negative_float == -1.5) &
                                             hl.agg.all(mt.negative_int_array == [-1, -2]) &
                                             hl.agg.all(mt.negative_float_array == [-0.5, -1.5])))
def test_import_vcf_missing_info_field_elements(self):
        """With array_elements_required=False, '.' elements inside INFO arrays import as
        missing (None) rather than failing."""
        mt = hl.import_vcf(resource('missingInfoArray.vcf'), reference_genome='GRCh37', array_elements_required=False)
        mt = mt.select_rows(FOO=mt.info.FOO, BAR=mt.info.BAR)
        expected = hl.Table.parallelize([{'locus': hl.Locus('X', 16050036), 'alleles': ['A', 'C'],
                                          'FOO': [1, None], 'BAR': [2, None, None]},
                                         {'locus': hl.Locus('X', 16061250), 'alleles': ['T', 'A', 'C'],
                                          'FOO': [None, 2, None], 'BAR': [None, 1.0, None]}],
                                        hl.tstruct(locus=hl.tlocus('GRCh37'), alleles=hl.tarray(hl.tstr),
                                                   FOO=hl.tarray(hl.tint), BAR=hl.tarray(hl.tfloat64)),
                                        key=['locus', 'alleles'])
        self.assertTrue(mt.rows()._same(expected))
def test_import_vcf_missing_format_field_elements(self):
        """With array_elements_required=False, '.' elements inside FORMAT arrays (AD, PL)
        import as missing (None) rather than failing."""
        mt = hl.import_vcf(resource('missingFormatArray.vcf'), reference_genome='GRCh37', array_elements_required=False)
        mt = mt.select_rows().select_entries('AD', 'PL')
        expected = hl.Table.parallelize([{'locus': hl.Locus('X', 16050036), 'alleles': ['A', 'C'], 's': 'C1046::HG02024',
                                          'AD': [None, None], 'PL': [0, None, 180]},
                                         {'locus': hl.Locus('X', 16050036), 'alleles': ['A', 'C'], 's': 'C1046::HG02025',
                                          'AD': [None, 6], 'PL': [70, None]},
                                         {'locus': hl.Locus('X', 16061250), 'alleles': ['T', 'A', 'C'], 's': 'C1046::HG02024',
                                          'AD': [0, 0, None], 'PL': [396, None, None, 33, None, 0]},
                                         {'locus': hl.Locus('X', 16061250), 'alleles': ['T', 'A', 'C'], 's': 'C1046::HG02025',
                                          'AD': [0, 0, 9], 'PL': [None, None, None]}],
                                        hl.tstruct(locus=hl.tlocus('GRCh37'), alleles=hl.tarray(hl.tstr), s=hl.tstr,
                                                   AD=hl.tarray(hl.tint), PL=hl.tarray(hl.tint)),
                                        key=['locus', 'alleles', 's'])
        self.assertTrue(mt.entries()._same(expected))
def test_export_import_plink_same(self):
        """Round-trip through export_plink/import_plink: a dataset shaped to PLINK's schema
        (rsid, cm_position, FAM columns) must come back identical."""
        mt = self.get_dataset()
        mt = mt.select_rows(rsid=hl.delimit([mt.locus.contig, hl.str(mt.locus.position), mt.alleles[0], mt.alleles[1]], ':'),
                            cm_position=15.0)
        mt = mt.select_cols(fam_id=hl.null(hl.tstr), pat_id=hl.null(hl.tstr), mat_id=hl.null(hl.tstr),
                            is_female=hl.null(hl.tbool), is_case=hl.null(hl.tbool))
        mt = mt.select_entries('GT')
        bfile = '/tmp/test_import_export_plink'
        hl.export_plink(mt, bfile, ind_id=mt.s, cm_position=mt.cm_position)
        mt_imported = hl.import_plink(bfile + '.bed', bfile + '.bim', bfile + '.fam',
                                      a2_reference=True, reference_genome='GRCh37')
        self.assertTrue(mt._same(mt_imported))
        self.assertTrue(mt.aggregate_rows(hl.agg.all(mt.cm_position == 15.0)))
def test_import_plink_empty_fam(self):
        """Importing a PLINK fileset with no samples must fail with 'Empty .fam file'."""
        mt = self.get_dataset().drop_cols()
        bfile = '/tmp/test_empty_fam'
        hl.export_plink(mt, bfile, ind_id=mt.s)
        with self.assertRaisesRegex(utils.FatalError, "Empty .fam file"):
            hl.import_plink(bfile + '.bed', bfile + '.bim', bfile + '.fam')
def test_import_plink_empty_bim(self):
        """Importing a PLINK fileset with no variants must fail with an empty-.bim error."""
        mt = self.get_dataset().drop_rows()
        bfile = '/tmp/test_empty_bim'
        hl.export_plink(mt, bfile, ind_id=mt.s)
        with self.assertRaisesRegex(utils.FatalError, ".bim file does not contain any variants"):
            hl.import_plink(bfile + '.bed', bfile + '.bim', bfile + '.fam')
def test_import_plink_a1_major(self):
        """a2_reference=False swaps which allele is the reference: alleles reverse and the
        homozygote counts mirror, while not-called and het counts are invariant."""
        mt = self.get_dataset()
        bfile = '/tmp/sample_plink'
        hl.export_plink(mt, bfile, ind_id=mt.s)

        def get_data(a2_reference):
            # Import with the given allele-orientation and return per-variant QC keyed by rsid.
            mt_imported = hl.import_plink(bfile + '.bed', bfile + '.bim',
                                          bfile + '.fam', a2_reference=a2_reference)
            return (hl.variant_qc(mt_imported)
                    .rows()
                    .key_by('rsid'))

        a2 = get_data(a2_reference=True)
        a1 = get_data(a2_reference=False)
        j = (a2.annotate(a1_alleles=a1[a2.rsid].alleles, a1_vqc=a1[a2.rsid].variant_qc)
             .rename({'variant_qc': 'a2_vqc', 'alleles': 'a2_alleles'}))
        self.assertTrue(j.all((j.a1_alleles[0] == j.a2_alleles[1]) &
                              (j.a1_alleles[1] == j.a2_alleles[0]) &
                              (j.a1_vqc.n_not_called == j.a2_vqc.n_not_called) &
                              (j.a1_vqc.n_het == j.a2_vqc.n_het) &
                              (j.a1_vqc.homozygote_count[0] == j.a2_vqc.homozygote_count[1]) &
                              (j.a1_vqc.homozygote_count[1] == j.a2_vqc.homozygote_count[0])))
def test_import_plink_contig_recoding_w_reference(self):
        """Export a GRCh38/chr22 dataset to PLINK, then re-import with contig recoding back to
        '22' under GRCh37: contigs, row count, and locus type must all reflect GRCh37.
        """
        vcf = hl.split_multi_hts(
            hl.import_vcf(resource('sample2.vcf'),
                          reference_genome=hl.get_reference('GRCh38'),
                          contig_recoding={"22": "chr22"}))
        hl.export_plink(vcf, '/tmp/sample_plink')
        bfile = '/tmp/sample_plink'
        plink = hl.import_plink(
            bfile + '.bed', bfile + '.bim', bfile + '.fam',
            a2_reference=True,
            contig_recoding={'chr22': '22'},
            reference_genome='GRCh37').rows()
        self.assertTrue(plink.all(plink.locus.contig == "22"))
        self.assertEqual(vcf.count_rows(), plink.count())
        # Was: self.assertTrue(plink.locus.dtype, hl.tlocus('GRCh37')) — the two-argument form
        # treats the second argument as a failure message and passes for any truthy dtype.
        self.assertEqual(plink.locus.dtype, hl.tlocus('GRCh37'))
def test_import_plink_no_reference_specified(self):
        """With reference_genome=None the locus field is a plain (contig, position) struct."""
        bfile = resource('fastlmmTest')
        plink = hl.import_plink(bfile + '.bed', bfile + '.bim', bfile + '.fam',
                                reference_genome=None)
        self.assertTrue(plink.locus.dtype == hl.tstruct(contig=hl.tstr, position=hl.tint32))
def test_import_matrix_table(self):
        """import_matrix_table: declared row fields land in row scope and col_id in column
        scope; headerless and keyless variants parse, and a bogus row_key fails."""
        mt = hl.import_matrix_table(doctest_resource('matrix1.tsv'),
                                    row_fields={'Barcode': hl.tstr, 'Tissue': hl.tstr, 'Days': hl.tfloat32})
        self.assertEqual(mt['Barcode']._indices, mt._row_indices)
        self.assertEqual(mt['Tissue']._indices, mt._row_indices)
        self.assertEqual(mt['Days']._indices, mt._row_indices)
        self.assertEqual(mt['col_id']._indices, mt._col_indices)
        self.assertEqual(mt['row_id']._indices, mt._row_indices)
        mt.count()
        # Headerless files name fields f0, f1, ...; exercise keyed/keyless combinations.
        row_fields = {'f0': hl.tstr, 'f1': hl.tstr, 'f2': hl.tfloat32}
        hl.import_matrix_table(doctest_resource('matrix2.tsv'),
                               row_fields=row_fields, row_key=[]).count()
        hl.import_matrix_table(doctest_resource('matrix3.tsv'),
                               row_fields=row_fields,
                               no_header=True).count()
        hl.import_matrix_table(doctest_resource('matrix3.tsv'),
                               row_fields=row_fields,
                               no_header=True,
                               row_key=[]).count()
        # A row_key naming a nonexistent field is a fatal error.
        self.assertRaises(hl.utils.FatalError,
                          hl.import_matrix_table,
                          doctest_resource('matrix3.tsv'),
                          row_fields=row_fields,
                          no_header=True,
                          row_key=['foo'])
def test_de_novo(self):
    """Compare hl.de_novo output against a trusted reference output file."""
    mt = hl.import_vcf(resource('denovo.vcf'))
    mt = mt.filter_rows(mt.locus.in_y_par(), keep=False)  # de_novo_finder doesn't know about y PAR
    ped = hl.Pedigree.read(resource('denovo.fam'))
    r = hl.de_novo(mt, ped, mt.info.ESP)
    # Key both tables identically so the join lines rows up one-to-one.
    r = r.select(
        prior = r.prior,
        kid_id=r.proband.s,
        dad_id=r.father.s,
        mom_id=r.mother.s,
        p_de_novo=r.p_de_novo,
        confidence=r.confidence).key_by('locus', 'alleles', 'kid_id', 'dad_id', 'mom_id')
    truth = hl.import_table(resource('denovo.out'), impute=True, comment='#')
    truth = truth.select(
        locus=hl.locus(truth['Chr'], truth['Pos']),
        alleles=[truth['Ref'], truth['Alt']],
        kid_id=truth['Child_ID'],
        dad_id=truth['Dad_ID'],
        mom_id=truth['Mom_ID'],
        p_de_novo=truth['Prob_dn'],
        confidence=truth['Validation_Likelihood'].split('_')[0]).key_by('locus', 'alleles', 'kid_id', 'dad_id', 'mom_id')
    # Outer join: a row missing on either side produces missing fields and
    # fails the all() below.
    j = r.join(truth, how='outer')
    self.assertTrue(j.all((j.confidence == j.confidence_1) & (hl.abs(j.p_de_novo - j.p_de_novo_1) < 1e-4)))
def test_window_by_locus(self):
    """Check that window_by_locus collects the previous rows/entries within
    a 5 bp window, in the expected (nearest-last) order."""
    mt = hl.utils.range_matrix_table(100, 2, n_partitions=10)
    mt = mt.annotate_rows(locus=hl.locus('1', mt.row_idx + 1))
    mt = mt.key_rows_by('locus')
    mt = mt.annotate_entries(e_row_idx = mt.row_idx, e_col_idx = mt.col_idx)
    mt = hl.window_by_locus(mt, 5).cache()
    self.assertEqual(mt.count_rows(), 100)
    rows = mt.rows()
    # After the first 5 rows every row has a full 5-element window.
    self.assertTrue(rows.all((rows.row_idx < 5) | (rows.prev_rows.length() == 5)))
    # prev_rows[i] holds the row (i+1) positions back.
    self.assertTrue(rows.all(hl.all(lambda x: (rows.row_idx - 1 - x[0]) == x[1].row_idx,
                                    hl.zip_with_index(rows.prev_rows))))
    entries = mt.entries()
    # prev_entries stay within the same column and mirror the row ordering.
    self.assertTrue(entries.all(hl.all(lambda x: x.e_col_idx == entries.col_idx, entries.prev_entries)))
    self.assertTrue(entries.all(hl.all(lambda x: entries.row_idx - 1 - x[0] == x[1].e_row_idx,
                                       hl.zip_with_index(entries.prev_entries))))
| StarcoderdataPython |
3314260 | <reponame>kevinyamauchi/morphometrics
import numpy as np
import trimesh
from morphometrics.utils.surface_utils import (
closed_surfaces_to_label_image,
voxelize_closed_surface,
)
def _make_cuboid_mesh(origin: np.ndarray, extents: np.ndarray) -> trimesh.Trimesh:
    """Build an axis-aligned cuboid mesh with one corner at ``origin`` and
    side lengths ``extents``."""
    # Corner selectors: 0 -> origin coordinate, 1 -> origin + extent.  The
    # ordering is deliberate and must match the face indices below (note the
    # last two corners are intentionally not in plain binary order).
    corner_selectors = np.array(
        [
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 1],
            [1, 1, 0],
        ]
    )
    vertices = origin + corner_selectors * extents
    # Two triangles per cuboid face, twelve in total.
    faces = np.array(
        [
            [0, 1, 2],
            [1, 2, 3],
            [2, 3, 6],
            [2, 6, 7],
            [0, 2, 7],
            [0, 4, 7],
            [0, 1, 5],
            [0, 5, 4],
            [4, 5, 7],
            [5, 6, 7],
            [1, 3, 5],
            [3, 5, 6],
        ]
    )
    return trimesh.Trimesh(vertices=vertices, faces=faces)
def test_voxelize_closed_surface():
    """Voxelizing a closed cuboid should recover the expected image origin
    and voxel-grid shape at pitch 0.5."""
    origin = np.array([10, 10, 20])
    cube_extents = np.array([30, 10, 10])
    pitch = 0.5
    mesh = _make_cuboid_mesh(origin=origin, extents=cube_extents)
    voxelized, image_origin = voxelize_closed_surface(
        mesh, pitch=pitch, repair_mesh=True
    )
    # Expected values determined empirically for this geometry/pitch.
    np.testing.assert_allclose(image_origin, [9.5, -0.5, 9.5])
    np.testing.assert_allclose([63, 63, 63], voxelized.shape)
def test_closed_surfaces_to_label_image_no_crop():
    """Two disjoint cuboids, no cropping: origin stays at (0,0,0) and each
    mesh gets its own label value (1 and 2; 0 = background)."""
    mesh_0 = _make_cuboid_mesh(np.array([10, 10, 10]), np.array([20, 20, 20]))
    mesh_1 = _make_cuboid_mesh(np.array([30, 10, 10]), np.array([10, 10, 30]))
    pitch = 0.5
    label_image, image_origin = closed_surfaces_to_label_image(
        [mesh_0, mesh_1],
        pitch=pitch,
        crop_around_mesh=False,
        repair_mesh=True,
    )
    np.testing.assert_allclose(image_origin, [0, 0, 0])
    np.testing.assert_allclose(label_image.shape, [81, 61, 81])
    assert set(np.unique(label_image)) == {0, 1, 2}
| StarcoderdataPython |
3205970 | <gh_stars>0
#!/usr/bin/env python3
from ev3dev2.sensor import *
from ev3dev.ev3 import *
from time import sleep
from ev3dev2.motor import OUTPUT_A,OUTPUT_B,MoveTank,SpeedPercent
# Put the color sensor into colour-detection mode.
# NOTE(review): `sensor1` is not defined in this module; presumably it is
# expected to come from one of the `ev3dev` star imports above -- confirm.
sensor1.mode = sensor1.MODE_COL_COLOR
class Anda:
    """Differential-drive helper for an EV3 robot on motor ports A/B.

    Fixes vs. the original: every method was missing ``self`` (so even
    ``Anda()`` raised TypeError), and ``rodas`` was a local variable in
    ``__init__`` instead of an attribute, so the other methods could never
    reach the motors.
    """

    def __init__(self):
        # Tank pair driving both wheel motors.
        self.rodas = MoveTank(OUTPUT_A, OUTPUT_B)
        print("HEAVY ENGINE READY!!")

    def virar(self, graus):
        """Turn in place by ``graus`` degrees relative to the current
        heading; the 0.5/90 factor converts degrees to seconds of spin."""
        if graus < 0:
            self.rodas.on_for_seconds(-50, 50, abs(graus) * (0.5 / 90))
        elif graus == 0:
            pass
        else:
            self.rodas.on_for_seconds(50, -50, abs(graus) * (0.5 / 90))

    def stop(self):
        """Stop both motors."""
        self.rodas.on(0, 0)

    def frente(self):
        """Drive forward (negative speed matches the motor mounting)."""
        self.rodas.on(-20, -20)
1798145 | from .lofo_importance import LOFOImportance
from .flofo_importance import FLOFOImportance
from .dataset import Dataset
from .plotting import plot_importance
| StarcoderdataPython |
3385876 | # script to transform a PSM-xml model into a graph (saved as pdf) with objects as nodes and parent-child relationships as vertices using graphviz dot.
#the central function here is xmltopdf(). it takes a directory (with trailing "//" or "\") and a name (without file ending).
#it opens the directory + <name>.xml model and creates pdfs containing the trees described in the models as graphs for each scene and reference object in the model
#using graphviz (for usage example, see "test()").
import subprocess
from copy import deepcopy
graphviz_command = "dot"
class TreeNode:
    """Node in an object tree parsed from a PSM-xml model.

    Fix vs. the original: ``name``/``parent``/``reference`` were *class*
    attributes, so the mutable ``parent = []`` list was shared by every
    instance.  They are now per-instance attributes; the zero-argument
    construction used by the parser still works unchanged.
    """

    def __init__(self, name="", parent=None, reference=False):
        self.name = name  # object/child tag name ("NULL" marks the sentinel root)
        # Single-element list holding the parent node (parser convention).
        self.parent = [] if parent is None else parent
        # True when the child edge is a reference (drawn dashed in the graph).
        self.reference = reference
def read(directory, name):
    """Return the full text of ``<directory><name>.xml``.

    ``directory`` must already include its trailing path separator, as in
    the original call sites.
    """
    with open(directory + name + ".xml", "r") as handle:
        return handle.read()
def xmltodot(directory, name):
    """Parse the PSM-xml model ``<directory><name>.xml`` and write one
    graphviz .dot file per (scene, root object) tree.

    Returns the list of '<scene>_<root>' suffixes of the files written.
    Parsing is done by crude string splitting on tag names rather than a
    real XML parser.
    """
    contents = read(directory, name)
    scenes = contents.split("<scene ")
    dots = []
    bitcomp = ""
    bitcomp += directory + name + "; "  # NOTE(review): bitcomp is never used afterwards
    nodelist = []
    rootlist = []
    for scene in scenes:
        # Only process scenes that contain an "ocm" marker.
        if not scene.find("ocm") == -1:
            scenename = scene.split("name=\"")[1].split("\"")[0]
            nodes = scene.split("<")
            del nodelist[:]
            del rootlist[:]
            for node in nodes:
                values = node.split(" ")
                if values[0] == "object":
                    # A new root object: stash the previous tree first.
                    if len(nodelist) > 0:
                        rootlist.append([deepcopy(nodelist), root.name])
                        del nodelist[:]
                    top = TreeNode()
                    top.name = "NULL"  # sentinel parent above each root
                    root = TreeNode()
                    root.name = values[1].split("\"")[1]
                    root.parent = [top]
                    currentroot = root
                    nodelist.append(root)
                if values[0] == "child":
                    child = TreeNode()
                    child.name = values[1].split("\"")[1]
                    child.parent = [currentroot]
                    # Extra attributes on <child> mark it as a reference edge.
                    child.reference = (len(values) > 2)
                    currentroot = child
                    nodelist.append(child)
                if values[0] == "/child>":
                    # Closing tag: climb back up one level.
                    currentroot = currentroot.parent[0]
            rootlist.append([deepcopy(nodelist), root.name]) #last root
            for root in rootlist:
                nodelist = root[0]
                rootname = root[1]
                dot = "digraph " + name + "_" + rootname + " {\n"
                #create graph nodes:
                for o in rootlist:
                    dot += o[1] + "[label=" + o[1] + "]\n"
                dot += "\n"
                #create edges:
                for o in nodelist:
                    if not o.name == "NULL" and not o.parent[0].name == "NULL":
                        dot += o.parent[0].name + " -> " + o.name
                        if o.reference:
                            dot += "[style=dashed]"
                        dot += "\n"
                dot += "}\n\n"
                print(dot)
                dotname = scenename + "_" + rootname
                outname = directory + name + "_" + dotname + ".dot"
                with open(outname, 'w') as f:
                    f.write(dot)
                dots.append(scenename + "_" + rootname)
    return dots
def dottopdf(directory, name, graphs):
    """Render each previously written .dot file to a PDF via graphviz."""
    for suffix in graphs:
        base = directory + name + "_" + suffix
        subprocess.call([graphviz_command, "-Tpdf", base + ".dot", "-o", base + ".pdf"])
def xmltopdf(directory, name):
    """Convert ``<directory><name>.xml`` into one PDF per scene/root tree."""
    dottopdf(directory, name, xmltodot(directory, name))
def test():
    """Example invocation against the sample model shipped in ../data."""
    xmltopdf("../data/", "advertisement")
| StarcoderdataPython |
3360265 | # coding: utf-8
#####################################################################
# Fill csv file while watching the glyphs of a IconFont
# autor: <NAME>
#
#####################################################################
import tkinter
import os
from fontTools.ttLib import TTFont
raiz = tkinter.Tk()  # main application window
raiz.geometry("540x100")
# Output CSV appended to (one row per described glyph).
# NOTE(review): opened without a context manager; only closed after
# mainloop() returns, and never on an exception.
archivoCSV = open('D:\\cosas\\listaGlyphs.csv','a',encoding='utf-8') # output rows are appended here for the database
miFont = TTFont('d:/cosas/icf_devicon.ttf') # TTFont object for the icon font being catalogued
diccioDECHEXglyphs = miFont.getBestCmap() # dictionary with DEC , HEX values of glyphs
contador = 3 # to avoid CTRL characters of a font
#------------- Label updated on each click ---------------------------
def crearUnicode(indice):
    """Return the character for the ``indice``-th codepoint in the font's cmap."""
    codepoint = list(diccioDECHEXglyphs.keys())[indice]
    return chr(codepoint)
def registrar():
    """Append a CSV row for the current glyph and advance to the next one."""
    global contador
    global diccioDECHEXglyphs
    valorUnicode = crearUnicode(contador)
    valorINT = list( diccioDECHEXglyphs.keys() )[contador]
    # CSV fields: placeholder, glyph, font name, decimal codepoint, user description.
    archivoCSV.write("'',"+ valorUnicode +",Nombre_Fuente," + str(valorINT) + "," + descripcion.get() + "\n") # write the CSV fields
    contador += 1
    salidaTexto.config(text=crearUnicode(contador),) # update the glyph shown in the GUI
    descripcion.delete(0, tkinter.END) # clear the entry for the next description
    return
#------------------------- The GUI layout ------------------------------------
tkinter.Label(raiz, text="Describir el Glyph siguiente:").grid(row=0,column=0)
# Big label rendered in the icon font itself, showing the current glyph.
salidaTexto = tkinter.Label(raiz, text=crearUnicode(contador), font=('icf_devicon',40))
salidaTexto.grid(row=0,column=10)
#------------ User action inputs ---------------------------------------------
boton = tkinter.Button(raiz, text="guardar a CSV", command=lambda:registrar())
boton.grid(row=1,column=0)
descripcion = tkinter.Entry(raiz, width=50)
descripcion.grid(row=1,column=1)
raiz.mainloop()
# Only reached after the window is closed.
archivoCSV.close()
1776233 | <gh_stars>1-10
from functions.get_nag_vertex_type import get_nag_vertex_type
from functions.get_nag_vertex_number import get_nag_vertex_number
def to_tuple(a, b):
    """Pack the two arguments into a 2-tuple ``(a, b)``."""
    return a, b
# Sample vertex names: one-letter type prefix followed by a number.
l = ['c0', 'c1', 'i1', 'n1', 'o1', 'i2', 'n2', 'n3']
# Sort by (type, number) using the project's NAG helper functions.
print(sorted(l, key=lambda n: (get_nag_vertex_type(n), get_nag_vertex_number(n))))
| StarcoderdataPython |
97987 | <filename>server/testing.py<gh_stars>0
# from game import Game
# from player import Player
# from board import Board
# id = 1
# conn_queue = []
# player = Player('127.0.0.1', 'Chirag')
# conn_queue.append(player)
# game = Game(id, conn_queue)
# b = Board()
# player.get_name()
# # print(game, player)
# # print(game.player_guessed(player, 'chirag'))
# print(b.get_board())
import pygame
pygame.init()
#### Create a canvas on which to display everything ####
window = (400,400)
screen = pygame.display.set_mode(window)
#### Create a canvas on which to display everything ####
#### Create a surface with the same size as the window ####
background = pygame.Surface(window)
#### Create a surface with the same size as the window ####
#### Populate the surface with objects to be displayed ####
pygame.draw.rect(background,(0,255,255),(20,20,40,40))
pygame.draw.rect(background,(255,0,255),(120,120,50,50))
#### Populate the surface with objects to be displayed ####
#### Blit the surface onto the canvas ####
screen.blit(background,(0,0))
# NOTE(review): this rect is drawn onto `background` *after* it has been
# blitted to the screen and is never re-blitted, so it does not appear in
# the flipped frame -- confirm whether that is intentional.
pygame.draw.rect(background, (255,255,0), (120,120,90,50))
#### Blit the surface onto the canvas ####
#### Update the the display and wait ####
pygame.display.flip()
done = False
# Minimal event loop: keep the window open until the user closes it.
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
#### Update the the display and wait ####
pygame.quit()
147605 | <reponame>crvernon/kids_math<filename>kids_math/gifs.py
import pkg_resources
from IPython.core.interactiveshell import InteractiveShell
# Make notebooks display every expression result, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
from IPython import display
class PeterRabbitGif:
    """Bundled Peter Rabbit reaction GIFs for notebook display."""
    # Source: https://tenor.com/view/smile-wink-peter-rabbit-peter-rabbit-gifs-gif-11787571
    WINKING = pkg_resources.resource_filename('kids_math', 'img/peter_rabbit_winking.gif')
    # Source: https://tenor.com/view/no-nope-dont-beg-peter-rabbit-gif-11782011
    NOPE = pkg_resources.resource_filename('kids_math', 'img/peter_rabbit_no.gif')
    @staticmethod
    def display_gif(gif):
        """Render the GIF at path ``gif`` in a Jupyter/Colab/IPython notebook."""
        with open(gif, 'rb') as handle:
            return display.Image(data=handle.read(), format='png')
    def wink(self):
        """Show the winking Peter Rabbit GIF."""
        return self.display_gif(PeterRabbitGif.WINKING)
    def nope(self):
        """Show the 'nope' Peter Rabbit GIF."""
        return self.display_gif(PeterRabbitGif.NOPE)
class FrozenGif:
    """Bundled Frozen reaction GIFs for notebook display."""
    # Source: https://tenor.com/view/disney-princess-its-me-frozen-elsa-gif-15403239
    ELSA_WALKING = pkg_resources.resource_filename('kids_math', 'img/frozen_walking.gif')
    # Source: https://media.tenor.com/images/69d62f9ebd55c45d08ab6af501cbfa47/tenor.gif
    OLAF_HEART = pkg_resources.resource_filename('kids_math', 'img/frozen_olaf_heart.gif')
    @staticmethod
    def display_gif(gif):
        """Display GIF in a Jupyter, CoLab, or IPython Notebook"""
        with open(gif, 'rb') as f:
            return display.Image(data=f.read(), format='png')
    def walking(self):
        """Display the Elsa walking GIF."""
        return self.display_gif(FrozenGif.ELSA_WALKING)
    def olaf_heart(self):
        """Display the Olaf heart GIF."""
        return self.display_gif(FrozenGif.OLAF_HEART)
class RapunzelGif:
    """Bundled Rapunzel reaction GIF for notebook display."""
    # Source: https://media.giphy.com/media/14rbfgZ6aTgwAo/giphy.gif
    SWINGING = pkg_resources.resource_filename('kids_math', 'img/rapunzel_swinging.gif')
    @staticmethod
    def display_gif(gif):
        """Display GIF in a Jupyter, CoLab, or IPython Notebook"""
        with open(gif, 'rb') as f:
            return display.Image(data=f.read(), format='png')
    def swinging(self):
        """Display the Rapunzel swinging GIF."""
        return self.display_gif(RapunzelGif.SWINGING)
| StarcoderdataPython |
151671 | import pytest
from emrichen import Template
# Expected hex digests of the string "Hello" for each supported tag.
HASHES = {
    'MD5': '8b1a9953c4611296a827abf8c47804d7',
    'SHA1': 'f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0',
    'SHA256': '185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969',
}
@pytest.mark.parametrize('h', sorted(HASHES.items()), ids=sorted(HASHES))
def test_hash(h):
    """Each !<ALGO> tag should enrich "Hello" to its known hex digest."""
    algo, expected = h
    assert Template.parse(f'!{algo} "Hello"').enrich({}) == [expected]
| StarcoderdataPython |
1650577 | <gh_stars>1-10
##
## imports
##
from abc import ABC, abstractmethod
import time
import misc
##
## code
##
class X0GenericCmd(ABC):
    """The main Apex state object that does the magic.

    State machine that sends one command to a remote device and waits for a
    matching acknowledgement, with timeout/retry handling.  Subclasses supply
    the protocol-specific encoding/decoding via the abstract methods.
    States cycle '' -> 'sendCmd' -> 'waitACK' and back.
    """
    def __init__(self, inComm, useLog, timeoutConfig, closeOnComplete = True):
        self.log = useLog
        self.desired = None            # internal encoding of the pending command (None = idle)
        self.comm = inComm             # transport object with send()/read()/close()
        self.state = ''
        self.timeout = 0               # absolute deadline (time.time() based)
        self.refAckOffset = self.makeAckOffset(timeoutConfig)
        self.cmdRetries = self.makeCmdRetries(timeoutConfig)
        self.checkwaitacktimeout = 0   # consecutive-timeout retry counter
        self.opcmd = b''               # wire-format command bytes
        self.chatty = False
        self.target = None
        self.closeOnComplete = closeOnComplete
        self.log.debug(f'refAckOffset {self.refAckOffset}, cmdRetries {self.cmdRetries}')
    @abstractmethod
    def makeDesired(self, cmd, data):
        """ Returns the internal encoding of the command and data
        cmd: the requested command
        data: the data releated to the command
        """
        pass
    @abstractmethod
    def makeOpCmd(self):
        """Takes the self.desired parameter and makes a command that can be sent to the remote device
        Sometimes this simply adds a CR or LF to the self.desired
        Other times more complex headers, etc. will be needed
        """
        pass
    @abstractmethod
    def makeAckOffset(self, timeoutConfig):
        """Return a timeout in seconds for how long we should wait before receiving an Ack
        timeoutConfig: dictionary that can be used to retrieve the timeout
        """
        pass
    @abstractmethod
    def makeCmdRetries(self, timeoutConfig):
        """Returns the number of times the command should be retried if a timeout occurs
        timeoutConfig: dictionary that can be used to retrieve the value
        """
        pass
    @abstractmethod
    def isMatchingAck(self, rxData, opcmd):
        """Takes a value retrieved from the devices and converts it into a value for comparision with the target
        rxData: the data retrieved from the device
        if the result of this function matches the value returned from makeOpCmd then this response is
        the correct response
        """
        pass
    @abstractmethod
    def makeCmdResult(self, rxData):
        """When a matching response is found this function will convert it into the values returned to the caller
        rxData: the data received from the device
        """
        pass
    def action(self):
        """Advance the state machine one step.

        Returns (True, result) when the ACK arrived, (True, None) when the
        machine is idle/gave up, and (False, None) when more calls are needed.
        """
        # this is where the work happens
        if self.state == '':
            self.log.debug(f'Inside state "{self.state}" {self.checkwaitacktimeout}')
            self.opcmd = self.makeOpCmd()
            self.state = 'waitACK'
            print(f'Setting timeout')  # NOTE(review): leftover debug print
            self.timeout = time.time() + self.refAckOffset
            self.state = 'sendCmd'
        ## this is intentionally an if and not an elif
        ## we want the code above to potentially trigger this code below
        if self.state == 'sendCmd':
            self.log.debug(f'Inside state "{self.state}" {self.checkwaitacktimeout}')
            self.log.debug(f'Sending command {self.opcmd}')
            # send the command
            ok = self.comm.send(self.opcmd)
            print(f'OK is {ok}')  # NOTE(review): leftover debug print
            if not ok:
                # do not move to next state!
                # basically we'll try again next time
                self.log.debug(f'Could not send command')
                if time.time() > self.timeout:
                    # oops. Start over
                    self.log.debug(f'Trying to send Timeout in {self.state}. {self.checkwaitacktimeout}. Starting over')
                    self.state = ''
                    self.checkwaitacktimeout += 1
                    if self.checkwaitacktimeout > self.cmdRetries:
                        # we just give up
                        self.log.warning(f'Giving up...')
                        self.desired = None
                        self.checkwaitacktimeout = 0
            else:
                # it was sent
                self.state = 'waitACK'
                # self.timeout = time.time() + self.refAckOffset
        elif self.state == 'waitACK':
            self.log.debug(f'Inside state "{self.state}" {self.checkwaitacktimeout}')
            # quick test of whether socket is connected
            ok = self.comm.send(b'')
            if not ok:
                # no socket even after send tried to create one
                self.log.debug(f'connection lost and not ready yet. Restarting...')
                # got nothing. Have we timed out?
                if time.time() > self.timeout:
                    # oops. Start over
                    self.log.debug(f'Connection Loss and Timeout in {self.state}. {self.checkwaitacktimeout}. Starting over')
                    self.state = ''
                    self.checkwaitacktimeout += 1
                    if self.checkwaitacktimeout > self.cmdRetries:
                        # we just give up
                        self.log.warning(f'Giving up...')
                        self.desired = None
                        self.checkwaitacktimeout = 0
                    else:
                        self.state = 'sendCmd'
                # sent ok
            else:
                # see if there's a response
                rxData = self.comm.read()
                if rxData == b'':
                    self.log.debug(f'Got no data')
                    # got nothing. Have we timed out?
                    if time.time() > self.timeout:
                        # oops. Start over
                        self.log.debug(f'Timeout in {self.state}. {self.checkwaitacktimeout}. Starting over')
                        self.state = ''
                        self.checkwaitacktimeout += 1
                        if self.checkwaitacktimeout > self.cmdRetries:
                            # we just give up
                            self.log.warning(f'Giving up...')
                            self.desired = None
                            self.checkwaitacktimeout = 0
                else:
                    self.log.debug(f'Got data {rxData}')
                    # we got something
                    self.checkwaitacktimeout = 0
                    if self.isMatchingAck(rxData,self.opcmd):
                        # got the ack
                        self.log.debug(f'Got the ACK {rxData}')
                        self.state = ''
                        self.desired = None
                        result = self.makeCmdResult(rxData)
                        if self.closeOnComplete:
                            self.log.debug(f'closing connection because closeOnComplete is {self.closeOnComplete}')
                            self.comm.close()
                        return (True, result)
                    else:
                        # what happened?
                        self.log.warning(f'Did not receive expected results with {rxData}. Ignoring...')
        else:
            # NOTE(review): missing f-prefix -- {self.state} is logged literally.
            self.log.error('**** YIKES {self.state}')
        if not self.desired:
            # we are done
            if self.closeOnComplete:
                self.log.debug(f'closing connection because closeOnComplete is {self.closeOnComplete}')
                self.comm.close()
            # return that we are done with no result
            return (True,None)
        else:
            # more to do
            return (False,None)
    def set(self, cmd:str, data: bytearray):
        """Queue a new command (unless one is already pending) and kick the
        state machine once."""
        # print(f'cmd is {cmd}')
        # print(f'data is {data}')
        combined = self.makeDesired(cmd,data)
        # combined = bytes(cmd,'utf-8') + b' ' + data
        # print(f'combined {combined}')
        if self.desired:
            # same mode
            self.log.warning(f'!!!! Already sending command {combined}')
        else:
            self.desired = combined
            self.log.debug(f'Asked to send command {combined}')
        # definitely need to restart the state machine
        self.state = ''
        self.action()
| StarcoderdataPython |
3232998 | <gh_stars>0
from django.shortcuts import render
from django.http import HttpResponseRedirect
from .models import Obat
from .forms import ObatForm
from django.core import serializers
from django.http.response import HttpResponse
# Create your views here.
def index(request):
    """List every Obat record on the main page."""
    return render(request, 'obat.html', {'obats': Obat.objects.all()})
def add(request):
    """Render the add-record page with the current records for context."""
    return render(request, 'form.html', {'obats': Obat.objects.all()})
def add_forms(request):
    """Render the Obat creation form; on a valid POST, save and redirect.

    Fix vs. original: the original saved whenever the form validated and
    then redirected on *any* POST -- even an invalid one -- silently
    discarding validation errors.  Now it saves and redirects only for a
    valid POST, and otherwise re-renders the bound form so errors show.
    """
    form = ObatForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return HttpResponseRedirect("/obat/add")
    context = {'form': form}
    return render(request, "addforms.html", context)
def delete_obat(request):
    """Delete the Obat whose id arrives in the POST body, then go back."""
    if request.method == "POST":
        Obat.objects.filter(id=request.POST.get("id")).delete()
    return HttpResponseRedirect("/obat")
def edit_obat(request):
    """Update an existing Obat from the POSTed fields, then go back."""
    if request.method == "POST":
        obat = Obat.objects.get(id=request.POST.get("id"))
        obat.penyakit = request.POST.get("penyakit")
        obat.penjelasan = request.POST.get("penjelasan")
        obat.daftar_obat = request.POST.get("daftar_obat")
        obat.save()
    return HttpResponseRedirect("/obat")
def json(request):
    """Return all Obat rows serialized as JSON.

    Fix vs. original: removed the unused ``obats`` queryset variable.
    NOTE(review): this view name shadows the stdlib ``json`` module within
    this file; renaming it would change the URL wiring, so only flagged.
    """
    data = serializers.serialize('json', Obat.objects.all())
    return HttpResponse(data, content_type="application/json")
# Advent of Code 2021, day 11: find the first step where every octopus
# flashes simultaneously.
#
# Fixes vs. the original: the input file is read with a context manager
# (it was opened and closed manually), the builtin name `input` is no
# longer shadowed, and the synchronisation test is an explicit
# "all energies equal" check instead of a fragile nested-loop/break
# construction.

path = "input.txt"
with open(path) as handle:
    grid_lines = [line[:-1] for line in handle.readlines()]


class Octopus:
    """One grid cell: an energy level plus links to its (up to 8) neighbours."""

    def __init__(self, x, y, energy):
        self.x = x
        self.y = y
        self.energy = energy
        self.neighbours = []
        self.flashed = False  # set once per step so cascades terminate

    def find_neighbours(self, octo):
        """Cache references to the 8-connected neighbours inside the grid."""
        dirs = [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)]
        for dx, dy in dirs:
            x = dx + self.x
            y = dy + self.y
            if 0 <= x < X_MAX and 0 <= y < Y_MAX:
                self.neighbours.append(octo[y][x])

    def flash(self):
        """Gain one energy; above 9, reset to 0 and cascade to neighbours."""
        self.energy += 1
        if self.energy > 9:
            self.energy = 0
            self.flashed = True
            for n in self.neighbours:
                if not n.flashed:
                    n.flash()


Octopuses = [[Octopus(x, y, int(ch)) for x, ch in enumerate(row)]
             for y, row in enumerate(grid_lines)]
X_MAX = len(Octopuses[0])
Y_MAX = len(Octopuses)
for row in Octopuses:
    for o in row:
        o.find_neighbours(Octopuses)

gen = 0
while True:
    gen += 1
    # One step: every octopus gains energy, cascading flashes as needed.
    for row in Octopuses:
        for o in row:
            if not o.flashed:
                o.flash()
    # Reset per-step flash markers.
    for row in Octopuses:
        for o in row:
            o.flashed = False
    # Synchronised when every octopus ended the step at the same energy
    # (they all flashed, so all are 0).
    first = Octopuses[0][0].energy
    if all(o.energy == first for row in Octopuses for o in row):
        break

print(gen)
| StarcoderdataPython |
1608838 |
from syned.util.json_tools import load_from_json_file
from syned.storage_ring.electron_beam import ElectronBeam
from syned.storage_ring.magnetic_structures.undulator import Undulator
from syned.beamline.optical_elements.ideal_elements.screen import Screen
from syned.beamline.optical_elements.ideal_elements.lens import IdealLens
from syned.beamline.optical_elements.absorbers.filter import Filter
from syned.beamline.optical_elements.absorbers.slit import Slit
from syned.beamline.optical_elements.absorbers.beam_stopper import BeamStopper
from syned.beamline.optical_elements.mirrors.mirror import Mirror
from syned.beamline.optical_elements.crystals.crystal import Crystal
from syned.beamline.optical_elements.gratings.grating import Grating
from syned.beamline.shape import SurfaceShape, Conic, Ellipsoid, Plane
from syned.beamline.shape import Rectangle
from syned.storage_ring.light_source import LightSource
from syned.beamline.beamline import Beamline
from syned.beamline.beamline_element import BeamlineElement
from syned.beamline.element_coordinates import ElementCoordinates
if __name__ == "__main__":
src1 = ElectronBeam.initialize_as_pencil_beam(energy_in_GeV=6.0,current=0.2)
src2 = Undulator()
screen1 = Screen("screen1")
lens1 = IdealLens(name="lens1",focal_y=6.0,focal_x=None,)
filter1 = Filter("filter1","H2O",3.0e-6)
slit1 = Slit(name="slit1",boundary_shape=Rectangle(-0.5e-3,0.5e-3,-2e-3,2e-3))
stopper1 = BeamStopper(name="stopper1",boundary_shape=Rectangle(-0.5e-3,0.5e-3,-2e-3,2e-3))
mirror1 = Mirror(name="mirror1",boundary_shape=Rectangle(-0.5e-3,0.5e-3,-2e-3,2e-3))
crystal1 = Crystal(name="crystal1",surface_shape=Plane())
grating1 = Grating(name="grating1",surface_shape=Conic())
mylist = [src1,src2,screen1,lens1,filter1,slit1, stopper1, mirror1, grating1, crystal1]
#
# test individual elements
#
for i,element in enumerate(mylist):
element.to_json("tmp_%d.json"%i)
for i,element in enumerate(mylist):
print("loading element %d"%i)
tmp = load_from_json_file("tmp_%d.json"%i)
print("returned class: ",type(tmp))
#
# test Ligtsource
#
lightsource1 = LightSource("test_source",src1,src2)
lightsource1.to_json("tmp_100.json")
tmp = load_from_json_file("tmp_100.json")
print("returned class: ",type(tmp))
print("\n-----------Info on: \n",tmp.info(),"----------------\n\n")
print( tmp.get_electron_beam().info() )
print( tmp.get_magnetic_structure().info() )
#
# test full beamline
#
SCREEN1 = BeamlineElement(screen1, coordinates=ElementCoordinates(p=11.0))
LENS1 = BeamlineElement(lens1, coordinates=ElementCoordinates(p=12.0))
FILTER1 = BeamlineElement(filter1, coordinates=ElementCoordinates(p=13.0))
SLIT1 = BeamlineElement(slit1, coordinates=ElementCoordinates(p=15.0))
STOPPER1 = BeamlineElement(stopper1, coordinates=ElementCoordinates(p=16.0))
MIRROR1 = BeamlineElement(mirror1, coordinates=ElementCoordinates(p=17.0))
GRATING1 = BeamlineElement(grating1, coordinates=ElementCoordinates(p=18.0))
CRYSTAL1 = BeamlineElement(crystal1, coordinates=ElementCoordinates(p=19.0))
MyList = [SCREEN1,LENS1,FILTER1,SLIT1,STOPPER1,MIRROR1,CRYSTAL1,GRATING1]
#
# test BeamlineElement
#
for i,element in enumerate(MyList):
element.to_json("tmp_%d.json"%(100+i))
tmp = load_from_json_file("tmp_%d.json"%(100+i))
print("returned class: ",type(tmp))
print("\n-----------Info on: \n",tmp.info(),"----------------\n\n")
#
# test Beamline
#
BL = Beamline(LightSource(name="test",electron_beam=src1,magnetic_structure=src2),
[SCREEN1,LENS1,FILTER1,SLIT1,STOPPER1,MIRROR1,CRYSTAL1,GRATING1])
BL.to_json("tmp_200.json")
#
tmp = load_from_json_file("tmp_200.json")
print("returned class: ",type(tmp))
print(tmp.get_light_source().info())
for element in tmp.get_beamline_elements():
print("list element class: ",type(element))
print(element.info())
#
#
print("\n-----------Info on: \n",tmp.info(),"----------------\n\n") | StarcoderdataPython |
1676536 | <filename>experimental/time_steppers.py<gh_stars>1-10
# -*- coding: utf-8 -*-
#
import numpy
class Heun(object):
    """Heun's method for :math:`u' = F(u)`.

    https://en.wikipedia.org/wiki/Heun's_method
    """
    order = 2.0

    def __init__(self, problem):
        self.problem = problem
        # Free parameter of the two-stage explicit RK family; alpha = 1.0
        # gives the classical Heun (trapezoidal predictor-corrector) scheme.
        alpha = 1.0
        weight = 1.0 / (2 * alpha)
        self.tableau = {
            'A': [[0.0, 0.0], [alpha, 0.0]],
            'b': [1.0 - weight, weight],
            'c': [0.0, alpha],
        }
        return

    def step(self, u0, t, dt):
        """Advance the solution ``u0`` at time ``t`` by one step of size ``dt``."""
        return _runge_kutta_step(self.problem, self.tableau, u0, t, dt)
# def rk4_step(
# V,
# F,
# u0,
# t, dt,
# sympy_dirichlet_bcs=[],
# tol=1.0e-10,
# verbose=True
# ):
# '''Classical RK4.
# '''
# c = [0.0, 0.5, 0.5, 1.0]
# A = [[0.0, 0.0, 0.0, 0.0],
# [0.5, 0.0, 0.0, 0.0],
# [0.0, 0.5, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0]]
# b = [1.0 / 6.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 6.0]
#
# return runge_kutta_step(
# A, b, c,
# V, F, u0, t, dt,
# sympy_dirichlet_bcs=sympy_dirichlet_bcs,
# tol=tol,
# verbose=verbose
# )
#
#
# def rkf_step(
# V,
# F,
# u0,
# t, dt,
# sympy_dirichlet_bcs=[],
# tol=1.0e-10,
# verbose=True
# ):
# '''Runge--Kutta--Fehlberg method.
# '''
# c = [0.0, 0.25, 3.0 / 8.0, 12.0 / 13.0, 1.0, 0.5]
# A = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
# [0.25, 0.0, 0.0, 0.0, 0.0, 0.0],
# [3./32, 9./32, 0.0, 0.0, 0.0, 0.0],
# [1932./2197, -7200./2197, 7296./2197, 0.0, 0.0, 0.0],
# [439./216, -8., 3680./513, -845./4104, 0.0, 0.0],
# [-8./27, 2., -3544./2565, 1859./4104, -11./40, 0.0]]
# # b = [25./216, 0.0, 1408./2565, 2197./4104, -1./5, 0.0] # 4th order
# # 5th order
# b = [16./135, 0.0, 6656./12825, 28561./56430, -9./50, 2./55]
#
# return runge_kutta_step(
# A, b, c,
# V, F, u0, t, dt,
# sympy_dirichlet_bcs=sympy_dirichlet_bcs,
# tol=tol,
# verbose=verbose
# )
def _runge_kutta_step(
        problem, tableau, u0, t, dt
        ):
    """Perform one explicit Runge-Kutta step of size ``dt`` from state ``u0``
    at time ``t`` using the Butcher ``tableau`` (keys 'A', 'b', 'c').

    ``problem`` supplies eval/solve of alpha*M*u + beta*F(u); ``u0`` is a
    FEniCS-style function with .copy()/.vector()/.assign().
    """
    A = numpy.array(tableau['A'])
    b = tableau['b']
    c = tableau['c']
    # Make sure that the scheme is strictly lower-triangular.
    s = len(tableau['b'])
    # Can't handle implicit methods yet.
    assert numpy.all(abs(A[numpy.triu_indices(s)]) < 1.0e-15)
    # # For the boundary values, see
    # #
    # #     Intermediate Boundary Conditions for Runge-Kutta Time Integration of
    # #     Initial-Boundary Value Problems,
    # #     <NAME>,
    # #     <http://www.math.uh.edu/~hjm/june1995/p00379-p00388.pdf>.
    # #
    # tt = sympy.symbols('t')
    # BCS = []
    # # Get boundary conditions and their derivatives.
    # for k in range(2):
    #     BCS.append([])
    #     for boundary, expr in sympy_dirichlet_bcs:
    #         # Form k-th derivative.
    #         DexprDt = sympy.diff(expr, tt, k)
    #         # TODO set degree of expression
    #         BCS[-1].append(
    #             DirichletBC(
    #                 V,
    #                 Expression(sympy.printing.ccode(DexprDt), t=t + dt),
    #                 boundary
    #             )
    #         )
    # Compute the stage values.
    k = [u0.copy() for i in range(s)]
    for i in range(s):
        # Stage input: u0 plus the weighted, already-computed stage slopes.
        U = u0.copy()
        for j in range(i):
            if A[i][j] != 0.0:
                U.vector()[:] += dt * A[i][j] * k[j].vector()
        L = problem.eval_alpha_M_beta_F(0.0, 1.0, U, t + c[i]*dt)
        # TODO boundary conditions!
        # for g in BCS[1]:
        #     g.t = t + c[i] * dt
        k[i].assign(problem.solve_alpha_M_beta_F(1.0, 0.0, L, t + c[i]*dt))
    # Put it all together.
    U = u0.copy()
    for i in range(s):
        U.vector()[:] += dt * b[i] * k[i].vector()
    # TODO boundary conditions
    # for g in BCS[0]:
    #     g.t = t + dt
    theta = problem.solve_alpha_M_beta_F(1.0, 0.0, U, t+dt)
    return theta
| StarcoderdataPython |
1624576 | <reponame>Ornella-KK/my-gallery
# Generated by Django 3.1.4 on 2020-12-21 07:20
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames Image.title to Image.name.
    dependencies = [
        ('gallery', '0004_auto_20201220_1115'),
    ]
    operations = [
        migrations.RenameField(
            model_name='image',
            old_name='title',
            new_name='name',
        ),
    ]
| StarcoderdataPython |
107837 | from datetime import datetime, timedelta
from collections import defaultdict
import os
import glob
import yaml
from airflow import DAG
from airflow.operators.data_quality_threshold_check_operator import DataQualityThresholdCheckOperator
from airflow.operators.data_quality_threshold_sql_check_operator import DataQualityThresholdSQLCheckOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.trigger_rule import TriggerRule
# Directory scanned for per-check YAML configuration files.
YAML_DIR = "./tests/configs/yaml_configs"
default_args = {
    "owner" : "airflow",
    "start_date" : datetime(2020,1,16),
    "retries" : 0,
    "retry_delay" : timedelta(minutes=5),
    "email_on_failure" : True
}
# Daily DAG that runs one data-quality check per YAML config file.
dag = DAG(
    "yaml_data_quality_check_dag",
    default_args=default_args,
    schedule_interval="@daily"
)
def recursive_make_defaultdict(conf):
    """
    recursive_make_defaultdict takes in a configuration dictionary
    and recursively converts all nested dictionaries into a defaultdict
    data structure with a default value as None.
    """
    if not isinstance(conf, dict):
        # Leaves (scalars, lists, ...) pass through unchanged.
        return conf
    for key, value in conf.items():
        conf[key] = recursive_make_defaultdict(value)
    return defaultdict(lambda: None, conf)
def get_data_quality_operator(conf, dag):
    """Build the appropriate data-quality operator for one YAML check config.

    A config providing ``min_threshold_sql`` uses the SQL-threshold variant
    (thresholds come from queries); otherwise static threshold values are
    used.  ``conf`` is a defaultdict, so missing keys read as None.
    """
    kwargs = {
        "conn_id" : conf["fields"]["conn_id"],
        "sql" : conf["fields"]["sql"],
        "push_conn_id" : conf["push_conn_id"],
        "check_description" : conf["check_description"],
        "email" : conf["notification_emails"]
    }
    if conf["threshold"]["min_threshold_sql"]:
        task = DataQualityThresholdSQLCheckOperator(
            task_id=conf["test_name"],
            min_threshold_sql=conf["threshold"]["min_threshold_sql"],
            max_threshold_sql=conf["threshold"]["max_threshold_sql"],
            threshold_conn_id=conf["threshold"]["threshold_conn_id"],
            dag=dag,
            **kwargs)
    else:
        task = DataQualityThresholdCheckOperator(
            task_id=conf["test_name"],
            min_threshold=conf["threshold"]["min_threshold"],
            max_threshold=conf["threshold"]["max_threshold"],
            dag=dag,
            **kwargs)
    return task
# One check operator per YAML config file found in YAML_DIR.
data_quality_check_tasks = []
for test_conf in glob.glob(os.path.join(str(YAML_DIR), "*.yaml")):
    with open(test_conf) as config:
        conf = recursive_make_defaultdict(yaml.safe_load(config))
        data_quality_check_tasks.append(get_data_quality_operator(conf, dag))
task_before_dq = DummyOperator(
    task_id="task_before_data_quality_checks",
    dag=dag
)
# ALL_DONE: runs regardless of individual check outcomes.
task_after_dq = DummyOperator(
    task_id="task_after_data_quality_checks",
    trigger_rule=TriggerRule.ALL_DONE,
    dag=dag
)
# Fan out: before -> every check -> after.
task_before_dq.set_downstream(data_quality_check_tasks)
task_after_dq.set_upstream(data_quality_check_tasks)
50603 | <filename>pysweng/oop.py
def dummy_function(a):
    """Identity helper used by the test suite: return ``a`` unchanged."""
    return a
# Module-level test fixtures.  Fix vs. original: removed the stray
# trailing semicolons, which are not idiomatic Python.
DUMMY_GLOBAL_CONSTANT_0 = 'FOO'
DUMMY_GLOBAL_CONSTANT_1 = 'BAR'
| StarcoderdataPython |
1742801 | <reponame>New2World/AnimEx
import os
import cv2
import argparse
import skimage.metrics
import fixer.fix_image as f_img
def parse_arg():
    """Parse command-line options for the image-fixing CLI and return the namespace."""
    parser = argparse.ArgumentParser()
    option_specs = [
        ('-i', dict(dest='inp_path', required=True)),
        ('-o', dict(dest='outp_path', default=None)),
        ('-g', dict(dest='gpu', action='store_true', default=False)),
        ('-s', dict(dest='block_size', type=int, default=-1)),
        ('-m', dict(dest='measure', default=None)),
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
def main():
    """CLI entry point: fix an image and optionally report PSNR against a reference."""
    argv = parse_arg()
    file_name = os.path.basename(argv.inp_path)
    outp_path = argv.outp_path
    # Default output: "<input-dir>/<input-name>-output.jpg"; if a directory
    # was given, write the fixed file into it under the input's name.
    if outp_path is None:
        outp_path = os.path.dirname(argv.inp_path)
        outp_path = os.path.join(outp_path, f'{file_name}-output.jpg')
    elif os.path.isdir(outp_path):
        outp_path = os.path.join(outp_path, file_name)
    solver = f_img.ImageFixer()
    # outp appears to be the fixed image as an array (compared to cv2 images
    # below) -- TODO confirm against fixer.fix_image.ImageFixer.fix.
    outp = solver.fix(argv.inp_path, outp_path, size=argv.block_size, gpu=argv.gpu)
    if argv.measure is not None:
        gt = cv2.imread(argv.measure)
        if not gt.shape == outp.shape:
            print('UserWarning: the ground truth has different dimensions, and to fit the scale of output the ground truth will be resized')
            # Upscaling uses bicubic; downscaling uses Lanczos resampling.
            if gt.shape[0] < outp.shape[0]:
                inter_method = cv2.INTER_CUBIC
            else:
                inter_method = cv2.INTER_LANCZOS4
            gt = cv2.resize(gt, (outp.shape[1],outp.shape[0]), interpolation=inter_method)
        psnr = skimage.metrics.peak_signal_noise_ratio(gt, outp)
        print(f'PSNR: {psnr}')
if __name__ == '__main__':
main() | StarcoderdataPython |
1788340 | <filename>src/pytorch_adapt/adapters/adda.py<gh_stars>1-10
import copy
from ..containers import KeyEnforcer, MultipleContainers, Optimizers
from ..hooks import ADDAHook
from ..utils.common_functions import check_domain
from .base_adapter import BaseAdapter
from .utils import default_optimizer_tuple, with_opt
class ADDA(BaseAdapter):
    """
    Adversarial Discriminative Domain Adaptation adapter.
    Wraps [ADDAHook][pytorch_adapt.hooks.adda].

    Models used: "G" (source feature extractor), "T" (target feature
    extractor, initialized as a copy of G), "C" (classifier),
    "D" (discriminator). Only T and D are optimized.
    """
    hook_cls = ADDAHook
    def inference_default(self, x, domain):
        # Pick the feature extractor by domain: 0 -> "G", otherwise "T".
        domain = check_domain(self, domain)
        fe = "G" if domain == 0 else "T"
        features = self.models[fe](x)
        logits = self.models["C"](features)
        return features, logits
    def get_default_containers(self):
        # Default optimizers are created only for the trainable models T and D.
        optimizers = Optimizers(default_optimizer_tuple(), keys=["T", "D"])
        return MultipleContainers(optimizers=optimizers)
    def get_key_enforcer(self):
        # Declares which model/optimizer keys must exist in the containers.
        return KeyEnforcer(
            models=["G", "C", "D", "T"],
            optimizers=["D", "T"],
        )
    def init_hook(self, hook_kwargs):
        # Discriminator steps use D's optimizer; generator steps use T's.
        self.hook = self.hook_cls(
            d_opts=with_opt(["D"]), g_opts=with_opt(["T"]), **hook_kwargs
        )
    def init_containers_and_check_keys(self):
        # T must exist (as a deep copy of G) before the base class validates
        # the containers, so this assignment has to precede the super() call.
        self.containers["models"]["T"] = copy.deepcopy(self.containers["models"]["G"])
        super().init_containers_and_check_keys()
| StarcoderdataPython |
1730409 | from .errors import *
from .layers import *
from .loss import *
from .models import *
| StarcoderdataPython |
51935 | <filename>src/dl/models/decoders/residual/block.py
import torch
import torch.nn as nn
from ...modules import ResidualConvBlockPreact, ResidualConvBlock
class MultiBlockResidual(nn.ModuleDict):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        same_padding: bool=True,
        batch_norm: str="bn",
        activation: str="relu",
        weight_standardize: bool=False,
        n_blocks: int=2,
        preactivate: bool=False
    ) -> None:
        """
        Stack residual conv blocks in a ModuleDict. These are used in
        the full sized decoder blocks. The number of basic conv blocks
        can be adjusted (default 2). The residual connection is applied
        only at the final conv block, before the last activation.

        Args:
        ----------
            in_channels (int):
                Number of input channels
            out_channels (int):
                Number of output channels
            same_padding (bool, default=True):
                If True, performs same-convolution
            batch_norm (str, default="bn"):
                Normalization method. One of "bn", "bcn", None
            activation (str, default="relu"):
                Activation method. One of: "relu", "swish", "mish"
            weight_standardize (bool, default=False):
                If True, perform weight standardization
            n_blocks (int, default=2):
                Number of conv blocks used in this module
            preactivate (bool, default=False):
                If True, normalization and activation are applied before
                convolution
        """
        super().__init__()
        # Choose the pre-activation or the original residual block variant.
        ResBlock = ResidualConvBlockPreact if preactivate else ResidualConvBlock
        # With a single block the residual skip is applied immediately;
        # otherwise it is deferred to the last block in the stack.
        self.conv1 = ResBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            same_padding=same_padding,
            batch_norm=batch_norm,
            activation=activation,
            weight_standardize=weight_standardize,
            use_residual=(n_blocks == 1)
        )
        remaining = list(range(1, n_blocks))
        for i in remaining:
            # Only the final conv block in the stack carries the residual skip.
            conv_block = ResBlock(
                in_channels=out_channels,
                out_channels=out_channels,
                same_padding=same_padding,
                batch_norm=batch_norm,
                activation=activation,
                weight_standardize=weight_standardize,
                use_residual=(i == remaining[-1])
            )
            self.add_module(f"conv{i + 1}", conv_block)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply every stored conv block in insertion order."""
        for _, conv_block in self.items():
            x = conv_block(x)
        return x
3235217 | <reponame>gleis44/stellwerk<filename>addons/hr_leave_request_aliasing/models/__init__.py
# -*- coding: utf-8 -*-
from . import leave_request_alias
from . import res_config
# from . import web_planner
| StarcoderdataPython |
49114 | <reponame>DazEB2/SimplePyScripts
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
Папоротник / Fern
"""
# Оригинал: http://www.cyberforum.ru/pascalabc/thread994987.html
# uses GraphABC,Utils;
#
# const
# n=255;
# max=10;
#
# var
# x,y,x1,y1,cx,cy: real;
# i,ix,iy: integer;
# // z=z^2+c
# begin
# SetWindowCaption('Фракталы: папоротник');
# SetWindowSize(300,300);
# cx:=0.251;
# cy:=0.95;
# for ix:=0 to WindowWidth-1 do
# for iy:=0 to WindowHeight-1 do
# begin
# x:=0.001*(ix-200);
# y:=0.001*(iy-150);
# for i:=1 to n do
# begin
# x1:=0.5*x*x-0.88*y*y+cx;
# y1:=x*y+cy;
# if (x1>max) or (y1>max) then break;
# x:=x1;
# y:=y1;
# end;
# if i>=n then SetPixel(ix,iy,clGreen)
# else SetPixel(ix,iy,RGB(255-i,255,255-i));
# end;
# writeln('Время расчета = ',Milliseconds/1000,' с');
# end.
def draw_fern(draw_by_image, width, height, n=255, escape=10.0):
    """Render the z -> f(z) + c "fern" fractal onto *draw_by_image*.

    Args:
        draw_by_image: drawing surface exposing ``point((x, y), color)``
            (e.g. ``PIL.ImageDraw.Draw``).
        width: pixel width of the area to render.
        height: pixel height of the area to render.
        n: maximum iterations per pixel (default 255, as in the original).
        escape: divergence threshold for either iterate component
            (default 10.0, the original hard-coded ``max``).
    """
    cx = 0.251
    cy = 0.95
    for ix in range(width):
        for iy in range(height):
            # Map the pixel to a small region of the plane (offsets taken
            # from the original Pascal program).
            x = 0.001 * (ix - 200)
            y = 0.001 * (iy - 150)
            for i in range(n):
                x1 = 0.5 * x * x - 0.88 * y * y + cx
                y1 = x * y + cy
                if x1 > escape or y1 > escape:
                    # Diverged after i iterations: shade by escape speed.
                    color = (255 - i, 255, 255 - i)
                    break
                x = x1
                y = y1
            else:
                # Completed all n iterations without diverging: the point
                # belongs to the set and is drawn solid green.
                # (Bug fix: the original tested `i >= n`, which can never be
                # True because range(n) stops at n - 1, so this branch was
                # dead and in-set points got the gradient color instead.)
                color = "green"
            draw_by_image.point((ix, iy), color)
if __name__ == '__main__':
    # Demo: render the fractal into a 300x300 white image and save it.
    from PIL import Image, ImageDraw
    img = Image.new("RGB", (300, 300), "white")
    draw_fern(ImageDraw.Draw(img), img.width, img.height)
    img.save('img.png')
| StarcoderdataPython |
3328766 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import networkx as nx
from graphdot.graph import Graph
from graphdot.graph.reorder import pbr
def n_tiles(A, tile_size=8):
    """Count the distinct (tile_size x tile_size) tiles of sparse matrix *A*
    that contain at least one stored entry.

    Args:
        A: a scipy sparse matrix (anything convertible via ``.tocoo()``).
        tile_size: edge length of a square tile (default 8).

    Returns:
        The number of occupied tiles.
    """
    A = A.tocoo()
    tiles = np.unique(
        np.array([A.row // tile_size, A.col // tile_size]),
        axis=1
    )
    # ``tiles`` has shape (2, n_unique): each unique column is one occupied
    # tile.  Bug fix: the original returned ``len(tiles)``, which is the
    # leading dimension and therefore always 2.
    return tiles.shape[1]
@pytest.mark.parametrize('n', [5, 8, 13, 20, 31, 50])
@pytest.mark.parametrize('gen', [
    lambda n: nx.wheel_graph(n),
    lambda n: nx.star_graph(n - 1),
    lambda n: nx.newman_watts_strogatz_graph(n, 3, 0.1),
    lambda n: nx.erdos_renyi_graph(n, 0.2),
])
def test_rcm_fancy_graphs(n, gen):
    """pbr() must return a valid permutation of 0..n-1 that does not
    increase the number of occupied adjacency-matrix tiles."""
    nxg = gen(n)
    # erdos_renyi can produce an empty graph; skip those (no adjacency data).
    if nxg.number_of_edges() > 0:
        g = Graph.from_networkx(nxg)
        p = pbr(g)
        # p must be a permutation: full range, no duplicates.
        assert(np.min(p) == 0)
        assert(np.max(p) == n - 1)
        assert(len(np.unique(p)) == n)
        g_perm = g.permute(p)
        # Reordering should not worsen tile locality.
        assert(n_tiles(g.adjacency_matrix) >= n_tiles(g_perm.adjacency_matrix))
| StarcoderdataPython |
3316345 | # -*- coding: utf-8 -*-
# @Author: zero_kelvin
# @Date: 2021-06-30 19:17:03
# @Last Modified by: zero_kelvin
# @Last Modified time: 2021-06-30 19:17:49
# Interactive ticket-pricing script: price depends on height, age, and
# whether a photo souvenir is added.
print("Welcome to the rollercoaster!")
height = int(input("What is your height in cm? "))
bill = 0
# Riders must be at least 120 cm tall.
if height >= 120:
    print("You are tall enough to ride this rollercoaster!")
    age = int(input("What is your age? "))
    # Age bands: <12 child ($5), 12-18 youth ($7), 45-55 free, otherwise adult ($12).
    if age < 12:
        bill = 5
        print("The child ticket price is $5")
    elif age <= 18:
        bill = 7
        print("The youth ticket price is $7")
    elif age >= 45 and age <= 55:
        print("You're aged between 45 & 55 so are statistically likely to be having a midlife crisis. We're giving you this shit for free!")
        bill = 0
    else:
        bill = 12
        print("Adult tickets are $12")
    souvenir = input("Do you want a photo souvenir with your ride? Type y or n: ")
    if souvenir == "y":
        # Add $3 to the price if the person says yes to a photo souvenir
        bill += 3
    print(f"Your final ticket price is ${bill}. Enjoy your ride try not to spew all over our equipment.")
else:
print("Sorry short arse, you will need to grow taller to ride this rollercoaster.") | StarcoderdataPython |
3393298 | import os
# from pathlib import Path
# import re
import shutil
import sys
import simur
import prepPDB
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def about_cvdump():
    """Print where to obtain cvdump.exe, needed to index static libraries."""
    help_lines = [
        ' You need to have cvdump.exe in your path to index'
        ' static libraries',
        ' - you can find it here:',
        ' https://github.com/microsoft/microsoft-pdb/blob/master'
        '/cvdump/cvdump.exe',
    ]
    for help_line in help_lines:
        print(help_line)
#
#-------------------------------------------------------------------------------
def usage():
    """Print command-line usage for this script, then the cvdump download help."""
    script_name = os.path.basename(sys.argv[0])
    print('Usage:')
    print('{0} lib-pdb-file'.format(script_name))
    print(' e.g. {0} RelWithDebInfo/TestLibCat.lib\n'.format(script_name))
    print(' this is an attempt at mimicking srctool for PDBs from static')
    print(' libraries, since there is none from Microsoft\n')
    about_cvdump()
#-------------------------------------------------------------------------------
# --- Routines for extracting the data from the pdb and associated vcs:s ---
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def plural_files(no):
    """Return 'file' when *no* equals one, otherwise 'files'."""
    return 'file' if no == 1 else 'files'
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def check_paths(root):
    """Validate that *root* names an existing regular file.

    Returns 0 on success; prints a diagnostic and returns 3 otherwise
    (matching the script's exit-code convention).
    """
    if os.path.isfile(root):
        return 0
    if os.path.exists(root):
        print(f'Sorry, {root} is not a file')
    else:
        print(f'Sorry, the pdb {root} does not exist')
    return 3
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def process_raw_cvdump_data(raw_data):
    """Extract existing source-file paths from raw cvdump output.

    cvdump prints each embedded path on the line following an
    ``LF_STRING_ID`` record; only paths that exist on disk are kept.
    """
    files = []
    expect_path = False
    for line in raw_data.splitlines():
        if expect_path:
            candidate = line.strip()
            if os.path.isfile(candidate):
                files.append(candidate)
            expect_path = False
        if 'LF_STRING_ID' in line:
            expect_path = True
    return files
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def get_lib_source_files(pdb_file, cvdump, srcsrv):
    """Return the source files referenced by a static-library PDB.

    Returns an empty list when *pdb_file* is not a lib-PDB (i.e. when
    srctool already reports files for it).
    """
    # First check if srctool returns anything - then it is NOT a lib-PDB
    srctool_files = prepPDB.get_non_indexed(pdb_file, srcsrv, {})
    if len(srctool_files):
        print(f'{pdb_file} is not a lib-PDB file - skipped')
        return []
    # Run cvdump over the PDB and scrape source paths from its output.
    commando = f'{cvdump} {pdb_file}'
    raw_data, exit_code = simur.run_process(commando, True)
    # NOTE(review): exit_code is ignored here -- a failed cvdump run silently
    # yields an empty file list.
    files = process_raw_cvdump_data(raw_data)
    return files
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def check_cvdump(cvdump):
    """Locate *cvdump* on PATH, falling back to this script's directory.

    Returns the resolved path, or None (after printing download help)
    when the executable cannot be found anywhere.
    """
    requested_name = cvdump
    resolved = shutil.which(requested_name)
    if resolved is None:
        # Not on PATH -- look next to this script as a last resort.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        resolved = shutil.which(requested_name, path=script_dir)
    if resolved is None:
        about_cvdump()
    return resolved
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def check_requirements(root, cvdump):
    """Verify that cvdump is available and *root* is an existing file.

    Returns 0 when everything is in place, 3 on any failure (the script's
    exit-code convention).
    """
    return_value = 0
    # Bug fix: the original assigned check_cvdump()'s return value directly,
    # which on SUCCESS is the cvdump path -- a truthy value that callers
    # (main) treated as a failure code and aborted on.  Translate the lookup
    # result into the 0/3 convention instead.
    if check_cvdump(cvdump) is None:
        return_value = 3
    if check_paths(root):
        return_value = 3
    return return_value
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
def main():
    """CLI entry point: list the source files embedded in a static-lib PDB.

    Returns 0 on success, 3 on any validation or lookup failure.
    """
    if len(sys.argv) < 2:
        print("Too few arguments")
        usage()
        exit(3)
    root = sys.argv[1]
    cvdump = 'cvdump.exe'
    # Default srcsrv location from the Windows 10 SDK debugger tools;
    # an alternative path may be supplied as the second argument.
    srcsrv = 'C:\\Program Files (x86)\\Windows Kits\\10\\Debuggers\\x64\\srcsrv'
    if len(sys.argv) > 2:
        srcsrv = sys.argv[2]
    failing_requirements = check_requirements(root, cvdump)
    if failing_requirements:
        return failing_requirements
    if prepPDB.check_winkits(srcsrv):
        return 3
    files = get_lib_source_files(root, cvdump, srcsrv)
    if not files:
        print(f'No source files from static lib found in {root}')
        return 3
    print(f'Found {len(files)} source {plural_files(len(files))}')
    for file in files:
        print(file)
    return 0
#-------------------------------------------------------------------------------
#
#-------------------------------------------------------------------------------
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
4825529 | from txaws.credentials import AWSCredentials
from txaws.service import AWSServiceEndpoint
from txaws.testing.ec2 import FakeEC2Client
from txaws.testing.s3 import MemoryS3
from txaws.testing.route53 import MemoryRoute53
class FakeAWSServiceRegion:
    """In-memory AWS service-region test double.

    Hands out fake EC2/S3/Route53 clients backed by the fixture data
    supplied at construction time.
    """
    # Key material handed to the fake EC2 client for keypair creation.
    key_material = ""
    def __init__(self, access_key="", secret_key="", uri="",
                 ec2_client_factory=None, keypairs=None, security_groups=None,
                 instances=None, volumes=None, snapshots=None,
                 availability_zones=None):
        self.access_key = access_key
        self.secret_key = secret_key
        self.uri = uri
        self.ec2_client = None
        # Default to the stock fake client unless the test injects its own.
        if not ec2_client_factory:
            ec2_client_factory = FakeEC2Client
        self.ec2_client_factory = ec2_client_factory
        self.keypairs = keypairs
        self.security_groups = security_groups
        self.instances = instances
        self.volumes = volumes
        self.snapshots = snapshots
        self.availability_zones = availability_zones
        self.s3 = MemoryS3()
        self._creds = AWSCredentials(
            access_key=self.access_key,
            secret_key=self.secret_key,
        )
        self._endpoint = AWSServiceEndpoint(uri=self.uri)
        self._route53_controller = MemoryRoute53()
    def get_ec2_client(self, *args, **kwds):
        """Create (and remember) a fake EC2 client seeded with the fixture data."""
        self.ec2_client = self.ec2_client_factory(
            self._creds, self._endpoint,
            instances=self.instances, keypairs=self.keypairs,
            volumes=self.volumes, key_material=self.key_material,
            security_groups=self.security_groups, snapshots=self.snapshots,
            availability_zones=self.availability_zones)
        return self.ec2_client
    def get_s3_client(self, creds=None):
        """Create (and remember) an in-memory S3 client; also stores its state."""
        if creds is None:
            creds = AWSCredentials(
                access_key=self.access_key,
                secret_key=self.secret_key,
            )
        endpoint = AWSServiceEndpoint(uri=self.uri)
        self.s3_client, self.s3_state = self.s3.client(creds, endpoint)
        return self.s3_client
    def get_route53_client(self, creds=None):
        """Create an in-memory Route53 client (its state is not retained here)."""
        if creds is None:
            creds = AWSCredentials(
                access_key=self.access_key,
                secret_key=self.secret_key,
            )
        endpoint = AWSServiceEndpoint(uri=self.uri)
        client, state = self._route53_controller.client(creds, endpoint)
        return client
| StarcoderdataPython |
1627279 | <reponame>Make-Munich/SaBoT
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-11 13:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: add booth power/area fields to Exhibitor."""
    dependencies = [
        ('exhibitor', '0002_exhibitor_year'),
    ]
    operations = [
        # New nullable field: required power in kWh (no default needed since
        # the field allows NULL; preserve_default=False drops the temporary
        # default used during migration).
        migrations.AddField(
            model_name='exhibitor',
            name='boothPower',
            field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Do you need power? (How many kwH)'),
            preserve_default=False,
        ),
        # New choice field: booth area, defaulting to "No preference" (0).
        migrations.AddField(
            model_name='exhibitor',
            name='boothArea',
            field=models.PositiveIntegerField(choices=[(1, b'Electronics'), (2, b'3D Printing'), (0, b'No preference')], default=0, verbose_name='Which area is your booth in?'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
144643 | from snake_utils import *
from snake_agent_ai import *
from snake_agent_astar import *
from snake_agent_greedy import *
from snake_agent_bfs import *
from snake_agent_rl_qlearning import *
class AgentFactory():
    """Factory that instantiates the snake agent matching a requested type."""
    def __init__(self, session, agent_type : Agents):
        self._session = session
        self._agent_type = agent_type
    def get_agent(self):
        """Instantiate and return the agent for the stored agent type.

        Raises:
            ValueError: if the agent type is not recognised.
        """
        registry = {
            Agents.AGENT_GREEDY: AgentGreedy,
            Agents.AGENT_A_STAR: AgentAStar,
            Agents.AGENT_BFS: AgentBFS,
            Agents.AGENT_SUPER_STAR: AgentSuperStar,
            Agents.AGENT_Q_LEARNING: AgentQLearning,
        }
        if self._agent_type not in registry:
            raise ValueError("Agents type incorrect")
        return registry[self._agent_type](self._session, self._agent_type)
| StarcoderdataPython |
1607451 | <filename>azure-kusto-data/azure/kusto/data/security.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License
from typing import Optional, Dict, TYPE_CHECKING
from urllib.parse import urlparse
from ._token_providers import (
TokenProviderBase,
BasicTokenProvider,
CallbackTokenProvider,
MsiTokenProvider,
AzCliTokenProvider,
UserPassTokenProvider,
DeviceLoginTokenProvider,
InteractiveLoginTokenProvider,
ApplicationKeyTokenProvider,
ApplicationCertificateTokenProvider,
TokenConstants,
)
from .exceptions import KustoAuthenticationError, KustoClientError
if TYPE_CHECKING:
from . import KustoConnectionStringBuilder
class _AadHelper:
    """Selects and drives the AAD token provider matching the connection string."""
    kusto_uri = None  # type: str
    authority_uri = None  # type: str
    token_provider = None  # type: TokenProviderBase
    def __init__(self, kcsb: "KustoConnectionStringBuilder", is_async: bool):
        # Normalize the data source down to scheme://host for token scoping.
        self.kusto_uri = "{0.scheme}://{0.hostname}".format(urlparse(kcsb.data_source))
        self.username = None
        # Provider selection: the first matching credential style in the
        # connection string wins; device login is the final fallback.
        if kcsb.interactive_login:
            self.token_provider = InteractiveLoginTokenProvider(self.kusto_uri, kcsb.authority_id, kcsb.login_hint, kcsb.domain_hint, is_async=is_async)
        elif all([kcsb.aad_user_id, kcsb.password]):
            self.token_provider = UserPassTokenProvider(self.kusto_uri, kcsb.authority_id, kcsb.aad_user_id, kcsb.password, is_async=is_async)
        elif all([kcsb.application_client_id, kcsb.application_key]):
            self.token_provider = ApplicationKeyTokenProvider(
                self.kusto_uri, kcsb.authority_id, kcsb.application_client_id, kcsb.application_key, is_async=is_async
            )
        elif all([kcsb.application_client_id, kcsb.application_certificate, kcsb.application_certificate_thumbprint]):
            # kcsb.application_public_certificate can be None if SNI is not used
            self.token_provider = ApplicationCertificateTokenProvider(
                self.kusto_uri,
                kcsb.application_client_id,
                kcsb.authority_id,
                kcsb.application_certificate,
                kcsb.application_certificate_thumbprint,
                kcsb.application_public_certificate,
                is_async=is_async,
            )
        elif kcsb.msi_authentication:
            self.token_provider = MsiTokenProvider(self.kusto_uri, kcsb.msi_parameters, is_async=is_async)
        elif kcsb.user_token:
            self.token_provider = BasicTokenProvider(kcsb.user_token, is_async=is_async)
        elif kcsb.application_token:
            self.token_provider = BasicTokenProvider(kcsb.application_token, is_async=is_async)
        elif kcsb.az_cli:
            self.token_provider = AzCliTokenProvider(self.kusto_uri, is_async=is_async)
        elif kcsb.token_provider or kcsb.async_token_provider:
            self.token_provider = CallbackTokenProvider(token_callback=kcsb.token_provider, async_token_callback=kcsb.async_token_provider, is_async=is_async)
        else:
            self.token_provider = DeviceLoginTokenProvider(self.kusto_uri, kcsb.authority_id, is_async=is_async)
    def acquire_authorization_header(self):
        """Return an HTTP Authorization header value, wrapping provider errors."""
        try:
            return _get_header_from_dict(self.token_provider.get_token())
        except Exception as error:
            kwargs = self.token_provider.context()
            kwargs["kusto_uri"] = self.kusto_uri
            raise KustoAuthenticationError(self.token_provider.name(), error, **kwargs)
    async def acquire_authorization_header_async(self):
        """Async variant of acquire_authorization_header."""
        try:
            return _get_header_from_dict(await self.token_provider.get_token_async())
        except Exception as error:
            kwargs = await self.token_provider.context_async()
            # NOTE(review): the sync path stores this under "kusto_uri" but the
            # async path uses "resource" -- confirm which key the error
            # formatting expects; this looks inconsistent.
            kwargs["resource"] = self.kusto_uri
            raise KustoAuthenticationError(self.token_provider.name(), error, **kwargs)
def _get_header_from_dict(token: dict):
    """Build an Authorization header value from an MSAL- or az-cli-shaped token dict."""
    for access_key, type_key in (
        (TokenConstants.MSAL_ACCESS_TOKEN, TokenConstants.MSAL_TOKEN_TYPE),
        (TokenConstants.AZ_ACCESS_TOKEN, TokenConstants.AZ_TOKEN_TYPE),
    ):
        if access_key in token:
            return _get_header(token[type_key], token[access_key])
    raise KustoClientError("Unable to determine the token type. Neither 'tokenType' nor 'token_type' property is present.")
def _get_header(token_type: str, access_token: str) -> str:
return "{0} {1}".format(token_type, access_token)
| StarcoderdataPython |
1152 | <gh_stars>1-10
# Copyright 2020 Soil, Inc.
from soil.openstack.base import DataBase
from soil.openstack.base import SourceBase
class SnapshotData(DataBase):
    """A class for openstack snapshot data.

    Unwraps the 'snapshot' envelope from a show-snapshot API response.
    """
    def __init__(self, data):
        # data is expected to be {'snapshot': {...}} as returned by cinder.
        self.data = data['snapshot']
class Snapshot(SourceBase):
    """A class for an openstack volume snapshot resource."""
    def __init__(self, plugin, source_id):
        super(Snapshot, self).__init__(plugin, source_id)
        # Lazily populated cache of the snapshot's data (see snapshot_obj).
        self._snapshot_obj = None
    @property
    def snapshot_obj(self):
        """SnapshotData for this snapshot, fetched once and cached."""
        if self._snapshot_obj is not None:
            return self._snapshot_obj
        self._snapshot_obj = SnapshotData(self.show())
        return self._snapshot_obj
    def show(self):
        """Return the raw show-snapshot response from cinder."""
        return self.plugin.cinder.show_snapshot(self.source_id)
    def delete(self):
        """Request deletion of this snapshot via cinder."""
        self.plugin.cinder.delete_snapshot(self.source_id)
    def is_created(self):
        """Return True once the snapshot reaches 'available'.

        Raises via _check_failed_status when the status indicates failure
        (behavior inherited from SourceBase); otherwise returns False while
        creation is still in progress.
        """
        snapshot_info = self.show()
        status = snapshot_info['snapshot']['status']
        if status in ('available', ):
            return True
        self._check_failed_status(status)
        return False
    def is_delete(self):
        # NOTE(review): deletion-progress check is not implemented yet.
        pass
| StarcoderdataPython |
1717304 | # -*- coding: utf-8 -*-
from django.contrib.gis.db.models.query import GeoQuerySet as BaseGeoQuerySet
from django_orm.cache.queryset import ObjectCacheMixIn
class GeoQuerySet(ObjectCacheMixIn, BaseGeoQuerySet):
    """Django GeoQuerySet augmented with the object-cache mixin; no extra behavior."""
    pass
| StarcoderdataPython |
1717101 | <filename>useintest/tests/common.py<gh_stars>1-10
import re
from useintest.common import MOUNTABLE_TEMP_DIRECTORY
MOUNTABLE_TEMP_CREATION_KWARGS = {"dir": MOUNTABLE_TEMP_DIRECTORY}
MAX_RUN_TIME_IN_SECONDS = 120
# Matches version fragments like "4", "4_1", "4_1_9".
_EXTRACT_VERSION_PATTERN = re.compile("[0-9]+(_[0-9]+)*")


def extract_version_number(string: str) -> str:
    """Extract the first underscore-separated version number from *string*
    and return it dot-separated.

    Example: "Irods4_1_9CompatibleController" -> "4.1.9".

    Raises:
        ValueError: if *string* contains no version number.
    """
    match = _EXTRACT_VERSION_PATTERN.search(string)
    if match is None:
        raise ValueError("No version number in string")
    return match.group(0).replace("_", ".")
| StarcoderdataPython |
177752 |
# standard imports
from landsat_metadata import landsat_metadata
from dnppy import core
import math
import os
import arcpy
# Module import side effect: check out the Spatial Analyst license (needed
# for arcpy.sa.SetNull below) and allow outputs to be overwritten.
if arcpy.CheckExtension('Spatial')=='Available':
    arcpy.CheckOutExtension('Spatial')
    arcpy.env.overwriteOutput = True

# Public API of this module.
__all__=['toa_reflectance_8',          # complete
         'toa_reflectance_457']        # complete
def toa_reflectance_8(band_nums, meta_path, outdir = None):
    """
    Converts Landsat 8 OLI bands to Top-of-Atmosphere reflectance. To be
    performed on raw Landsat 8 level 1 data. See
    http://landsat.usgs.gov/Landsat8_Using_Product.php for details.

    :param band_nums:   A list of desired band numbers such as [3,4,5]
    :param meta_path:   The full filepath to the MTL metadata file for those bands
    :param outdir:      Output directory to save converted files. If left None the
                        output files are saved next to the input files.

    :return output_filelist:    List of files created by this function
    """
    output_filelist = []
    # enforce the list of band numbers and grab metadata from the MTL file
    band_nums = core.enf_list(band_nums)
    # NOTE: map() returns a one-shot iterator here; it is consumed exactly
    # once by the loop below.
    band_nums = map(str, band_nums)
    OLI_bands = ['1','2','3','4','5','6','7','8','9']
    meta_path = os.path.abspath(meta_path)
    meta = landsat_metadata(meta_path)
    # cycle through each band in the list for calculation, ensuring each is in the list of OLI bands
    for band_num in band_nums:
        if band_num in OLI_bands:
            # Band rasters live next to the MTL file as *_B<n>.tif.
            band_path = meta_path.replace("MTL.txt","B{0}.tif".format(band_num))
            Qcal = arcpy.Raster(band_path)
            Mp = getattr(meta,"REFLECTANCE_MULT_BAND_{0}".format(band_num)) # multiplicative scaling factor
            Ap = getattr(meta,"REFLECTANCE_ADD_BAND_{0}".format(band_num)) # additive rescaling factor
            SEA = getattr(meta,"SUN_ELEVATION")*(math.pi/180) # sun elevation angle theta_se, radians
            # get rid of the zero values that show as the black background to avoid skewing values
            null_raster = arcpy.sa.SetNull(Qcal, Qcal, "VALUE = 0")
            # TOA reflectance with sun-angle correction:
            # rho = (Mp * Qcal + Ap) / sin(theta_se)
            TOA_ref = (((null_raster * Mp) + Ap)/(math.sin(SEA)))
            # save the data to the automated name if outdir is given or in the parent folder if not
            if outdir is not None:
                outdir = os.path.abspath(outdir)
                outname = core.create_outname(outdir, band_path, "TOA_Ref", "tif")
            else:
                folder = os.path.split(meta_path)[0]
                outname = core.create_outname(folder, band_path, "TOA_Ref", "tif")
            TOA_ref.save(outname)
            output_filelist.append(outname)
            print("Saved output at {0}".format(outname))
        # if listed band is not an OLI sensor band, skip it and print message
        else:
            print("Can only perform reflectance conversion on OLI sensor bands")
            print("Skipping band {0}".format(band_num))
    return output_filelist
def toa_reflectance_457(band_nums, meta_path, outdir = None):
    """
    Converts Landsat 4, 5, or 7 pixel values from digital numbers to
    Top-of-Atmosphere reflectance. To be performed on raw Landsat 4, 5,
    or 7 level 1 data.

    :param band_nums:   A list of desired band numbers such as [3,4,5]
    :param meta_path:   The full filepath to the MTL metadata file for those bands
    :param outdir:      Output directory to save converted files. If left None the
                        output files are saved next to the input files.

    :return output_filelist:    List of files created by this function
    """
    output_filelist = []
    band_nums = core.enf_list(band_nums)
    # NOTE: map() returns a one-shot iterator; it is consumed once below.
    band_nums = map(str, band_nums)
    # metadata format was changed August 29, 2012. This tool can process either
    # the new or old format.
    # Bug fix: the file was previously opened without a context manager and
    # only closed on the function's last line, leaking the handle on any
    # exception or early raise.
    meta_path = os.path.abspath(meta_path)
    with open(meta_path) as meta_file:
        MText = meta_file.read()
    metadata = landsat_metadata(meta_path)
    # the presence of a PRODUCT_CREATION_TIME category is used to identify old
    # metadata; if absent, the metadata is considered new.
    # Band6length refers to the length of the Band 6 name string (longer in the
    # new metadata).  NOTE(review): Band6length is currently unused here.
    if "PRODUCT_CREATION_TIME" in MText:
        Meta = "oldMeta"
        Band6length = 2
    else:
        Meta = "newMeta"
        Band6length = 8
    # The tile name is located using the newMeta/oldMeta layout and the capture
    # date is recorded.  NOTE(review): `date` is currently unused.
    if Meta == "newMeta":
        TileName = getattr(metadata, "LANDSAT_SCENE_ID")
        year = TileName[9:13]
        jday = TileName[13:16]
        date = getattr(metadata, "DATE_ACQUIRED")
    elif Meta == "oldMeta":
        TileName = getattr(metadata, "BAND1_FILE_NAME")
        year = TileName[13:17]
        jday = TileName[17:20]
        date = getattr(metadata, "ACQUISITION_DATE")
    # The spacecraft determines the solar exoatmospheric irradiance (ESun)
    # per band; index 5 (band 6, thermal) is a 0. placeholder.
    spacecraft = getattr(metadata, "SPACECRAFT_ID")
    if "7" in spacecraft:
        ESun = (1969.0, 1840.0, 1551.0, 1044.0, 255.700, 0., 82.07, 1368.00)
        TM_ETM_bands = ['1','2','3','4','5','7','8']
    elif "5" in spacecraft:
        ESun = (1957.0, 1826.0, 1554.0, 1036.0, 215.0, 0. ,80.67)
        TM_ETM_bands = ['1','2','3','4','5','7']
    elif "4" in spacecraft:
        ESun = (1957.0, 1825.0, 1557.0, 1033.0, 214.9, 0. ,80.72)
        TM_ETM_bands = ['1','2','3','4','5','7']
    else:
        arcpy.AddError("This tool only works for Landsat 4, 5, or 7")
        raise arcpy.ExecuteError()
    # Days in year.  NOTE(review): the simple %4 rule misclassifies century
    # years (e.g. 1900/2100); harmless for the Landsat 4/5/7 era but worth
    # confirming before reuse.
    if float(year) % 4 == 0: DIY = 366.
    else: DIY=365.
    # Earth-sun distance factor d^2 from day of year (Spencer-style series).
    theta = 2 * math.pi * float(jday)/DIY
    dSun2 = (1.00011 + 0.034221 * math.cos(theta) + 0.001280 * math.sin(theta) +
               0.000719 * math.cos(2*theta)+ 0.000077 * math.sin(2 * theta))
    # Solar zenith angle in degrees.
    SZA = 90. - float(getattr(metadata, "SUN_ELEVATION"))
    # Calculating values for each band
    for band_num in band_nums:
        if band_num in TM_ETM_bands:
            print("Processing Band {0}".format(band_num))
            pathname = meta_path.replace("MTL.txt", "B{0}.tif".format(band_num))
            Oraster = arcpy.Raster(pathname)
            # Null out the zero-valued black background pixels.
            null_raster = arcpy.sa.SetNull(Oraster, Oraster, "VALUE = 0")
            # Pull radiance / DN calibration min/max using the metadata layout.
            if Meta == "newMeta":
                LMax = getattr(metadata, "RADIANCE_MAXIMUM_BAND_{0}".format(band_num))
                LMin = getattr(metadata, "RADIANCE_MINIMUM_BAND_{0}".format(band_num))
                QCalMax = getattr(metadata, "QUANTIZE_CAL_MAX_BAND_{0}".format(band_num))
                QCalMin = getattr(metadata, "QUANTIZE_CAL_MIN_BAND_{0}".format(band_num))
            elif Meta == "oldMeta":
                LMax = getattr(metadata, "LMAX_BAND{0}".format(band_num))
                LMin = getattr(metadata, "LMIN_BAND{0}".format(band_num))
                QCalMax = getattr(metadata, "QCALMAX_BAND{0}".format(band_num))
                QCalMin = getattr(metadata, "QCALMIN_BAND{0}".format(band_num))
            # DN -> at-sensor radiance (linear rescale between calibration points).
            Radraster = (((LMax - LMin)/(QCalMax-QCalMin)) * (null_raster - QCalMin)) + LMin
            Oraster = 0
            del null_raster
            # Radiance -> TOA reflectance: pi * L * d^2 / (ESun * cos(SZA)).
            Refraster = (math.pi * Radraster * dSun2) / (ESun[int(band_num[0])-1] * math.cos(SZA*(math.pi/180)))
            # Construct the output name based on whether outdir was given.
            if outdir is not None:
                outdir = os.path.abspath(outdir)
                BandPath = core.create_outname(outdir, pathname, "TOA_Ref", "tif")
            else:
                folder = os.path.split(meta_path)[0]
                BandPath = core.create_outname(folder, pathname, "TOA_Ref", "tif")
            Refraster.save(BandPath)
            output_filelist.append(BandPath)
            del Refraster, Radraster
            print("Reflectance Calculated for Band {0}".format(band_num))
        # if listed band is not a TM/ETM+ sensor band, skip it and print message
        else:
            print("Can only perform reflectance conversion on TM/ETM+ sensor bands")
            print("Skipping band {0}".format(band_num))
    return output_filelist
| StarcoderdataPython |
14634 | <reponame>syz247179876/Flask-Sports
# -*- coding: utf-8 -*-
# @Time : 2020/12/1 下午11:24
# @Author : 司云中
# @File : production.py
# @Software: Pycharm
from configs.default import DefaultConfig
class ProductionConfig(DefaultConfig):
    """Flask configuration for the production environment."""
    DEBUG = False
    TESTING = False
    # MongoDB connection settings -- intentionally blank placeholders,
    # to be filled in for the deployment environment.
    MONGODB_DB = ''
    MONGODB_HOST = ''
    MONGODB_PORT = ''
    MONGODB_USERNAME = ''
    MONGODB_PASSWORD = ''

# Singleton-style instance imported by the app factory.
production_config = ProductionConfig()
| StarcoderdataPython |
3255963 | <gh_stars>0
import nanome
import os
from functools import partial
# Absolute paths to the menu/layout JSON templates and icon shipped
# alongside this module.
dir_path = os.path.dirname(os.path.realpath(__file__))
MENU_PATH = dir_path + "/WebLoad.json"
PPT_TAB_PATH = dir_path + "/PPTTab.json"
IMAGE_TAB_PATH = dir_path + "/ImageTab.json"
LIST_ITEM_PATH = dir_path + "/ListItem.json"
UP_ICON_PATH = dir_path + "/UpIcon.png"
class Prefabs(object):
    """Namespace for UI prefab layout nodes, populated by MenuManager.ReadJsons()."""
    # Cloneable layout-node templates; None until the JSONs are loaded.
    tab_prefab = None
    ppt_prefab = None
    image_prefab = None
    list_item_prefab = None
class PageTypes(nanome.util.IntEnum):
    """Kinds of pages the menu can open (see MenuManager.OpenPage)."""
    Home = 1
    Image = 2
    PPT = 3
#Singleton class.
class MenuManager(object):
    def __init__(self, plugin, address, load_file_delegate):
        """Load the menu JSONs, wire up the Page class state, and open the home page."""
        # Singleton-style: the most recently constructed manager is "the" instance.
        MenuManager.instance = self
        self.plugin = plugin
        self.ReadJsons()
        # Shared class-level references used by all Page instances.
        MenuManager.Page.tab_bar = self.plugin.menu.root.find_node("TabBar")
        MenuManager.Page.page_parent = self.plugin.menu.root.find_node("Pages")
        MenuManager.Page.menu_manager = self
        home = self.plugin.menu.root.find_node("FilesPage")
        home_tab = self.plugin.menu.root.find_node("HomeTab")
        self.home_page = MenuManager.HomePage(home_tab, home, address, load_file_delegate)
        self.selected_page = self.home_page
        # Tracks whether the menu has been sent to Nanome at least once
        # (see Refresh: first call must upload the whole menu).
        self.uploaded = False
        self.Refresh()
    def ReadJsons(self):
        """Load the main menu and all prefab layout templates from their JSON files."""
        self.plugin.menu = nanome.ui.Menu.io.from_json(MENU_PATH)
        Prefabs.ppt_prefab = nanome.ui.LayoutNode.io.from_json(PPT_TAB_PATH).get_children()[0]
        Prefabs.image_prefab = nanome.ui.LayoutNode.io.from_json(IMAGE_TAB_PATH).get_children()[0]
        Prefabs.list_item_prefab = nanome.ui.LayoutNode.io.from_json(LIST_ITEM_PATH)
        Prefabs.tab_prefab = self.plugin.menu.root.find_node("TabPrefab")
        # Detach the tab template from the live menu so clones can be added later.
        Prefabs.tab_prefab.parent.remove_child(Prefabs.tab_prefab)
def SwitchTab(self, page=None):
if page==None:
page = self.home_page
self.selected_page.deselect()
self.selected_page = page
self.selected_page.select()
MenuManager.RefreshMenu()
def OpenPage(self, type, data, name):
if type == PageTypes.Image:
MenuManager.ImagePage(data, name)
if type == PageTypes.PPT:
MenuManager.PPTPage(data, name)
self.Refresh()
@classmethod
def RefreshMenu(cls, content = None):
MenuManager.instance.Refresh(content)
def Refresh(self, content = None):
if content and self.uploaded:
self.plugin.update_content(content)
else:
self.uploaded = True
self.plugin.menu.enable = True
self.plugin.update_menu(self.plugin.menu)
def ClearList(self):
self.home_page.file_list.items.clear()
def UpdateList(self, files, folders, can_upload):
self.home_page.upload_button.unusable = not can_upload
self.Refresh(self.home_page.upload_button)
old_items = set(map(lambda item: item.name, self.home_page.file_list.items))
new_items = folders + files
add_set = set(new_items)
remove_items = old_items - add_set
add_items = add_set - old_items
changed = False
for item in remove_items:
self.home_page.RemoveItem(item)
changed = True
# iterate list to preserve ordering
for item in new_items:
if item not in add_items:
continue
self.home_page.AddItem(item, item in folders)
changed = True
if changed or not len(old_items):
self.Refresh(self.home_page.file_list)
def GetFiles(self):
return list(map(lambda item: item.name, self.home_page.file_list.items))
def GetOpenFiles(self):
return list(map(lambda item: item.name, MenuManager.Page.page_parent.get_children()))
class Page(object):
tab_bar = None
page_parent = None
menu_manager = None
def __init__(self, name, tab_prefab, page_prefab):
#setup tab
self.tab_base = tab_prefab.clone()
tab_prefab = None
self.tab_button = self.tab_base.get_content()
self.tab_label = self.tab_base.find_node("TabPrefabLabel").get_content()
self.tab_delete_button = self.tab_base.find_node("TabPrefabDelete").get_content()
base_name = os.path.basename(name)
base_name = os.path.splitext(base_name)[0]
tab_name = base_name[:6]
self.tab_label.text_value = tab_name
fill = self.tab_bar.find_node("Fill")
self.tab_bar.add_child(self.tab_base)
self.tab_bar.remove_child(fill)
self.tab_bar.add_child(fill)
#setup page
self.base = page_prefab.clone()
self.base.name = base_name
page_prefab = None
self.page_parent.add_child(self.base)
#setup buttons
def tab_delete(button):
self.page_parent.remove_child(self.base)
self.tab_bar.remove_child(self.tab_base)
self.menu_manager.SwitchTab()
self.tab_delete_button.register_pressed_callback(tab_delete)
def tab_pressed(button):
self.menu_manager.SwitchTab(self)
self.tab_button.register_pressed_callback(tab_pressed)
self.menu_manager.SwitchTab(self)
def select(self):
self.base.enabled = True
self.tab_base.get_content().selected = True
def deselect(self):
self.base.enabled = False
self.tab_base.get_content().selected = False
class HomePage(Page):
def __init__(self, tab, page, address, load_file_delegate):
self.tab_base = tab
self.base = page
self.type = PageTypes.Home
self.tab_button = self.tab_base.get_content()
self.load_file_delegate = load_file_delegate
self.showing_upload = False
def tab_pressed(button):
self.menu_manager.SwitchTab(self)
self.tab_button.register_pressed_callback(tab_pressed)
def open_url(button):
self.menu_manager.plugin.open_url(address)
url_button = self.base.find_node("URLButton").get_content()
url_button.register_pressed_callback(open_url)
def go_up(button):
self.menu_manager.plugin.chdir('..')
self.ToggleUpload(show=False)
self.up_button = self.base.find_node("GoUpButton").get_content()
self.up_button.register_pressed_callback(go_up)
self.up_button.unusable = True
self.up_button.set_all_icon(UP_ICON_PATH)
self.up_button.icon.size = 0.5
self.up_button.icon.color_unusable = nanome.util.Color.Grey()
self.upload_button = self.base.find_node("UploadButton").get_content()
self.upload_button.register_pressed_callback(self.ToggleUpload)
self.ins_add_files = "Visit %s in browser to add files" % address
self.ins_select_complex = "Select a structure from the workspace"
self.instructions = self.base.find_node("InstructionLabel").get_content()
self.instructions.text_value = self.ins_add_files
self.breadcrumbs = self.base.find_node("Breadcrumbs").get_content()
self.file_explorer = self.base.find_node("FileExplorer")
ln_file_list = self.base.find_node("FileList")
self.file_list = ln_file_list.get_content()
self.file_list.parent = ln_file_list
ln_file_loading = self.base.find_node("FileLoading")
self.file_loading = ln_file_loading.get_content()
self.file_loading.parent = ln_file_loading
self.file_upload = self.base.find_node("FileUpload")
# upload components
self.panel_list = self.base.find_node("SelectComplex")
self.panel_upload = self.base.find_node("SelectType")
button_pdb = self.base.find_node("PDB").get_content()
button_pdb.register_pressed_callback(partial(self.UploadComplex, "PDB"))
button_sdf = self.base.find_node("SDF").get_content()
button_sdf.register_pressed_callback(partial(self.UploadComplex, "SDF"))
button_mmcif = self.base.find_node("MMCIF").get_content()
button_mmcif.register_pressed_callback(partial(self.UploadComplex, "MMCIF"))
self.complex_list = self.base.find_node("ComplexList").get_content()
self.selected_complex = None
self.select()
def UpdateBreadcrumbs(self, path, at_root):
self.breadcrumbs.text_value = path
MenuManager.RefreshMenu(self.breadcrumbs)
self.up_button.unusable = at_root
MenuManager.RefreshMenu(self.up_button)
def AddItem(self, name, is_folder):
new_item = Prefabs.list_item_prefab.clone()
new_item.name = name
button = new_item.find_node("ButtonNode").get_content()
button.item_name = name
plugin = MenuManager.instance.plugin
display_name = name.replace(plugin.account, 'account')
label = new_item.find_node("LabelNode").get_content()
label.text_value = display_name
if is_folder:
label.text_value += '/'
def FilePressedCallback(button):
self.file_list.parent.enabled = False
self.file_loading.parent.enabled = True
self.file_loading.text_value = 'loading...\n' + button.item_name
MenuManager.RefreshMenu()
def OnFileLoaded():
self.file_list.parent.enabled = True
self.file_loading.parent.enabled = False
MenuManager.RefreshMenu()
self.load_file_delegate(button.item_name, OnFileLoaded)
def FolderPressedCallback(button):
MenuManager.instance.plugin.chdir(button.item_name)
cb = FolderPressedCallback if is_folder else FilePressedCallback
button.register_pressed_callback(cb)
self.file_list.items.append(new_item)
def RemoveItem(self, name):
items = self.file_list.items
for child in items:
if child.name == name:
items.remove(child)
break
def ToggleUpload(self, button=None, show=None):
show = not self.showing_upload if show is None else show
self.showing_upload = show
self.file_upload.enabled = show
self.file_explorer.enabled = not show
self.upload_button.set_all_text('Cancel' if show else 'Upload Here')
self.instructions.text_value = self.ins_select_complex if show else self.ins_add_files
if show:
plugin = MenuManager.instance.plugin
plugin.request_complex_list(self.PopulateComplexes)
self.panel_list.enabled = True
self.panel_upload.enabled = False
MenuManager.RefreshMenu()
def PopulateComplexes(self, complexes):
def select_complex(button):
self.selected_complex = button.complex
self.panel_list.enabled = False
self.panel_upload.enabled = True
MenuManager.RefreshMenu()
self.complex_list.items = []
for complex in complexes:
item = Prefabs.list_item_prefab.clone()
label = item.find_node("LabelNode").get_content()
label.text_value = complex.full_name
button = item.find_node("ButtonNode").get_content()
button.complex = complex
button.register_pressed_callback(select_complex)
self.complex_list.items.append(item)
if not complexes:
# empty ln for spacing
self.complex_list.items.append(nanome.ui.LayoutNode())
ln = nanome.ui.LayoutNode()
lbl = ln.add_new_label("no structures found in workspace")
lbl.text_horizontal_align = lbl.HorizAlignOptions.Middle
lbl.text_max_size = 0.4
self.complex_list.items.append(ln)
MenuManager.RefreshMenu(self.complex_list)
def UploadComplex(self, save_type, button):
plugin = MenuManager.instance.plugin
def save_func(complexes):
plugin.save_molecule(save_type, complexes[0])
self.ToggleUpload(show=False)
plugin.request_complexes([self.selected_complex.index], save_func)
class ImagePage(Page):
def __init__(self, image, name):
MenuManager.Page.__init__(self, name, Prefabs.tab_prefab, Prefabs.image_prefab)
self.type = PageTypes.Image
self.image = image
self.image_content = self.base.find_node("ImageContent").add_new_image(image)
self.image_content.scaling_option = nanome.util.enums.ScalingOptions.fit
class PPTPage(Page):
def __init__(self, images, name):
MenuManager.Page.__init__(self, name, Prefabs.tab_prefab, Prefabs.ppt_prefab)
self.type = PageTypes.PPT
self.images = images
self.prev_button = self.base.find_node("PrevButton").get_content()
self.next_button = self.base.find_node("NextButton").get_content()
self.page_text = self.base.find_node("PageText").get_content()
self.ppt_content = self.base.find_node("PPTContent").add_new_image()
self.ppt_content.scaling_option = nanome.util.enums.ScalingOptions.fit
self.current_slide = 0
def move_next(button):
next_slide = (self.current_slide+1) % len(self.images)
self.change_slide(next_slide)
MenuManager.RefreshMenu(self.ppt_content)
MenuManager.RefreshMenu(self.page_text)
def move_prev(button):
next_slide = (self.current_slide-1) % len(self.images)
self.change_slide(next_slide)
MenuManager.RefreshMenu(self.ppt_content)
MenuManager.RefreshMenu(self.page_text)
self.prev_button.register_pressed_callback(move_prev)
self.next_button.register_pressed_callback(move_next)
self.change_slide(0)
def change_slide(self, index):
num_slides = len(self.images)
self.current_slide = index
self.ppt_content.file_path = self.images[index]
self.page_text.text_value = str(self.current_slide+1) + "/" + str(num_slides)
| StarcoderdataPython |
90141 | # -*- coding: utf-8 -*-
import ustruct as struct
class ValueType:
    """String constants naming the value types fill_bytes() can pack."""
    STRING = "string"
    CHAR = "char"
    DOUBLE = "double"
    FLOAT = "float"
    INT = "int"
    UINT = "uint"
    SHORT = "short"
    BOOLEAN = "boolean"


def fill_bytes(byte_array, start, end, value, value_type):
    """Pack *value* into ``byte_array[start:end]`` according to *value_type*.

    An unknown type, ``None``, or an empty string leaves the slice untouched.
    Numeric types coerce via int()/float() first, so string inputs are fine.

    :param byte_array: mutable buffer (e.g. bytearray) to write into
    :param start: slice start index
    :param end: slice end index (exclusive)
    :param value: value to pack, or None/'' to skip
    :param value_type: one of the ValueType constants
    :return: the (possibly modified) byte_array

    Fixes: the original compared ``value is not ''`` — an identity check
    against a literal, which is unreliable and a SyntaxWarning in modern
    CPython; it is now a value comparison, hoisted out of every branch.
    """
    if value is None or value == '':
        return byte_array
    if value_type == ValueType.CHAR:
        byte_array[start:end] = struct.pack("s", value)
    elif value_type == ValueType.STRING:
        # Fixed-width string field: pad/truncate to the slice length.
        byte_array[start:end] = struct.pack("{}s".format(end - start), value)
    elif value_type == ValueType.DOUBLE:
        byte_array[start:end] = struct.pack("d", float(value))
    elif value_type == ValueType.FLOAT:
        byte_array[start:end] = struct.pack("f", float(value))
    elif value_type == ValueType.INT:
        byte_array[start:end] = struct.pack("i", int(value))
    elif value_type == ValueType.UINT:
        byte_array[start:end] = struct.pack("I", int(value))
    elif value_type == ValueType.SHORT:
        byte_array[start:end] = struct.pack("h", int(value))
    # NOTE(review): ValueType.BOOLEAN is declared but deliberately not packed
    # here, matching the original behavior -- confirm whether "?" is wanted.
    return byte_array
| StarcoderdataPython |
3377065 | <reponame>KidLanz/bash_basics<filename>littleBuster.py
#!/usr/bin/env python3
# NOTE(review): the original shebang was #!/bin/bash, but this is a Python
# script -- executing it through bash would fail on the code below.
#
# Leftover bash exercise the author was working from, kept as reference:
#echo "Hello! What is your name"
#read name
#echo "Welcome, $name"
# single quotes prevent the expansion of the variable
#echo 'Your name was stored in $name'
# exercise: write a script that asks the user for a
# filename and create an empty file named after it
#echo "Hey $name, What is your filename?"
#read filename
#echo "You want $filename"
#echo "Creating $filename ..."
#touch $filename
#echo "$filename creted"
#ls
#echo "Bye,bye"

# So I Love this song but the chorus is about more than
# 50% of the song; the chorus is in Japanese.
# LITTLE BUSTERS by The Pillows


def print_chorus(times):
    """Print the 4-line chorus (plus a spacer line) *times* times."""
    for _ in range(times):
        print("With the kids sing out the future")
        print("Maybe, kids don't need the masters")
        print("Just waiting for the little Busters")
        print("OH YEAHHHHHH")
        print(" ")


# The song alternates chorus repeats with verses; the repeat count grows
# each round (2, 3, 4). Output is identical to the original copy-pasted loops.
print_chorus(2)
print("YEAH YEAH YEAHHH!!!!")
print("色あせないキッドナップミュージック")
print("手と手が知っている")
print("同じドアをノックしたい")
print("この声が聞こえたら 飛び出して")
print(" ")
print_chorus(3)
print("YEAH YEAH YEAHHH!!!!")
print("はずれやすいティーンエイジ・ギア")
print("転がる日々も")
print("空と海と大地はただ")
print("あるがまま いつまでも逃げないぜ")
print(" ")
print_chorus(4)
print("YEAHHHH YEAHHH YEAHH!!!!....")
154148 | """A utility class to summarize all results in a directory.
"""
__author__ = '<NAME>'
from dataclasses import dataclass, field
from pathlib import Path
import logging
import pandas as pd
from zensols.util.time import time
from zensols.deeplearn import DatasetSplitType
from . import (
ModelResult, DatasetResult, ModelResultManager, ArchivedResult,
PredictionsDataFrameFactory,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelResultReporter(object):
    """Summarize all results in a directory from the output of model execution from
    :class:`~zensols.deeplearn.model.ModelExectuor`.

    The class iterates through the pickled binary output files from the run and
    summarizes in a Pandas dataframe, which is handy for reporting in papers.

    """
    METRIC_DESCRIPTIONS = PredictionsDataFrameFactory.METRIC_DESCRIPTIONS
    """Dictionary of performance metrics column names to human readable
    descriptions.

    """
    result_manager: ModelResultManager = field()
    """Contains the results to report on--and specifically the path to directory
    where the results were persisted.

    """
    include_validation: bool = field(default=True)
    """Whether or not to include validation performance metrics."""

    @property
    def dataframe(self) -> pd.DataFrame:
        """Return the summarized results (see class docs).

        One row per archived result; columns are run metadata followed by
        weighted/micro/macro F1, precision and recall for validation
        (optional) and test, then the split sizes.

        :return: the Pandas dataframe of the results

        """
        rows = []
        cols = 'name file start train_duration converged features '.split()
        if self.include_validation:
            cols.extend('wF1v wPv wRv mF1v mPv mRv MF1v MPv MRv '.split())
        cols.extend(('wF1t wPt wRt mF1t mPt mRt MF1t MPt MRt ' +
                     'train_occurs validation_occurs test_occurs').split())
        dpt_key = 'n_total_data_points'
        arch_res: ArchivedResult
        for fname, arch_res in self.result_manager.results_stash.items():
            res: ModelResult = arch_res.model_result
            train: DatasetResult = res.dataset_result.get(DatasetSplitType.train)
            validate: DatasetResult = res.dataset_result.get(DatasetSplitType.validation)
            test: DatasetResult = res.dataset_result.get(DatasetSplitType.test)
            if train is not None:
                # Format wall-clock training duration as HH:MM:SS.
                dur = train.end_time - train.start_time
                hours, remainder = divmod(dur.seconds, 3600)
                minutes, seconds = divmod(remainder, 60)
                dur = f'{hours:02}:{minutes:02}:{seconds:02}'
            if validate is not None:
                conv_epoch = validate.statistics['n_epoch_converged']
            else:
                conv_epoch = None
            if test is not None:
                # NOTE(review): this branch reads ``dur`` and ``train`` set in
                # the block above -- if a result had a test split but no train
                # split, ``dur`` would be unbound; presumably that never
                # happens in practice, but confirm.
                vm = validate.metrics
                tm = test.metrics
                features = ', '.join(res.decoded_attributes)
                row = [res.name, fname, train.start_time, dur, conv_epoch, features]
                if self.include_validation:
                    row.extend([
                        vm.weighted.f1, vm.weighted.precision, vm.weighted.recall,
                        vm.micro.f1, vm.micro.precision, vm.micro.recall,
                        vm.macro.f1, vm.macro.precision, vm.macro.recall])
                row.extend([
                    tm.weighted.f1, tm.weighted.precision, tm.weighted.recall,
                    tm.micro.f1, tm.micro.precision, tm.micro.recall,
                    tm.macro.f1, tm.macro.precision, tm.macro.recall,
                    train.statistics[dpt_key], validate.statistics[dpt_key],
                    test.statistics[dpt_key]])
                rows.append(row)
                if logger.isEnabledFor(logging.INFO):
                    logger.info('result calculation complete for ' +
                                f'{res.name} ({fname})')
        return pd.DataFrame(rows, columns=cols)

    def dump(self, path: Path):
        """Create the summarized results and write them to the file system.

        :param path: CSV output file path

        """
        with time(f'wrote results summary: {path}'):
            self.dataframe.to_csv(path)
| StarcoderdataPython |
3212804 | <reponame>ming-hai/spleeter<gh_stars>1000+
#!/usr/bin/env python
# coding: utf8
""" This module provides audio data convertion functions. """
# pyright: reportMissingImports=false
# pylint: disable=import-error
import numpy as np
import tensorflow as tf
from ..utils.tensor import from_float32_to_uint8, from_uint8_to_float32
# pylint: enable=import-error
__email__ = "<EMAIL>"
__author__ = "Deezer Research"
__license__ = "MIT License"
def to_n_channels(waveform: tf.Tensor, n_channels: int) -> tf.Tensor:
    """
    Reshape a waveform to exactly *n_channels* channels (tensorflow graph op).

    Extra channels are dropped; if there are too few, the existing channels
    are tiled until *n_channels* are available.

    Parameters:
        waveform (tensorflow.Tensor):
            Waveform to transform.
        n_channels (int):
            Number of channel to reshape waveform in.

    Returns:
        tensorflow.Tensor:
            Reshaped waveform.
    """

    def _truncate():
        return waveform[:, :n_channels]

    def _tile_up():
        return tf.tile(waveform, [1, n_channels])[:, :n_channels]

    has_enough = tf.shape(waveform)[1] >= n_channels
    return tf.cond(has_enough, true_fn=_truncate, false_fn=_tile_up)
def to_stereo(waveform: np.ndarray) -> np.ndarray:
    """
    Force a waveform to exactly two channels: a mono signal is duplicated,
    anything wider than stereo is truncated to its first two channels.

    Parameters:
        waveform (numpy.ndarray):
            a `(N, d)` numpy array.

    Returns:
        numpy.ndarray:
            A stereo `(N, 2)` waveform (the input itself if already stereo).
    """
    n_channels = waveform.shape[1]
    if n_channels == 1:
        return np.tile(waveform, (1, 2))
    if n_channels > 2:
        return waveform[:, :2]
    return waveform
def gain_to_db(tensor: tf.Tensor, espilon: float = 10e-10) -> tf.Tensor:
    """
    Convert from gain (linear amplitude) to decibel in tensorflow.

    Parameters:
        tensor (tensorflow.Tensor):
            Tensor to convert
        espilon (float):
            Floor applied before the log to avoid log(0).
            NOTE(review): the name is a typo for "epsilon", kept as-is so
            keyword callers are not broken.

    Returns:
        tensorflow.Tensor:
            Converted tensor.
    """
    # 20/ln(10) * ln(x) == 20 * log10(x), the standard amplitude-to-dB formula.
    return 20.0 / np.log(10) * tf.math.log(tf.maximum(tensor, espilon))
def db_to_gain(tensor: tf.Tensor) -> tf.Tensor:
    """
    Convert from decibel back to linear gain in tensorflow
    (inverse of :func:`gain_to_db`).

    Parameters:
        tensor (tensorflow.Tensor):
            Tensor to convert

    Returns:
        tensorflow.Tensor:
            Converted tensor.
    """
    exponent = tensor / 20.0
    return tf.pow(10.0, exponent)
def spectrogram_to_db_uint(
    spectrogram: tf.Tensor, db_range: float = 100.0, **kwargs
) -> tf.Tensor:
    """
    Encodes given spectrogram into uint8 using decibel scale.

    The dB values are clamped to the top *db_range* dB below the maximum
    before quantization, so very quiet bins do not waste the uint8 range.

    Parameters:
        spectrogram (tensorflow.Tensor):
            Spectrogram to be encoded as TF float tensor.
        db_range (float):
            Range in decibel for encoding.
        **kwargs:
            Forwarded to ``from_float32_to_uint8``.

    Returns:
        tensorflow.Tensor:
            Encoded decibel spectrogram as `uint8` tensor.
    """
    db_spectrogram: tf.Tensor = gain_to_db(spectrogram)
    max_db_spectrogram: tf.Tensor = tf.reduce_max(db_spectrogram)
    # Clamp everything more than db_range below the peak.
    db_spectrogram: tf.Tensor = tf.maximum(
        db_spectrogram, max_db_spectrogram - db_range
    )
    return from_float32_to_uint8(db_spectrogram, **kwargs)
def db_uint_spectrogram_to_gain(
    db_uint_spectrogram: tf.Tensor, min_db: tf.Tensor, max_db: tf.Tensor
) -> tf.Tensor:
    """
    Decode a uint8 decibel-scale spectrogram back to linear gain
    (inverse of :func:`spectrogram_to_db_uint`).

    Parameters:
        db_uint_spectrogram (tensorflow.Tensor):
            Decibel spectrogram to decode.
        min_db (tensorflow.Tensor):
            Lower bound limit for decoding.
        max_db (tensorflow.Tensor):
            Upper bound limit for decoding.

    Returns:
        tensorflow.Tensor:
            Decoded spectrogram as `float32` tensor.
    """
    decoded_db = from_uint8_to_float32(db_uint_spectrogram, min_db, max_db)
    return db_to_gain(decoded_db)
| StarcoderdataPython |
3334432 | <reponame>noxowl/PDFConcierge<filename>concierge/scraper/mk.py
import os
import bs4.element
import eyed3.id3
import requests
import re
import eyed3
import tempfile
from bs4 import BeautifulSoup
from tqdm import tqdm
from urllib.parse import urlparse, parse_qs
from multiprocessing import Pool
from concierge.logger import get_logger
from concierge.scraper.common import exclude_from_history, title_normalizer
class MKDocument:
    """A downloaded book-digest document staged in a temporary file.

    NOTE(review): ``to_kindle_pdf``/``to_pdf`` are unimplemented stubs, so
    ``convert()`` appends their ``None`` results for any format other than
    'pass-through' -- confirm before relying on those paths.
    """

    def __init__(self, book_id, filename, category, doc, convert_format):
        """Stage the raw document bytes and normalize the server filename.

        (Original note: this class will be refactored.)

        :param book_id: digest-site identifier of the book
        :param filename: server-supplied filename (mis-encoded, see below)
        :param category: category label the book was scraped from
        :param doc: raw document bytes
        :param convert_format: 'pass-through', 'kindle', 'a4', or other
        """
        self.type = 'book'
        self.id = book_id
        self.category = category
        self.format = convert_format
        # The server labels a CP949 (Korean) filename as latin-1; round-trip
        # the bytes to recover the real name.
        self.filename, self.file_extension = os.path.splitext(
            os.path.basename(filename.encode('iso-8859-1').decode('cp949', 'replace')))
        self.filename = title_normalizer(self.filename)
        self.title = self.filename
        # delete=False: the temp path outlives this handle.
        # NOTE(review): nothing here removes the file -- presumably the
        # consumer of ``path`` does; confirm.
        with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tmp:
            tmp.write(doc)
            self.temp_path = tmp.name

    def to_kindle_pdf(self):
        """Stub: convert to a Kindle-sized PDF (not implemented, returns None)."""
        pass

    def to_pdf(self):
        """Stub: convert to an A4 PDF (not implemented, returns None)."""
        pass

    def pass_through(self):
        """Return the staged file as-is, wrapped in the result dict."""
        return self._result(self.temp_path)

    def _result(self, filepath):
        """Build the result dict consumed by the scraper pipeline."""
        return {'path': filepath, 'category': self.category,
                'filename': self.filename, 'file_ext': self.file_extension}

    def convert(self) -> list:
        """Return a list of result dicts for the requested output format(s)."""
        result = []
        if self.format == 'pass-through':
            result.append(self.pass_through())
        elif self.format == 'kindle':
            result.append(self.to_kindle_pdf())
        elif self.format == 'a4':
            result.append(self.to_pdf())
        else:
            # Any other value means "both" formats.
            result.append(self.to_pdf())
            result.append(self.to_kindle_pdf())
        return result
class MKAudiobook:
    """A downloaded audiobook MP3 staged in a temporary file, tagged with ID3
    metadata on conversion."""

    def __init__(self, audiobook_id, metadata, category, audio):
        """Stage the raw MP3 bytes and keep its metadata.

        (Original note: this class will be refactored.)

        :param audiobook_id: digest-site identifier of the audiobook
        :param metadata: dict with 'title', 'author', 'publisher' and
            optionally 'thumb' (cover image bytes)
        :param category: category label the audiobook was scraped from
        :param audio: raw MP3 bytes
        """
        self.type = 'audiobook'
        self.id = audiobook_id
        self.category = category
        self.title = metadata['title']
        self.author = metadata['author']
        self.publisher = metadata['publisher']
        if metadata['thumb']:
            self.thumb = metadata['thumb']
        else:
            self.thumb = None
        # delete=False: the temp path outlives this handle (cleanup is the
        # consumer's responsibility).
        with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tmp:
            tmp.write(audio)
            self.temp_path = tmp.name

    def _set_id3(self):
        """Write ID3v2 tags (title/artist/album/genre/comment, cover art)."""
        id3_tag = eyed3.load(self.temp_path)
        id3_tag.initTag(eyed3.id3.tag.ID3_V2)
        id3_tag.tag.title = self.title
        id3_tag.tag.artist = 'BOOKCOSMOS'
        id3_tag.tag.album = 'BOOKCOSMOS'
        id3_tag.tag.genre = 'Audiobook'
        id3_tag.tag.comments.set(
            '{0} - {1}\nMK_Bookdigest ID: {2}'.format(self.author, self.publisher, self.id))
        if self.thumb:
            id3_tag.tag.images.set(eyed3.id3.frames.ImageFrame.FRONT_COVER, self.thumb, 'image/gif')
        id3_tag.tag.save()

    def _result(self, filepath):
        """Build the result dict consumed by the scraper pipeline."""
        return {'path': filepath, 'category': self.category, 'filename': self.title, 'file_ext': '.mp3'}

    def convert(self) -> dict:
        """Tag the staged MP3 and return its result dict."""
        self._set_id3()
        return self._result(self.temp_path)
class MkScraper:
    """Scraper for the MK (Maeil Business) book-digest site: downloads book
    documents and audiobooks, skipping anything already in *history*.

    Logs in during construction; ``execute(mode)`` drives the whole run.
    """

    def __init__(self, mk_id: str, mk_pw: str, pdf_format: str, history: dict):
        """Store credentials/URLs and perform the multi-phase login.

        (Original note: this class will be refactored.)

        :param mk_id: MK account id
        :param mk_pw: MK account password
        :param pdf_format: output format for books ('pass-through' downloads
            the server PDF; anything else downloads the DOC for conversion)
        :param history: dict with 'book' and 'audiobook' lists of already
            downloaded ids
        """
        self.logger = get_logger(__name__)
        self.id = mk_id
        self.pw = mk_pw
        self.pdf_format = pdf_format
        self.history = history
        self.result = {'book': [], 'audiobook': []}
        # Site endpoints.
        self._mk_digest_url = 'http://digest.mk.co.kr'
        self.mk_digest_index = self._mk_digest_url + '/Main/Index.asp'
        self.mk_digest_new_books = self._mk_digest_url + '/sub/digest/newbooklist.asp'
        self.mk_digest_books_index = self._mk_digest_url + '/sub/digest/index.asp'
        self.mk_digest_books = self._mk_digest_url + '/sub/digest/classlist.asp'
        self.mk_digest_download = self._mk_digest_url + '/Sub/Digest/DownLoad.asp'
        self.mk_digest_audiobook_index = self._mk_digest_url + '/sub/audio/index.asp'
        self.mk_digest_audiobooks = self._mk_digest_url + '/sub/audio/classlist.asp'
        self.mk_digest_audiobook_download = 'https://www.bcaudio.co.kr/audio/{0}.mp3'
        self.mk_digest_book_detail = self._mk_digest_url + '/Sub/Digest/GuideBook.asp?book_sno={0}'
        self.mk_digest_book_thumb = self._mk_digest_url + '/book_img/{0}.gif'
        self.mk_digest_login_url = self._mk_digest_url + '/loginAction.asp'
        self.mk_login_phase_one_url = 'https://member.mk.co.kr/member_login_process.php'
        self.mk_login_phase_two_url = 'https://member.mk.co.kr/mem/v1/action.php'
        # NOTE(review): initialized as a Session but replaced by response
        # cookie jars during login -- only used as a cookie holder.
        self.cookies = requests.Session()
        self._login()

    def _login(self):
        """Run the three-phase login flow against member.mk.co.kr + digest."""
        self.__login_phase_one()
        self.__login_phase_two()
        self.__login_phase_three()

    def __login_phase_one(self):
        # Phase one: legacy member-login endpoint; response is discarded.
        # NOTE(review): '<PASSWORD>' below is a dataset redaction artifact --
        # this should presumably be ``self.pw``; restore before use.
        r = requests.post(self.mk_login_phase_one_url,
                          data={'user_id': self.id, 'password': <PASSWORD>,
                                'successUrl': self.mk_digest_login_url})

    def __login_phase_two(self):
        # Phase two: current member endpoint; keep its cookies.
        r = requests.post(self.mk_login_phase_two_url,
                          data={'id': self.id, 'pw': self.pw, 'c': 'login_action',
                                'successUrl': self.mk_digest_login_url})
        self.cookies = r.cookies

    def __login_phase_three(self):
        # Phase three: visit the digest index and login action, accumulating
        # cookies at each step.
        r = requests.get(self.mk_digest_index, cookies=self.cookies)
        r.cookies.update(self.cookies)
        self.cookies = r.cookies
        r = requests.get(self.mk_digest_login_url, cookies=self.cookies)
        r.cookies.update(self.cookies)
        self.cookies = r.cookies

    def _parse_book_metadata(self, content: bytes) -> dict:
        """Scrape title/author/publisher from a book detail page.

        :param content: raw HTML of the detail page
        :return: dict with 'title', 'author', 'publisher', 'thumb' (None)
        """
        raw_book_info = BeautifulSoup(content, features='html.parser') \
            .find('div', style=re.compile(r'width:420px;height:40px;float:left;'))
        # Second inner div holds "저자 : X / 출판사 : Y".
        raw_meta = " ".join(raw_book_info.find_all('div')[1].text.split()).split('/')
        title = raw_book_info.find('span').text
        title = title_normalizer(title)
        book_metadata = {
            'title': title,
            'author': raw_meta[0].replace('저자 :', '').strip(),
            'publisher': raw_meta[1].replace('출판사 :', '').strip(),
            'thumb': None
        }
        return book_metadata

    def _digest_book_scrap(self, url) -> list:
        """Collect every book id from *url*, following its pagination."""
        contents = self._fetch_book_page(url)
        try:
            # Last pagination link carries the highest page number.
            last_page = int(
                parse_qs(
                    urlparse(contents.find_all('a')[-1].get('href')).query
                )['page'][0])
        except KeyError:
            last_page = 1
        books = self._extract_book_data(contents)
        if last_page > 1:
            for i in range(1, last_page):
                _u = requests.PreparedRequest()
                _u.prepare_url(url, {'Type': 'T', 'page': i + 1})
                contents = self._fetch_book_page(_u.url)
                books += self._extract_book_data(contents)
        return books

    def _digest_all_book_scrap(self) -> dict:
        """Scrape every category; returns {category name: [book ids]}."""
        self.logger.info('scrap all books...')
        books = {}
        categories = self._fetch_book_categories()
        for name, code in categories.items():
            self.logger.info('scrap from {0}...'.format(name))
            books.update({
                name: self._digest_book_scrap('{0}?code={1}'.format(self.mk_digest_books, code))
            })
            self.logger.info('{0} - {1}'.format(name, len(books[name])))
        return books

    def _fetch_book_categories(self) -> dict:
        """Return {category display name: category code} from the side menu."""
        self.logger.info('fetch categories...')
        categories = {}
        r = requests.get(self.mk_digest_books_index, cookies=self.cookies)
        raw_categories = BeautifulSoup(r.content, features='html.parser') \
            .find('div', style=re.compile(r"background: url\(/images/sub/digest_leftmntitle_02.gif\) repeat-y")) \
            .find_all('a')
        for c in tqdm(raw_categories):
            try:
                # '/' in names would break paths later, swap for '・'.
                categories[c.find('span').contents[0].strip().replace('/', '・')] = \
                    parse_qs(urlparse(c.get('href')).query)['code'][0]
            except KeyError:
                continue
        return categories

    def _fetch_book_page(self, url):
        """Fetch *url* and return its parsed list body ('bodybox' div)."""
        r = requests.get(url, cookies=self.cookies)
        parse = BeautifulSoup(r.content, features='html.parser').find('div', class_='bodybox')
        return parse

    def _extract_book_data(self, contents: bs4.element.Tag):
        """Pull book_sno ids out of a parsed book-list page."""
        books = []
        raw_books = contents.find_all('span', class_='booktitle')
        for raw in tqdm(raw_books):
            books.append(parse_qs(urlparse(raw.parent.get('href')).query)['book_sno'][0])
        return books

    def _download_book(self, category: str, book_id: str, convert_format: str) -> MKDocument:
        """Download one book (PDF for 'pass-through', DOC otherwise)."""
        self.logger.info('download {0} - {1}'.format(category, book_id))
        if self.pdf_format == 'pass-through':
            r = requests.post(self.mk_digest_download,
                              data={'book_sno': book_id, 'book_type': 'pdf'}, cookies=self.cookies,
                              headers={'referer': self.mk_digest_new_books})
        else:
            r = requests.post(self.mk_digest_download,
                              data={'book_sno': book_id, 'book_type': 'doc'}, cookies=self.cookies,
                              headers={'referer': self.mk_digest_new_books})
        return MKDocument(book_id=book_id,
                          filename=re.findall("filename=(.+)", r.headers.get('Content-Disposition'))[0],
                          category=category, doc=r.content, convert_format=convert_format)

    def _digest_new_audiobook_scrap(self) -> dict:
        """Scrape the audiobook index page for newly released titles."""
        contents = self._fetch_new_audiobook_page(self.mk_digest_audiobook_index)
        new_audiobooks = self._extract_audiobook_id(contents)
        return {'신간': new_audiobooks}

    def _digest_all_audiobook_scrap(self) -> dict:
        """Scrape every audiobook category; {category: [ids]}.

        NOTE(review): this reuses ``_digest_book_scrap`` (the book-list
        parser) for audiobook category pages -- verify the markup matches.
        """
        self.logger.info('scrap all audiobooks...')
        audiobooks = {}
        categories = self._fetch_audiobook_categories()
        for name, code in tqdm(categories.items()):
            self.logger.info('scrap from {0}...'.format(name))
            audiobooks.update({
                name: self._digest_book_scrap('{0}?gubun={1}'.format(self.mk_digest_audiobooks, code))
            })
        return audiobooks

    def _fetch_audiobook_categories(self) -> dict:
        """Return {category display name: gubun code} from the side menu."""
        self.logger.info('fetch categories...')
        categories = {}
        r = requests.get(self.mk_digest_audiobook_index, cookies=self.cookies)
        raw_categories = BeautifulSoup(r.content, features='html.parser') \
            .find('div', style=re.compile(r"background: url\(/images/sub/digest_leftmntitle_02.gif\) repeat-y")) \
            .find_all('a')
        for c in tqdm(raw_categories):
            try:
                categories[c.find('span').contents[0].strip().replace('/', '・')] = \
                    parse_qs(urlparse(c.get('href')).query)['gubun'][0]
            except KeyError:
                continue
        return categories

    def _fetch_new_audiobook_page(self, url):
        """Fetch *url* and return all cover-image nodes ('bookimg')."""
        r = requests.get(url, cookies=self.cookies)
        parse = BeautifulSoup(r.content, features='html.parser') \
            .find_all('img', class_='bookimg')
        return parse

    def _extract_audiobook_id(self, contents: bs4.element.ResultSet):
        """Pull book_sno ids from cover images wrapped in links."""
        audiobooks = []
        for c in tqdm(contents):
            if c.parent.name == 'a':
                audiobooks.append(
                    parse_qs(
                        urlparse(c.parent.get('href')).query
                    )['book_sno'][0])
        return audiobooks

    def _download_audiobook(self, category: str, audiobook_id: str) -> MKAudiobook:
        """Download one audiobook: metadata, optional cover, then the MP3."""
        self.logger.info('download start for {0} - {1}'.format(category, audiobook_id))
        raw_info = requests.get(self.mk_digest_book_detail.format(audiobook_id))
        book_metadata = self._parse_book_metadata(raw_info.content)
        self.logger.info('download metadata for {0} - {1} completed'.format(category, audiobook_id))
        thumb = requests.get(self.mk_digest_book_thumb.format(audiobook_id))
        if thumb.status_code == 200:
            book_metadata['thumb'] = thumb.content
            self.logger.info('download thumbnail for {0} - {1} completed'.format(category, audiobook_id))
        self.logger.info('download audio for {0} - {1}'.format(category, audiobook_id))
        audio = requests.get(self.mk_digest_audiobook_download.format(audiobook_id),
                             headers={'referer': self._mk_digest_url})
        self.logger.info('download done for {0} - {1}'.format(category, audiobook_id))
        return MKAudiobook(audiobook_id=audiobook_id, metadata=book_metadata, audio=audio.content, category=category)

    def _push_to_result(self, payload):
        """Pool callback (runs in the parent process): convert the payload
        and record it in both the result and the download history."""
        self.logger.info('push {0} - {1} to result'.format(payload.type, payload.title))
        result = payload.convert()
        # Books convert() to a list of outputs, audiobooks to a single dict.
        if isinstance(result, list):
            self.result[payload.type] += result
        else:
            self.result[payload.type].append(result)
        self.history[payload.type].append(payload.id)

    def _execute_download_books(self, mode):
        """Download every not-yet-seen book with a 3-worker process pool."""
        pool = Pool(processes=3)
        if mode == 'fetch_new':
            book_task = {'신간': self._digest_book_scrap(self.mk_digest_new_books)}
        else:
            book_task = self._digest_all_book_scrap()
        for category, task in book_task.items():
            filtered_task = exclude_from_history(task, self.history['book'])
            if filtered_task:
                self.logger.info('start fetch book from category {0}...'.format(category))
                for t in filtered_task:
                    self.logger.info('start download book {0}...'.format(t))
                    pool.apply_async(self._download_book, (category, t, self.pdf_format), callback=self._push_to_result)
        pool.close()
        pool.join()

    def _execute_download_audiobooks(self, mode):
        """Download every not-yet-seen audiobook with a 3-worker pool."""
        pool = Pool(processes=3)
        if mode == 'fetch_new':
            audiobook_task = self._digest_new_audiobook_scrap()
        else:
            audiobook_task = self._digest_all_audiobook_scrap()
        for category, task in audiobook_task.items():
            filtered_task = exclude_from_history(task, self.history['audiobook'])
            if filtered_task:
                self.logger.info('start fetch audiobook from category {0}...'.format(category))
                for t in filtered_task:
                    self.logger.info('start download audiobook {0}...'.format(t))
                    pool.apply_async(self._download_audiobook, (category, t), callback=self._push_to_result)
        pool.close()
        pool.join()

    def execute(self, mode) -> dict:
        """Run the full scrape ('fetch_new' or everything); return results."""
        self._execute_download_books(mode)
        self._execute_download_audiobooks(mode)
        return self.result
| StarcoderdataPython |
1724036 | <filename>devconf/ast/config.py
import ast.mixins.node
class Content(ast.mixins.node.Node):
    """AST node representing the body of a device configuration."""
    def __init__(self):
        super().__init__()
class DeviceConfiguration(ast.mixins.node.Node):
    """AST node for a device configuration; owns exactly one Content child."""

    def __init__(self):
        super().__init__()
        # Set via set_content(); None until then.
        self._content = None

    def get_content(self) -> Content:
        """Return the attached Content.

        NOTE(review): assert-based validation is stripped under ``python -O``;
        calling this before set_content() then returns None silently.
        """
        assert isinstance(self._content, Content)
        return self._content

    def set_content(self, content: Content) -> None:
        """Attach *content* and register it as a child node."""
        assert isinstance(content, Content)
        self._content = content
        self.add_child(content)
156846 | import enum
import random
from typing import Dict, Iterable, List, Tuple, Optional, NamedTuple
from emoji import descriptions, spec_parser
from emoji.core import Emoji, Gender, Modifier
from syllables import count_syllables
def _load_resources() -> Tuple[Dict[Emoji, str], List[Modifier]]:
    """Load the emoji spec and pair each emoji with its text description.

    Emojis whose description could not be loaded are dropped from the map.
    """
    all_emojis, modifier_iter = spec_parser.load_emoji_and_modifiers()
    described = list(descriptions.load_descriptions_for_emojis(all_emojis))
    mapping = {
        emoji: desc
        for emoji, desc in zip(all_emojis, described)
        if desc is not None
    }
    return mapping, list(modifier_iter)
def _map_description_to_emoji_and_syllable_count(
        emoji_desc_pairs: Iterable[Tuple[Emoji, str]]) -> Dict[int, List[Tuple[Emoji, str]]]:
    """Takes a list of [Emoji, description] pairs and maps them to a dict of format:

    [syllable count] --> A list of all [emoji, description] pairs where the description has that
    syllable count.

    A description can have several plausible syllable counts, so one pair may
    appear under multiple keys.
    """
    # defaultdict removes the get/append/re-assign dance of the original;
    # a plain dict is returned so the result type is unchanged for callers.
    from collections import defaultdict
    buckets: Dict[int, List[Tuple[Emoji, str]]] = defaultdict(list)
    for emoji, desc in emoji_desc_pairs:
        for syllable_count in count_syllables(desc):
            buckets[syllable_count].append((emoji, desc))
    return dict(buckets)
# Module-level resource tables, built once at import time.
_emojis_to_descriptions, modifiers = _load_resources()
# _data: syllable count -> list of (Emoji, description) pairs with that count
_data = _map_description_to_emoji_and_syllable_count(_emojis_to_descriptions.items())
def _make_line(syllable_count: int) -> Tuple[List[Emoji], List[str]]:
    """Make a Haiku line with the given number of syllables.

    Returns a Tuple of (List[Emoji], List[Description]).
    """
    syllables_per_emoji: List[int] = []
    # This logic is complicated, but here's what it's doing:
    # - On each iteration, filter out entries that have a too-high syllable count
    # - Choose a 'number of syllables' based on what's left. The 'number of syllables' that we
    #   choose is weighted such that the end result is "Each emoji has a chance of being selected
    #   proportional to its syllable count". That is, emojis with longer descriptions are given
    #   preference.
    # The rationale for the weighting is that we want the longer emojis to still be displayed with
    # some regularity, and so we give them a helping hand by doing this.
    while sum(syllables_per_emoji) < syllable_count:
        # Pairs of (syllable count, list of emoji/desc pairs), restricted to
        # counts that still fit in the remaining syllable budget of the line.
        allowable_syllables = sorted(
            (k, v) for k, v in _data.items()
            if k <= syllable_count - sum(syllables_per_emoji))
        keys, _ = zip(*allowable_syllables)
        # weight each count by (count * number of candidates with that count)
        elements = random.choices(
            keys, weights=[key * len(val) for key, val in allowable_syllables])
        syllables_per_emoji.append(*elements)
    # Choose emojis for the given syllable count
    objs = list(random.choice(_data[syll]) for syll in syllables_per_emoji)
    # You can apparently use zip(*objs) for this but it's (a) inscrutable (b) confusing to Mypy
    return list(emoji for emoji, _ in objs), list(desc for _, desc in objs)
class RenderGender(enum.Enum):
    """Caller preference for how gendered emoji variants are rendered."""
    DONT_CARE = enum.auto()
    FEMININE = enum.auto()
    MASCULINE = enum.auto()
def _choose_modifier(emoji: Emoji, force_modifier: Optional[str]) -> Optional[str]:
    """Pick a skin-tone modifier for *emoji*, or None if it takes none.

    A truthy *force_modifier* always wins; otherwise a modifier is drawn
    at random from the module-level list.
    """
    if emoji.supports_modification:
        return force_modifier if force_modifier else random.choice(modifiers)
    return None
def _choose_gender(emoji: Emoji, force_gender: RenderGender) -> Gender:
    """Pick the gender presentation for *emoji*.

    Returns Gender.NEUTRAL for emoji without gender variants.  Otherwise
    honors *force_gender*; DONT_CARE picks masculine or feminine at random.
    The neutral form is deliberately skipped for gender-capable emoji:
    the gendered variants were added to Unicode partly because the neutral
    glyphs were heavily gender-coded.
    """
    if not emoji.supports_gender:
        return Gender.NEUTRAL
    if force_gender == RenderGender.DONT_CARE:
        # Don't use neutral gender, even if available on an emoji (see above).
        return random.choice([Gender.MASCULINE, Gender.FEMININE])
    elif force_gender == RenderGender.FEMININE:
        return Gender.FEMININE
    elif force_gender == RenderGender.MASCULINE:
        return Gender.MASCULINE
    else:
        # Unreachable for valid RenderGender values; raise instead of the
        # original `assert False`, which disappears under `python -O`.
        raise ValueError('unknown RenderGender: {!r}'.format(force_gender))
def _render_emoji(emoji: Emoji, force_gender: RenderGender, force_modifier: Optional[str]) -> str:
    """Render an Emoji into unicode, applying the skin-tone modifier and
    gender selected according to the arguments."""
    return emoji.char(
        modifier=_choose_modifier(emoji, force_modifier),
        gender=_choose_gender(emoji, force_gender),
    )
class Haiku(NamedTuple):
    """Three haiku lines, kept both as Emoji objects and as descriptions."""
    emoji: Iterable[List[Emoji]]
    descriptions: Iterable[List[str]]

    def format(self, force_gender: RenderGender, force_modifier: Optional[str]) -> Tuple[str, str]:
        """Formats a Haiku into a (emoji text, description text) pair,
        with the three lines newline-joined in each string."""
        rendered = []
        for line in self.emoji:
            rendered.append(
                ' '.join(_render_emoji(e, force_gender, force_modifier) for e in line))
        desc_lines = [' '.join(line) for line in self.descriptions]
        return '\n'.join(rendered), '\n'.join(desc_lines)
def formatted_haiku(
        force_gender: RenderGender = RenderGender.DONT_CARE,
        force_modifier: Optional[str] = None) -> Tuple[str, str]:
    """Generates a 5-7-5 Haiku. Returns a tuple, where:

    - First element is an emoji representation; lines are newline-separated.
    - Second element is the matching textual representation.
    """
    lines = [_make_line(n) for n in (5, 7, 5)]
    # Each entry of `lines` is (List[Emoji], List[str]); regroup them into
    # one sequence of emoji lines and one sequence of description lines.
    emoji_lines = [emoji for emoji, _ in lines]
    desc_lines = [descs for _, descs in lines]
    return Haiku(emoji_lines, desc_lines).format(force_gender, force_modifier)
| StarcoderdataPython |
3337886 | <gh_stars>0
def informar(*args):
    """Show the price table for the "adicional" selected in the checkbox grid.

    Positional args (kept as *args for backward compatibility with callers):
        args[0]: window that displays the price table (telaExibiPrecoAdicionais)
        args[1]: window holding checkBox1..checkBox39 (telaAdicionais)
        args[2]: open database cursor
        args[3]: QtWidgets module (used to build table items)
        args[4]: helper object that clears all checkboxes afterwards
    """
    telaExibiPrecoAdicionais = args[0]
    telaAdicionais = args[1]
    cursor = args[2]
    QtWidgets = args[3]
    setar_checkBox_false = args[4]
    telaExibiPrecoAdicionais.show()
    # Find the ticked checkbox. Like the original if-chain (which had no
    # elif/break), the highest-numbered ticked box wins; 0 means none ticked.
    selected_id = 0
    for n in range(1, 40):
        if getattr(telaAdicionais, 'checkBox{0}'.format(n)).isChecked():
            selected_id = str(n)
    # Fetch the matching row from each pizza-size table, then interleave the
    # result sets row by row (broto, seis, oito, dez), as the original zip did.
    # NOTE(review): values are internal ints/strings, but parameterized queries
    # would still be safer than string interpolation if the driver allows it.
    resultados = []
    for tabela in ('adcBroto', 'adcSeis', 'adcOito', 'adcDez'):
        cursor.execute("select * from %s where id = %s" % (tabela, selected_id))
        resultados.append(cursor.fetchall())
    listaAdc = []
    for linhas in zip(*resultados):
        listaAdc.extend(linhas)
    # Populate the 4-column price table with one widget item per cell.
    telaExibiPrecoAdicionais.tableWidget.setRowCount(len(listaAdc))
    telaExibiPrecoAdicionais.tableWidget.setColumnCount(4)
    for i, linha in enumerate(listaAdc):
        for j in range(4):
            telaExibiPrecoAdicionais.tableWidget.setItem(
                i, j, QtWidgets.QTableWidgetItem(str(linha[j])))
    # Reset every checkbox so the next interaction starts clean.
    setar_checkBox_false.checkBox_tela_adicionais_com(telaAdicionais)
    setar_checkBox_false.checkBox_tela_adicionais_sem(telaAdicionais)
1690229 | <reponame>sean-hayes/zoom
"""
content app
"""
from zoom.apps import App
from zoom.page import page
from zoom.tools import load_content, home
import traceback
import logging
class CustomApp(App):
    """Content app: serves site pages and the content-management UI.

    Routing works by rewriting request.path/request.route before delegating
    to the base App dispatcher.
    """
    def __call__(self, request):
        logger = logging.getLogger(__name__)
        logger.debug('called content app with (%r) (%r)', request.route, request.path)
        if request.path == '/':
            # this is a request to view the site index page
            request.path = '/show'
            request.route = request.path.split('/')
            return App.__call__(self, request)
        elif request.path.endswith('.html'):
            # this is a request to view a site page
            request.path = request.path + '/show'
            request.route = request.path.split('/')
            return App.__call__(self, request)
        elif request.route and request.route[0] == 'content':
            # this is a request to manage site content
            self.menu = ['Overview', 'Pages', 'Snippets']
            return App.__call__(self, request)
        # NOTE(review): other paths fall through to an implicit None --
        # confirm the surrounding framework treats that as "not handled".
# module-level singleton picked up by the zoom app loader
app = CustomApp()
| StarcoderdataPython |
144265 | <filename>stdlib/copy_qs.py
import copy
# "Assignment statements in Python do not copy objects" - PSF
# typical interview question, assignment/copy/deepcopy
d = {'a': [0, 1]}
#d_copy = copy.copy(d) # shallow copy, same as: d_copy = d.copy()
d_copy = copy.deepcopy(d)
d_copy['a'].append(2)
print(d, d_copy)
| StarcoderdataPython |
3292164 | #!/bin/env python
import os
from xml.dom import minidom
class Res(object):
    """Base record for a single resource entry parsed from the manifest."""

    # Overridden by subclasses with the concrete resource kind and the
    # C++ type used for the generated extern declaration.
    mytype = ''
    idtype = ''

    def __init__(self, resid='', prefix='', path=''):
        """Store the resource id, its id prefix, and the asset path."""
        self.resid = resid
        self.prefix = prefix
        self.path = path

    def __str__(self):
        """The fully prefixed identifier used in the generated code."""
        return '%s%s' % (self.prefix, self.resid)
class ImageRes(Res):
    # Image resources are declared as Sexy::Image* in the generated C++.
    mytype = 'Image'
    idtype = 'Image*'
class FontRes(Res):
    # Font resources are declared as Sexy::Font* in the generated C++.
    mytype = 'Font'
    idtype = 'Font*'
class SoundRes(Res):
    # Sound resources are plain int handles in the generated C++.
    mytype = 'Sound'
    idtype = 'int'
class ResGroup(object):
    """A named group of resources parsed from one <Resources> XML node."""

    def __init__(self, resid=''):
        self.resid = resid
        self.images = []
        self.fonts = []
        self.sounds = []

    def getAll(self):
        """Every resource in the group: fonts first, then images, then sounds."""
        return self.fonts + self.images + self.sounds
class ResGen(object):
    """Parses resource-manifest XML files and generates the C++ Res.h/Res.cpp pair.

    NOTE(review): this class is Python 2 (`file()`, `print >>` statements) and
    will not run under Python 3 without porting.
    """
    def __init__(self, options, fpath='resource.xml'):
        # options: parsed optparse values (only .verbose is read here)
        self.options = options
        self.fpath = fpath
        self.groups = []      # ResGroup per <Resources> node, in parse order
        self.allres = []      # every Res across all groups, in append order
        self.allresid = {}    # resid -> Res, for duplicate detection
        self.idprefix = ''
    def parse(self, fpath=None):
        """Parse one manifest file and accumulate its resource groups."""
        if fpath is not None:
            self.fpath = fpath
        dom = minidom.parse(self.fpath)
        root = dom.getElementsByTagName('ResourceManifest')
        nodes = root[0].getElementsByTagName('Resources')
        for node in nodes:
            group = self.parseResource(node)
            if self.options.verbose:
                print("group: " + group.resid)
            self.groups.append(group)
    def appendRes(self, res):
        """Register *res* globally; exit with an error on conflicting duplicates."""
        if res.resid in self.allresid:
            # Only accept duplicates if path is same too
            if res.path != self.allresid[res.resid].path:
                import sys
                print >> sys.stderr, "ERROR: Resources must have unique path"
                print >> sys.stderr, " new\t", res.resid, res.path
                print >> sys.stderr, " old\t", res.resid, self.allresid[res.resid].path
                sys.exit(1)
        else:
            self.allres.append(res)
            self.allresid[res.resid] = res
    def parseResource(self, node):
        """Build a ResGroup from one <Resources> node (Font/Image/Sound children)."""
        idprefix = ''
        group = ResGroup(node.getAttribute('id'))
        for subnode in node.childNodes:
            if subnode.nodeType != minidom.Node.ELEMENT_NODE:
                continue
            if subnode.tagName == 'SetDefaults':
                # SetDefaults changes the id prefix for subsequent entries
                if subnode.hasAttribute('idprefix'):
                    idprefix = subnode.getAttribute('idprefix')
            elif subnode.tagName == 'Font':
                resid = subnode.getAttribute('id')
                path = subnode.getAttribute('path')
                res = FontRes(resid, idprefix, path)
                group.fonts.append(res)
                self.appendRes(res)
            elif subnode.tagName == 'Image':
                resid = subnode.getAttribute('id')
                path = subnode.getAttribute('path')
                res = ImageRes(resid, idprefix, path)
                group.images.append(res)
                self.appendRes(res)
            elif subnode.tagName == 'Sound':
                resid = subnode.getAttribute('id')
                path = subnode.getAttribute('path')
                res = SoundRes(resid, idprefix, path)
                group.sounds.append(res)
                self.appendRes(res)
        # keep generated declarations in a stable, sorted order
        group.fonts = sorted(group.fonts, key=lambda r: r.resid)
        group.images = sorted(group.images, key=lambda r: r.resid)
        group.sounds = sorted(group.sounds, key=lambda r: r.resid)
        return group
    # include-guard template for the generated header
    header = """#ifndef __%s__ \n#define __%s__\n\n"""
    def writeHeader(self, name='Res', namespace='Sexy'):
        """Emit <name>.h: forward decls, per-group externs, and the id enum."""
        if self.options.verbose:
            print("writeHeader('%(name)s', '%(namespace)s')" % vars())
        fp = file(name + '.h', 'wb')
        guard = name.capitalize() + '_H'
        fp.write(ResGen.header % (guard, guard))
        fp.write("""\
namespace Sexy
{
class ResourceManager;
class Image;
class Font;
}
""")
        fp.write("""
Sexy::Image* LoadImageById(Sexy::ResourceManager *theManager, int theId);
void ReplaceImageById(Sexy::ResourceManager *theManager, int theId, Sexy::Image *theImage);
bool ExtractResourcesByName(Sexy::ResourceManager *theManager, const char *theName);
""")
        for group in self.groups:
            self.writeGroupHeader(fp, group);
        self.writeGroupId(fp)
        fp.write("""
#endif
""")
        fp.close()
    def writeGroupHeader(self, fp, group):
        """Emit the extern declarations and extractor prototype for one group."""
        fp.write('// %s Resources\n' % group.resid)
        fp.write('bool Extract%sResources(Sexy::ResourceManager *theMgr);\n' % group.resid)
        allres = group.getAll()
        for res in allres:
            if res.idtype == 'int':
                fp.write('extern %s %s;\n' % (res.idtype, res))
            else:
                fp.write('extern Sexy::%s %s;\n' % (res.idtype, res))
        if allres:
            fp.write('\n')
    def writeGroupId(self, fp):
        """Emit the ResourceId enum plus the lookup-function prototypes."""
        fp.write('enum ResourceId\n')
        fp.write('{\n')
        for res in self.allres:
            fp.write('\t%s_ID,\n' % res)
        fp.write('\tRESOURCE_ID_MAX\n')
        fp.write('};\n')
        fp.write("""
Sexy::Image* GetImageById(int theId);
Sexy::Font* GetFontById(int theId);
int GetSoundById(int theId);
Sexy::Image*& GetImageRefById(int theId);
Sexy::Font*& GetFontRefById(int theId);
int& GetSoundRefById(int theId);
ResourceId GetIdByImage(Sexy::Image *theImage);
ResourceId GetIdByFont(Sexy::Font *theFont);
ResourceId GetIdBySound(int theSound);
const char* GetStringIdById(int theId);
ResourceId GetIdByStringId(const char *theStringId);\n""")
    def writeCPP(self, name='Res', namespace='Sexy'):
        """Emit <name>.cpp by delegating to the writeCPP* helpers below."""
        fp = file(name + '.cpp', 'wb')
        fp.write('#include "%s.h"\n' % os.path.basename(name))
        fp.write('#include "ResourceManager.h"\n')
        fp.write('\n')
        fp.write('using namespace Sexy;\n')
        if namespace and namespace != 'Sexy':
            fp.write('using namespace %s;\n' % namespace)
        fp.write('\n')
        fp.write('static bool gNeedRecalcVariableToIdMap = false;\n\n');
        self.writeCPPERBN(fp, namespace)
        self.writeCPPGIBSI(fp, namespace)
        for group in self.groups:
            self.writeCPPGroup(fp, group, namespace)
        self.writeCPPResourceID(fp, namespace)
        self.writeCPPGetResources(fp, namespace)
        fp.close()
    # ERBN => ExtractResourceByName
    def writeCPPERBN(self, fp, namespace):
        """Emit ExtractResourcesByName: dispatch by group name string."""
        d = {}
        if namespace:
            d['ns'] = namespace + '::'
        else:
            d['ns'] = ''
        fp.write("""\
bool %(ns)sExtractResourcesByName(ResourceManager *theManager, const char *theName)
{
""" % d)
        for group in self.groups:
            d['resid'] = group.resid
            fp.write("""\
if (strcmp(theName,"%(resid)s")==0) return Extract%(resid)sResources(theManager);
""" % d)
        fp.write("""\
return false;
}
""")
    # GIBSI => GetIdByStringId
    def writeCPPGIBSI(self, fp, namespace):
        """Emit GetIdByStringId: lazily-built string->id map lookup."""
        d = {}
        if namespace:
            d['ns'] = namespace + '::'
        else:
            d['ns'] = ''
        fp.write("""\
%(ns)sResourceId %(ns)sGetIdByStringId(const char *theStringId)
{
typedef std::map<std::string,int> MyMap;
static MyMap aMap;
if (aMap.empty())
{
for (int i = 0; i < RESOURCE_ID_MAX; i++)
aMap[GetStringIdById(i)] = i;
}
MyMap::iterator anItr = aMap.find(theStringId);
if (anItr == aMap.end())
return RESOURCE_ID_MAX;
else
return (ResourceId) anItr->second;
}
""" % d)
    def writeCPPGroup(self, fp, group, namespace):
        """Emit Extract<Group>Resources: loads each resource via the manager."""
        d = {}
        d['resid'] = group.resid
        if namespace:
            d['ns'] = namespace + '::'
        else:
            d['ns'] = ''
        fp.write("""\
bool %(ns)sExtract%(resid)sResources(ResourceManager *theManager)
{
gNeedRecalcVariableToIdMap = true;
ResourceManager &aMgr = *theManager;
try
{
""" % d)
        allres = group.fonts + group.images + group.sounds
        for res in allres:
            d['res'] = res
            d['mytype'] = res.mytype
            fp.write('\t\t%(res)s = aMgr.Get%(mytype)sThrow("%(res)s");\n' % d)
        fp.write("""\
}
catch(ResourceManagerException&)
{
return false;
}
return true;
}
""")
    def writeCPPResourceID(self, fp, namespace):
        """Emit the resource variable definitions and the gResources table."""
        d = {}
        if namespace:
            d['ns'] = namespace + '::'
        else:
            d['ns'] = ''
        # NOTE(review): the "% d" below is a no-op (no format specifiers).
        fp.write('// Resources\n' % d)
        for res in self.allres:
            d['restype'] = res.idtype
            d['res'] = res
            if res.idtype == 'int':
                fp.write('%(restype)s %(ns)s%(res)s;\n' % d)
            else:
                fp.write('Sexy::%(restype)s %(ns)s%(res)s;\n' % d)
        if self.allres:
            fp.write('\n')
        fp.write("""\
static void* gResources[] =
{
""")
        for res in self.allres:
            d['res'] = res
            fp.write("""\
&%(res)s,
""" % d)
        fp.write('\tNULL\n')
        fp.write('};\n\n')
    def writeCPPGetResources(self, fp, namespace):
        """Emit the Get*/Ref/Id accessor functions and GetStringIdById."""
        d = {}
        if namespace:
            d['ns'] = namespace + '::'
        else:
            d['ns'] = ''
        fp.write("""\
Image* %(ns)sLoadImageById(ResourceManager *theManager, int theId)
{
return (*((Image**)gResources[theId]) = theManager->LoadImage(GetStringIdById(theId)));
}
void %(ns)sReplaceImageById(ResourceManager *theManager, int theId, Image *theImage)
{
theManager->ReplaceImage(GetStringIdById(theId),theImage);
*(Image**)gResources[theId] = theImage;
}
Image* %(ns)sGetImageById(int theId)
{
return *(Image**)gResources[theId];
}
Font* %(ns)sGetFontById(int theId)
{
return *(Font**)gResources[theId];
}
int %(ns)sGetSoundById(int theId)
{
return *(int*)gResources[theId];
}
Image*& %(ns)sGetImageRefById(int theId)
{
return *(Image**)gResources[theId];
}
Font*& %(ns)sGetFontRefById(int theId)
{
return *(Font**)gResources[theId];
}
int& %(ns)sGetSoundRefById(int theId)
{
return *(int*)gResources[theId];
}
static %(ns)sResourceId GetIdByVariable(void *theVariable)
{
typedef std::map<void*,int> MyMap;
static MyMap aMap;
if(gNeedRecalcVariableToIdMap)
{
gNeedRecalcVariableToIdMap = false;
aMap.clear();
for(int i=0; i<RESOURCE_ID_MAX; i++)
aMap[gResources[i]] = i;
}
MyMap::iterator anItr = aMap.find(theVariable);
if (anItr == aMap.end())
return RESOURCE_ID_MAX;
else
return (ResourceId) anItr->second;
}
%(ns)sResourceId %(ns)sGetIdByImage(Image *theImage)
{
return GetIdByVariable(theImage);
}
%(ns)sResourceId %(ns)sGetIdByFont(Font *theFont)
{
return GetIdByVariable(theFont);
}
%(ns)sResourceId %(ns)sGetIdBySound(int theSound)
{
return GetIdByVariable((void*)theSound);
}
""" % d)
        fp.write("""\
const char* %(ns)sGetStringIdById(int theId)
{
switch (theId)
{
""" % d)
        for res in self.allres:
            d['res'] = res
            fp.write("""\
case %(res)s_ID: return "%(res)s";
""" % d)
        fp.write('\tdefault: return "";\n')
        fp.write("\t}\n")
        #fp.write('\treturn "";\n')
        fp.write("}\n\n")
    def write(self, name='Res', namespace='Sexy'):
        """Emit both the header and the implementation file."""
        self.writeHeader(name, namespace)
        self.writeCPP(name, namespace)
def main():
    """CLI entry point: parse options, read manifest file(s), emit Res.h/.cpp.

    NOTE(review): Python 2 (`print` statement below).
    """
    from optparse import OptionParser
    parser = OptionParser(usage='usage: %prog [options] resource_file(s)', version="%prog 0.5")
    parser.add_option("-v", "--verbose",
        action="store_true", default=False, dest='verbose',
        help="Give verbose output.")
    parser.add_option("-n", "--namespace",
        default="Sexy", dest='namespace',
        metavar="MODULE", help="namespace (default %default)")
    parser.add_option("-m", "--module",
        default="Res", dest='module',
        metavar="RES",help="name of the C++ module (default %default)")
    options, args = parser.parse_args()
    if len(args) < 1:
        parser.error("incorrect number of arguments")
        return
    resgen = ResGen(options)
    for a in args:
        if options.verbose:
            print "Parsing " + a
        resgen.parse(a)
    resgen.write(options.module, options.namespace)
if __name__ == '__main__':
    import sys
    # Python 2 `except X, e` syntax: swallow sys.exit(), report anything else.
    try:
        main()
    except SystemExit, e:
        pass
    except Exception, e:
        print >> sys.stderr, e
        sys.exit(1)
| StarcoderdataPython |
123311 | <reponame>Cluedo-MLH-Hackathon/Cluedo-Project
import os
import shutil
def recursive_walk(folder, dst=None):
    """Copy every ``*.exe`` found under *folder* (recursively) into *dst*.

    os.walk() already descends into subdirectories, so no explicit
    recursion is needed; the original re-called itself with a bare
    subfolder *name*, which resolved against the CWD and walked the
    wrong place (besides duplicating os.walk's own traversal).

    Args:
        folder: root directory to scan.
        dst: destination directory; defaults to the module-level
             ``dir_dst`` so existing single-argument callers keep working.
    """
    if dst is None:
        dst = dir_dst
    for dirpath, _subdirs, filenames in os.walk(folder):
        for filename in filenames:
            if filename.endswith('.exe'):
                # os.path.join instead of a hard-coded "\\" so this also
                # works on non-Windows filesystems.
                shutil.copy(os.path.join(dirpath, filename), dst)
# Scan every top-level folder under Program Files (Windows-only paths) and
# copy all .exe files it contains into C:\BenignFiles, skipping entries
# that are not real application folders or are access-restricted.
unallowed = ['desktop.ini','WindowsApps']
l=os.listdir("C:\\Program Files\\")
dir_src = ("C:\\Program Files\\")
dir_dst = ("C:\\BenignFiles\\")
for i in l:
    if i in unallowed:
        continue
    print('C:\\Program Files\\' +i)
    recursive_walk('C:\\Program Files\\'+i)
3207588 | from rest_framework import serializers
from .models import *
from rest_framework.validators import UniqueValidator
class ResourceSerializer(serializers.ModelSerializer):
    """Serializes Resource rows, adding derived CPU/RAM ratios and the
    related policy's name."""
    cpu_percent = serializers.SerializerMethodField()
    ram_percent = serializers.SerializerMethodField()
    policy_name = serializers.SerializerMethodField()
    class Meta:
        model = Resource
        fields = ["id", "date_added", "host_name", "status", "policy_name",
                  "platform_type", "datacenter", "total_cpu", "total_ram", "total_disk",
                  "current_ram", "current_cpu", "is_active", "total_jobs", "ram_percent",
                  "job_completed", "monitored", "cpu_percent"]
    def get_cpu_percent(self, obj):
        # NOTE(review): a 0..1 fraction despite the "_percent" name; raises
        # ZeroDivisionError if total_cpu is 0 -- confirm totals are positive.
        return obj.current_cpu / obj.total_cpu
    def get_ram_percent(self, obj):
        # same caveats as get_cpu_percent
        return obj.current_ram / obj.total_ram
    def get_policy_name(self, obj):
        # name of the related policy object (FK traversal)
        return obj.policy.name
class VMSerializer(serializers.ModelSerializer):
    """Serializes VM rows, adding derived CPU/RAM usage ratios."""
    cpu_percent = serializers.SerializerMethodField()
    ram_percent = serializers.SerializerMethodField()
    class Meta:
        model = VM
        fields = ["id", "date_created", "date_destroyed", "boinc_time", "ip_address", "name",
                  "total_cpu", "total_ram", "total_disk", "current_ram", "current_cpu", "powered_on", "guest_os",
                  "ram_percent", "cpu_percent"]
    def get_cpu_percent(self, obj):
        # NOTE(review): a 0..1 fraction despite the "_percent" name; raises
        # ZeroDivisionError if total_cpu is 0 -- confirm totals are positive.
        return obj.current_cpu / obj.total_cpu
    def get_ram_percent(self, obj):
        # same caveats as get_cpu_percent
        return obj.current_ram / obj.total_ram
| StarcoderdataPython |
3387291 | <filename>python/download-forcing-inputs/src/download_forecast/__init__.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Functions to download forecast data from different sources
"""
from math import floor,ceil
import numpy as np
from pydap.client import open_url
from datetime import datetime, timedelta, date, time
import calendar
import sys, os
from pathlib import Path
def gfs(date_now,hdays,fdays,domain,dirout):
    """
    Download GFS forecast data for running a croco model.

    Data is downloaded from hdays before date_now till fdays after date_now.
    The GFS model is initialised every 6 hours and provides hourly forecasts.
    For the historical period we download forecast hours 1 through 6 from
    each initialisation; the forecast period is downloaded from the latest
    available initialisation.

    Args:
        date_now: datetime.date of "today" (converted to datetime internally).
        hdays, fdays: hindcast / forecast extents in days.
        domain: [lon_min, lon_max, lat_min, lat_max].
        dirout: output directory prefix (string-concatenated with filenames).

    Returns:
        delta_days: offset (days) between the latest GFS initialisation and
        date_now -- needed when generating the croco forcing files.
    """
    # extent hdays and fdays by 6 hours to make sure our download completely covers the simulation period
    hdays=hdays+0.25
    fdays=fdays+0.25
    url1='https://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_0p25_1hr.pl?file=gfs.t'
    url2='&lev_10_m_above_ground=on&lev_2_m_above_ground=on&lev_surface=on&var_DLWRF=on&var_DSWRF=on&var_LAND'+\
        '=on&var_PRATE=on&var_RH=on&var_TMP=on&var_UFLX=on&var_UGRD=on&var_ULWRF=on&var_USWRF=on&var_VFLX=on&var_VGRD=on&'+\
        'subregion=&leftlon='+str(domain[0])+'&rightlon='+str(domain[1])+'&toplat='+str(domain[3])+'&bottomlat='+str(domain[2])+'&dir=%2Fgfs.'
    # get latest gfs run that exists for this day
    # (this makes things a bit more complicated, but we might as well make use
    # of the latest initialisation that is available. It also means that our
    # system shouldn't fall over if the gfs forecast is delayed...)
    print('checking for latest GFS initialisation...')
    date_now=datetime.combine(date_now, time()) # just converting date_now from a date to a datetime- needed for comparing to other datetimes
    date_latest=datetime(date_now.year,date_now.month,date_now.day,18,0,0) # start with the last possible one for today
    gfs_exists=False
    iters=0
    while not(gfs_exists):
        url_check='https://nomads.ncep.noaa.gov/dods/gfs_0p25_1hr/gfs'+date_latest.strftime("%Y")+date_latest.strftime("%m")+date_latest.strftime("%d")+'/gfs_0p25_1hr_'+date_latest.strftime("%H")+'z'
        try:
            check_gfs = open_url(url_check)
            gfs_exists=True
        # narrowed from a bare `except:` so Ctrl-C is not swallowed here
        except Exception:
            date_latest=date_latest+timedelta(hours=-6) # work backwards in 6 hour timesteps
            iters=iters+1
            if iters>4:
                print("GFS data is not presently available")
                sys.exit('')
    print("Latest available GFS initialisation found:", date_latest)
    print("GFS download started...")
    startTime=datetime.now() # for timing purposes
    delta_days=(date_latest-date_now).total_seconds()/86400
    # go back in time to cover the full duration of the croco simulation
    date_hist=date_now + timedelta(days=-hdays)
    while date_hist<date_latest:
        url3=date_hist.strftime("%Y")+date_hist.strftime("%m")+date_hist.strftime("%d")+'%2F'+date_hist.strftime("%H")+'%2Fatmos'
        for frcst in range(1,7): # forecast hours 1 to 6
            fname=date_hist.strftime("%Y")+date_hist.strftime("%m")+date_hist.strftime("%d")+date_hist.strftime("%H")+'_f'+str(frcst).zfill(3)+'.grb'
            fileout=dirout+fname
            if not(os.path.isfile(fileout)): # only download if the file doesn't already exist
                url=url1+date_hist.strftime("%H")+'z.pgrb2.0p25.f'+str(frcst).zfill(3)+url2+url3
                # BUG FIX: was '-silent', which curl parses as bundled short
                # options (-s -i -l ...), not as the --silent long option.
                cmd='curl --silent \'' + url +'\'' + ' -o ' + fileout
                print('download = ', fileout)
                os.system( cmd )
                # unfortunately this doesn't actually throw an error if the file to be downloaded does not exist,
                # but it does create a small and useless fileout.
                # So check the size of fileout and delete it if it is 'small'
                if Path(fileout).stat().st_size < 1000: # using 1kB as the check
                    print('WARNING: '+fname+' could not be downloaded')
                    os.remove(fileout)
        date_hist=date_hist + timedelta(hours=6)
    # now download the forecast from date_latest, already identified as the latest initialisation of gfs
    url3=date_latest.strftime("%Y")+date_latest.strftime("%m")+date_latest.strftime("%d")+'%2F'+date_latest.strftime("%H")+'%2Fatmos'
    fhours=int((fdays-delta_days)*24)
    #fhours=int(fdays*24)
    for frcst in range(1,fhours+1): # fhours + 1 to be inclusive of fhours
        fname=date_latest.strftime("%Y")+date_latest.strftime("%m")+date_latest.strftime("%d")+date_latest.strftime("%H")+'_f'+str(frcst).zfill(3)+'.grb'
        fileout=dirout+fname
        if not(os.path.isfile(fileout)): # only download if the file doesn't already exist
            url=url1+date_latest.strftime("%H")+'z.pgrb2.0p25.f'+str(frcst).zfill(3)+url2+url3
            cmd='curl --silent \'' + url +'\'' + ' -o ' + fileout
            print('download = ', fileout)
            os.system( cmd )
            # see note above about curl silently producing tiny junk files
            if Path(fileout).stat().st_size < 1000: # using 1kB as the check
                print('WARNING: '+fname+' could not be downloaded')
                os.remove(fileout)
    print('GFS download completed (in '+str(datetime.now() - startTime)+' h:m:s)')
    return delta_days # return this as we need it when generating the croco forcing files
def mercator(path2motuClient,usrname,passwd,domain,date_now,hdays,fdays,varList,depths,mercator_dir):
    """
    Download latest daily ocean forecasts from CMEMS GLOBAL-ANALYSIS-FORECAST-PHY-001-024.

    Dependencies: (see https://marine.copernicus.eu/faq/what-are-the-motu-and-python-requirements/)
    python -m pip install motuclient

    Adapted from a third-party script (author credit redacted in this copy).
    The download is skipped when today's output file already exists.
    """
    # extend hdays and fdays by 1 day to make sure our download completely
    # covers the simulation period
    hdays=hdays+1
    fdays=fdays+1
    startDate = date_now + timedelta(days = -hdays)
    endDate = date_now + timedelta(days = fdays)
    # loop on the variables to generate the variable string so that the number of requested variables is flexible
    # so = Salinity in psu, thetao = Temperature in degrees C, zos = SSH in m, uo = Eastward velocity in m/s, vo = Northward velocity in m/s
    var_str=''
    for var in varList:
        var_str=var_str+' --variable '+var
    # output filename
    fname = 'mercator_'+str(date_now.strftime('%Y%m%d'))+'.nc'
    # create the runcommand string
    # BUG FIX: a redaction placeholder ("<PASSWORD>") replaced the password
    # argument; restored to the `passwd` parameter, which was otherwise unused.
    # NOTE(review): the interpreter name is hard-coded to python3.8.
    runcommand = 'python3.8 '+path2motuClient+'motuclient.py --quiet'+ \
        ' --user '+usrname+' --pwd '+passwd+ \
        ' --motu http://nrt.cmems-du.eu/motu-web/Motu'+ \
        ' --service-id GLOBAL_ANALYSIS_FORECAST_PHY_001_024-TDS'+ \
        ' --product-id global-analysis-forecast-phy-001-024'+ \
        ' --longitude-min '+str(domain[0])+' --longitude-max '+str(domain[1])+ \
        ' --latitude-min '+str(domain[2])+' --latitude-max '+str(domain[3])+ \
        ' --date-min "'+str(startDate.strftime('%Y-%m-%d'))+'" --date-max "'+str(endDate.strftime('%Y-%m-%d'))+'"'+ \
        ' --depth-min '+str(depths[0])+' --depth-max '+str(depths[1])+ \
        var_str+ \
        ' --out-dir '+mercator_dir+' --out-name '+fname
    if os.path.exists(mercator_dir+fname)==False:
        # run the runcommand, i.e. download the data specified above
        print('downloading latest mercator ocean forecast from CMEMS...')
        startTime=datetime.now()
        try:
            os.system(runcommand)
            print('mercator download completed (in '+str(datetime.now() - startTime)+' h:m:s)')
        except Exception:
            # for now we'll just terminate today's forecast if the mercator download fails
            sys.exit('mercator data not available for today- forecast not executed')
        # an alternative approach might be to return a flag from this function to say whether the file was downloaded or not
        # then in run_frcst.py we could use that flag to either make a new croco file from the downloaded mercator file
        # or if the file wasn't downloaded then we could use yesterday's croco file and just repeat the last available time-step to make a new one
    else:
        print(mercator_dir+fname+' already exists - not downloading mercator data')
1759194 | <reponame>elielagmay/react-budgeteer<gh_stars>1-10
from django.db import models
from app.utils import get_balances
class Category(models.Model):
    """A named grouping of accounts belonging to one ledger."""
    # PROTECT: a ledger with categories cannot be deleted
    ledger = models.ForeignKey(
        'ledger.Ledger',
        on_delete=models.PROTECT,
        related_name='categories'
    )
    name = models.CharField(max_length=255)
    # manual ordering weight; lower sorts first (see Meta.ordering)
    sort = models.IntegerField(default=0)
    is_active = models.BooleanField(default=True)
    class Meta:
        verbose_name_plural = 'categories'
        # names must be unique within a ledger, not globally
        unique_together = ('ledger', 'name')
        ordering = ('sort', 'id')
    def __str__(self):
        return self.name
class Account(models.Model):
    """A single account within a category."""
    # PROTECT: a category with accounts cannot be deleted
    category = models.ForeignKey(
        'account.Category',
        on_delete=models.PROTECT,
        related_name='accounts'
    )
    name = models.CharField(max_length=255)
    # manual ordering weight; lower sorts first (see Meta.ordering)
    sort = models.IntegerField(default=0)
    is_active = models.BooleanField(default=True)
    class Meta:
        # names must be unique within a category, not globally
        unique_together = ('category', 'name')
        ordering = ('sort', 'id')
    def __str__(self):
        return self.name
    def get_balances(self):
        """Balances computed from this account's cleared entries only."""
        # `self.entries` is presumably the reverse relation from an Entry
        # model (related_name='entries') -- confirm against that model.
        entries = self.entries.filter(is_cleared=True)
        return get_balances(entries, convert=False)
| StarcoderdataPython |
1718849 | """
This inline script can be used to dump flows as HAR files.
example cmdline invocation:
mitmdump -s ./har_dump.py --set hardump=./dump.har
filename endwith '.zhar' will be compressed:
mitmdump -s ./har_dump.py --set hardump=./dump.zhar
"""
import json
import base64
import typing
import tempfile
import re
from datetime import datetime
from datetime import timezone
import falcon
from mitmproxy import ctx
from mitmproxy import connections
from mitmproxy import version
from mitmproxy.utils import strutils
from mitmproxy.net.http import cookies
from mitmproxy import http
class WhiteListResource:
    """Falcon HTTP resource exposing the proxy's whitelist controls.

    Sub-routes (dispatched via *method_name* in on_get):
        whitelist_requests      -- replace the whitelist with new patterns
        add_whitelist_pattern   -- append one pattern to an enabled whitelist
        enable_empty_whitelist  -- enable the whitelist with no patterns
        disable_whitelist       -- turn the whitelist off
    """
    def addon_path(self):
        """URL segment under which this resource is mounted."""
        return "whitelist"

    def __init__(self, white_list_addon):
        self.white_list_addon = white_list_addon

    def on_get(self, req, resp, method_name):
        # dispatch /whitelist/<method_name> to the matching on_* handler
        getattr(self, "on_" + method_name)(req, resp)

    def on_whitelist_requests(self, req, resp):
        """Replace the whitelist: non-matching requests get *statusCode*."""
        raw_url_patterns = req.get_param('urlPatterns')
        status_code = req.get_param('statusCode')
        # urlPatterns arrives as "[p1,p2,...]"
        url_patterns = raw_url_patterns.strip("[]").split(",")
        url_patterns_compiled = []
        try:
            for raw_pattern in url_patterns:
                url_patterns_compiled.append(self.parse_regexp(raw_pattern))
        except re.error:
            raise falcon.HTTPBadRequest("Invalid regexp patterns")
        self.white_list_addon.white_list = {
            "status_code": status_code,
            "url_patterns": url_patterns_compiled
        }

    def on_add_whitelist_pattern(self, req, resp):
        """Append one URL pattern to an already-enabled whitelist."""
        url_pattern = req.get_param('urlPattern')
        white_list = self.white_list_addon.white_list
        # BUG FIX: the whitelist is a dict, so enablement must be tested with
        # key membership; the original hasattr() was always False, making this
        # endpoint reject every request.
        if "status_code" not in white_list or "url_patterns" not in white_list:
            raise falcon.HTTPBadRequest("Whitelist is disabled. Cannot add patterns to a disabled whitelist.")
        # BUG FIX: compile the pattern like on_whitelist_requests does; the raw
        # string has no .match() and crashed the addon's request filtering.
        try:
            white_list["url_patterns"].append(self.parse_regexp(url_pattern))
        except re.error:
            raise falcon.HTTPBadRequest("Invalid regexp pattern")

    def on_enable_empty_whitelist(self, req, resp):
        """Enable the whitelist with an empty pattern list."""
        status_code = req.get_param('statusCode')
        self.white_list_addon.white_list["url_patterns"] = []
        self.white_list_addon.white_list["status_code"] = status_code

    def on_disable_whitelist(self, req, resp):
        """Disable whitelisting entirely."""
        self.white_list_addon.white_list = {}

    def parse_regexp(self, raw_regexp):
        """Compile *raw_regexp*, anchoring it with ^...$ if not already anchored."""
        if not raw_regexp.startswith('^'):
            raw_regexp = '^' + raw_regexp
        if not raw_regexp.endswith('$'):
            raw_regexp = raw_regexp + '$'
        return re.compile(raw_regexp)
class WhiteListAddOn:
    """mitmproxy addon that short-circuits every request whose URL does not
    match one of the configured whitelist patterns, answering it locally
    with the configured status code instead of forwarding it upstream."""

    def __init__(self):
        self.num = 0
        # Empty dict == whitelist disabled; once configured it holds
        # 'status_code' (str) and 'url_patterns' (compiled regexes).
        self.white_list = {}

    def get_resource(self):
        """Return the REST resource used to configure this addon."""
        return WhiteListResource(self)

    def is_whitelist_enabled(self):
        """The whitelist is active once both keys have been configured."""
        return 'status_code' in self.white_list and 'url_patterns' in self.white_list

    def request(self, flow):
        """Block the flow unless its URL matches a whitelisted pattern."""
        if not self.is_whitelist_enabled():
            return
        patterns = self.white_list['url_patterns']
        if any(pattern.match(flow.request.url) for pattern in patterns):
            # Whitelisted: let mitmproxy forward the request normally.
            return
        # Not whitelisted: answer immediately with an empty body and mark
        # the flow so downstream consumers can tell it was filtered.
        flow.response = http.HTTPResponse.make(
            int(self.white_list['status_code']),
            b"",
            {"Content-Type": "text/html"}
        )
        flow.metadata['WhiteListFiltered'] = True
# mitmproxy discovers a script's addons through this module-level list.
addons = [
    WhiteListAddOn()
]
| StarcoderdataPython |
import os
import json

# Load the stock Mattermost configuration, overlay every MM_* environment
# variable onto it, and write the merged result to ./config.json.
# The variable name encodes a dot-path into the JSON, with '_' separating
# path elements, e.g. MM_ServiceSettings_SiteURL -> ServiceSettings.SiteURL.
# BUG FIX: the default config was read via open(...).read() and the file
# handle was never closed; use a context manager (and json.load directly).
with open("mattermost/config/default.json") as default_config_file:
    config = json.load(default_config_file)

for envVar in os.environ:
    if envVar.startswith("MM_"):
        key = envVar[3:]
        jsonPath = key.split("_")
        lastKey = jsonPath.pop()
        print("Setting " + ".".join(jsonPath) + "." + lastKey)
        # Walk down to the parent object of the key being set.
        targetElement = config
        for pathElement in jsonPath:
            targetElement = targetElement[pathElement]
        # The env value must itself be valid JSON (so strings need quotes);
        # wrapping it in an object lets bare numbers/booleans parse too.
        jsonValue = json.loads("{\"value\":" + os.environ[envVar] + "}")
        targetElement[lastKey] = jsonValue["value"]
        print("Set " + ".".join(jsonPath) + "." + lastKey + " to " + json.dumps(jsonValue["value"]))

with open('config.json', 'w') as fp:
    json.dump(config, fp, indent=4)
| StarcoderdataPython |
3301303 | <filename>pyheaders/cpp/record.py<gh_stars>1-10
'''
Represents a C++ record (class, struct).
'''
from collections import namedtuple
from keyword import iskeyword
from typing import Any, Iterable, List, Text, Tuple, Union
from .scope import Scope, split, normalize
from .types import remove_template
class Record(Scope):
    '''
    Represents a C++ class or struct.
    '''
    # Class-wide switch: when True, a record with exactly one field is
    # "collapsed" -- calling the record returns the lone value unchanged
    # instead of wrapping it in a one-element namedtuple.
    _COLLAPSE_SHORT_RECORDS = True
    @staticmethod
    def collapse_short_records(collapse: bool = True):
        '''
        Control whether records with a single field should be collapsed.
        '''
        Record._COLLAPSE_SHORT_RECORDS = bool(collapse)
    @staticmethod
    def _identity(obj):
        # Stand-in "constructor" used for collapsed single-field records.
        return obj
    @staticmethod
    def _safe_field_names(field_names: Union[Text, Iterable[Text]]) -> List[Text]:
        '''
        Rename problematic field names to not be problematic.
        Based on `namedtuple(..., rename=True)`.
        '''
        if isinstance(field_names, str):
            field_names = field_names.replace(',', ' ').split()
        safe_names = []
        seen = set()
        # Walk right-to-left so the *last* occurrence of a duplicated name
        # keeps its spelling; an earlier duplicate gains a '_' prefix.
        for name in reversed(tuple(field_names)):
            if name in seen:
                name = f'_{name}'
            safe_names.insert(0, name)
            seen.add(name)
        return safe_names
    def __init__(self, name: Text, field_names: Union[Text, Iterable[Text]], base_scope: Iterable[Tuple[Text, Any]] = None):
        super().__init__(base_scope or [])
        self.__name = normalize(name)
        self.__fields = tuple(Record._safe_field_names(field_names))
        if len(self.__fields) == 1 and Record._COLLAPSE_SHORT_RECORDS:
            self.__type = Record._identity
        else:
            # Build a namedtuple whose __module__ mirrors the C++ namespace;
            # template arguments are stripped from the record name first.
            module, name = split(remove_template(self.__name))
            if not module:
                module = ''
            if iskeyword(name):
                name = f'_{name}'
            self.__type = namedtuple(name, self.__fields, module=module.replace(Scope.SEP, '.'), rename=True)
    @property
    def name(self):
        '''
        Gets the name of the class / struct.
        '''
        return self.__name
    @property
    def fields(self):
        '''
        Gets the names of the fields in the class / struct.
        '''
        return self.__fields
    def __call__(self, *args: Any):
        # Instantiate a value of this record type from positional field values.
        return self.__type(*args)
    def __repr__(self):
        scope_repr = ''
        if self:
            scope_repr = f', Scope{super().__repr__()[len(type(self).__name__):]}'
        return f'{type(self).__name__}({self.name!r}, {self.__fields!r}{scope_repr})'
| StarcoderdataPython |
1635221 | <filename>thonnycontrib/JuiceMind/__init__.py
import logging
import os
import re
from thonny import get_workbench, get_runner
from thonny.ui_utils import scale
from thonny.ui_utils import select_sequence
import logging
import threading
import time
#Don't undestand what these do
DESKTOP_SESSION = os.environ.get("DESKTOP_SESSION", "_")
CONFIGURATION_PATH = os.path.join(
os.path.expanduser("~"), ".config/lxsession", DESKTOP_SESSION, "desktop.conf"
)
GLOBAL_CONFIGURATION_PATH = os.path.join("/etc/xdg/lxsession", DESKTOP_SESSION, "desktop.conf")
logger = logging.getLogger(__name__)
#Global variables
esp32_boolean = False
startup_theme = "JuiceMind-Theme"
def pix():
    """Build and return the ttk style settings dict for the light
    'JuiceMind-Theme'.  Also refreshes the global fonts and, when an
    LXSession desktop.conf is present, adopts its selection colours."""
    #Flexibility to change the background to a color of our choice
    MAIN_BACKGROUND = "#ededed"
    #This is for different scroll arrows
    detail_bg = "#d0d0d0"
    detail_bg2 = "#cfcdc8"
    #Initializes the res directory
    res_dir = os.path.join(os.path.dirname(__file__), "res")
    scrollbar_button_settings = {}
    #Sets different aspects of the scrollbar
    for direction, element_name in [
        ("up", "Vertical.Scrollbar.uparrow"),
        ("down", "Vertical.Scrollbar.downarrow"),
        ("left", "Horizontal.Scrollbar.leftarrow"),
        ("right", "Horizontal.Scrollbar.rightarrow"),
    ]:
        # load the image
        img_name = "scrollbar-button-" + direction
        for suffix in ["", "-insens"]:
            get_workbench().get_image(
                os.path.join(res_dir, img_name + suffix + ".png"), img_name + suffix
            )
        scrollbar_button_settings[element_name] = {
            "element create": (
                "image",
                img_name,
                ("!disabled", img_name),
                ("disabled", img_name + "-insens"),
            )
        }
    settings = {
        ".": {"configure": {"background": MAIN_BACKGROUND}},
        "Toolbutton": {
            "configure": {"borderwidth": 1},
            "map": {
                "relief": [("disabled", "flat"), ("hover", "groove"), ("!hover", "flat")],
                "background": [
                    ("disabled", MAIN_BACKGROUND),
                    ("!hover", MAIN_BACKGROUND),
                    ("hover", "#ffffff"),
                ],
            },
        },
        "Treeview.Heading": {
            "configure": {
                "background": "#f0f0f0",
                "foreground": "#808080",
                "relief": "flat",
                "borderwidth": 1,
            },
            "map": {"foreground": [("active", "black")]},
        },
        "TNotebook.Tab": {
            "map": {"background": [("!selected", detail_bg), ("selected", MAIN_BACKGROUND)]}
        },
        "ButtonNotebook.TNotebook.Tab": {
            "map": {
                "background": [("!selected", detail_bg), ("selected", MAIN_BACKGROUND)],
                "padding": [
                    ("selected", [scale(4), scale(2), scale(4), scale(3)]),
                    ("!selected", [scale(4), scale(2), scale(4), scale(3)]),
                ],
            }
        },
        "TScrollbar": {
            "configure": {
                "gripcount": 0,
                "borderwidth": 0,
                "padding": scale(1),
                "relief": "solid",
                "background": "#9e9e9e",
                "darkcolor": "#d6d6d6",
                "lightcolor": "#d6d6d6",
                "bordercolor": "#d6d6d6",
                "troughcolor": "#d6d6d6",
                "arrowsize": scale(1),
                "arrowcolor": "gray",
            },
            "map": {"background": [], "darkcolor": [], "lightcolor": []},
        },
        # Padding allows twaking thumb width
        "Vertical.TScrollbar": {
            "layout": [
                (
                    "Vertical.Scrollbar.trough",
                    {
                        "sticky": "ns",
                        "children": [
                            ("Vertical.Scrollbar.uparrow", {"side": "top", "sticky": ""}),
                            ("Vertical.Scrollbar.downarrow", {"side": "bottom", "sticky": ""}),
                            (
                                "Vertical.Scrollbar.padding",
                                {
                                    "sticky": "nswe",
                                    "children": [
                                        (
                                            "Vertical.Scrollbar.thumb",
                                            {"expand": 1, "sticky": "nswe"},
                                        )
                                    ],
                                },
                            ),
                        ],
                    },
                )
            ]
        },
        "Horizontal.TScrollbar": {
            "layout": [
                (
                    "Horizontal.Scrollbar.trough",
                    {
                        "sticky": "we",
                        "children": [
                            ("Horizontal.Scrollbar.leftarrow", {"side": "left", "sticky": ""}),
                            ("Horizontal.Scrollbar.rightarrow", {"side": "right", "sticky": ""}),
                            (
                                "Horizontal.Scrollbar.padding",
                                {
                                    "sticky": "nswe",
                                    "children": [
                                        (
                                            "Horizontal.Scrollbar.thumb",
                                            {"expand": 1, "sticky": "nswe"},
                                        )
                                    ],
                                },
                            ),
                        ],
                    },
                )
            ],
            "map": {
                # Make disabled Hor Scrollbar invisible
                "background": [("disabled", "#d6d6d6")],
                "troughcolor": [("disabled", "#d6d6d6")],
                "bordercolor": [("disabled", "#d6d6d6")],
                "darkcolor": [("disabled", "#d6d6d6")],
                "lightcolor": [("disabled", "#d6d6d6")],
            },
        },
        "TCombobox": {"configure": {"arrowsize": scale(10)}},
        "Menubar": {
            "configure": {
                "background": MAIN_BACKGROUND,
                "relief": "flat",
                "activebackground": "#ffffff",
                "activeborderwidth": 0,
            }
        },
        "Menu": {
            "configure": {
                "background": "#ffffff",
                "relief": "flat",
                "borderwidth": 1,
                "activeborderwidth": 0,
                # "activebackground" : bg, # updated below
                # "activeforeground" : fg,
            }
        },
        "Tooltip": {
            "configure": {
                "background": "#808080",
                "foreground": "#ffffff",
                "borderwidth": 0,
                "padx": 10,
                "pady": 10,
            }
        },
        "Tip.TLabel": {"configure": {"background": detail_bg2, "foreground": "black"}},
        "Tip.TFrame": {"configure": {"background": detail_bg2}},
        "OPTIONS": {"configure": {"icons_in_menus": False, "shortcuts_in_tooltips": False}},
    }
    settings.update(scrollbar_button_settings)
    # try to refine settings according to system configuration. Come back to this feature only if we want to modify the overall fonts manually.
    """Note that fonts are set globally,
    ie. all themes will later inherit these"""
    update_fonts()
    # Adopt the LXDE selection colours for menus when the session config
    # declares them (12-hex-digit GTK colours; every other nibble is kept).
    for path in [GLOBAL_CONFIGURATION_PATH, CONFIGURATION_PATH]:
        if os.path.exists(path):
            with open(path) as fp:
                try:
                    for line in fp:
                        if "sGtk/ColorScheme" in line:
                            if "selected_bg_color" in line:
                                bgr = re.search(
                                    r"selected_bg_color:#([0-9a-fA-F]*)", line, re.M
                                ).group(
                                    1
                                )  # @UndefinedVariable
                                color = "#" + bgr[0:2] + bgr[4:6] + bgr[8:10]
                                if is_good_color(color):
                                    settings["Menu"]["configure"]["activebackground"] = color
                            if "selected_fg_color" in line:
                                fgr = re.search(
                                    r"selected_fg_color:#([0-9a-fA-F]*)", line, re.M
                                ).group(
                                    1
                                )  # @UndefinedVariable
                                color = "#" + fgr[0:2] + fgr[4:6] + fgr[8:10]
                                if is_good_color(color):
                                    settings["Menu"]["configure"]["activeforeground"] = color
                except Exception as e:
                    logger.error("Could not update colors", exc_info=e)
    return settings
def is_good_color(s):
    """Return True when `s` is a '#RRGGBB' hex colour string."""
    hex_color = re.match("^#[0-9a-fA-F]{6}$", s)
    return hex_color is not None
def pix_dark():
    """Dark-theme hook: currently only refreshes the global fonts and
    contributes no extra style settings."""
    update_fonts()
    return {}
def update_fonts():
    """Read the font declared in the LXSession desktop.conf (sGtk/FontName)
    and apply it to Tk's named default fonts.  Fonts are global: every
    theme inherits whatever is configured here."""
    from tkinter import font
    options = {}
    for path in [GLOBAL_CONFIGURATION_PATH, CONFIGURATION_PATH]:
        if os.path.exists(path):
            try:
                with open(path) as fp:
                    for line in fp:
                        if "sGtk/FontName" in line:
                            # GTK font strings look like "=Family Bold 11";
                            # split into family text and point size.
                            result = re.search(
                                r"=([^0-9]*) ([0-9]*)", line, re.M
                            )  # @UndefinedVariable
                            family = result.group(1)
                            options["size"] = int(result.group(2))
                            if re.search(r"\bBold\b", family):
                                options["weight"] = "bold"
                            else:
                                options["weight"] = "normal"
                            if re.search(r"\bItalic\b", family):
                                options["slant"] = "italic"
                            else:
                                options["slant"] = "roman"
                            # Strip the style words so only the family remains.
                            options["family"] = family.replace(" Bold", "").replace(" Italic", "")
            except Exception as e:
                logger.error("Could not update fonts", exc_info=e)
    if options:
        for name in ["TkDefaultFont", "TkMenuFont", "TkTextFont", "TkHeadingFont"]:
            font.nametofont(name).configure(**options)
'''
def disable_MCU():
print(get_workbench().get_option("run.backend_name"))
#Disable button when the MCU is selected
if (get_workbench().get_option("run.backend_name") == "ESP8266"):
return False
#Don't disable if you are on normal interpreter
else:
return True
def disable_computer():
#Disable button when computer is selected
if (get_workbench().get_option("run.backend_name") == "SameAsFrontend"):
return False
else:
return True
#Callback Function to change interpreter to MicroPython
def switch_to_microPython():
#Configure the default interpreter value to be an ESP32
get_workbench().set_option("run.backend_name", "ESP8266")
#Restart backend to implement changes with the new interpreter
get_runner().restart_backend(False)
#Callback function to change interpreter to regular Python on computer
def switch_to_python():
#Configure the default interpreter value to be an ESP32
get_workbench().set_option("run.backend_name", "SameAsFrontend")
#Restart backend to implement changes with the new interpreter
get_runner().restart_backend(False)
'''
# Index of the current candidate while searching USB devices.
index = 0
# MicroPython backend proxy class; None until a backend has been looked up.
proxy = None
# Last observed connection state; Thonny has established a connection once
# this flips from True back to None (see the retired logic kept below).
prev_connection = None
#Callback function that aims at checking if current USB is the correct USB port.
def connect_device():
proxy = get_workbench().get_backends()["ESP8266"].proxy_class
#If nothing is connected don't do anything -> Maybe in the future make a pop-up that says to plug-in a device
#There is only one port to connect to
if(len(proxy._detect_potential_ports()) == 1):
#List the potential USB name
USB_name = proxy._detect_potential_ports()[index][0]
#Establish a serial connection with that USB
establish_serial_connection(USB_name)
#Multiple USB ports are connected. We are assuming that the only additional USB that can be listed in the window is going to be the SLAB port
elif(len(proxy._detect_potential_ports()) > 1):
USB_name = ""
for i in range(len(proxy._detect_potential_ports())):
temp_USB = proxy._detect_potential_ports()[i][0]
if "SLAB" in temp_USB:
USB_name = temp_USB
break
establish_serial_connection(USB_name)
#Otherwise don't do anything to connect
'''
global index
global proxy
#Only enable device connection if the ESP32 mode is selected
if (get_workbench().get_option("run.backend_name") == "ESP32"):
proxy = get_workbench().get_backends()["ESP32"].proxy_class
#There is more than one port to connect to
if (len(proxy._detect_potential_ports()) > 0):
#Establish a serial connection with a specific serial port
USB_name = proxy._detect_potential_ports()[index][0]
#Increment the index
index = index + 1
#Establish a serial connection
establish_serial_connection(USB_name)
'''
def establish_serial_connection(USB_name):
    """Point the ESP8266 backend at the given port and restart the backend
    so the new port takes effect."""
    #Change the setting for the USB connection
    get_workbench().set_option("ESP8266.port", USB_name)
    #Restart the backend to establish changes
    get_runner().restart_backend(False)
def test_connection():
    """Tester for the 'connect_button' command: refresh its icon to match
    the current backend/connection state and report whether the button
    should be enabled.

    Returns:
        bool: True when pressing 'connect' makes sense, False otherwise.
    """
    toolbar_button = get_workbench().get_toolbar_button("connect_button")
    res_dir = os.path.join(os.path.dirname(__file__), "res")
    # The three states previously triplicated the image-loading boilerplate;
    # pick the icon name and enabled flag here, then load/apply once below.
    if get_workbench().get_option("run.backend_name") == "SameAsFrontend":
        # Plain Python interpreter selected: hide the button behind a
        # transparent image and keep it disabled.
        image_name, enabled = "transparent_background.png", False
    elif (get_workbench().get_option("run.backend_name") == "ESP8266"
          and get_runner()._cmd_interrupt_enabled()):
        # Microcontroller backend with a live connection: show 'connected'
        # and keep the button disabled.
        image_name, enabled = "connected-button.png", False
    else:
        # Microcontroller backend without a connection: offer to connect.
        image_name, enabled = "connect.png", True
    img2 = get_workbench().get_image(os.path.join(res_dir, image_name))
    toolbar_button.configure(image=img2)
    # Keep a Python reference so Tk does not garbage-collect the image.
    toolbar_button.image = img2
    return enabled
def always_disabled():
    """Tester for spacer/indicator toolbar buttons: never enabled."""
    # Buttons using this tester are purely visual and must not be clickable.
    return False
def always_enabled():
    """Tester for toolbar buttons that must always stay clickable, such as
    the Python/MicroPython toggle."""
    return True
def toggle_python():
    """Toolbar callback: flip between the local Python interpreter
    ('SameAsFrontend') and the ESP8266 MicroPython backend, updating the
    toggle button's icon and restarting the backend."""
    toolbar_button = get_workbench().get_toolbar_button("toggle_python")
    res_dir = os.path.join(os.path.dirname(__file__), "res")
    img2 = None
    #Currently Computer is selected -> Switch to the Microcontroller interpreter
    if (get_workbench().get_option("run.backend_name") == "SameAsFrontend"):
        microcontroller_selected_image = os.path.join(res_dir, "MCU_selected.png")
        img2 = get_workbench().get_image(microcontroller_selected_image)
        #Change the backend to ESP8266
        get_workbench().set_option("run.backend_name", "ESP8266")
        #Restart the backend to establish changes
        get_runner().restart_backend(False)
    #Currently Microcontroller is selected -> Switch to the computer interpreter
    else:
        computer_selected_image = os.path.join(res_dir, "computer_selected.png")
        img2 = get_workbench().get_image(computer_selected_image)
        #Change the backend to Regular Python Interpreter
        get_workbench().set_option("run.backend_name", "SameAsFrontend")
        #Restart the backend to establish changes
        get_runner().restart_backend(False)
    toolbar_button.configure(image=img2)
    # Keep a Python reference so Tk does not garbage-collect the image.
    toolbar_button.image = img2
    '''
    global prev_connection
    global index
    global proxy
    #Check if the microcontroller is connected at any point in time
    if (get_workbench().get_option("run.backend_name") == "ESP32" and get_runner()._cmd_interrupt_enabled()):
        #True -> Stays true the entire time
        #Connect an MCU
        prev_connection = get_runner()._cmd_interrupt_enabled()
        return False
    #Check the previous state that it was connected.
    elif (get_runner()._cmd_interrupt_enabled() == None and prev_connection == True and proxy != None):
        #Try connecting again with a different USB port
        if(len(proxy._detect_potential_ports()) > 1 and index < len(proxy._detect_potential_ports())):
            USB_name = proxy._detect_potential_ports()[index][0]
            index = index + 1
            prev_connection = get_runner()._cmd_interrupt_enabled()
            establish_serial_connection(USB_name)
            return False
        else:
            #Set pressed button == False
            prev_connection = None
            index = 0
            return True
    else:
        prev_connection = None
        return True
    '''
def load_plugin():
    """Thonny plugin entry point: register the JuiceMind UI theme, make it
    the startup theme, force the plain-Python backend, and add the toolbar
    buttons (spacer, Python/MicroPython toggle, connect)."""
    #No idea what the screenwidth condition does. I don't think it increases the screen size
    if get_workbench().get_ui_mode() == "simple" and get_workbench().winfo_screenwidth() >= 1280:
        images = {
            "run-current-script": "media-playback-start48.png",
            "stop": "process-stop48.png",
            "new-file": "document-new48.png",
            "open-file": "open_file.png",
            "save-file": "document-save48.png",
            "debug-current-script": "debug-run48.png",
            "step-over": "debug-step-over48.png",
            "step-into": "debug-step-into48.png",
            "step-out": "debug-step-out48.png",
            "run-to-cursor": "debug-run-cursor48.png",
            "tab-close": "window-close.png",
            "tab-close-active": "window-close-act.png",
            "resume": "resume48.png",
            "zoom": "zoom48.png",
            "quit": "quit48.png",
        }
    else:
        images = {
            "run-current-script": "media-playback-start.png",
            "stop": "process-stop.png",
            "new-file": "document-new.png",
            "open-file": "open_file.png",
            "save-file": "document-save.png",
            "debug-current-script": "debug-run.png",
            "step-over": "debug-step-over.png",
            "step-into": "debug-step-into.png",
            "step-out": "debug-step-out.png",
            "run-to-cursor": "debug-run-cursor.png",
            "tab-close": "window-close.png",
            "tab-close-active": "window-close-act.png",
            "resume": "resume.png",
            "zoom": "zoom.png",
            "quit": "quit.png",
        }
    #Change the types of input images depending on the image that is selected.
    res_dir = os.path.join(os.path.dirname(__file__), "res")
    theme_image_map = {}
    for image in images:
        theme_image_map[image] = os.path.join(res_dir, images[image])
    #Create a given theme. Similar to Rasberry Pi but with modified buttons
    get_workbench().add_ui_theme("JuiceMind-Theme", "Enhanced Clam", pix, theme_image_map)
    #Set our theme equal to the default theme during the launch of the IDE
    get_workbench().set_option("view.ui_theme", startup_theme)
    micropython_image = os.path.join(res_dir, "MCU.png")
    computer_selected_image = os.path.join(res_dir, "computer_selected.png")
    connect_image = os.path.join(res_dir, "connect.png")
    transparent_background = os.path.join(res_dir, "transparent_background.png")
    #Set the initial backend to be default, normal computer
    #Change the backend to ESP8266
    get_workbench().set_option("run.backend_name", "SameAsFrontend")
    '''
    #Add a button to switch to MicroPython Interpreter
    get_workbench().add_command("Switch MicroPython", "tools", "Run with MicroPython",
                    switch_to_microPython,
                    default_sequence=select_sequence("<Control-e>", "<Command-e>"),
                    group=120,
                    tester=disable_MCU,
                    image = micropython_image,
                    caption="Use MicroPython",
                    include_in_toolbar=True)
    '''
    '''
    #Add command on toolbar to implement regular Python Interpreter
    get_workbench().add_command("Switch Regular Python", "tools", "Run with Computer Python",
                    switch_to_python,
                    default_sequence=select_sequence("<Control-e>", "<Command-e>"),
                    group=120,
                    tester=disable_computer,
                    image = computer_image,
                    caption="Use Python",
                    include_in_toolbar=True)
    '''
    get_workbench().add_command("add_spacing", "tools", "",
                    toggle_python,
                    default_sequence=select_sequence("<Control-e>", "<Command-e>"),
                    group=120,
                    tester=always_disabled,
                    image = transparent_background,
                    caption="Use Python",
                    include_in_toolbar=True,
                    include_in_menu=False)
    #One command on the toolbar that toggles between Python image and microcontroller image
    get_workbench().add_command("toggle_python", "tools", "Toggle Python",
                    toggle_python,
                    default_sequence=select_sequence("<Control-e>", "<Command-e>"),
                    group=120,
                    tester=always_enabled,
                    image = computer_selected_image,
                    caption="Use Python",
                    include_in_toolbar=True)
    #Add command on toolbar to connect
    get_workbench().add_command("connect_button", "tools", "",
                    connect_device,
                    default_sequence=select_sequence("<Control-e>", "<Command-e>"),
                    group=120,
                    tester=test_connection,
                    image = connect_image,
                    caption="Connect Button",
                    include_in_toolbar=True)
3385490 | <filename>iati/tests/functional_tests.py
"""A module for functional tests."""
from conftest import LOCALHOST
class TestHomePageExists():
    """A container for tests that the home page exists."""
    def setup_home_page_tests(self, browser):
        """Visit the home page and locate the IATI logo.

        Returns the first element matching the 'a.branding' selector.
        """
        browser.visit(LOCALHOST)
        logo = browser.find_by_css('a.branding').first
        return logo
    def test_home_page_has_IATI_logo(self, browser):
        """Check the IATI logo appears on the page."""
        logo = self.setup_home_page_tests(browser)
        assert logo.visible
    def test_home_page_logo_is_a_home_link(self, browser):
        """Check that the IATI logo is also a link to the home page."""
        logo = self.setup_home_page_tests(browser)
        past_url = browser.url
        logo.click()
        # Clicking the logo should navigate back to the same (home) URL.
        assert past_url == browser.url
| StarcoderdataPython |
25694 | # Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#############################################################
# Interface
#############################################################
from collections import namedtuple
# Command/Argument definition
Cmd = namedtuple('Command', 'desc cb args cmds', defaults=(None,None,None,None,))
Arg = namedtuple('Arg', 'name flags short desc default exmpl convert', defaults=(None,None,None,None,None,))
#Flags
OPTION = 0 # simple 'flag' argument; example: '--foo' or '-f'
VALUE = 1 # value argument; example: '--foo=value' '-f=value'
UNNAMED = 2 # unamed argument; example; 'foo'
REQUIRED = 4 # required argument; omitting argument will print the help text
# print help
def print_help(cmd_name, cmd):
_print_help(cmd, [cmd_name])
# execute command based on arguments
def exec_command(cmd_name, cmd, argv):
return _execute_command(cmd, argv[1:], [cmd_name])
#############################################################
# Implementation
#############################################################
_PrintCmd = namedtuple('PrintCmd', 'name text desc cmds args mla_name mla_text mla_short')
_PrintArg = namedtuple('PrintArg', 'name text short desc')
_PRE_UNAMED = 0
_PRE_SHORT = 1
_PRE_NAME = 2
def _execute_command(cmd, argv, commands):
help_args = ['help', '-help', '--help', '?']
if (len(argv) == 0 and not cmd.cb) or (len(argv) > 0 and argv[0] in help_args):
_print_help(cmd, commands)
if len(argv) == 0:
print('Error: Please specify Command!')
print('')
return -1
return 0
if cmd.cb:
args = {}
if cmd.args:
for x in range(0, len(argv)):
arg_name = argv[x]
pre = _PRE_UNAMED
if arg_name.find('--') == 0:
pre = _PRE_NAME
elif arg_name.find('-') == 0:
pre = _PRE_SHORT
found = False
for arg in cmd.args:
cc = arg_name[pre:].split('=')
if (pre == _PRE_NAME and arg.name == cc[0]) or (pre == _PRE_SHORT and arg.short == cc[0]) or (pre == _PRE_UNAMED and arg.flags & UNNAMED and arg.name not in args):
found = True
if arg.flags & VALUE or pre == _PRE_UNAMED:
idx = 0 if pre == _PRE_UNAMED else 1
val = ''.join(cc[idx:]) if len(cc) > idx else ''
if val == '':
_print_help(cmd, commands)
print('Error: Argument \'{}\': Expects to have a value!'.format(arg.name))
if arg.flags & UNNAMED:
print(' Example: {} <{}>'.format(' '.join(commands), arg.name))
else:
print(' Example: {} --{}=<{}>'.format(' '.join(commands), arg.name, arg.exmpl if arg.exmpl else 'foo'))
print('')
return -1
v_str = val.strip('\'')
if arg.convert:
try:
args[arg.name] = arg.convert(v_str)
except:
_print_help(cmd, commands)
print('Error: Argument \'{}\': Value not expected type!'.format(arg.name))
if arg.exmpl:
if arg.flags & UNNAMED:
print(' Example: {} <{}>'.format(' '.join(commands), arg.exmpl))
else:
print(' Example: {} --{}=<{}>'.format(' '.join(commands), arg.name, arg.exmpl))
print('')
return -1
else:
args[arg.name] = v_str
else:
args[arg.name] = True
break
if not found:
_print_help(cmd, commands)
print('Error: Argument \'{}\': Unknown Argument!'.format(arg_name))
print('')
return -1
for arg in cmd.args:
if not arg.name in args:
if arg.default is not None:
args[arg.name] = arg.default
elif arg.flags & REQUIRED:
_print_help(cmd, commands)
if arg.flags & UNNAMED:
print('Error: Argument \'{}\': Required Argument not set!'.format(arg.name))
print(' Example: {} <{}>'.format(' '.join(commands), arg.exmpl if arg.exmpl else arg.name))
print('')
else:
print('Error: Argument \'{}\': Required Argument not set!'.format(arg.name))
print(' Example: {} --{}=<{}>'.format(' '.join(commands), arg.name, arg.exmpl if arg.exmpl else 'foo'))
print('')
return -1
else:
args[arg.name] = None
res = cmd.cb(args)
return res if res else 0
if cmd.cmds:
if not argv[0] in cmd.cmds:
_print_help(cmd, commands)
print(' Error: Command \'{}\': Not a valid command!'.format(argv[0]))
print('')
return -1
commands.append(argv[0])
return _execute_command(cmd.cmds[argv[0]], argv[1:], commands)
return -2
def _print_help(cmd, commands, pre_len=0, post_len=0):
    """Collect the help lines for `cmd` and print them as two columns,
    the left column padded to the widest entry."""
    collected = []
    width = _collect_help(cmd, commands, 0, 0, collected, 0)
    for left, right in collected:
        suffix = ' : {}'.format(right) if right else ''
        print('{}{}'.format(left.ljust(width), suffix))
def _collect_help(cmd, commands, pre_len, post_len, lines, n):
    """Recursively append [left, right] help-line pairs for `cmd` (and its
    subcommands) to `lines`.

    pre_len/post_len control indentation and name-column padding for nested
    commands; `n` is the running maximum left-column width, which is
    returned so the caller can align the output.
    """
    if pre_len == 0:
        prefix = '   '
    else:
        prefix = ''.ljust(pre_len)
    names_args = []
    unamed_args = []
    arg_name_maxlen = 0
    arg_text_maxlen = 0
    arg_short_maxlen = 0
    if cmd.cb:
        if cmd.args:
            # Pre-render every argument and track column widths so the
            # argument tables line up.
            for arg in cmd.args:
                if arg.short:
                    arg_short = ' (-{})'.format(arg.short)
                else:
                    arg_short = ''
                if arg.flags & UNNAMED:
                    arg_text = '<{}>'.format(arg.name)
                else:
                    arg_text = '--{}{}'.format(arg.name, '=<{}>'.format(arg.exmpl if arg.exmpl else 'foo') if arg.flags & VALUE else '')
                if arg.default is not None:
                    arg_desc = '{} (default: {})'.format(arg.desc, arg.default)
                elif arg.flags & REQUIRED:
                    arg_desc = arg.desc
                else:
                    arg_desc = '{} (optional)'.format(arg.desc)
                l = len(arg_text)
                if l > arg_text_maxlen:
                    arg_text_maxlen = l
                l = len(arg_short)
                if l > arg_short_maxlen:
                    arg_short_maxlen = l
                l = len(arg.name)
                if l > arg_name_maxlen:
                    arg_name_maxlen = l
                pa = _PrintArg(
                    name=arg.name,
                    text=arg_text,
                    short=arg_short,
                    desc=arg_desc)
                if arg.flags & UNNAMED:
                    unamed_args.append(pa)
                else:
                    names_args.append(pa)
    cmd_text_maxlen = 0
    cmdlist = []
    if cmd.cmds:
        for cmd_name in cmd.cmds:
            cmdlist.append(cmd_name)
            l = len(cmd_name)
            if l > cmd_text_maxlen:
                cmd_text_maxlen = l
    # Top-level usage shows the full command path; nested entries only show
    # their own name, padded to the sibling column width.
    if pre_len == 0:
        cmd_name = ' '.join(commands).ljust(post_len)
        #cmd_list_str = ' {{{}}}'.format('|'.join(cmdlist)) if cmd.cmds else ''
    else:
        cmd_name = commands[len(commands)-1].ljust(post_len)
        #cmd_list_str = ' <Command>' if cmd.cmds else ''
    cmd_text = '{}{}{}'.format(
        #cmd_list_str,
        ' <Command>' if cmd.cmds else '',
        ' <Arguments>' if len(unamed_args) > 0 else '',
        ' [Options]' if len(names_args) > 0 else '')
    cmd_desc = cmd.desc if cmd.desc else commands[len(commands)-1]
    if pre_len == 0:
        n = _add_line(lines, 'Usage:', None, n)
    n = _add_line(lines, '{}{}{}'.format(
        prefix,
        cmd_name,
        cmd_text),
        cmd_desc, n)
    if len(unamed_args) > 0 and pre_len == 0:
        n = _add_line(lines, '', None, n)
        n = _add_line(lines, 'Arguments:', None, n)
    for arg in unamed_args:
        n = _add_line(lines, '{}{}{}{}'.format(
            prefix,
            ''.ljust(post_len + 1),
            '{}'.format(arg.text).ljust(arg_text_maxlen),
            '{}'.format(arg.short).ljust(arg_short_maxlen)),
            arg.desc if arg.desc else arg.name, n)
    if len(names_args) > 0 and pre_len == 0:
        n = _add_line(lines, '', None, n)
        n = _add_line(lines, 'Options:', None, n)
    # Options are listed alphabetically; unnamed args keep their order.
    names_args = sorted(names_args, key=lambda x: x.name)
    for arg in names_args:
        n = _add_line(lines, '{}{}{}{}'.format(
            prefix,
            ''.ljust(post_len + 1),
            '{}'.format(arg.text).ljust(arg_text_maxlen),
            '{}'.format(arg.short).ljust(arg_short_maxlen)),
            arg.desc if arg.desc else arg.name, n)
    if cmd.cmds:
        if len(cmd.cmds) > 0 and pre_len == 0:
            pre_len = 3
            n = _add_line(lines, '', None, n)
            n = _add_line(lines, 'Commands:', None, n)
        else:
            pre_len = pre_len + len(cmd_name) + 1
        for cmd_name, cmd in cmd.cmds.items():
            n = _collect_help(cmd, commands + [cmd_name], pre_len, cmd_text_maxlen, lines, n)
        n = _add_line(lines, '', None, n)
    elif pre_len == 0:
        n = _add_line(lines, '', None, n)
    return n
def _add_line(lines, ll, lr, n):
lines.append([ll, lr])
return max(n, len(ll))
| StarcoderdataPython |
def main(string: str) -> int:
    # NOTE(review): str.count() requires at least one argument (the
    # substring to count), so this call raises TypeError for every input.
    # The intended substring is missing -- confirm what should be counted
    # and pass it explicitly, e.g. string.count(sub).
    return string.count()
| StarcoderdataPython |
1633974 | import requests
from bs4 import BeautifulSoup
from credentials import LOGIN_PASSWORD, LOGIN_USERNAME, HEADERS, SERVER_URL, VILLAGE_URL
def logged_in_session():
    """Log in to the Travian server and return an authenticated Session.

    Fetches the village page first because the login form embeds a hidden
    'login' token that must be echoed back with the credentials.
    NOTE(review): the '<PASSWORD>' placeholders below are redaction
    artifacts from dataset anonymization -- restore LOGIN_PASSWORD (and the
    original 's1' value) before running this code.
    """
    session = requests.Session()
    session.headers = HEADERS
    html = session.get(VILLAGE_URL).text
    resp_parser = BeautifulSoup(html, 'html.parser')
    # Extract the per-session hidden login token from the form.
    login_value = resp_parser.find('input', {'name': 'login'})['value']
    data = {
        'name': LOGIN_USERNAME,
        'password': <PASSWORD>,
        's1': '<PASSWORD>',
        'w': '',
        'login': login_value
    }
    session.post(f'{SERVER_URL}login.php', data=data)
    return session
| StarcoderdataPython |
1733058 | <filename>tests/test_multiply.py
import optmod
import unittest
import numpy as np
class TestMultiply(unittest.TestCase):
    def test_contruction(self):
        """multiply() should wrap both operands as Expressions, record its
        name and arity, and reject argument lists that are not exactly two
        expressions or not given as a list.

        NOTE: the method name keeps its historical typo ('contruction') so
        the test id stays stable.
        """
        x = optmod.variable.VariableScalar(name='x')
        f = optmod.function.multiply([x, optmod.expression.make_Expression(1.)])
        self.assertTrue(isinstance(f, optmod.function.multiply))
        self.assertEqual(f.name, 'multiply')
        self.assertEqual(len(f.arguments), 2)
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(f.arguments[1].is_constant())
        self.assertEqual(f.arguments[1].get_value(), 1.)
        # More than two args, fewer than two args, and a non-list argument
        # must all be rejected.
        self.assertRaises(AssertionError, optmod.function.multiply, [1., x, 2.])
        self.assertRaises(AssertionError, optmod.function.multiply, [x])
        self.assertRaises(TypeError, optmod.function.multiply, x)
def test_constant(self):
a = optmod.constant.Constant(4.)
b = optmod.constant.Constant(5.)
f = a*b
self.assertTrue(f.is_constant(20.))
def test_scalar_scalar(self):
rn = optmod.utils.repr_number
x = optmod.variable.VariableScalar(name='x', value=2.)
y = optmod.variable.VariableScalar(name='y', value=3.)
f = x*2
self.assertTrue(isinstance(f, optmod.function.multiply))
self.assertTrue(f.arguments[0] is x)
self.assertTrue(f.arguments[1].is_constant())
self.assertEqual(f.get_value(), 4.)
self.assertEqual(str(f), 'x*%s' %rn(2.))
f = 2.*x
self.assertTrue(isinstance(f, optmod.function.multiply))
self.assertTrue(f.arguments[0] is x)
self.assertTrue(f.arguments[1].is_constant())
self.assertEqual(f.get_value(), 4.)
self.assertEqual(str(f), 'x*%s' %rn(2.))
f = x*y
self.assertTrue(isinstance(f, optmod.function.multiply))
self.assertTrue(f.arguments[0] is x)
self.assertTrue(f.arguments[1] is y)
self.assertEqual(f.get_value(), 6)
self.assertEqual(str(f), 'x*y')
f = x*(y+3.)
self.assertTrue(isinstance(f, optmod.function.multiply))
self.assertTrue(f.arguments[0] is x)
self.assertTrue(f.arguments[1].is_function())
self.assertEqual(f.get_value(), 12)
self.assertEqual(str(f), 'x*(y + %s)' %rn(3.))
f = (1-y)*x
self.assertTrue(isinstance(f, optmod.function.multiply))
self.assertTrue(f.arguments[0].is_function())
self.assertTrue(f.arguments[1] is x)
self.assertEqual(f.get_value(), -4)
self.assertEqual(str(f), '(%s + y*%s)*x' %(rn(1.), rn(-1.)))
f = (4.*x)*(3*y)
self.assertTrue(isinstance(f, optmod.function.multiply))
self.assertTrue(f.arguments[0].is_function())
self.assertTrue(f.arguments[1].is_function())
self.assertEqual(f.get_value(), 72)
self.assertEqual(str(f), 'x*%s*y*%s' %(rn(4), rn(3)))
f = -x*5
self.assertTrue(isinstance(f, optmod.function.multiply))
self.assertTrue(f.arguments[1].is_constant())
self.assertTrue(f.arguments[0].is_variable())
self.assertEqual(str(f), 'x*%s' %rn(-5))
self.assertEqual(f.get_value(), -10.)
f = y*-x
self.assertTrue(isinstance(f, optmod.function.multiply))
self.assertTrue(f.arguments[0] is y)
self.assertTrue(f.arguments[1].is_function())
self.assertEqual(str(f), 'y*x*%s' %rn(-1))
self.assertEqual(f.get_value(), -6.)
f = optmod.sin(x)*y
self.assertTrue(isinstance(f, optmod.function.multiply))
self.assertTrue(f.arguments[0].is_function())
self.assertTrue(f.arguments[1] is y)
self.assertEqual(str(f), 'sin(x)*y')
self.assertEqual(f.get_value(), np.sin(2.)*3.)
f = x*optmod.sin(y)
self.assertTrue(isinstance(f, optmod.function.multiply))
self.assertTrue(f.arguments[0] is x)
self.assertTrue(f.arguments[1].is_function())
self.assertEqual(str(f), 'x*sin(y)')
self.assertEqual(f.get_value(), np.sin(3.)*2.)
def test_scalar_matrix(self):
rn = optmod.utils.repr_number
value = [[1., 2., 3.], [4., 5., 6.]]
x = optmod.variable.VariableScalar(name='x', value=2.)
y = optmod.variable.VariableMatrix(name='y', value=value)
r = np.random.random((2,3))
def test_matrix_matrix(self):
pass
def test_one(self):
x = optmod.variable.VariableScalar(name='x', value=3.)
def test_derivative(self):
rn = optmod.utils.repr_number
x = optmod.variable.VariableScalar(name='x', value=3.)
y = optmod.variable.VariableScalar(name='y', value=4.)
z = optmod.variable.VariableScalar(name='z', value=5.)
f = x*x
fx = f.get_derivative(x)
self.assertEqual(fx.get_value(), 2.*3.)
self.assertEqual(str(fx), 'x + x')
f = x*y
fx = f.get_derivative(x)
fy = f.get_derivative(y)
fz = f.get_derivative(z)
self.assertTrue(fx is y)
self.assertTrue(fy is x)
self.assertTrue(fz.is_constant())
self.assertEqual(fz.get_value(), 0)
f = x*y*z
fx = f.get_derivative(x)
fy = f.get_derivative(y)
fz = f.get_derivative(z)
self.assertEqual(str(fx), 'z*y')
self.assertEqual(fx.get_value(), 20.)
self.assertEqual(str(fy), 'z*x')
self.assertEqual(fy.get_value(), 15.)
self.assertEqual(str(fz), 'x*y')
self.assertEqual(fz.get_value(), 12.)
def test_analyze(self):
x = optmod.variable.VariableScalar(name='x', value=3.)
y = optmod.variable.VariableScalar(name='y', value=4.)
z = optmod.variable.VariableScalar(name='z', value=5.)
f = 3*x
prop = f.__analyze__()
self.assertTrue(prop['affine'])
self.assertEqual(prop['b'], 0.)
self.assertEqual(len(prop['a']), 1)
self.assertEqual(prop['a'][x], 3.)
f = x*7
prop = f.__analyze__()
self.assertTrue(prop['affine'])
self.assertEqual(prop['b'], 0.)
self.assertEqual(len(prop['a']), 1)
self.assertEqual(prop['a'][x], 7.)
f = y*x
prop = f.__analyze__()
self.assertFalse(prop['affine'])
self.assertEqual(prop['b'], 0.)
self.assertEqual(len(prop['a']), 2)
self.assertTrue(x in prop['a'])
self.assertTrue(y in prop['a'])
f = y*x*z
prop = f.__analyze__()
self.assertFalse(prop['affine'])
self.assertEqual(prop['b'], 0.)
self.assertEqual(len(prop['a']), 3)
self.assertTrue(x in prop['a'])
self.assertTrue(y in prop['a'])
self.assertTrue(z in prop['a'])
def test_std_components(self):
x = optmod.variable.VariableScalar(name='x', value=3.)
y = optmod.variable.VariableScalar(name='y', value=4.)
z = optmod.variable.VariableScalar(name='z', value=5.)
f = x*y
comp = f.__get_std_components__()
phi = comp['phi']
gphi_list = comp['gphi_list']
Hphi_list = comp['Hphi_list']
self.assertTrue(phi is f)
self.assertEqual(len(gphi_list), 2)
v, exp = gphi_list[0]
self.assertTrue(v is x)
self.assertTrue(exp is y)
v, exp = gphi_list[1]
self.assertTrue(v is y)
self.assertTrue(exp is x)
self.assertEqual(len(Hphi_list), 1)
v1, v2, exp = Hphi_list[0]
self.assertTrue(v1 is x)
self.assertTrue(v2 is y)
self.assertTrue(exp.is_constant(1.))
f = x*x
comp = f.__get_std_components__()
phi = comp['phi']
gphi_list = comp['gphi_list']
Hphi_list = comp['Hphi_list']
self.assertTrue(phi is f)
self.assertEqual(len(gphi_list), 1)
v, exp = gphi_list[0]
self.assertTrue(v is x)
self.assertTrue(str(exp), 'x + x')
self.assertEqual(len(Hphi_list), 1)
v1, v2, exp = Hphi_list[0]
self.assertTrue(v1 is x)
self.assertTrue(v2 is x)
self.assertTrue(exp.is_constant(2.))
| StarcoderdataPython |
1782608 | <gh_stars>0
import win32com.client

# Script: dump every stock code known to the CYBOS Plus COM API, with its
# section kind, standard price, and name, for both markets.

# Check connection status before making any further COM calls.
objCpCybos = win32com.client.Dispatch("CpUtil.CpCybos")
bConnect = objCpCybos.IsConnect
if (bConnect == 0):
    # Runtime message (Korean): "PLUS is not connected properly."
    print("PLUS가 정상적으로 연결되지 않음. ")
    exit()

# Fetch the stock-code lists per market via the code manager.
objCpCodeMgr = win32com.client.Dispatch("CpUtil.CpCodeMgr")
codeList = objCpCodeMgr.GetStockListByMarket(1)  # market 1: exchange
codeList2 = objCpCodeMgr.GetStockListByMarket(2)  # market 2: KOSDAQ

# Header (Korean): "exchange stock codes" + count, then one line per code.
print("거래소 종목코드", len(codeList))
for i, code in enumerate(codeList):
    secondCode = objCpCodeMgr.GetStockSectionKind(code)
    name = objCpCodeMgr.CodeToName(code)
    stdPrice = objCpCodeMgr.GetStockStdPrice(code)
    print(i, code, secondCode, stdPrice, name)

# Header (Korean): "KOSDAQ stock codes" + count, then one line per code.
print("코스닥 종목코드", len(codeList2))
for i, code in enumerate(codeList2):
    secondCode = objCpCodeMgr.GetStockSectionKind(code)
    name = objCpCodeMgr.CodeToName(code)
    stdPrice = objCpCodeMgr.GetStockStdPrice(code)
    print(i, code, secondCode, stdPrice, name)

# Total (Korean): "exchange + KOSDAQ stock codes".
print("거래소 + 코스닥 종목코드 ",len(codeList) + len(codeList2))
| StarcoderdataPython |
3262685 | <filename>photoplaces/photoplaces_web/photo_entry_normalization.py
from models import PhotoLocationEntry, NormalizedPhotoSet, NormalizedPhotoEntry
import numpy as np
from math_functions.cyclical_math import *
from math_functions.normalization import *
from Queue import Queue
from threading import Thread, Event
def visualize_counts(qs, field):
    """Print an ASCII histogram of *qs* grouped by the distinct values of
    *field*: one row per value, counting entries within +/-0.001 of it and
    drawing a bar scaled so that 20000 entries == 60 characters."""
    distinct_rows = qs.order_by(field).values(field).distinct()
    full_scale = 20000
    for value in (row[field] for row in distinct_rows):
        window = {field + "__gte": value - 0.001, field + "__lte": value + 0.001}
        c = qs.filter(**window).count()
        bar = "#" * int(float(c) / full_scale * 60)
        print(("%2.6f: %6d " % (value, c)) + bar)
def normalize_photo_entry(entry, target_set):
    """Create and persist the z-scored counterpart of *entry* inside *target_set*."""
    loc_x, loc_y = entry.location[0], entry.location[1]
    normalized = NormalizedPhotoEntry(
        actual_photo=entry,
        normalized_set=target_set,
        location_x=z_score(loc_x, target_set.location_x_mean, target_set.location_x_deviation),
        location_y=z_score(loc_y, target_set.location_y_mean, target_set.location_y_deviation),
        # Month and hour wrap around, so the cyclical z-score is used with
        # the respective cycle lengths (12 months, 24 hours).
        month=cyclical_z_score(entry.time.month, target_set.month_mean, target_set.month_deviation, 12),
        hour=cyclical_z_score(entry.time.hour, target_set.hour_mean, target_set.hour_deviation, 24))
    normalized.save()
def normalize_values(normalized_set):
    """Normalize every PhotoLocationEntry into *normalized_set* with a small
    thread pool, then derive the z-score cycle lengths from the spacing of
    the distinct normalized month/hour values.

    Entries that already have a normalized counterpart are skipped; progress
    is printed every 100 completed entries.
    """
    count = PhotoLocationEntry.objects.all().count()

    def worker():
        # Daemon worker: pull entries off the queue until the process exits.
        while True:
            e = q.get()
            if NormalizedPhotoEntry.objects.filter(actual_photo = e).count() == 0:
                normalize_photo_entry(e, normalized_set)
                done = NormalizedPhotoEntry.objects.all().count()
                if done % 100 == 0 or done == 1:
                    print("%d / %d (%3.1f) done" % (done, count, float(done) / count * 100))
            q.task_done()

    q = Queue()
    for i in xrange(4):
        t = Thread(target = worker)
        t.daemon = True
        t.start()
    for v in PhotoLocationEntry.objects.all():
        q.put(v)
    print("All in Queue, waiting...")
    q.join()
    # Untested, only done in interactive console...
    # BUG FIX: the original referenced an undefined name ``ns`` below; it
    # clearly meant the *normalized_set* that was just filled above.
    hours = normalized_set.entries.order_by("hour").values("hour").distinct()
    normalized_set.hour_z_cycle_length = abs(hours[0]["hour"] - hours[1]["hour"]) * 24
    months = normalized_set.entries.order_by("month").values("month").distinct()
    normalized_set.month_z_cycle_length = abs(months[0]["month"] - months[1]["month"]) * 12
    # NOTE(review): the cycle lengths are assigned but never saved here --
    # confirm callers persist normalized_set afterwards.
    print("All done")
def create_normalized_set():
    """Compute per-field statistics (mean and mean absolute deviation) over
    all PhotoLocationEntry objects and persist them as a NormalizedPhotoSet.

    Month and hour are cyclical quantities, so their mean/MAD use the
    cycle-aware helpers with cycle lengths 12 and 24 respectively.
    """
    print("Getting objects...")
    photos = PhotoLocationEntry.objects.all()
    print("creating NormalizedPhotoSet...")
    result = NormalizedPhotoSet()

    print("Calculating mean month...")
    month_values = [p.time.month for p in photos]
    mean_month = cycle_avg(month_values, 12)
    print("It is %f, saving..." % mean_month)
    result.month_mean = mean_month

    print("Calculating mean hour...")
    hour_values = [p.time.hour for p in photos]
    mean_hour = cycle_avg(hour_values, 24)
    print("It is %f, saving..." % mean_hour)
    result.hour_mean = mean_hour

    print("Calculating mean x...")
    xs = [p.location[0] for p in photos]
    mean_x = np.mean(xs)
    print("It is %f, saving..." % mean_x)
    result.location_x_mean = mean_x

    print("Calculating mean y...")
    ys = [p.location[1] for p in photos]
    mean_y = np.mean(ys)
    print("It is %f, saving..." % mean_y)
    result.location_y_mean = mean_y

    print("Calculating month MAD...")
    mad_month = mean_absolute_deviation(
        month_values, mean_month, lambda a, b: cyclical_distance(a, b, 12))
    print("It is %f, saving..." % mad_month)
    result.month_deviation = mad_month

    print("Calculating hour MAD...")
    mad_hour = mean_absolute_deviation(
        hour_values, mean_hour, lambda a, b: cyclical_distance(a, b, 24))
    print("It is %f, saving..." % mad_hour)
    result.hour_deviation = mad_hour

    print("Calculating x MAD...")
    mad_x = mean_absolute_deviation(xs, mean_x)
    print("It is %f, saving..." % mad_x)
    result.location_x_deviation = mad_x

    print("Calculating y MAD...")
    mad_y = mean_absolute_deviation(ys, mean_y)
    print("It is %f, saving..." % mad_y)
    result.location_y_deviation = mad_y

    result.save()
    print("All done")
3323810 | <reponame>darienmorrow/research_kit
import numpy as np
import WrightTools as wt
def gauss(t, t0, fwhm):
    """Unit-amplitude Gaussian centered at *t0* with full width at half maximum *fwhm*."""
    # Convert FWHM to the Gaussian standard deviation.
    sd = fwhm / (2 * np.sqrt(2 * np.log(2)))
    return np.exp(-((t - t0) ** 2) / (2 * sd ** 2))
def exp(t, t1, A1, B, t0):
    """Heaviside-gated single-exponential decay: A1*exp(-t/t1) + B for t >= t0.

    The sample exactly at t0 is halved (H(0) = 1/2 convention).  Note that
    the decay argument is ``t`` itself, not ``t - t0``.
    """
    result = np.zeros(t.size)
    after = t >= t0
    result[after] = A1 * np.exp(-t[after] / t1)
    result[t == t0] *= 0.5
    result += B
    return result
def biexp(t, t1, A1, t2, A2, B, t0):
    """Heaviside-gated bi-exponential decay plus baseline *B*; the sample
    exactly at t0 is halved (H(0) = 1/2 convention)."""
    result = np.zeros(t.size)
    after = t >= t0
    tt = t[after]
    result[after] = A1 * np.exp(-tt / t1) + A2 * np.exp(-tt / t2)
    result[t == t0] *= 0.5
    return result + B
def triexp(t, t1, A1, t2, A2, t3, A3, B, t0):
    """Heaviside-gated tri-exponential decay plus baseline *B*; the sample
    exactly at t0 is halved (H(0) = 1/2 convention)."""
    result = np.zeros(t.size)
    after = t >= t0
    tt = t[after]
    result[after] = (
        A1 * np.exp(-tt / t1)
        + A2 * np.exp(-tt / t2)
        + A3 * np.exp(-tt / t3)
    )
    result[t == t0] *= 0.5
    return result + B
def exp_fit_func(p, x):
    """Single-exponential decay convolved with a Gaussian IRF, sampled at *x*.

    Oversamples on a uniform grid (np.convolve needs uniform spacing),
    convolves with the area-normalized instrument response, then interpolates
    back onto the original grid.  p = [t1, A1, B, t0, fwhm].
    """
    grid = np.linspace(x.min(), x.max(), 1024 * 4)
    t1, A1, B, t0, fwhm = p
    # All parameters except t0 enter as magnitudes.
    t1, A1, B, fwhm = np.abs(t1), np.abs(A1), np.abs(B), np.abs(fwhm)
    irf = gauss(grid - grid.mean(), t0, fwhm)
    irf /= irf.sum()  # area-normalize so the convolution preserves amplitude
    smeared = np.convolve(exp(grid, t1, A1, B, t0), irf, mode="same")
    return np.interp(x, grid, smeared)
def biexp_fit_func(p, x):
    """Bi-exponential decay convolved with a Gaussian IRF, sampled at *x*.

    p = [t1, A1, t2, A2, B, t0, fwhm].  See exp_fit_func for the
    oversample/convolve/interpolate strategy.
    """
    grid = np.linspace(x.min(), x.max(), 1024 * 4)
    t1, A1, t2, A2, B, t0, fwhm = p
    # All parameters except t0 enter as magnitudes.
    t1, A1, t2, A2, B, fwhm = (
        np.abs(t1),
        np.abs(A1),
        np.abs(t2),
        np.abs(A2),
        np.abs(B),
        np.abs(fwhm),
    )
    irf = gauss(grid - grid.mean(), t0, fwhm)
    irf /= irf.sum()  # area-normalized instrument response
    smeared = np.convolve(biexp(grid, t1, A1, t2, A2, B, t0), irf, mode="same")
    return np.interp(x, grid, smeared)
def triexp_fit_func(p, x):
    """Tri-exponential decay convolved with a Gaussian IRF, sampled at *x*.

    p = [t1, A1, t2, A2, t3, A3, B, t0, fwhm].  See exp_fit_func for the
    oversample/convolve/interpolate strategy.
    """
    grid = np.linspace(x.min(), x.max(), 1024 * 4)
    t1, A1, t2, A2, t3, A3, B, t0, fwhm = p
    # All parameters except t0 enter as magnitudes.
    t1, A1, t2, A2, t3, A3, B, fwhm = (
        np.abs(t1),
        np.abs(A1),
        np.abs(t2),
        np.abs(A2),
        np.abs(t3),
        np.abs(A3),
        np.abs(B),
        np.abs(fwhm),
    )
    irf = gauss(grid - grid.mean(), t0, fwhm)
    irf /= irf.sum()  # area-normalized instrument response
    smeared = np.convolve(triexp(grid, t1, A1, t2, A2, t3, A3, B, t0), irf, mode="same")
    return np.interp(x, grid, smeared)
def exp_param_guess(x, y):
    """Heuristic starting parameters [t1, A1, B, t0, fwhm] for exp_fit_func."""
    peak_idx = int(np.argmax(y))
    t0 = x[peak_idx]              # rise peak ~ time zero
    baseline = np.mean(y[-100:])  # tail average ~ baseline
    irf_width = (x.min() + t0) / 2
    amp = y.max() / 2
    tau = np.mean(x)
    return [tau, amp, baseline, t0, irf_width]
def biexp_param_guess(x, y):
    """Heuristic starting parameters [t1, A1, t2, A2, B, t0, fwhm] for biexp_fit_func."""
    peak_idx = int(np.argmax(y))
    t0 = x[peak_idx]              # rise peak ~ time zero
    baseline = np.mean(y[-100:])  # tail average ~ baseline
    irf_width = (x.min() + t0) / 2
    fast_amp = y.max() / 2
    fast_tau = np.mean(x)
    # Second component: slower (x10) and weaker (/5) than the first.
    slow_amp = fast_amp / 5
    slow_tau = fast_tau * 10
    return [fast_tau, fast_amp, slow_tau, slow_amp, baseline, t0, irf_width]
def triexp_param_guess(x, y):
    """Heuristic starting parameters [t1, A1, t2, A2, t3, A3, B, t0, fwhm]
    for triexp_fit_func."""
    peak_idx = int(np.argmax(y))
    t0 = x[peak_idx]              # rise peak ~ time zero
    baseline = np.mean(y[-100:])  # tail average ~ baseline
    irf_width = (x.min() + t0) / 2
    fast_amp = y.max() / 2
    fast_tau = np.mean(x)
    # Second and third components: progressively slower and weaker.
    mid_amp = fast_amp / 5
    mid_tau = fast_tau * 10
    slow_amp = fast_amp / 10
    slow_tau = fast_tau * 100
    return [fast_tau, fast_amp, mid_tau, mid_amp, slow_tau, slow_amp,
            baseline, t0, irf_width]
def sqrt_fit(p0, x, y, func):
    """Least-squares fit of sqrt(func) against sqrt(y).

    Fitting in square-root space de-weights the large-amplitude early-time
    points relative to a plain least-squares fit.  Returns (pfit, perr).
    """
    target = np.sqrt(y)

    def sqrt_model(p, t):
        return np.sqrt(func(p, t))

    return wt.kit.leastsqfitter(p0, x, target, sqrt_model)
def exp_fit(x, y):
    """Fit a Gaussian-convolved single exponential to (x, y).

    Returns (pfit, perr, ymodel) where ymodel is the fit evaluated at x.
    """
    guess = exp_param_guess(x, y)
    pfit, perr = sqrt_fit(guess, x, y, exp_fit_func)
    return pfit, perr, exp_fit_func(pfit, x)
def biexp_fit(x, y):
    """Fit a Gaussian-convolved bi-exponential to (x, y).

    Returns (pfit, perr, ymodel) where ymodel is the fit evaluated at x.
    """
    guess = biexp_param_guess(x, y)
    pfit, perr = sqrt_fit(guess, x, y, biexp_fit_func)
    return pfit, perr, biexp_fit_func(pfit, x)
def triexp_fit(x, y):
    """Fit a Gaussian-convolved tri-exponential to (x, y).

    Returns (pfit, perr, ymodel) where ymodel is the fit evaluated at x.
    """
    guess = triexp_param_guess(x, y)
    pfit, perr = sqrt_fit(guess, x, y, triexp_fit_func)
    return pfit, perr, triexp_fit_func(pfit, x)
| StarcoderdataPython |
188416 | <gh_stars>0
import asyncio
import aiohttp
from discord import Embed
from discord.ext import commands, tasks
from ..utils.config import BOT_COLOR, API_KEY, getsetTime, getUser
DEBUG_MODE = False
class Modio(commands.Cog):
    """Discord cog that polls the mod.io API for new mod comments every 10
    minutes and forwards them to a channel (or DMs the mod author).

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; nesting of the posting loop follows the apparent intent.
    """

    # guild: int = 422847864172183562 # UAMT server
    # channel: int = 518607442901204992 # Dyno Logs channel
    # channel = 535760186351157249 # Bot commands channel
    # Destination channel id; DEBUG_MODE redirects output to the logs channel.
    channel = 535760186351157249 if not DEBUG_MODE else 518607442901204992
    # mod.io REST endpoint template; {game} is filled per tracked game.
    prefix = "https://api.mod.io/v1/games/{game}/mods" # 34 = Aground
    headers = {
        'Accept': 'application/json'
    }

    def __init__(self, bot):
        """Store the bot reference and start the polling loop if idle."""
        self.bot = bot
        if not self.new_comments.is_running():
            self.new_comments.start()

    @staticmethod
    async def fetch(session, url, **kargs):
        """GET *url* with the given session and return the parsed JSON body."""
        async with session.get(url, **kargs) as response:
            return await response.json()

    async def getEvents(self, prefix: str, session, timeGate: int):
        """Return MOD_COMMENT_ADDED events newer than *timeGate* (max 20).

        Raises a generic Exception on any non-200 response.
        """
        params = {"_limit": 20, "api_key": API_KEY, "date_added-min": timeGate, "event_type": "MOD_COMMENT_ADDED"}
        async with session.get(
            f"{prefix}/events",
            params = params,
            headers = self.headers
        ) as events:
            if events.status != 200:
                print(events)
                raise Exception(f"Request received Status Code {events.status} when asking for recent events.")
            return (await events.json())["data"]

    async def getModsAndComments(self, prefix, session, events, timeGate):
        """Fetch, concurrently, the mod object and its recent comments for
        every distinct mod id in *events*.

        Returns a flat list alternating [mod, comments, mod, comments, ...],
        matching the order requests were scheduled.
        """
        links = []
        visited_mods = set()
        if DEBUG_MODE:
            comment_params = {"api_key": API_KEY, "_limit": 4}
        else:
            comment_params = {"api_key": API_KEY, "date_added-min": timeGate}
        for event in events:
            mod_id = event["mod_id"]
            # New event(s) on mod with ID: {mid}
            # Several events can point at the same mod; fetch each mod once.
            if mod_id in visited_mods:
                continue
            visited_mods.add(mod_id)
            links.append(asyncio.ensure_future(self.fetch(
                url = f"{prefix}/{mod_id}",
                session = session,
                params={"api_key": API_KEY},
                headers= self.headers
            )))
            links.append(asyncio.ensure_future(self.fetch(
                url = f"{prefix}/{mod_id}/comments",
                session = session,
                params = comment_params,
                headers= self.headers
            )))
        return await asyncio.gather(*links)

    async def getGame(self, prefix, session):
        """Return the game object (the endpoint minus the trailing '/mods')."""
        async with session.get(
            prefix[:-5],
            params = {"api_key": API_KEY},
            headers = self.headers
        ) as response:
            return await response.json()

    @tasks.loop(minutes=10)
    async def new_comments(self):
        """Poll each tracked game for new comments and post/DM embeds."""
        if DEBUG_MODE:
            timeGate = None
        else:
            # Small overlap (5s) so comments on the boundary are not missed.
            timeGate = getsetTime("modio", floor=True) - 5
        print("Looking for new comments...")
        async with aiohttp.ClientSession() as session:
            for game in self.bot.games.values():
                prefix = self.prefix.format(game = game)
                if DEBUG_MODE:
                    events = ({'mod_id': 144}, ) # MagicPlus
                    # events = ({'mod_id': 73829}, ) # Expansive Mod
                    # events = ({'mod_id': 161098}, ) # Locatinator
                else:
                    events = await self.getEvents(prefix, session, timeGate)
                if len(events) == 0:
                    print("No new comments")
                    return
                # Shadows the loop variable: from here on `game` is the
                # fetched game object, not the id from self.bot.games.
                game = await self.getGame(prefix, session)
                data = await self.getModsAndComments(prefix, session, events, timeGate)
                channel = self.bot.get_channel(self.channel)
                for mod, comments in zip(data[::2], data[1::2]):
                    if "error" in mod:
                        continue
                    # Ignore mods and comments with error
                    comments = list(filter(lambda c: "error" not in c, reversed(comments["data"])))
                    content = await self.makeFields(comments, session=session, prefix=prefix, mod_id=mod["id"])
                    author = getUser(modio=int(mod["submitted_by"]["id"]))
                    embed = Embed(color = BOT_COLOR, title=f"New comments in {mod['name']}!", url=mod["profile_url"], description=content)
                    embed.set_author(
                        name= game["name"],
                        url = game["profile_url"],
                        icon_url = game["icon"]["thumb_128x128"]
                    )
                    if author:
                        discord_user = await self.bot.fetch_user(author["discord"])
                        embed.set_footer(text=discord_user.display_name, icon_url=discord_user.avatar.url)
                        # DM the mod author only when someone *else* commented.
                        if author["allow_dms"] and any(comment["user"]["id"] != mod["submitted_by"]["id"] for comment in comments):
                            if DEBUG_MODE and str(author["discord"]) != "256442550683041793":
                                raise RuntimeError("Tried to DM someone that is not Etrotta while in DEBUG mode")
                            else:
                                await discord_user.send(embed=embed)
                    else:
                        embed.set_footer(text=mod["submitted_by"]["username"], icon_url=mod["submitted_by"]["avatar"]["thumb_50x50"])
                    await channel.send(embed=embed)
        print("Done looking for new comments.")

    async def makeFields(self, comments, **kwargs):
        """Concatenate the rendered text of every comment in *comments*."""
        content = ""
        for comment in comments:
            content += await self.makeField(comment, **kwargs)
        return content

    async def makeField(self, comment, **kwargs):
        """Render one comment, prefixed by its reply chain when present.

        Known users are rendered as Discord mentions, others by username.
        """
        content = ""
        if comment["reply_id"] != 0:
            content += await self.makeReply(comment["reply_id"], **kwargs)
        user = getUser(modio=int(comment["user"]["id"]))
        if user:
            content += f"**<@{user['discord']}>** said: {comment['content'].strip()}\n"
        else:
            content += f"**{comment['user']['username']}** said: {comment['content'].strip()}\n"
        return content

    async def makeReply(self, reply_id, indent = 1, **kwargs):
        """Recursively render the parent chain of a reply, indenting with
        dots proportional to the nesting depth."""
        comment = await self.fetchReply(reply_id, **kwargs)
        content = ""
        if comment["reply_id"] != 0:
            content += await self.makeReply(comment["reply_id"], indent, **kwargs)
            indent += 1
        if indent == 1:
            content = "\n"
        user = getUser(modio=int(comment["user"]["id"]))
        if user:
            content += f"In response to **<@{user['discord']}>** saying: {comment['content'].strip()}\n{'.'*(indent*4)}"
        else:
            content += f"In response to **{comment['user']['username']}** saying: {comment['content'].strip()}\n{'.'*(indent*4)}"
        return content

    async def fetchReply(self, reply_id, *, session, prefix, mod_id):
        """Fetch a single comment by id and return it (last element of 'data')."""
        async with session.get(f"{prefix}/{mod_id}/comments", params = {"api_key": API_KEY, "id": reply_id}, headers = self.headers) as response:
            return (await response.json())["data"].pop()

    @new_comments.before_loop
    async def before_new_comments(self):
        """Delay the first poll until the bot's database has loaded."""
        await self.bot.db_loaded.wait()
def setup(bot):
    """Extension entry point: register the Modio cog on *bot*."""
    cog = Modio(bot)
    bot.add_cog(cog)
| StarcoderdataPython |
3259913 | <gh_stars>1-10
from sys import argv as CLIARGS
from os import system as run_in_shell, walk
from os.path import join
# Batch-convert every file under the input tree to MP3 via ffmpeg, writing
# all results flat into the output directory (argv[1] = input root,
# argv[2] = output dir).
rootDir = CLIARGS[1]
outDir = CLIARGS[2]
# NOTE(review): file names are interpolated into a shell string; names
# containing quotes will break the command -- confirm inputs are trusted.
cmd = 'ffmpeg -v quiet -y -i "%s" -acodec libmp3lame -abr true "%s"'
filePairs = (
    (join(root, name), join(outDir, name))
    for root, _dirs, names in walk(rootDir)
    for name in names
)
for count, pair in enumerate(filePairs):
    run_in_shell(cmd % pair)
    # '\r' keeps the progress report on a single console line.
    print('%-3d %-100s' % (count, pair[1]), end='\r')
170916 | #!/usr/bin/env python3
# pylint: disable=C0111
import os
from pyndl import count
# Directory containing this test module; resource paths are resolved relative to it.
TEST_ROOT = os.path.dirname(__file__)
# Gzipped event file (trigram cues -> word outcomes) used by the counting tests.
EVENT_RESOURCE_FILE = os.path.join(TEST_ROOT, "resources/event_file_trigrams_to_word.tab.gz")
# Plain-text corpus used by the word/symbol counting test.
CORPUS_RESOURCE_FILE = os.path.join(TEST_ROOT, "resources/corpus.txt")
def test_cues_outcomes():
    """Counting cues/outcomes must give identical results for 1 and 6 processes."""
    n_events, cues, outcomes = count.cues_outcomes(EVENT_RESOURCE_FILE)
    n_events3, cues3, outcomes3 = count.cues_outcomes(EVENT_RESOURCE_FILE,
                                                      number_of_processes=6,
                                                      verbose=True)
    assert n_events == 2772
    assert n_events3 == n_events
    assert cues3 == cues
    assert outcomes3 == outcomes
def test_words_symbols():
    """Word/symbol counts must not depend on the number of worker processes."""
    single = count.words_symbols(CORPUS_RESOURCE_FILE)
    multi = count.words_symbols(CORPUS_RESOURCE_FILE,
                                number_of_processes=3,
                                verbose=True)
    assert single[0] == multi[0]
    assert single[1] == multi[1]
def test_save_load():
    """A counter written with save_counter must round-trip through load_counter."""
    file_name = os.path.join(TEST_ROOT, "temp/cues.tab")
    cues = count.cues_outcomes(EVENT_RESOURCE_FILE)[1]
    count.save_counter(cues, file_name)
    restored = count.load_counter(file_name)
    assert cues == restored
    os.remove(file_name)
| StarcoderdataPython |
3302622 | #!/usr/bin/env python
# encoding: utf-8
row_data = [
{
'images': [
'goods/images/1_P_1449024889889.jpg',
'goods/images/1_P_1449024889264.jpg',
'goods/images/1_P_1449024889726.jpg',
'goods/images/1_P_1449024889018.jpg',
'goods/images/1_P_1449024889287.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'根茎类'
],
'market_price': '¥232元',
'name': '新鲜水果甜蜜香脆单果约800克',
'desc': '食用百香果可以增加胃部饱腹感,减少余热量的摄入,还可以吸附胆固醇和胆汁之类有机分子,抑制人体对脂肪的吸收。因此,长期食用有利于改善人体营养吸收结构,降低体内脂肪,塑造健康优美体态。',
'sale_price': '¥156元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/2_P_1448945810202.jpg',
'goods/images/2_P_1448945810814.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'海鲜水产'
],
'market_price': '¥106元',
'name': '田然牛肉大黄瓜条生鲜牛肉冷冻真空黄牛',
'desc': '前腿+后腿+羊排共8斤,原生态大山放牧羊羔,曾经的皇室贡品,央视推荐,2005年北京招待全球财金首脑。五层专用包装箱+真空包装+冰袋+保鲜箱+顺丰冷链发货,路途保质期8天',
'sale_price': '¥88元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/7_P_1448945104883.jpg',
'goods/images/7_P_1448945104734.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'叶菜类'
],
'market_price': '¥286元',
'name': '酣畅家庭菲力牛排10片澳洲生鲜牛肉团购套餐',
'desc': None,
'sale_price': '¥238元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/47_P_1448946213263.jpg',
'goods/images/47_P_1448946213157.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'根茎类'
],
'market_price': '¥156元',
'name': '日本蒜蓉粉丝扇贝270克6只装',
'desc': None,
'sale_price': '¥108元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/10_P_1448944572085.jpg',
'goods/images/10_P_1448944572532.jpg',
'goods/images/10_P_1448944572872.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'海鲜水产'
],
'market_price': '¥106元',
'name': '内蒙新鲜牛肉1斤清真生鲜牛肉火锅食材',
'desc': None,
'sale_price': '¥88元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/4_P_1448945381985.jpg',
'goods/images/4_P_1448945381013.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'茄果类'
],
'market_price': '¥90元',
'name': '乌拉圭进口牛肉卷特级肥牛卷',
'desc': None,
'sale_price': '¥75元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/8_P_1448945032810.jpg',
'goods/images/8_P_1448945032646.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'进口生鲜'
],
'market_price': '¥150元',
'name': '五星眼肉牛排套餐8片装原味原切生鲜牛肉',
'desc': None,
'sale_price': '¥125元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/11_P_1448944388277.jpg',
'goods/images/11_P_1448944388034.jpg',
'goods/images/11_P_1448944388201.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'海鲜水产'
],
'market_price': '¥31元',
'name': '澳洲进口120天谷饲牛仔骨4份原味生鲜',
'desc': None,
'sale_price': '¥26元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/6_P_1448945167279.jpg',
'goods/images/6_P_1448945167015.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'菌菇类'
],
'market_price': '¥239元',
'name': '潮香村澳洲进口牛排家庭团购套餐20片',
'desc': None,
'sale_price': '¥199元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/9_P_1448944791617.jpg',
'goods/images/9_P_1448944791129.jpg',
'goods/images/9_P_1448944791077.jpg',
'goods/images/9_P_1448944791229.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'根茎类'
],
'market_price': '¥202元',
'name': '爱食派内蒙古呼伦贝尔冷冻生鲜牛腱子肉1000g',
'desc': None,
'sale_price': '¥168元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/3_P_1448945490837.jpg',
'goods/images/3_P_1448945490084.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'精品肉类'
],
'market_price': '¥306元',
'name': '澳洲进口牛尾巴300g新鲜肥牛肉',
'desc': '新鲜羊羔肉整只共15斤,原生态大山放牧羊羔,曾经的皇室贡品,央视推荐,2005年北京招待全球财金首脑。五层专用包装箱+真空包装+冰袋+保鲜箱+顺丰冷链发货,路途保质期8天',
'sale_price': '¥255元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/48_P_1448943988970.jpg',
'goods/images/48_P_1448943988898.jpg',
'goods/images/48_P_1448943988439.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'海鲜水产'
],
'market_price': '¥126元',
'name': '新疆巴尔鲁克生鲜牛排眼肉牛扒1200g',
'desc': None,
'sale_price': '¥88元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/5_P_1448945270390.jpg',
'goods/images/5_P_1448945270067.jpg',
'goods/images/5_P_1448945270432.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'蛋制品'
],
'market_price': '¥144元',
'name': '澳洲进口安格斯牛切片上脑牛排1000g',
'desc': '澳大利亚是国际公认的没有疯牛病和口蹄疫的国家。为了保持澳大利亚产品的高标准,澳大利亚牛肉业和各级政府共同努力简历了严格的标准和体系,以保证生产的整体化和产品的可追溯性',
'sale_price': '¥120元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'images/201705/goods_img/53_P_1495068879687.jpg'
],
'categorys': [
'首页',
'生鲜食品',
'茄果类'
],
'market_price': '¥120元',
'name': '帐篷出租',
'desc': None,
'sale_price': '¥100元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/16_P_1448947194687.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'红酒'
],
'market_price': '¥23元',
'name': '52度茅台集团国隆双喜酒500mlx6',
'desc': '贵州茅台酒厂(集团)保健酒业有限公司生产,是以“龙”字打头的酒水。中国龙文化上下8000年,源远而流长,龙的形象是一种符号、一种意绪、一种血肉相联的情感。',
'sale_price': '¥19元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/14_P_1448947354031.jpg',
'goods/images/14_P_1448947354433.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'饮料/水'
],
'market_price': '¥43元',
'name': '52度水井坊臻酿八號500ml',
'desc': None,
'sale_price': '¥36元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/12_P_1448947547989.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'其他酒品'
],
'market_price': '¥190元',
'name': '53度茅台仁酒500ml',
'desc': None,
'sale_price': '¥158元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/46_P_1448946598711.jpg',
'goods/images/46_P_1448946598301.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'葡萄酒'
],
'market_price': '¥38元',
'name': '双响炮洋酒JimBeamwhiskey美国白占边',
'desc': None,
'sale_price': '¥28元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/21_P_1448946793276.jpg',
'goods/images/21_P_1448946793153.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'饮料/水'
],
'market_price': '¥55元',
'name': '西夫拉姆进口洋酒小酒版',
'desc': None,
'sale_price': '¥46元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/15_P_1448947257324.jpg',
'goods/images/15_P_1448947257580.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'洋酒'
],
'market_price': '¥22元',
'name': '茅台53度飞天茅台500ml',
'desc': None,
'sale_price': '¥18元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/13_P_1448947460386.jpg',
'goods/images/13_P_1448947460276.jpg',
'goods/images/13_P_1448947460353.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'葡萄酒'
],
'market_price': '¥42元',
'name': '52度兰陵·紫气东来1600mL山东名酒',
'desc': None,
'sale_price': '¥35元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/50_P_1448946543091.jpg',
'goods/images/50_P_1448946542182.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'饮料/水'
],
'market_price': '¥24元',
'name': 'JohnnieWalker尊尼获加黑牌威士忌',
'desc': None,
'sale_price': '¥20元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/51_P_1448946466595.jpg',
'goods/images/51_P_1448946466208.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'洋酒'
],
'market_price': '¥31元',
'name': '人头马CLUB特优香槟干邑350ml',
'desc': None,
'sale_price': '¥26元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/17_P_1448947102246.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'啤酒'
],
'market_price': '¥54元',
'name': '张裕干红葡萄酒750ml*6支',
'desc': None,
'sale_price': '¥45元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/20_P_1448946850602.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'葡萄酒'
],
'market_price': '¥46元',
'name': '原瓶原装进口洋酒烈酒法国云鹿XO白兰地',
'desc': None,
'sale_price': '¥38元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/19_P_1448946951581.jpg',
'goods/images/19_P_1448946951726.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'白酒'
],
'market_price': '¥82元',
'name': '法国原装进口圣贝克干红葡萄酒750ml',
'desc': None,
'sale_price': '¥68元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/18_P_1448947011435.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'白酒'
],
'market_price': '¥67元',
'name': '法国百利威干红葡萄酒AOP级6支装',
'desc': None,
'sale_price': '¥56元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/22_P_1448946729629.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'洋酒'
],
'market_price': '¥71元',
'name': '芝华士12年苏格兰威士忌700ml',
'desc': None,
'sale_price': '¥59元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/45_P_1448946661303.jpg'
],
'categorys': [
'首页',
'酒水饮料',
'饮料/水'
],
'market_price': '¥31元',
'name': '深蓝伏特加巴维兰利口酒送预调酒',
'desc': None,
'sale_price': '¥18元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/32_P_1448948525620.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'精选蔬菜'
],
'market_price': '¥43元',
'name': '赣南脐橙特级果10斤装',
'desc': None,
'sale_price': '¥36元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/30_P_1448948663450.jpg',
'goods/images/30_P_1448948662571.jpg',
'goods/images/30_P_1448948663221.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'进口水果'
],
'market_price': '¥11元',
'name': '泰国菠萝蜜16-18斤1个装',
'desc': '【懒人吃法】菠萝蜜果肉,冰袋保鲜,收货就吃,冰爽Q脆甜,2斤装,全国顺丰空运包邮,发出后48小时内可达,一线城市基本隔天可达',
'sale_price': '¥9元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/31_P_1448948598947.jpg',
'goods/images/31_P_1448948598475.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'国产水果'
],
'market_price': '¥22元',
'name': '四川双流草莓新鲜水果礼盒2盒',
'desc': None,
'sale_price': '¥18元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/35_P_1448948333610.jpg',
'goods/images/35_P_1448948333313.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'有机蔬菜'
],
'market_price': '¥67元',
'name': '新鲜头茬非洲冰草冰菜',
'desc': None,
'sale_price': '¥56元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/36_P_1448948234405.jpg',
'goods/images/36_P_1448948234250.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'有机蔬菜'
],
'market_price': '¥6元',
'name': '仿真蔬菜水果果蔬菜模型',
'desc': None,
'sale_price': '¥5元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/33_P_1448948479966.jpg',
'goods/images/33_P_1448948479886.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'精选蔬菜'
],
'market_price': '¥28元',
'name': '现摘芭乐番石榴台湾珍珠芭乐',
'desc': '''海南产精品释迦果,
释迦是水果中的贵族,
产量少,
味道很甜,
奶香十足,
非常可口,
果裹果园顺丰空运,
保证新鲜.果子个大,
一斤1-2个左右,
大个头的果子更尽兴!
''',
'sale_price': '¥23元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/34_P_1448948399009.jpg'
],
'categorys': [
'首页',
'蔬菜水果',
'国产水果'
],
'market_price': '¥46元',
'name': '潍坊萝卜5斤/箱礼盒',
'desc': '脐橙规格是65-90MM左右(标准果果径平均70MM左右,精品果果径平均80MM左右),一斤大概有2-4个左右,脐橙产自江西省赣州市信丰县安西镇,全过程都是采用农家有机肥种植,生态天然',
'sale_price': '¥38元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/43_P_1448948762645.jpg'
],
'categorys': [
'首页',
'休闲食品'
],
'market_price': '¥154元',
'name': '休闲零食膨化食品焦糖/奶油/椒麻味',
'desc': None,
'sale_price': '¥99元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/38_P_1448949220255.jpg'
],
'categorys': [
'首页',
'奶类食品',
'奶粉'
],
'market_price': '¥84元',
'name': '蒙牛未来星儿童成长牛奶骨力型190ml*15盒',
'desc': None,
'sale_price': '¥70元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/44_P_1448948850187.jpg'
],
'categorys': [
'首页',
'奶类食品',
'进口奶品'
],
'market_price': '¥70元',
'name': '蒙牛特仑苏有机奶250ml×12盒',
'desc': None,
'sale_price': '¥32元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'images/201511/goods_img/49_P_1448162819889.jpg'
],
'categorys': [
'首页',
'奶类食品'
],
'market_price': '¥1元',
'name': '1元支付测试商品',
'desc': None,
'sale_price': '¥1元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/40_P_1448949038702.jpg'
],
'categorys': [
'首页',
'奶类食品',
'进口奶品'
],
'market_price': '¥70元',
'name': '德运全脂新鲜纯牛奶1L*10盒装整箱',
'desc': None,
'sale_price': '¥58元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/39_P_1448949115481.jpg'
],
'categorys': [
'首页',
'奶类食品',
'有机奶'
],
'market_price': '¥38元',
'name': '木糖醇红枣早餐奶即食豆奶粉538g',
'desc': None,
'sale_price': '¥32元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/41_P_1448948980358.jpg'
],
'categorys': [
'首页',
'奶类食品',
'原料奶'
],
'market_price': '¥26元',
'name': '高钙液体奶200ml*24盒',
'desc': None,
'sale_price': '¥22元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/37_P_1448949284365.jpg'
],
'categorys': [
'首页',
'奶类食品',
'国产奶品'
],
'market_price': '¥720元',
'name': '新西兰进口全脂奶粉900g',
'desc': None,
'sale_price': '¥600元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'images': [
'goods/images/42_P_1448948895193.jpg'
],
'categorys': [
'首页',
'奶类食品',
'进口奶品'
],
'market_price': '¥43元',
'name': '伊利官方直营全脂营养舒化奶250ml*12盒*2提',
'desc': None,
'sale_price': '¥36元',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥156元',
'images': [
'goods/images/27_P_1448947771805.jpg'
],
'market_price': '¥187元',
'categorys': [
'首页',
'粮油副食',
'厨房调料'
],
'desc': None,
'name': '维纳斯橄榄菜籽油5L/桶',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥15元',
'images': [
'goods/images/23_P_1448948070348.jpg'
],
'market_price': '¥18元',
'categorys': [
'首页',
'粮油副食',
'食用油'
],
'desc': None,
'name': '糙米450gx3包粮油米面',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥45元',
'images': [
'goods/images/26_P_1448947825754.jpg'
],
'market_price': '¥54元',
'categorys': [
'首页',
'粮油副食',
'调味品'
],
'desc': None,
'name': '精炼一级大豆油5L色拉油粮油食用油',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥26元',
'images': [
'goods/images/28_P_1448947699948.jpg',
'goods/images/28_P_1448947699777.jpg'
],
'market_price': '¥31元',
'categorys': [
'首页',
'粮油副食',
'南北干货'
],
'desc': None,
'name': '橄榄玉米油5L*2桶',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥9元',
'images': [
'goods/images/24_P_1448948023823.jpg',
'goods/images/24_P_1448948023977.jpg'
],
'market_price': '¥11元',
'categorys': [
'首页',
'粮油副食',
'方便速食'
],
'desc': None,
'name': '山西黑米农家黑米4斤',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥12元',
'images': [
'goods/images/25_P_1448947875346.jpg'
],
'market_price': '¥14元',
'categorys': [
'首页',
'粮油副食',
'米面杂粮'
],
'desc': None,
'name': '稻园牌稻米油粮油米糠油绿色植物油',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
},
{
'sale_price': '¥12元',
'images': [
'goods/images/29_P_1448947631994.jpg'
],
'market_price': '¥14元',
'categorys': [
'首页',
'粮油副食',
'食用油'
],
'desc': None,
'name': '融氏纯玉米胚芽油5l桶',
'goods_desc':'<p><img src="/media/goods/images/2_20170719161405_249.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161414_628.jpg" title="" alt="2.jpg"/></p><p><img src="/media/goods/images/2_20170719161435_381.jpg" title="" alt="2.jpg"/></p>'
}
]
pass | StarcoderdataPython |
110333 | <reponame>rkingsbury/mdgo
# coding: utf-8
# Copyright (c) <NAME>.
# Distributed under the terms of the MIT License.
"""
This module implements a core class PackmolWrapper for packing molecules
into a single box.
You need the Packmol package to run the code, see
http://m3g.iqm.unicamp.br/packmol or
http://leandro.iqm.unicamp.br/m3g/packmol/home.shtml
for download and setup instructions. You may need to manually
set the folder of the packmol executable to the PATH environment variable.
"""
import subprocess
import os
# import tempfile
from typing import Optional, List, Dict
# from subprocess import PIPE, Popen
__author__ = "<NAME>"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Feb 9, 2021"
class PackmolWrapper:
    """
    Wrapper for the Packmol software that can be used to pack various types of
    molecules into a one single unit.

    Examples:

        >>> structures = [{"name": "structure_name", "file": "/path/to/xyz"}]
        >>> pw = PackmolWrapper("/path/to/work/dir",
        ...                     structures,
        ...                     {"structure_name": 2},
        ...                     [0., 0., 0., 10., 10., 10.])
        >>> pw.make_packmol_input()
        >>> pw.run_packmol()
    """

    def __init__(
        self,
        path: str,
        structures: List[dict],
        numbers: Dict[str, int],
        box: List[float],
        tolerance: float = 2.0,
        seed: int = 1,
        inputfile: str = "packmol.inp",
        outputfile: str = "output.xyz",
    ):
        """
        Args:
            path: The path to the directory for file i/o. Note that the path
                cannot contain any spaces.
            structures: A list of dicts containing information about molecules.
                Each dict requires two keys, "name", the structure name,
                and "file", the path to the structure file, e.g.
                {"name": "water", "file": "/path/to/water.xyz"}
            numbers: A dict of the numbers of each molecule. Each dict must
                have keys corresponding to 'name' keys in 'structures', and
                integer values for the number of that molecule to pack into
                the box, e.g. {"water": 20}
            box: A list of xlo, ylo, zlo, xhi, yhi, zhi, in Å.
            tolerance: Distance tolerance for packmol, in Å. (Annotated as a
                plain float — ``None`` was never a valid value here; the old
                ``Optional`` annotation was misleading.)
            seed: Random seed for packmol.
            inputfile: Path to the input file. Default to 'packmol.inp'.
            outputfile: Path to the output file. Default to 'output.xyz'.
        """
        self.path = path
        self.input = os.path.join(self.path, inputfile)
        self.output = os.path.join(self.path, outputfile)
        # Packmol's stdout is captured and mirrored into this log file.
        self.screen = os.path.join(self.path, "packmol.stdout")
        self.structures = structures
        self.numbers = numbers
        self.box = box
        self.tolerance = tolerance
        self.seed = seed

    def run_packmol(self):
        """Run packmol and write out the packed structure.

        Packmol's stdout is written to ``self.screen`` on success.

        Raises:
            ValueError: if packmol exits with a non-zero status.
        """
        try:
            # The shell handles the `< input` redirection that packmol expects.
            p = subprocess.run(
                "packmol < '{}'".format(self.input),
                check=True,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
        except subprocess.CalledProcessError as e:
            raise ValueError("Packmol failed with errorcode {} and stderr: {}".format(e.returncode, e.stderr)) from e
        else:
            with open(self.screen, "w") as out:
                out.write(p.stdout.decode())

    def make_packmol_input(self):
        """Write a Packmol input file: global settings (seed, tolerance,
        output) followed by one ``structure`` section per molecule type."""
        with open(self.input, "w") as out:
            # Summary header, e.g. "# 20 water + 10 salt".
            out.write(
                "# "
                + " + ".join(
                    str(self.numbers[structure["name"]]) + " " + structure["name"] for structure in self.structures
                )
                + "\n"
            )
            out.write("# Packmol input generated by mdgo.\n")
            out.write("seed {}\n".format(self.seed))
            out.write("tolerance {}\n\n".format(self.tolerance))
            out.write("filetype xyz\n\n")
            out.write("output {}\n\n".format(self.output))
            for structure in self.structures:
                out.write("structure {}\n".format(structure["file"]))
                out.write("  number {}\n".format(str(self.numbers[structure["name"]])))
                out.write("  inside box {}\n".format(" ".join(str(i) for i in self.box)))
                out.write("end structure\n\n")
if __name__ == "__main__":
    # Inert example usage: kept as a bare string expression, so running the
    # module directly does nothing. Copy the lines into a script to try it.
    """
    structures = [{"name": "EMC",
                   "file": "/Users/th/Downloads/test_selenium/EMC.lmp.xyz"}]
    pw = PackmolWrapper("/Users/th/Downloads/test_selenium/", structures,
                        {"EMC": '2'}, [0., 0., 0., 10., 10., 10.])
    pw.make_packmol_input()
    pw.run_packmol()
    """
| StarcoderdataPython |
159152 | import logging
import json
import re
import urllib.parse
from urllib.request import urlopen
# Log line format: severity level plus the originating thread name.
FORMAT = '[%(levelname)s] (%(threadName)-9s) %(message)s'
# NOTE(review): configuring the root logger at import time is a module-level
# side effect; confirm callers expect this module to own logging config.
logging.basicConfig(format=FORMAT)
# Decoders for API Output - TODO: Proper error handling
def _decode_json(s):
try:
if s == '':
logging.info('json decode a <null> string, return None')
return None
return json.loads(s)
except:
logging.error('Exception raised [unknown]', exc_info=True)
return None
def _decode_lines(s, linefunc):
try:
if s == '':
logging.info('json decode a <null> string, return None')
return []
lines = s.strip().split('\n')
result = []
for line in lines:
result.append(linefunc(line))
return result
except:
logging.error('Exception raised [unknown]', exc_info=True)
return None
def _decode_cercanumerotrenotrenoautocomplete(s):
    """Decode each line with the ``NNN - X|NNN-Y`` pattern, keeping the two
    textual groups (positions 2 and 4) as a tuple; unmatched lines map to None.
    """
    pattern = re.compile(r'^(\d+)\s-\s(.+)\|(\d+)-(.+)$')

    def parse_line(line):
        match = pattern.search(line)
        if match is None:
            return None
        return match.group(2, 4)

    return _decode_lines(s, parse_line)
def _decode_autocompletastazione(s):
    """Decode station autocomplete output: one tuple of '|'-separated fields
    per (whitespace-stripped) line."""
    def split_fields(line):
        return tuple(line.strip().split('|'))

    return _decode_lines(s, split_fields)
class Viaggiatrenonew:
    """Thin client for the ViaggiaTreno REST API.

    Each API "function" is a path segment under ``urlbase``; the raw HTTP
    response is run through a per-function decoder (JSON or one of the
    line-oriented parsers above).
    """

    def __init__(self, **options):
        """Accepted options: ``verbose``, ``urlopen`` (injectable opener,
        useful for tests), and ``plainoutput`` (return raw text, skip decode).
        """
        self._urlbase = 'http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/'
        self.__verbose = options.get('verbose', False)
        self.__urlopen = options.get('urlopen', urlopen)
        self.__plainoutput = options.get('plainoutput', False)
        # Map API function name -> decoder for its response payload.
        self.__decoders = {
            'andamentoTreno': _decode_json,
            'cercaStazione': _decode_json,
            'tratteCanvas': _decode_json,
            'dettaglioStazione': _decode_json,
            'regione': _decode_json,
            'arrivi': _decode_json,
            'partenze': _decode_json,
            'soluzioniViaggioNew': _decode_json,
            'cercaNumeroTrenoTrenoAutocomplete': _decode_cercanumerotrenotrenoautocomplete,
            'autocompletaStazione': _decode_autocompletastazione
        }
        # Unknown functions fall back to returning the raw text unchanged.
        self.__default_decoder = lambda x: x
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)

    @property
    def urlbase(self):
        # Base URL of the REST endpoint (read-only).
        return self._urlbase

    def __checkanddecode(self, function, data):
        # Pick the decoder registered for `function`, defaulting to identity.
        decoder = self.__decoders.get(function, self.__default_decoder)
        return decoder(data)

    def call(self, function, *params, **options):
        """Invoke an API function with positional path parameters.

        Returns the decoded payload (or raw text when ``plainoutput`` is
        true); returns None on any error.
        """
        plain = options.get('plainoutput', self.__plainoutput)
        verbose = options.get('verbose', self.__verbose)  # NOTE(review): read but never used below
        try:
            # URL-quote every parameter and join them as path segments.
            queryparam = '/'.join(urllib.parse.quote(str(p), safe='') for p in params)
            url = self._urlbase + function + '/' + queryparam
            self.logger.debug(url)
            req = self.__urlopen(url)
            data = req.read().decode('utf-8')
            if plain:
                return data
            else:
                return self.__checkanddecode(function, data)
        except:
            # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit;
            # kept as-is here, but worth narrowing to Exception.
            self.logger.error('Exception raised [unknown]', exc_info=True)
            return None
| StarcoderdataPython |
70280 | <reponame>Leonardo-YXH/DevilYuan
import ssl
import random
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
from .DyTrader import *
class Ssl3HttpAdapter(HTTPAdapter):
    """Requests transport adapter that pins pooled HTTPS connections to
    TLSv1 (presumably required by a legacy broker endpoint — TODO confirm).
    """

    def init_poolmanager(self, connections, maxsize, block=False):
        # Same signature as HTTPAdapter.init_poolmanager; the only change
        # from the default pool is forcing ssl_version to PROTOCOL_TLSv1.
        self.poolmanager = PoolManager(num_pools=connections,
                                       maxsize=maxsize,
                                       block=block,
                                       ssl_version=ssl.PROTOCOL_TLSv1)
class WebTrader(DyTrader):
    """Base class for broker Web trading interfaces."""

    name = 'Web'

    # Interval settings — presumably seconds; confirm against DyTrader usage.
    heartBeatTimer = 60
    pollingCurEntrustTimer = 1

    maxRetryNbr = 3  # maximum number of retries

    def __init__(self, eventEngine, info, configFile=None, accountConfigFile=None):
        super().__init__(eventEngine, info, configFile, accountConfigFile)
        # Optional callable producing an HTTPAdapter (e.g. Ssl3HttpAdapter);
        # mounted on the session during _preLogin when set by a subclass.
        self._httpAdapter = None

    def _preLogin(self):
        # Start a fresh HTTP session before logging in.
        self._session = requests.session()
        if self._httpAdapter is not None:
            # Mount the custom adapter for all HTTPS traffic.
            self._session.mount('https://', self._httpAdapter())

        # Default session headers: present as a desktop IE/Trident browser.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
        }
        self._session.headers.update(headers)

    def _postLogout(self):
        # Tear down the HTTP session after logout.
        self._session.close()
| StarcoderdataPython |
78481 | <filename>deepgenmodels/autoregressive/nade_test.py
#!/usr/bin/env python3
# External dependencies.
import torch
import torch.optim as optim
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
# Internal dependencies.
from nade import NADE
# Sample classification with artificial 3D data.
if __name__ == "__main__":

    # Seed for reproducibility.
    torch.manual_seed(0)

    # Global properties of the data and model.
    num_classes = 4
    inp_dimensions = 10
    num_samples_per_class = 50
    num_training_iterations = 5000

    # Class labels: a flat vector of 0..3, num_samples_per_class of each.
    classes = torch.cat([
        torch.full((num_samples_per_class,), 0),
        torch.full((num_samples_per_class,), 1),
        torch.full((num_samples_per_class,), 2),
        torch.full((num_samples_per_class,), 3)], dim=0)

    # Datapoints per class: a near-binary prototype vector per class plus
    # small Gaussian noise; shape (num_classes, samples_per_class, inp_dim).
    inps = torch.stack([
        torch.randn(num_samples_per_class, inp_dimensions)/10 + torch.tensor([1, 0, 1, 1, 0, 0, 1, 0, 0, 1]),
        torch.randn(num_samples_per_class, inp_dimensions)/10 + torch.tensor([0, 0, 1, 0, 0, 1, 0, 1, 0, 1]),
        torch.randn(num_samples_per_class, inp_dimensions)/10 + torch.tensor([1, 1, 0, 1, 1, 0, 0, 1, 0, 0]),
        torch.randn(num_samples_per_class, inp_dimensions)/10 + torch.tensor([0, 1, 1, 1, 0, 1, 1, 0, 1, 1])], dim=0)

    # One generative model per class (classification by maximum likelihood).
    # NOTE(review): NADE(inp_dim, inp_dim//2) — second argument presumably
    # the hidden size; confirm against nade.py.
    models = [NADE(inp_dimensions, inp_dimensions//2) for _ in range(num_classes)]

    # Train each model one by one, each on its own class's datapoints.
    for inp, model in zip(inps, models):

        # Optimization scheme.
        optimizer = optim.SGD(model.parameters(), lr=0.01)

        for _ in range(num_training_iterations):

            # Zero out previous gradients.
            model.zero_grad()

            # Compute log-likehoods per sample.
            log_likelihoods = model(inp)

            # Negative mean over all samples, because we're minimizing with SGD instead of maximizing.
            negative_mean_log_likehoods = -torch.mean(log_likelihoods)
            # print('NLL = %0.4f' % negative_mean_log_likehoods.item())

            # Compute gradients.
            negative_mean_log_likehoods.backward()

            # Update weights.
            optimizer.step()

    # Label each datapoint by the model that assigns it the highest
    # log-likelihood (argmax over the per-class model outputs).
    inps_flattened = torch.flatten(inps, start_dim=0, end_dim=1)
    predicted_log_likelihoods = torch.stack([model(inps_flattened) for model in models], dim=1)
    predicted_classes = torch.argmax(predicted_log_likelihoods, dim=1)

    # Scatterplots of classification — only the first 3 of the 10 input
    # dimensions are plotted.
    fig, axs = plt.subplots(ncols=2, figsize=(10, 5), subplot_kw=dict(projection='3d'))
    axs[0].set_xlabel('x')
    axs[0].set_ylabel('y')
    axs[0].set_zlabel('z')
    axs[0].scatter(inps_flattened[:, 0], inps_flattened[:, 1], inps_flattened[:, 2], c=classes.numpy(), cmap='Set1')
    axs[0].set_title('True Labels', y=1.05)
    axs[1].set_xlabel('x')
    axs[1].set_ylabel('y')
    axs[1].set_zlabel('z')
    axs[1].scatter(inps_flattened[:, 0], inps_flattened[:, 1], inps_flattened[:, 2], c=predicted_classes.numpy(), cmap='Set1')
    axs[1].set_title('NADE Predicted Labels', y=1.05)
    plt.tight_layout()
    plt.show()

    # Scatterplots of generated samples, num_samples_per_class per model,
    # colored by the generating model (reuses `classes` as the color key).
    samples = torch.cat([model.sample(num_samples_per_class) for model in models], dim=0)
    fig, axs = plt.subplots(ncols=1, figsize=(5, 5), subplot_kw=dict(projection='3d'))
    axs.set_xlabel('x')
    axs.set_ylabel('y')
    axs.set_zlabel('z')
    axs.scatter(samples[:, 0], samples[:, 1], samples[:, 2], c=classes.numpy(), cmap='Set1')
    axs.set_title('Samples', y=1.05)
    plt.tight_layout()
    plt.show()
| StarcoderdataPython |
1691897 | ## Code for calling the owner of this PiPatrol. This part integrates with an IFTTT Maker Event applet.
import webbrowser, sys, os

# Trigger URL for the IFTTT Maker Event applet.
# FIX: the original unquoted `<INSERT_URL_GIVEN_BY_IFTTT_HERE>` placeholder was
# not valid Python syntax; keeping the placeholder as a string lets the module
# parse. Replace the placeholder text with the real webhook URL before use.
url = "<INSERT_URL_GIVEN_BY_IFTTT_HERE>"

# Path to the web browser binary used to open the trigger URL.
chrome_path = '/usr/lib/chromium-browser/chromium-browser'

# Open the trigger URL in the browser, which fires the IFTTT event.
# NOTE(review): webbrowser.get() normally expects a registered browser name or
# a command template containing '%s'; confirm this bare path works on the Pi.
webbrowser.get(chrome_path).open(url)
| StarcoderdataPython |
1791829 | <gh_stars>0
import pandas as pd
def get_base_df():
    """Build a small demo DataFrame keyed by a two-level MultiIndex."""
    rows = [
        ('cobra', 'mark i'), ('cobra', 'mark ii'),
        ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'),
        ('viper', 'mark ii'), ('viper', 'mark iii'),
    ]
    data = [[12, 2], [0, 4], [10, 20], [1, 4], [7, 1], [16, 36]]
    return pd.DataFrame(
        data,
        columns=['max_speed', 'shield'],
        index=pd.MultiIndex.from_tuples(rows),
    )
def show_change_demo():
    """Demonstrate in-place column assignment on a MultiIndex DataFrame.

    Scales the 'shield' column by 100 via ``.loc`` (which writes through to
    the frame rather than to a chained-assignment copy) and prints the result.
    """
    df = get_base_df()
    # Removed leftover commented-out debug assignments from an earlier run.
    df.loc[:, 'shield'] = df.loc[:, 'shield'] * 100
    print(df)
if __name__ == "__main__":
    # Run the demo only when executed as a script (redundant `pass` removed).
    show_change_demo()
| StarcoderdataPython |
1782179 | #!/usr/bin/python2
'''
This is the assembler I wrote for the bored assembly
'''
import sys
import struct

# Usage: <input.bd> <output> [-v]; -v echoes each parsed source line.
# NOTE: Python 2 source (print statements).
if len(sys.argv) < 3:
    print "usage {0} inFile.bd out [-v]".format(sys.argv[0])
    exit(-1)

# Assembly source (text) in, flat binary out.
csm_file = open(sys.argv[1])
code_file = open(sys.argv[2], "wb")
def pack(a):
    # Encode one 32-bit word; "I" = unsigned int in native byte order.
    return struct.pack("I", a)
def both_reg(op1, op2):
    """True when both operands name one of the four general registers."""
    registers = ("r0", "r1", "r2", "r3")
    return op1 in registers and op2 in registers
def is_reg(op):
    """True when the operand is a register name (r0..r3)."""
    return op in ("r0", "r1", "r2", "r3")
def get_reg(op):
    """Translate a register name into its numeric index (r0 -> 0, ...).

    Raises ValueError for non-register operands, like the original.
    """
    register_order = ("r0", "r1", "r2", "r3")
    return register_order.index(op)
def is_mem(op):
    """True when the operand is a bracketed memory reference such as "[12]"."""
    # Indexing (not startswith/endswith) keeps the original IndexError
    # behavior on an empty operand string.
    opens, closes = op[0], op[-1]
    return opens == "[" and closes == "]"
def get_mem(op):
    # Strip the surrounding "[...]" and parse the enclosed address value.
    return get_val(op[1:-1])
def is_val(op):
    # An operand is an immediate value when it is neither a memory
    # reference nor a register name (validity of the digits is checked
    # later by get_val).
    return not is_mem(op) and not is_reg(op)
def get_val(op):
    # Immediate operands are plain base-10 integers in the source text.
    return int(op)
def write(vals):
    # Emit one fixed-width instruction record into the output binary:
    # up to four 32-bit words, zero-padded so every instruction occupies
    # exactly 4 words (16 bytes).
    global code_file
    written = 0
    for v in vals:
        code_file.write(pack(v))
        written += 1
    while written < 4:
        code_file.write(pack(0x0))
        written += 1
# Assemble line by line: tokens are space-separated, the mnemonic selects an
# opcode/encoding, and write() pads every instruction to a fixed 4-word record.
for l in csm_file.readlines():
    cur = l.strip().split(" ")
    if "-v" in sys.argv:
        print cur
    # utility / zero-operand instructions
    if cur[0][0] == ";":
        pass  # comment line in the assembly source — emits nothing
    elif cur[0] == "exit":
        write([0x00])
    elif cur[0] == "nop":
        write([0x90])
    elif cur[0] == "cpu":
        write([0x91])
    elif cur[0] == "mem":
        write([0x92])
    # mov: opcode chosen by the operand addressing modes (reg/imm/mem)
    elif cur[0] == "mov":
        if both_reg(cur[1], cur[2]):
            write([0x1, get_reg(cur[1]), get_reg(cur[2])])
        elif is_reg(cur[1]) and is_val(cur[2]):
            write([0x2, get_reg(cur[1]), get_val(cur[2])])
        elif is_mem(cur[1]) and is_reg(cur[2]):
            write([0x3, get_mem(cur[1]), get_reg(cur[2])])
        elif is_mem(cur[1]) and is_val(cur[2]):
            write([0x4, get_mem(cur[1]), get_val(cur[2])])
        elif is_reg(cur[1]) and is_mem(cur[2]):
            write([0x5, get_reg(cur[1]), get_mem(cur[2])])
    # cmp: register-register only
    elif cur[0] == "cmp":
        if both_reg(cur[1], cur[2]):
            write([0x10, get_reg(cur[1]), get_reg(cur[2])])
    # math: add/sub, one opcode per addressing-mode combination
    elif cur[0] == "add":
        if both_reg(cur[1], cur[2]):
            write([0x20, get_reg(cur[1]), get_reg(cur[2])])
        elif is_reg(cur[1]) and is_val(cur[2]):
            write([0x21, get_reg(cur[1]), get_val(cur[2])])
        elif is_reg(cur[1]) and is_mem(cur[2]):
            write([0x22, get_reg(cur[1]), get_mem(cur[2])])
        elif is_mem(cur[1]) and is_reg(cur[2]):
            write([0x23, get_mem(cur[1]), get_reg(cur[2])])
        elif is_mem(cur[1]) and is_val(cur[2]):
            write([0x24, get_mem(cur[1]), get_val(cur[2])])
        elif is_mem(cur[1]) and is_mem(cur[2]):
            write([0x2a, get_mem(cur[1]), get_mem(cur[2])])
    elif cur[0] == "sub":
        if both_reg(cur[1], cur[2]):
            write([0x25, get_reg(cur[1]), get_reg(cur[2])])
        elif is_reg(cur[1]) and is_val(cur[2]):
            write([0x26, get_reg(cur[1]), get_val(cur[2])])
        elif is_reg(cur[1]) and is_mem(cur[2]):
            write([0x27, get_reg(cur[1]), get_mem(cur[2])])
        elif is_mem(cur[1]) and is_reg(cur[2]):
            write([0x28, get_mem(cur[1]), get_reg(cur[2])])
        elif is_mem(cur[1]) and is_val(cur[2]):
            write([0x29, get_mem(cur[1]), get_val(cur[2])])
        elif is_mem(cur[1]) and is_mem(cur[2]):
            write([0x2b, get_mem(cur[1]), get_mem(cur[2])])
    # jumps: the target operand is scaled by 8 —
    # NOTE(review): records are 4 words (16 bytes) each per write(), so the
    # meaning of the factor 8 depends on the VM's addressing unit; confirm
    # against the VM implementation.
    elif cur[0] == "jmp":
        write([0x40, 8*get_val(cur[1])])
    elif cur[0] == "je":
        write([0x41, 8*get_val(cur[1])])
    # interrupt
    elif cur[0] == "int":
        write([0x100, get_val(cur[1])])
    # stop on unknown instruction
    else:
        print "Instruction not understood"
        print cur
        csm_file.close()
        code_file.close()
        exit(-2)
csm_file.close()
code_file.close()
| StarcoderdataPython |
65920 | from utils._context.library_version import LibraryVersion, Version
from utils import context
# Replace execute_warmups with a no-op so these unit tests never trigger
# the real warmup machinery when the context module is used.
context.execute_warmups = lambda *args, **kwargs: None
def test_version_comparizon():
    """Version comparison operators against raw strings, 'v'-prefixed tags,
    vendor-formatted strings (ruby gem listing lines), and dev/pre-release
    builds."""
    v = Version("1.0", "some_component")

    # Equality / inequality against plain strings.
    assert v == "1.0"
    assert v != "1.1"

    # Orderings, with Version on either side of the operator.
    assert v <= "1.1"
    assert v <= "1.0"
    assert "1.1" >= v
    assert "1.0" >= v

    assert v < "1.1"
    assert "1.1" > v

    assert v >= "0.9"
    assert v >= "1.0"
    assert "0.9" <= v
    assert "1.0" <= v

    assert v > "0.9"
    assert "0.9" < v

    # A leading "v" tag prefix must not affect ordering.
    assert Version("1.31.1", "") < "v1.34.1"
    assert "1.31.1" < Version("v1.34.1", "")
    assert Version("1.31.1", "") < Version("v1.34.1", "")

    # Ruby gem-listing formats ("* ddtrace (...)"), beta tags, commit hashes.
    assert Version(" * ddtrace (1.0.0.beta1)", "ruby") == Version("1.0.0.beta1", "ruby")
    assert Version(" * ddtrace (1.0.0.beta1)", "ruby")
    assert Version(" * ddtrace (1.0.0.beta1)", "ruby") < Version(" * ddtrace (1.0.0.beta1 de82857)", "ruby")
    assert Version(" * ddtrace (1.0.0.beta1 de82857)", "ruby") < Version("1.0.0", "ruby")
    assert Version("1.0.0beta1", "ruby") < Version("1.0.0beta1+8a50f1f", "ruby")

    # Python dev builds compare against a bare rc/dev prefix.
    assert Version("1.1.0rc2.dev15+gc41d325d", "python") >= "1.1.0rc2.dev"
    assert Version("1.1.0", "python") >= "1.1.0rc2.dev"
def test_version_serialization():
    """Check that raw version strings from various sources normalize correctly."""
    # "v" prefix is stripped for equality and for str().
    assert Version("v1.3.1", "cpp") == "1.3.1"
    assert str(Version("v1.3.1", "cpp")) == "1.3.1"
    v = Version("0.53.0.dev70+g494e6dc0", "some comp")
    assert v == "0.53.0.dev70+g494e6dc0"
    assert str(v) == "0.53.0.dev70+g494e6dc0"
    # Ruby "gem list" output lines are parsed into a plain version.
    v = Version(" * ddtrace (0.53.0.appsec.180045)", "ruby")
    assert v == Version("0.53.0appsec.180045", "ruby")
    assert v == "0.53.0appsec.180045"
    v = Version(" * ddtrace (1.0.0.beta1)", "ruby")
    assert v == Version("1.0.0beta1", "ruby")
    # A trailing commit hash becomes build metadata (+<sha>).
    v = Version(" * ddtrace (1.0.0.beta1 de82857)", "ruby")
    assert v == Version("1.0.0beta1+de82857", "ruby")
    v = Version("* libddwaf (1.0.14.1.0.beta1)", "libddwaf")
    assert v == Version("1.0.14.1.0.beta1", "libddwaf")
    assert v == "1.0.14.1.0.beta1"
    # The agent banner embeds its version among other build details.
    v = Version("Agent 7.33.0 - Commit: e6cfcb9 - Serialization version: v5.0.4 - Go version: go1.16.7", "agent")
    assert v == "7.33.0"
    # PHP nightly suffix is dropped; nodejs pre-release tags are kept.
    v = Version("1.0.0-nightly", "php")
    assert v == "1.0.0"
    v = Version("3.0.0pre0", "nodejs")
    assert v == "3.0.0pre0"
def test_library_version():
    """Exercise LibraryVersion ("<library>@<version>") comparisons."""
    v = LibraryVersion("p")
    # A version-less library compares by name only.
    assert v == "p"
    assert v != "u"
    v = LibraryVersion("p", "1.0")
    assert v == "p@1.0"
    assert v == "p"
    assert v != "p@1.1"
    assert v != "u"
    assert v <= "p@1.1"
    assert v <= "p@1.0"
    assert "p@1.1" >= v
    assert "p@1.0" >= v
    assert v < "p@1.1"
    assert "p@1.1" > v
    assert v >= "p@0.9"
    assert v >= "p@1.0"
    assert "p@0.9" <= v
    assert "p@1.0" <= v
    assert v > "p@0.9"
    assert "p@0.9" < v
    # Ordering against a DIFFERENT library name is always False, both ways.
    assert (v <= "u@1.0") is False
    assert (v >= "u@1.0") is False
    assert ("u@1.0" <= v) is False
    assert ("u@1.0" >= v) is False
    v = LibraryVersion("p")
    assert ("u@1.0" == v) is False
    assert ("u@1.0" <= v) is False
    v = LibraryVersion("python", "0.53.0.dev70+g494e6dc0")
    assert v == "python@0.53.0.dev70+g494e6dc0"
    # Java versions carry a "~<sha>" suffix that is stripped before comparing.
    v = LibraryVersion("java", "0.94.1~dde6877139")
    assert v == "java@0.94.1"
    assert v >= "java@0.94.1"
    assert v < "java@0.94.2"
    v = LibraryVersion("java", "0.94.0-SNAPSHOT~57664cfbe5")
    assert v == "java@0.94.0"
    assert v >= "java@0.94.0"
    assert v < "java@0.94.1"
| StarcoderdataPython |
190767 | <gh_stars>1-10
class APP:
    """Static application metadata."""
    APPLICATION = "denon-commander"
    AUTHOR = "<NAME>"
    VERSION = "V1.0"
class CONNECTION:
    """Network settings for reaching the Denon device."""
    # I recommend to set static IP address on device
    IP = "192.168.1.150"
class DEFAULT:
    """Default settings applied to the receiver."""
    # Default volume from -80 to 18
    VOLUME = "-40"
    # Default input
    INPUT = "GAME"
    # Default sound mode
    SOUND_MODE = "MCH STEREO"
| StarcoderdataPython |
3357475 | <reponame>CPT-Jack-A-Castle/metalk8s
"""Expose a really crude mock of K8s API for use in rendering tests."""
import collections
import re
from typing import Any, Dict, Iterator, List, Optional
import pytest
# Type aliases describing the in-memory dataset shape:
# {api_version: {kind: [object, ...]}}
APIVersion = str
Kind = str
ItemList = List[Any]
K8sData = Dict[APIVersion, Dict[Kind, ItemList]]
# pylint: disable=too-few-public-methods
class KubernetesMock:
    """Simple object for mocking basic API calls on an in-memory K8s dataset."""
    # A single field predicate: `key` is a path of nested dict keys, `op` is
    # "=" or "!=", and `value` is the literal compared against.
    Matcher = collections.namedtuple("Matcher", ("key", "op", "value"))
    def __init__(self, data: K8sData):
        # Dataset indexed as {api_version: {kind: [objects]}}.
        self.data = data
    @staticmethod
    def _apply_matchers(objects: ItemList, matchers: List[Matcher]) -> Iterator[Any]:
        """Lazily yield the objects that satisfy ALL of the given matchers."""
        def _filter(item: Any) -> bool:
            matches = True
            for matcher in matchers:
                # Walk the nested key path down to the value being compared.
                val = item
                for key in matcher.key:
                    val = val[key]
                if matcher.op == "=":
                    matches &= val == matcher.value
                elif matcher.op == "!=":
                    matches &= val != matcher.value
            return matches
        return filter(_filter, objects)
    def _get_item_list(self, api_version: APIVersion, kind: Kind) -> ItemList:
        # Fail the test immediately if the mock was not seeded for this kind.
        try:
            return self.data[api_version][kind]
        except KeyError:
            pytest.fail(f"No data in Kubernetes mock for '{api_version}/{kind}'")
    def get(
        self,
        api_version: APIVersion,
        kind: Kind,
        name: str,
        namespace: Optional[str] = None,
    ) -> Optional[Any]:
        """Retrieve an object from the data store."""
        items = self._get_item_list(api_version, kind)
        # Match on metadata.name, plus metadata.namespace when provided.
        matchers = [self.Matcher(["metadata", "name"], "=", name)]
        if namespace is not None:
            matchers.append(self.Matcher(["metadata", "namespace"], "=", namespace))
        # First match, or None to mimic a "not found" lookup.
        return next(self._apply_matchers(items, matchers), None)
    def list(
        self,
        api_version: APIVersion,
        kind: Kind,
        namespace: Optional[str] = None,
        label_selector: Optional[str] = None,
    ) -> List[Any]:
        """Retrieve a list of objects from the data store."""
        items = self._get_item_list(api_version, kind)
        matchers = []
        if namespace is not None:
            matchers.append(self.Matcher(["metadata", "namespace"], "=", namespace))
        if label_selector is not None:
            # Selector syntax: comma-separated "key=value" / "key!=value" terms.
            for match_expr in label_selector.split(","):
                match = re.match(
                    r"^(?P<key>.*[^!])(?P<op>!=|=)(?P<value>.+)$", match_expr
                )
                assert (
                    match is not None
                ), f"Invalid label selector expression: {match_expr}"
                matchers.append(
                    self.Matcher(
                        key=["metadata", "labels", match.group("key")],
                        op=match.group("op"),
                        value=match.group("value"),
                    )
                )
        return list(self._apply_matchers(items, matchers))
| StarcoderdataPython |
3270545 | import os, glob, cv2
import torch
import random
import linecache
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
class MAKEUP(Dataset):
    """Paired makeup / non-makeup image dataset.
    Expects segmentation masks under <image_path>/segs/{non-makeup,makeup}/*.png
    with same-named source images under <image_path>/images/....
    """
    def __init__(self, image_path, transform, mode, transform_mask, cls_list):
        self.image_path = image_path
        self.transform = transform
        self.mode = mode
        self.transform_mask = transform_mask
        # Masks and their matching source images; the image path is derived
        # from the mask filename so the two lists stay parallel.
        self.A_seg = glob.glob(os.path.join(image_path, 'segs', 'non-makeup', '*.png'))
        self.As = [os.path.join(image_path, 'images', 'non-makeup', x.split('/')[-1]) for x in self.A_seg]
        self.B_seg = glob.glob(os.path.join(image_path, 'segs', 'makeup', '*.png'))
        self.Bs = [os.path.join(image_path, 'images', 'makeup', x.split('/')[-1]) for x in self.B_seg]
        # Number of images in each domain (A = non-makeup, B = makeup).
        self.noiA = len(self.As)
        self.noiB = len(self.Bs)
        print(self.noiA, self.noiB)
    def __getitem__(self, index):
        # NOTE(review): only 'train' mode returns data; any other mode falls
        # through and yields None — confirm this is intended.
        if self.mode == 'train':
            # `index` is ignored: a random A/B pair is drawn on every call.
            idxA = random.choice(range(self.noiA))
            idxB = random.choice(range(self.noiB))
            mask_A = Image.open(self.A_seg[idxA]).convert("RGB")
            mask_B = Image.open(self.B_seg[idxB]).convert("RGB")
            image_A = Image.open(self.As[idxA]).convert("RGB")
            image_B = Image.open(self.Bs[idxB]).convert("RGB")
            # Images are resized to 256x256; masks are returned at native size.
            image_A = Image.fromarray(cv2.resize(np.array(image_A), (256, 256)))
            image_B = Image.fromarray(cv2.resize(np.array(image_B), (256, 256)))
            return self.transform(image_A), self.transform(image_B), self.transform_mask(mask_A), self.transform_mask(mask_B)
    def __len__(self):
        if self.mode == 'train' or self.mode == 'train_finetune':
            num_A = len(self.As)
            num_B = len(self.Bs)
            return max(num_A, num_B)
        elif self.mode in ['test', "test_baseline", 'test_all']:
            # Test modes enumerate every A/B combination.
            num_A = len(self.As)
            num_B = len(self.Bs)
            return num_A * num_B
class Restaurant:
    """A restaurant with a name and the cuisine it serves."""
    def __init__(self, restaurant_name, cuisine_type):
        # Keep both identifying attributes on the instance.
        self.restaurant_name, self.cuisine_type = restaurant_name, cuisine_type
    def describe_restaurant(self):
        """Print the restaurant's name and its cuisine."""
        for label, value in (('餐馆名称:', self.restaurant_name),
                             ('餐品名称:', self.cuisine_type)):
            print(label + value)
    def open_restaurant(self):
        """Print a message announcing that the restaurant is open."""
        print('正在营业')
# Demo: build a restaurant and show its details.
res = Restaurant('中餐馆', '粤菜')
print(res.restaurant_name)
print(res.cuisine_type)
# Bug fix: describe_restaurant() prints its report and returns None, so the
# original print(res.describe_restaurant()) emitted a stray "None" line.
res.describe_restaurant()
| StarcoderdataPython |
3332668 | # -*- coding: utf-8 -*-
import sys
import random
import time
from PIL import Image
import argparse
import os.path
import pickle
import subprocess
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from ppadb.client import Client as AdbClient
import re
if sys.version_info.major != 3:
print('Please run under Python3')
exit(1)
try:
from common import debug, config, screenshot, UnicodeStreamFilter
from common.auto_adb import auto_adb
from common import apiutil
from common.compression import resize_image, calcPercentage
except Exception as ex:
print(ex)
print('Please put the script in the project root directory to run')
print('Please check in the project root directory common Does the folder exist')
exit(1)
VERSION = "0.0.1"
# Tencent AI open-platform credentials used for face detection
# (shared by the original author for anyone to use).
# Sign-up address: http://ai.qq.com
AppID = '1106858595'
AppKey = '<KEY>'
DEBUG_SWITCH = True
# Directory where cropped face images are saved.
FACE_PATH = 'face/'
adb = auto_adb()
#adb.test_device()
# Minimum "beauty" score (0-100) a face must reach before liking/following.
BEAUTY_THRESHOLD = 80
# Minimum detected age for a face to be counted.
GIRL_MIN_AGE = 14
def yes_or_no():
    """Prompt the user to pick a connected adb device and load its config.
    Side effects: sets the global ``device_id``, binds it to ``adb``, and
    replaces the global ``config`` module reference with the device's config
    dict, filling in the real screen size reported by adb.
    """
    client = AdbClient(host="127.0.0.1", port=5037)
    devices = client.devices()
    i = 0
    for device in devices:
        print(str(i) + " : " + device.serial)
        i = i + 1
    while True:
        # No device connected: nothing to choose from.
        if i == 0:
            exit(0)
        yes_or_no = str(input('Choose devices 0 to {x1} n for exit:'.format(x1 = i-1) ))
        if yes_or_no == 'n':
            print('Thanks for using')
            exit(0)
        # Robustness: non-numeric input used to raise ValueError; re-prompt instead.
        try:
            selection = int(yes_or_no)
        except ValueError:
            print('please enter again')
            continue
        # Bug fix: the upper bound was `<= i`, which accepted the out-of-range
        # index i and crashed with IndexError; valid indices are 0 .. i-1.
        if 0 <= selection < i:
            global device_id
            device_id = devices[selection].serial
            adb.set_device(device_id)
            size = adb.get_screen()
            # adb reports the screen size as e.g. "1080x1920".
            m = re.search(r'(\d+)x(\d+)', size)
            global config
            print("get device info....")
            config = config.open_accordant_config(device_id)
            if m:
                config['screen_size']['x'] = int(m.group(1))
                config['screen_size']['y'] = int(m.group(2))
            break
        else:
            print('please enter again')
def _random_bias(num):
    """Return a uniformly random integer offset in [-num, num].
    :param num: maximum absolute offset
    :return: random integer between -num and num, inclusive
    """
    low, high = -num, num
    return random.randint(low, high)
def next_page():
    """
    Swipe up to scroll to the next video.
    :return:
    """
    # Swipe from 60% of the screen height up to 1/6, over 350 ms.
    percentageX= config['screen_size']['x'] / 2
    percentageY= config['screen_size']['y'] /10 * 6
    percentageX1= config['screen_size']['x'] / 2
    percentageY1= config['screen_size']['y'] /6
    cmd = 'shell input swipe {x1} {y1} {x2} {y2} {duration}'.format(
        x1=percentageX,
        y1=percentageY,
        x2=percentageX1,
        y2=percentageY1,
        duration=350
    )
    adb.run(cmd)
    time.sleep(1.5)
def follow_user():
    """
    Follow (subscribe to) the current user.
    :return:
    """
    # Tap the follow-button position from the device config, with a small
    # random offset to look less robotic.
    cmd = 'shell input tap {x} {y}'.format(
        x=config['follow_bottom']['x'] + _random_bias(10),
        y=config['follow_bottom']['y'] + _random_bias(10)
    )
    adb.run(cmd)
    time.sleep(0.5)
def thumbs_up():
    """
    Like (thumbs-up) the current video.
    :return:
    """
    #check white
    # The like button sits at roughly 93% of the width, mid height.
    percentageX= config['screen_size']['x'] / 100 * 93
    percentageY= config['screen_size']['y'] / 100 * 50
    cmd = 'shell input tap {x} {y}'.format(
        x=percentageX + _random_bias(10),
        y=percentageY + _random_bias(10)
    )
    adb.run(cmd)
    time.sleep(0.5)
def tap(x, y):
    """Tap the screen near (x, y), jittered by a small random offset."""
    cmd = 'shell input tap {x} {y}'.format(
        x=x + _random_bias(10),
        y=y + _random_bias(10)
    )
    adb.run(cmd)
def auto_reply():
    """Post a random comment from the global ``msgList`` on the current video."""
    msg = random.choice(msgList)
    #adb -s 172.30.65.129:5555 shell input keyevent 279
    # Click the comment button on the right
    percentageX= config['screen_size']['x'] / 10 * 9
    percentageY= config['screen_size']['y'] / 10 * 6
    tap(percentageX,percentageY)
    time.sleep(2)
    #After the comment list pops up, click the input comment box
    percentageX1= config['screen_size']['x'] / 10 * 5
    percentageY1= config['screen_size']['y'] / 100 * 93
    tap(percentageX1, percentageY1)
    time.sleep(2)
    #Enter the above msg content, pay attention to use ADB keyboard, otherwise it cannot be entered automatically, refer to: https://www.jianshu.com/p/2267adf15595
    #cmd =' shell am broadcast -a ADB_INPUT_TEXT --es msg "你好嗎'
    cmd = 'shell am broadcast -a ADB_INPUT_TEXT --es msg {text}'.format(text=msg)
    #cmd = 'shell input text "{}"'.format(msg)
    #cmd = 'shell input text "{}"'.format('Nice')
    adb.run(cmd)
    time.sleep(2)
    # NOTE(review): keyevent 4 is Android BACK, not a "send" key — the
    # original "Click the send button" comment appears stale; confirm intent.
    cmd = 'shell input keyevent 4'
    adb.run(cmd)
    time.sleep(2)
    cmd = 'shell input keyevent 4'
    adb.run(cmd)
    time.sleep(2)
    print("send msg")
    percentageX2= config['screen_size']['x'] / 100 * 93
    percentageY2= config['screen_size']['y'] / 100 * 94
    tap(percentageX2, percentageY2)
    time.sleep(2)
    print("close popup") #1300, 340
    cmd = 'shell input keyevent 4'
    adb.run(cmd)
    # Trigger the return button, keyevent 4 corresponds to the return key of the Android system, refer to the corresponding button operation of KEY: https://www.cnblogs.com/chengchengla1990/p/4515108.html
def parser():
    """Parse command-line options; returns a dict like {"reply": bool}."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-r", "--reply", action='store_true', help="auto reply")
    return vars(arg_parser.parse_args())
def main():
    """
    Main loop: load comment messages from a Google Sheet, then repeatedly
    scroll videos, run face detection, and like/follow/comment when a
    sufficiently "beautiful" adult face is found.
    :return:
    """
    print('Program version number : {}'.format(VERSION))
    print('Activate the window and press CONTROL + C Key combination to exit')
    # debug.dump_device_info()
    screenshot.check_screenshot(device_id)
    cmd_args = parser()
    # Google Sheets source of the canned comment messages (read-only scope).
    SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
    SAMPLE_SPREADSHEET_ID = '1RePp_f8FqGBEotcK0TscFo4L5lwPHBypsjzJnJJJLU8'
    SAMPLE_RANGE_NAME = 'Sheet1!A2:A4'
    creds = None
    # Cached OAuth credentials from a previous run, if any.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    service = build('sheets', 'v4', credentials=creds)
    sheet = service.spreadsheets()
    result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
                                range=SAMPLE_RANGE_NAME).execute()
    values = result.get('values', [])
    if not values:
        print('No data found.')
    else:
        # Fill the module-level msgList consumed by auto_reply().
        global msgList
        msgList = []
        for row in values:
            msgList.append(row[0])
            # Print columns A and E, which correspond to indices 0 and 4.
            print('%s' % (row[0]))
    #msgList = getfrom google spreadsheet
    while True:
        next_page()
        time.sleep(1)
        # Grab the current frame and shrink it for the detection API.
        screenshot.pull_screenshot(device_id)
        resize_image('autojump.png', 'optimized.png', 1024*1024)
        with open('optimized.png', 'rb') as bin_data:
            image_data = bin_data.read()
        ai_obj = apiutil.AiPlat(AppID, AppKey)
        rsp = ai_obj.face_detectface(image_data, 0)
        major_total = 0
        minor_total = 0
        # ret == 0 means the detection API call succeeded.
        if rsp['ret'] == 0:
            beauty = 0
            for face in rsp['data']['face_list']:
                msg_log = '[INFO] gender: {gender} age: {age} expression: {expression} beauty: {beauty}'.format(
                    gender=face['gender'],
                    age=face['age'],
                    expression=face['expression'],
                    beauty=face['beauty'],
                )
                print(msg_log)
                # Save each detected face crop for inspection.
                face_area = (face['x'], face['y'], face['x']+face['width'], face['y']+face['height'])
                img = Image.open("optimized.png")
                cropped_img = img.crop(face_area).convert('RGB')
                cropped_img.save(FACE_PATH + face['face_id'] + '.png')
                # Track the best beauty score among female faces (gender < 50).
                if face['beauty'] > beauty and face['gender'] < 50:
                    beauty = face['beauty']
                if face['age'] > GIRL_MIN_AGE:
                    major_total += 1
                else:
                    minor_total += 1
            # Pretty enough and mostly adults: like, follow and comment.
            if beauty > BEAUTY_THRESHOLD and major_total > minor_total:
                print('Found a beautiful girl! ! !')
                # NOTE(review): * 100 / 90 exceeds the screen width; this
                # probably meant * 90 / 100 — confirm against the device.
                start_heart_x = config['screen_size']['x'] * 100 / 90
                start_heart_y = config['screen_size']['y'] * 100 / 50
                heart_area = (start_heart_x, start_heart_y, start_heart_x+10, start_heart_y+10)
                origin_img = Image.open("autojump.png")
                heart_img = origin_img.crop(heart_area)
                # NOTE(review): detect_color is not defined or imported
                # anywhere in this file — this line raises NameError when hit.
                ratio = detect_color('red',heart_img)
                thumbs_up()
                follow_user()
                auto_reply()
                #if cmd_args['reply']:
                #    auto_reply()
        else:
            print(rsp)
            continue
if __name__ == '__main__':
    try:
        # Pick a device first, then enter the main automation loop.
        yes_or_no()
        main()
    except KeyboardInterrupt:
        # CTRL+C exits cleanly.
        #adb.run('kill-server')
        print('Thanks for using')
        exit(0)
| StarcoderdataPython |
4839075 | <reponame>TestQA14/PythonIntroduction<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Main-loop flag (1 = keep running) and the user's last menu choice.
loop = 1
choice = 0
def menu():
    """Print the operation menu and return the user's numeric choice."""
    print "We have several operations : "
    print "Option 1 - plus"
    print "Option 2 - minus"
    print "Option 3 - multiply"
    print "Option 4 - divide"
    print "Option 5 - exit"
    return input("Please, select operation: ")
def add(num1, num2):
    """Print the sum of the two numbers."""
    print num1, " + ", num2, " = ", num1 + num2
def minus(num1, num2):
    """Print the difference num1 - num2."""
    print num1, " - ", num2, " = ", num1 - num2
def multiply(num1, num2):
    """Print the product of the two numbers."""
    print num1, " * ", num2, " = ", num1 * num2
def divide(num1, num2):
    """Print the quotient num1 / num2."""
    print num1, " / ", num2, " = ", num1 / num2
# Dispatch loop: keep asking for an operation until the user picks "exit".
while loop == 1:
    choice = menu()
    if choice == 1:
        add(input("The first number: "), input("The second number: "))
    elif choice == 2:
        minus(input("The first number: "), input("The second number: "))
    elif choice == 3:
        multiply(input("The first number: "), input("The second number: "))
    elif choice == 4:
        divide(input("The first number: "), input("The second number: "))
    elif choice == 5:
        # Bug fix: the original used `loop == 0` (a comparison with no
        # effect), so choosing "exit" printed the farewell but looped forever.
        loop = 0
        print("See you soon! ")
| StarcoderdataPython |
class Solution(object):
    def alertNames(self, keyName, keyTime):
        """Return the sorted names of workers who used their key-card three
        or more times within any one-hour period (inclusive).
        :type keyName: List[str]
        :type keyTime: List[str]  # "HH:MM", 24-hour clock
        :rtype: List[str]
        """
        # Group each worker's usage times, converted to minutes since
        # midnight so "within one hour" becomes simple arithmetic.
        times_by_name = {}
        for name, clock in zip(keyName, keyTime):
            minutes = int(clock[:2]) * 60 + int(clock[3:])
            times_by_name.setdefault(name, []).append(minutes)
        alerted = []
        for name, minutes_list in times_by_name.items():
            minutes_list.sort()
            # Any three consecutive (sorted) uses spanning <= 60 minutes
            # trigger an alert. Bug fix vs. the original: the "23:xx" branch
            # required strictly later timestamps, so three identical 23:xx
            # uses were never flagged; equal timestamps now count.
            for i in range(len(minutes_list) - 2):
                if minutes_list[i + 2] - minutes_list[i] <= 60:
                    alerted.append(name)
                    break
        return sorted(alerted)
| StarcoderdataPython |
1622854 | import time
import matplotlib as mpl
import matplotlib.pyplot as plt
import pytest
from pytest import approx
from rpi.stepper import Stepper
# Use a dark theme for the diagnostic plots saved by test_stepper.
plt.style.use('dark_background')
class PiMock:
    """Fake pigpio.pi"""
    def gpio_trigger(self, user_gpio, pulse_len=10, level=1):
        # No-op stand-in for pigpio.pi.gpio_trigger.
        pass
    def set_mode(self, gpio, mode):
        # No-op stand-in for pigpio.pi.set_mode.
        pass
    def write(self, gpio, level):
        # No-op stand-in for pigpio.pi.write.
        pass
def mock_time(dt=None, mem=[0.0]):
    """Fake time.perf_counter().
    The mutable default ``mem`` is intentional: it persists across calls and
    holds the fake clock's state. Passing ``dt`` advances the clock; either
    way the current fake time is returned.
    """
    if dt is not None:
        mem[0] += dt
    return mem[0]
def run(stepper, pre_func, dt, freq=100000):
    """Run stepper for specified time dt at given clock frequency.
    Calls ``pre_func(stepper)`` before each tick and advances the mocked
    clock by one clock period per iteration.
    """
    for _ in range(int(round(dt * freq))):
        pre_func(stepper)
        stepper.run()
        mock_time(1 / freq)
def test_stepper(monkeypatch):
    """Drive the stepper through a velocity reversal and verify position,
    velocity and direction at key points, saving a trace plot either way."""
    # Replace the real clock with the controllable fake.
    monkeypatch.setattr(time, 'perf_counter', mock_time)
    stepper = Stepper(PiMock(), 0, 1, 2, accel_max=1000, velocity_max=4000)
    approx_ = lambda x: approx(x, rel=0.1, abs=0.1)
    # Trace buffers sampled once per simulated tick.
    times = []
    position_hist = []
    velocity_hist = []
    def update_hist(stepper):
        times.append(mock_time())
        position_hist.append(stepper.position)
        velocity_hist.append(stepper.velocity)
    def plot():
        # Save a diagnostic plot of the recorded trace.
        fig, ax = plt.subplots()
        ax.set_title('test_stepper')
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Position / Velocity')
        ax.axhline(y=0, linewidth=1, color='w')
        ax.plot(times, position_hist, label="position")
        ax.plot(times, velocity_hist, label="velocity")
        ax.legend()
        fig.savefig('log/test_stepper.png', dpi=150)
    try:
        # Move CW direction
        stepper.set_velocity_setpoint(1000)
        run(stepper, update_hist, 1.0)
        assert stepper.dir == Stepper.DIRECTION_CW
        assert stepper.position > 0
        assert stepper.velocity == approx_(1000.0)
        # Reverse direction, but not instantaneously!
        stepper.set_velocity_setpoint(-1000)
        run(stepper, update_hist, 1e-3)
        assert stepper.dir == Stepper.DIRECTION_CW
        assert stepper.position > 0
        assert stepper.velocity < 1000
        # After ~1 s of deceleration the velocity should pass through zero.
        run(stepper, update_hist, 1.0 - 1e-3)
        assert stepper.position > 0
        assert stepper.velocity == approx_(0.0)
        run(stepper, update_hist, 1e-3)
        assert stepper.dir == Stepper.DIRECTION_CCW
        assert stepper.position > 0
        run(stepper, update_hist, 1.0 - 1e-3)
        assert stepper.dir == Stepper.DIRECTION_CCW
        assert stepper.position > 0
        assert stepper.velocity < 0
        # Keep moving CCW long enough for the position to go negative.
        run(stepper, update_hist, 1.0)
        assert stepper.dir == Stepper.DIRECTION_CCW
        assert stepper.position < 0
        assert stepper.velocity < 0
    except AssertionError as e:
        # Persist the plot for debugging, then re-raise the failure.
        plot()
        raise e
    plot()
| StarcoderdataPython |
4837487 | <reponame>auderson/numba
import weakref
from numba.core import types
class DataModelManager(object):
    """Maps frontend (FE) type classes to data-model factories and memoizes
    the model instance built for each FE type instance.
    """
    def __init__(self):
        # Maps a numba type *class* to the factory producing its model.
        self._handlers = {}
        # Memoizes the model built for each type *instance*; weak keys let
        # unreferenced type instances be garbage-collected.
        self._cache = weakref.WeakKeyDictionary()
    def register(self, fetypecls, handler):
        """Register the data-model factory for a frontend-type class."""
        assert issubclass(fetypecls, types.Type)
        self._handlers[fetypecls] = handler
    def lookup(self, fetype):
        """Return (building and caching if needed) the data model for the
        given frontend-type instance."""
        if fetype in self._cache:
            return self._cache[fetype]
        factory = self._handlers[type(fetype)]
        model = factory(self, fetype)
        self._cache[fetype] = model
        return model
    def __getitem__(self, fetype):
        """Shorthand for :meth:`lookup`."""
        return self.lookup(fetype)
    def copy(self):
        """Return a copy of the manager with the same handlers but a fresh
        cache. Use this to inherit from the default data model and
        specialize it for a custom target."""
        cloned = DataModelManager()
        cloned._handlers = dict(self._handlers)
        return cloned
| StarcoderdataPython |
3326070 | import os
import csv
# Create a path for budget_data file
csv_path = os.path.join('Resources', 'budget_data.csv')

# Parallel lists: dates[i] and profit_loss[i] come from the same data row.
dates = []
profit_loss = []

# Read the CSV once (the original re-read the file to recover dates).
with open(csv_path, 'r', newline="") as csvfile:
    csvreader = csv.reader(csvfile)
    next(csvreader, None)  # skip the header row
    for row in csvreader:
        dates.append(row[0])
        profit_loss.append(int(row[1]))

# Total of the profit/loss column.
Net_ProfitLoss = sum(profit_loss)

# Month-over-month changes: changing_PL[i] is the change arriving at row i+1.
changing_PL = [float(b - a) for a, b in zip(profit_loss, profit_loss[1:])]

# Average change rounded to 2 decimals (0.0 when fewer than two months exist).
Change = round(sum(changing_PL) / len(changing_PL), 2) if changing_PL else 0.0

if changing_PL:
    # Bug fixes vs. the original: the scan no longer skips the last change,
    # an extremum at the first change now reports its date, and the greatest
    # increase is rounded from max_elem (it was rounded from min_elem).
    index_min = min(range(len(changing_PL)), key=lambda i: changing_PL[i])
    index_max = max(range(len(changing_PL)), key=lambda i: changing_PL[i])
    min_elem = round(changing_PL[index_min], 0)
    max_elem = round(changing_PL[index_max], 0)
    # A change at index i belongs to the following month's row (i + 1).
    min_date = dates[index_min + 1]
    max_date = dates[index_max + 1]
else:
    min_elem = max_elem = 0.0
    min_date = max_date = ""

print("Total Months: " + str(len(dates)))
print("The total net profit/loss is $" + str(Net_ProfitLoss))
print("Average Change: $" + str(Change))
print("Greatest Increase in Profits: " + max_date + " ($" + str(max_elem) + ")")
print("Greatest Decrease in Profits: " + min_date + " ($" + str(min_elem) + ")")

# Write the same report to Analysis/analysis_results.txt.
textList = [
    "Total Months: " + str(len(dates)),
    "The total net profit/loss is $" + str(Net_ProfitLoss),
    "Average Change: $" + str(Change),
    "Greatest Increase in Profits: " + max_date + " ($" + str(max_elem) + ")",
    "Greatest Decrease in Profits: " + min_date + " ($" + str(min_elem) + ")",
]
os.makedirs('Analysis', exist_ok=True)  # was os.mkdir: crashed on re-runs
output_path = os.path.join('Analysis', 'analysis_results.txt')
with open(output_path, 'w', newline="") as out:
    for line in textList:
        out.write(line)
        out.write("\n")
3206419 | <filename>libs/json/createDocsForParemeters.py<gh_stars>0
#!/usr/bin/python
import jinja2
import json
def getJsonFromFile(filename):
data = None
foundError = False
f = None
try:
# Opening JSON file
f = open(filename)
# returns JSON object as a dictionary
data = json.load(f)
except IOError:
message = "Can't open json file >" + filename + "<"
print(message)
foundError = True
except ValueError as err:
message = "There is an issue in the json file >" + filename + \
"<. Issue starts on character position " + \
str(err.pos) + ": " + err.msg
print(message)
foundError = True
finally:
if f is not None:
f.close()
if foundError is True:
message = "Can't run the use case before the error(s) mentioned above are not fixed"
print(message)
return data
def renderTemplateWithJson(TEMPLATE_FILE):
templateLoader = jinja2.FileSystemLoader(searchpath="templates/")
templateEnv = jinja2.Environment(loader=templateLoader)
template = templateEnv.get_template(TEMPLATE_FILE)
parameters = getJsonFromFile("paramServices.json")
renderedText = template.render(params=parameters) # this is where to put args to the template renderer
with open('../../docs/' + TEMPLATE_FILE, 'w') as f:
f.write(renderedText)
renderTemplateWithJson("PARAMETERS-SERVICES.md")
renderTemplateWithJson("PARAMETERS-BTPSA.md")
| StarcoderdataPython |
1741169 | <gh_stars>0
"""Contains Saga class"""
import asyncio
import itertools
import logging
from typing import Any, Callable, List, Optional
from uuid import uuid4
logger = logging.getLogger("sagah")
class SagaFailed(Exception):
"""Raised when a saga fails"""
def __init__(self, transaction: "SagaTransaction") -> None:
"""Initializes the SagaFailed exception
Args:
transaction: Saga transaction that failed
"""
self.transaction = transaction
self.message = "Saga failed"
class SagaTransaction:
"""Class representing a single local transaction within a saga"""
def __init__(
self, saga_id: str, action: Callable, compensator: Callable, name: str
) -> None:
"""Initialize the transaction
Args:
saga_id: Saga identifier
action: Function that represents the desired action
compensator: Function that is used to compensenate, or rollback, the action
name: Name to identify the action
"""
self.saga_id = saga_id
self.action = action
self.compensator = compensator
self.name = name
def __str__(self) -> str:
"""String representation of the SagaTransaction"""
return f'{self.__class__.__name__}(name="{self.name}")'
def _log(self, message: str, level: str = "INFO") -> None:
"""Log a message for the transaction
Args:
message: Log message
level: Log level
"""
logger.log(
getattr(logging, level), f"[saga={self.saga_id}] [tx={self.name}] {message}"
)
async def call_action(self) -> Any:
"""Call the action function
Returns:
action result
"""
self._log("Saga transaction starting")
try:
result = (
await self.action()
if asyncio.iscoroutinefunction(self.action)
else self.action()
)
self._log("Saga transaction succeeded")
return result
except Exception as e:
self._log(f"Saga transaction failed: {str(e)}", "ERROR")
raise
async def call_compensator(self) -> None:
"""Call the compensator function"""
self._log("Saga transaction rolling back")
await self.compensator() if asyncio.iscoroutinefunction(
self.compensator
) else self.compensator()
class Saga:
"""Context manager that implements a saga"""
def __init__(self, saga_id: Optional[str] = None) -> None:
"""Initializes the Saga
Args:
saga_id: Optional saga identifier
"""
self.saga_id = saga_id or str(uuid4())
# List of completed transactions for rolling back
self._transactions: List[SagaTransaction] = []
# Counter for generating an integer sequence for naming actions
self._counter = itertools.count(start=1)
def _log(self, message: str, level: str = "INFO") -> None:
"""Log a message for the saga
Args:
message: Log message
level: Log level
"""
logger.log(getattr(logging, level), f"[saga={self.saga_id}] {message}")
def __enter__(self) -> "Saga":
"""Entering the context returns the saga"""
self._log("Entering saga")
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
"""Exiting the saga is a no-op"""
self._log("Exiting saga")
async def rollback(self) -> None:
"""Rollback all local transactions in reverse order
This calls each transaction's compensator function.
"""
self._log("Rolling back saga transactions")
while self._transactions:
tx = self._transactions.pop()
try:
await tx.call_compensator()
except Exception:
self._transactions.append(tx)
raise
async def action(
self, action: Callable, compensator: Callable, name: Optional[str] = None
) -> Any:
"""Trigger an action
If the action succeeds, register the transaction for a potential rollback in the
future.
Args:
action: Function that represents the desired action
compensator: Function that is used to compensenate, or rollback, the action
name: Optional name to identify the action. Will default to
auto-incrementing integer.
Raises:
:py:exc:`SagaFailed`: if the action fails
Returns:
result of the action, if it succeeds
"""
if not name:
name = str(next(self._counter))
tx = SagaTransaction(self.saga_id, action, compensator, name)
try:
result = await tx.call_action()
except Exception as e:
await self.rollback()
raise SagaFailed(tx) from e
else:
self._transactions.append(tx)
return result
| StarcoderdataPython |
3347774 | from highcliff.actions.actions import AIaction
class MonitorAirflow(AIaction):
def __init__(self, ai):
super().__init__(ai)
self.effects = {"is_airflow_adjustment_needed": True}
self.preconditions = {}
def behavior(self):
# decide if adjustment is needed and update the world accordingly
raise NotImplementedError
def no_adjustment_needed(self):
# this should be called by custom behavior if it determines that no adjustment is needed
self.actual_effects["is_airflow_adjustment_needed"] = False
class AuthorizeAirflowAdjustment(AIaction):
def __init__(self, ai):
super().__init__(ai)
self.effects = {"is_airflow_adjustment_authorized": True}
self.preconditions = {"is_airflow_adjustment_needed": True}
def behavior(self):
# custom behavior must be specified by anyone implementing an AI action
raise NotImplementedError
def authorization_failed(self):
# this should be called by custom behavior if it fails to authorize the adjustment
self.actual_effects["is_airflow_adjustment_authorized"] = False
class AdjustAirflow(AIaction):
def __init__(self, ai):
super().__init__(ai)
self.effects = {"is_airflow_comfortable": True}
self.preconditions = {"is_airflow_adjustment_authorized": True}
def behavior(self):
# custom behavior must be specified by anyone implementing an AI action
raise NotImplementedError
def adjustment_failed(self):
# this should be called by custom behavior if it fails to complete the adjustment
self.actual_effects["is_airflow_comfortable"] = False
| StarcoderdataPython |
3326520 | from django.contrib.auth.forms import UserCreationForm
from UsersApp.models import Account
from ArticlesApp.models import Author
from django import forms
from django.db import transaction
# noinspection PySuperArguments
class AuthorSignUpForm(UserCreationForm, forms.Form):
"""Form for fill sign up. """
class Meta(UserCreationForm.Meta):
model = Account
@transaction.atomic
def save(self):
user = super().save(commit=False)
user.is_author = True
user.save()
author = Author.objects.create(user=user)
return user
| StarcoderdataPython |
3312082 | from base import Constant
from errors import Request
from models import Mark as _Mark_, Type
from .mixins import Identify
class Mark(Identify):
CONNECTION_LIMIT = Constant.RIGID_CONNECTION_LIMIT
def _validate(self, request):
super()._validate(request)
self.__type = self._get(request, 'type', '')
if not self.__type in Type.__members__:
raise Request('type', self.__type)
self.__type = Type[self.__type]
def _process(self, request):
db = self._application.db
mark = _Mark_.query \
.filter(_Mark_.type == self.__type) \
.filter(_Mark_.task_id == self._task.id) \
.filter(_Mark_.session_id == self._session.id) \
.first()
if mark is None:
mark = _Mark_(type=self.__type)
mark.task = self._task
mark.session = self._session
db.session.commit()
| StarcoderdataPython |
3216729 | import numpy as np
import scipy.linalg as spla
from scipy.spatial.distance import cdist
def chol2inv(chol):
return spla.cho_solve((chol, False), np.eye(chol.shape[ 0 ]))
def matrixInverse(M):
return chol2inv(spla.cholesky(M, lower=False))
def compute_kernel(lls, lsf, x, z):
ls = np.exp(lls)
sf = np.exp(lsf)
if x.ndim == 1:
x= x[ None, : ]
if z.ndim == 1:
z= z[ None, : ]
r2 = cdist(x, z, 'seuclidean', V = ls)**2.0
k = sf * np.exp(-0.5*r2)
return k
def compute_psi1(lls, lsf, xmean, xvar, z):
if xmean.ndim == 1:
xmean = xmean[ None, : ]
ls = np.exp(lls)
sf = np.exp(lsf)
lspxvar = ls + xvar
constterm1 = ls / lspxvar
constterm2 = np.prod(np.sqrt(constterm1))
r2_psi1 = cdist(xmean, z, 'seuclidean', V = lspxvar)**2.0
psi1 = sf*constterm2*np.exp(-0.5*r2_psi1)
return psi1
def compute_psi2(lls, lsf, xmean, xvar, z):
ls = np.exp(lls)
sf = np.exp(lsf)
lsp2xvar = ls + 2.0 * xvar
constterm1 = ls / lsp2xvar
constterm2 = np.prod(np.sqrt(constterm1))
n_psi = z.shape[ 0 ]
v_ones_n_psi = np.ones(n_psi)
v_ones_dim = np.ones(z.shape[ 1 ])
D = ls
Dnew = ls / 2.0
Btilde = 1.0 / (Dnew + xvar)
Vtilde = Btilde - 1.0 / Dnew
Qtilde = 1.0 / D + 0.25 * Vtilde
T1 = -0.5 * np.outer(np.dot((z**2) * np.outer(v_ones_n_psi, Qtilde), v_ones_dim), v_ones_n_psi)
T2 = +0.5 * np.outer(np.dot(z, xmean * Btilde), v_ones_n_psi)
T3 = -0.25 * np.dot(z * np.outer(v_ones_n_psi, Vtilde), z.T)
T4 = -0.5 * np.sum((xmean**2) * Btilde)
M = T1 + T1.T + T2 + T2.T + T3 + T4
psi2 = sf**2.0 * constterm2 * np.exp(M)
return psi2
def d_trace_MKzz_dhypers(lls, lsf, z, M, Kzz):
dKzz_dlsf = Kzz
ls = np.exp(lls)
# This is extracted from the R-code of Scalable EP for GP Classification by DHL and JMHL
gr_lsf = np.sum(M * dKzz_dlsf)
# This uses the vact that the distance is v^21^T - vv^T + 1v^2^T, where v is a vector with the l-dimension
# of the inducing points.
Ml = 0.5 * M * Kzz
Xl = z * np.outer(np.ones(z.shape[ 0 ]), 1.0 / np.sqrt(ls))
gr_lls = np.dot(np.ones(Ml.shape[ 0 ]), np.dot(Ml.T, Xl**2)) + np.dot(np.ones(Ml.shape[ 0 ]), np.dot(Ml, Xl**2)) \
- 2.0 * np.dot(np.ones(Xl.shape[ 0 ]), (Xl * np.dot(Ml, Xl)))
Xbar = z * np.outer(np.ones(z.shape[ 0 ]), 1.0 / ls)
Mbar1 = - M.T * Kzz
Mbar2 = - M * Kzz
gr_z = (Xbar * np.outer(np.dot(np.ones(Mbar1.shape[ 0 ]) , Mbar1), np.ones(Xbar.shape[ 1 ])) - np.dot(Mbar1, Xbar)) +\
(Xbar * np.outer(np.dot(np.ones(Mbar2.shape[ 0 ]) , Mbar2), np.ones(Xbar.shape[ 1 ])) - np.dot(Mbar2, Xbar))
# The cost of this function is dominated by five matrix multiplications with cost M^2 * D each where D is
# the dimensionality of the data!!!
return gr_lsf, gr_lls, gr_z
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.