blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ae2b5badd77fad9407ce1c07606e609826621d63 | 375f9728b3ddc3f810de597926d7d4142e38e33e | /network/petition_centralities_signatures_pic_list2_int.py | 49db540d48df93f644b02e61a1131828dad6ec0d | [
"MIT"
] | permissive | yukimasano/pet_forecast | 614c38fc0a1bfe2fdf84bcf0ba9fac7f443bca57 | 57547fee4c222313e9c958536f60da4f43e23c8c | refs/heads/master | 2021-09-07T13:15:40.063282 | 2018-02-23T10:48:01 | 2018-02-23T10:48:01 | 111,246,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,729 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 26 21:28:06 2016
@author: YPC
"""
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
# One-off preprocessing (disabled): build the directed Twitter mention graph
# and compute the PageRank centrality of every author.  Flip to True to
# regenerate `c_pr`; presumably the result is normally loaded from a cache
# elsewhere -- confirm before enabling.
if False:
    uu=np.load('mentions.npy')
    G_dir = nx.from_edgelist(uu,create_using=nx.DiGraph())
    G = nx.from_edgelist(uu)
    # Self-mentions carry no centrality information; drop them.
    G_dir.remove_edges_from(G_dir.selfloop_edges())
    G.remove_edges_from(G.selfloop_edges())
    #uids=G_dir.nodes()
    c_pr=nx.pagerank_scipy(G_dir,tol=1e-09)
#%%
import pandas as pd
from email.utils import parsedate_tz, mktime_tz
# One-off preprocessing (disabled): load the tweet table and attach to each
# tweet its author's PageRank score (0 when the author does not appear in
# the mention graph).  Depends on `c_pr` from the block above.
if False:
    df = pd.read_csv('../twitter/output_tweets.csv',sep='\t')#,dtype=np.str)
    t_uid = df.user_id.values
    t_time = df.created_at.values
    t_tid = df.tweet_id.values
    t_cen=np.zeros(shape=(len(t_uid)))
    for i in range(len(t_uid)):
        try:
            # NOTE(review): bare except -- any missing user id silently maps to 0.
            t_cen[i] = c_pr['%s'%t_uid[i]] #c_pr.get('%s'%t_uid[i])
        except:
            t_cen[i]=0
    df2 = pd.read_csv('../twitter/output_petitions.csv',sep='\t')#,dtype=np.str)
    p_pid= df2.pet_id.values
    p_tid= df2.tweet_id.values
#%%
"""
tu=np.unique(t_tid)
pu=np.unique(p_tid)
len(tu) # 996048
len(pu) # 1014276
len(t_tid) # 1007928
len(p_tid) # 1029688
--> also tweet file contains less tweets than patent file.
"""
import datetime
"""add petition_starting time"""
def get_cen_for_pet(pet_in, p_pid, p_tid, t_cen, t0_in):
    """Collect tweet-author centralities and tweet times for one petition.

    Args:
        pet_in: petition id to select (compared as int).
        p_pid: array of petition ids, aligned with p_tid.
        p_tid: array of tweet ids associated with each petition id.
        t_cen: per-tweet author centrality scores, aligned with the
            module-level ``t_tid`` / ``t_time`` arrays.
        t0_in: datetime of the petition opening; times are returned in
            minutes elapsed since this instant.

    Returns:
        (C, T, missing): centralities and elapsed minutes as numpy arrays
        sorted chronologically, plus the number of petition tweets that
        could not be found in the tweet table.  On an unexpected failure
        the legacy sentinel ('no', 'no', missing) is returned.

    NOTE(review): relies on the module-level globals ``t_time`` and
    ``t_tid`` built in the (disabled) preprocessing block above -- confirm
    they are populated before calling.
    """
    C = []
    T = []
    missing = 0
    for i in range(len(p_pid)):
        try:
            if int(p_pid[i]) == int(pet_in):
                # Tweet timestamp, parsed from its RFC-2822 'created_at' string.
                ti = datetime.datetime.fromtimestamp(mktime_tz(parsedate_tz(t_time[i])))
                # Position of this tweet in the tweet table.
                ind = np.argwhere(t_tid == p_tid[i])[0][0]
                C.append(t_cen[ind])
                T.append(ti)
        except IndexError:
            # Tweet referenced by the petition file is absent from the tweet file.
            missing += 1
            # print('Could not find tweet %s' % p_tid[i])
            continue
    try:
        # Convert to minutes since petition opening and sort chronologically.
        T = [(x - t0_in).total_seconds() / 60. for x in T]
        T = np.array(T)
        C = np.array(C)
        indx = np.argsort(T)
        C = C[indx]
        T = T[indx]
        return C, T, missing
    except Exception:  # was a bare except: -- narrowed so Ctrl-C still works
        print('something wrong')
        # Bug fix: always return a 3-tuple so callers that unpack
        # ``c, t, mis = get_cen_for_pet(...)`` do not crash on failure.
        return 'no', 'no', missing
def give_opening_time(pet_in):
    # Return the opening datetime of petition `pet_in`, shifted to UTC.
    # Scans ../petitions.json linearly for the matching id, then parses the
    # 'created_datetime' ISO string by fixed slicing.
    # NOTE(review): Python 2 code -- `t.encode()` yields a str there; under
    # Python 3 it yields bytes and int() on the slices would fail.
    # NOTE(review): if no petition matches, `t` is unbound and a NameError
    # is raised -- confirm callers only pass known ids.
    import json
    f = open('../petitions.json', 'r')
    met = json.load(f)
    f.close()
    for i in range(len(met)):
        if str(met[i]['petition']['id'])==str(pet_in):
            t = met[i]['petition']['created_datetime']
            break
    # 'YYYY-MM-DDTHH:MM:SSZ' -> date part and time part (trailing 'Z' dropped).
    dt = t.encode()[0:10]
    tim = t.encode()[11:-1]
    #tweets are in utc time so substract one here
    dt_tim = datetime.datetime(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]),
                               int(tim[0:2]),int(tim[3:5]),int(tim[6:8]))
    dt_tim -= datetime.timedelta(hours=1) # UK GMT, twitter: UTC
    return dt_tim
# Main: for each selected petition, overlay the signature time series with
# the (scaled) PageRank of every tweeting author, and save the figure.
if True:
    pet_list=[]
    # Alternative petition ids used in earlier runs:
    # pet_list.append( int(54958))
    # pet_list.append( int(49528))
    # pet_list.append( int(38257))
    # pet_list.append( int(62490))
    # pet_list.append( int(45969))
    pet_list.append( int(62385))
    # pet_list.append( int(46455))
    # pet_list.append( int(73911))
    # pet_list.append( int(53523))
    for pet in pet_list:
        print pet
        t0 = give_opening_time(pet)
        c_vec, t_vec, mis = get_cen_for_pet(pet,p_pid,p_tid, t_cen,t0)
        print mis
        # Minutes -> hours since petition opening.
        t_vec2 = t_vec/60.
        sigs = np.load('../time-series/signatures/sig%s.npy'%pet)
        sig_t = range(len(sigs))
        # Scale centralities so they are visible on the signature axis.
        c_vec2 = c_vec*(10**6)
        fig = plt.figure(figsize=(7, 4.5))
        plt.rcParams.update({'font.size': 14})
        plt.rc('font', family='serif')
        plt.semilogy(sig_t, sigs)
        plt.semilogy(t_vec2, c_vec2, 'x',markersize=5)
        plt.xlabel('Time since start of petition in h') # sollte time since petition start
        #plt.ylabel('PageRank of tweet author')
        plt.legend(['Signatures','Scaled PageRank of tweet author'],loc=2,fontsize=9)
        plt.title('Centralities and signatures for petition %s'%pet)
        plt.xlim([-3000,3000+20])
        fig.savefig('c_sigs3000_%s'%pet, dpi=500)
        plt.close()
#%%
#fig = plt.figure(figsize=(7, 4.5))
#plt.rcParams.update({'font.size': 14})
#plt.rc('font', family='serif')
#plt.xlabel('Time since start of petition in h') # sollte time since petition start
#plt.ylabel('PageRank of tweet author')
#plt.semilogy(t_vec2,c_vec2, 'gx')
#plt.semilogy(sig_t, sigs)
#plt.legend(['Signatures','Scaled PageRank of tweet author'],loc=1,fontsize=9)
#plt.title('Centralities and signatures for petition %s'%pet)
#plt.xlim([0,250])
#fig.savefig('c_sigs250_%s'%pet, dpi=500)
#%%
import winsound
# Play Windows exit sound.
winsound.PlaySound("SystemExit", winsound.SND_ALIAS) | [
"yuki.m.asano@gmail.com"
] | yuki.m.asano@gmail.com |
cd1f939ff3dcebcaf7e0de99d733bd8ea807a195 | f5fd578fbd9693cfe2146c3606a2d9c02c69245f | /memory.py | 4ca78b520de06075384f96c4153d7baee8c03f17 | [] | no_license | ShivanshuPurohit/Soccer-using-colaboraton-competition | 166005d67a55199d8a50555f3536322ec4b24335 | 7cc1e8853386d7b687e5c1ea9ca98057252b1b97 | refs/heads/master | 2022-07-09T12:17:37.004303 | 2020-05-15T11:33:46 | 2020-05-15T11:33:46 | 264,175,382 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | import numpy as np
from collections import namedtuple
import random
class Memory:
    """Flat rollout buffer for PPO-style training.

    Stores (actor_state, critic_state, action, log_prob, reward) tuples and
    hands them back as vertically stacked numpy arrays.
    """

    def __init__(self):
        # Ordered storage; emptied by default after experiences() is read.
        self.memory = []
        # Keep the namedtuple factory on the instance, as before.
        self.experience = namedtuple(
            'Experience',
            field_names=['actor_state', 'critic_state', 'action', 'log_prob', 'reward'])

    def add(self, actor_state, critic_state, action, log_prob, reward):
        """Append one experience tuple to the buffer."""
        self.memory.append(
            self.experience(actor_state, critic_state, action, log_prob, reward))

    def experiences(self, clear=True):
        """Return the buffered rollout as stacked arrays.

        Returns a 6-tuple: actor_states, critic_states, actions, log_probs,
        rewards (each np.vstack-ed over non-None entries) and the number of
        stored entries.  The buffer is cleared afterwards unless
        ``clear=False``.
        """
        kept = [e for e in self.memory if e is not None]
        columns = tuple(
            np.vstack([getattr(e, field) for e in kept])
            for field in ('actor_state', 'critic_state', 'action', 'log_prob', 'reward'))
        n_exp = len(self)
        if clear:
            self.clear()
        return columns + (n_exp,)

    def delete(self, i):
        """Remove the experience stored at index ``i``."""
        del self.memory[i]

    def clear(self):
        """Empty the buffer."""
        self.memory.clear()

    def __len__(self):
        return len(self.memory)
"noreply@github.com"
] | noreply@github.com |
a5427a4f0624f0bb4bd5eda86c7a9e1f9396c37c | 049d6ad6f49aff27ac35fb329675d3ebb4efdb49 | /api/routers/dashboard.py | 7cdcd3e9c2d7e86acfb845c8d72f0dc9c0f23f7d | [
"MIT"
] | permissive | xming521/coco_API | 7be34d6d5cf797d7677b09d2a9f027e479d347b3 | 51d7ac3141e58f1d6a5438af135fba3ea101bd53 | refs/heads/master | 2023-07-15T14:03:27.358265 | 2021-09-04T08:33:14 | 2021-09-04T08:33:14 | 337,937,979 | 0 | 0 | null | 2021-09-04T08:33:15 | 2021-02-11T05:25:08 | Python | UTF-8 | Python | false | false | 2,364 | py | import time
import psutil
import pymysql
from fastapi import APIRouter
from api.utils import response_code
router = APIRouter()
@router.get('/dashboard/getinfo')
def getinfo():
    # Dashboard summary endpoint: app counts, Docker image/network counts,
    # and the 10 most recently started apps.
    from init_global import g
    res = {}
    db = g.db_pool.connection()
    cur = db.cursor()
    cur.execute(f'select count(app_name) from app_list')
    res['app_count'] = cur.fetchall()[0][0]
    cur.execute(f'select count(app_name) from app_list where status="running"')
    res['app_run_count'] = cur.fetchall()[0][0]
    # g.dc is presumably a docker client -- confirm in init_global.
    res['image_count'] = len(g.dc.images.list())
    res['networks_count'] = len(g.dc.networks.list())
    # Dict cursor so recent events serialize as JSON objects.
    cur = db.cursor(cursor=pymysql.cursors.DictCursor)
    cur.execute(f'select * from app_list order by start_time desc limit 10')
    res['recent_event'] = cur.fetchall()
    db.close()
    return response_code.resp_200(data={"res": res})
def get_performance():
    """Sample host CPU / memory / disk statistics via psutil.

    Returns:
        dict with keys:
        'cpu', 'mem', 'disk' -- usage percentages;
        'memoryTotal', 'memoryUsed' -- GiB, rounded to 2 decimals;
        'diskTotal', 'diskUsed' -- GiB, summed over readable partitions.
    """
    res = {}
    # CPU utilisation sampled over a 0.5 s window.
    cpu_percent = psutil.cpu_percent(0.5)
    # 内存 (memory, GiB)
    m = psutil.virtual_memory()
    memory_total = round(m.total / (1024.0 * 1024.0 * 1024.0), 2)
    memory_used = round(m.used / (1024.0 * 1024.0 * 1024.0), 2)
    # 磁盘 (disk): aggregate over every partition we can stat.
    disk_total = 0   # 总储存空间大小
    disk_used = 0    # 已用
    # Bug fix: the original read `o.percent` after the loop, where `o` was the
    # loop-local usage object -- a NameError if every disk_usage() call failed.
    disk_percent = 0.0
    for part in psutil.disk_partitions():
        try:
            usage = psutil.disk_usage(part.mountpoint)
        except OSError:
            # e.g. empty CD-ROM drive or permission error -- skip, as before.
            continue
        disk_total += int(usage.total / (1024.0 * 1024.0 * 1024.0))
        disk_used += int(usage.used / (1024.0 * 1024.0 * 1024.0))
        # Percent of the last readable partition (matches original behaviour).
        disk_percent = usage.percent
    res['cpu'] = cpu_percent
    res['mem'] = m.percent
    res['disk'] = disk_percent
    res['memoryTotal'] = memory_total
    res['memoryUsed'] = memory_used
    res['diskTotal'] = disk_total
    res['diskUsed'] = disk_used
    return res
def push_realinfo():
    # Push live host metrics to dashboard clients over socket.io every 3 s,
    # for as long as g.person_online is truthy (presumably toggled by the
    # socket connect/disconnect handlers -- confirm in main.py).
    from init_global import g
    from main import socket_manager as sm
    print(g.person_online)
    while g.person_online:
        res = get_performance()
        # print(res)
        # Emit synchronously on the shared event loop owned by init_global.
        g.push_loop.run_until_complete(sm.emit('dashboard', {'data': res}))
        time.sleep(3)
| [
"1223398803@qq.com"
] | 1223398803@qq.com |
a4e11a89ccc2959787b334639f4f2662aa95dfda | f308e4d68bd356615eb805c363b7814797dd2869 | /venv/bin/pip | 872204bb96205da0368975161740cc2fc3cb0716 | [] | no_license | rinagalperin/convolution_neural_networks_optimization_EA | cf9708cf387604cf8f1625a252564a6685d2e741 | 36c343b3c0ff31554d6662e2b51da603868a6134 | refs/heads/master | 2020-05-03T11:53:21.286476 | 2019-03-30T21:02:45 | 2019-03-30T21:02:45 | 178,611,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | #!/Users/rina/PycharmProjects/convolution_neural_networks_optimization_EA/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Setuptools-generated console-script shim: delegates to pip's declared
# 'console_scripts' entry point for this virtualenv.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip any '-script.py(w)'/'.exe' suffix so argv[0] looks like 'pip'.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"rinag@post.bgu.ac.il"
] | rinag@post.bgu.ac.il | |
44dadbea994a5d273026036fca182ea229fd6eae | d7d6f612badd6c96bbc8b90fff6663da47af0b03 | /variational/variational_strategy.py | c0e8f211bc2db1667993299a533800cdfb113b88 | [] | no_license | hiteshsapkota/DRO-Deep-Kernel-Multiple-Instance-Learning | b8d69adba08b40722f19e151e06ffe57a3cee1f4 | 4e23236f50f75dc95ce0f7ab2358528bddcc5310 | refs/heads/main | 2023-01-02T07:24:38.105342 | 2020-10-26T00:07:21 | 2020-10-26T00:07:21 | 304,165,187 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,152 | py | #!/usr/bin/env python3
import warnings
import torch
from distributions import MultivariateNormal
from lazy import DiagLazyTensor, MatmulLazyTensor, RootLazyTensor, SumLazyTensor, delazify
from settings import trace_mode
from utils.cholesky import psd_safe_cholesky
from utils.memoize import cached
from utils.warnings import OldVersionWarning
from ._variational_strategy import _VariationalStrategy
def _ensure_updated_strategy_flag_set(
    state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
    """State-dict pre-hook: backfill the ``updated_strategy`` buffer.

    Checkpoints written by older GPyTorch versions (before whitened
    variational parameters) lack this flag; inject it as ``False`` so
    ``VariationalStrategy.__call__`` can convert the parameters on first use,
    and warn the user to re-save the model.
    """
    # Borrow the device from an arbitrary tensor already in the state dict.
    device = next(iter(state_dict.values())).device
    flag_key = prefix + "updated_strategy"
    if flag_key in state_dict:
        return
    state_dict[flag_key] = torch.tensor(False, device=device)
    warnings.warn(
        "You have loaded a variational GP model (using `VariationalStrategy`) from a previous version of "
        "GPyTorch. We have updated the parameters of your model to work with the new version of "
        "`VariationalStrategy` that uses whitened parameters.\nYour model will work as expected, but we "
        "recommend that you re-save your model.",
        OldVersionWarning,
    )
class VariationalStrategy(_VariationalStrategy):
    r"""
    The standard variational strategy, as defined by `Hensman et al. (2015)`_.

    This strategy takes a set of :math:`m \ll n` inducing points :math:`\mathbf Z`
    and applies an approximate distribution :math:`q( \mathbf u)` over their function values.
    (Here, we use the common notation :math:`\mathbf u = f(\mathbf Z)`.
    The approximate function distribution for any abitrary input :math:`\mathbf X` is given by:

    .. math::

        q( f(\mathbf X) ) = \int p( f(\mathbf X) \mid \mathbf u) q(\mathbf u) \: d\mathbf u

    This variational strategy uses "whitening" to accelerate the optimization of the variational
    parameters. See `Matthews (2017)`_ for more info.

    :param ~gpytorch.models.ApproximateGP model: Model this strategy is applied to.
        Typically passed in when the VariationalStrategy is created in the
        __init__ method of the user defined model.
    :param torch.Tensor inducing_points: Tensor containing a set of inducing
        points to use for variational inference.
    :param ~gpytorch.variational.VariationalDistribution variational_distribution: A
        VariationalDistribution object that represents the form of the variational distribution :math:`q(\mathbf u)`
    :param bool learn_inducing_points: (optional, default True): Whether or not
        the inducing point locations :math:`\mathbf Z` should be learned (i.e. are they
        parameters of the model).

    .. _Hensman et al. (2015):
        http://proceedings.mlr.press/v38/hensman15.pdf
    .. _Matthews (2017):
        https://www.repository.cam.ac.uk/handle/1810/278022
    """

    def __init__(self, model, inducing_points, variational_distribution, learn_inducing_locations=True):
        super().__init__(model, inducing_points, variational_distribution, learn_inducing_locations)
        # Flag persisted in the state dict so checkpoints predating the
        # whitened parameterization can be detected on load (see __call__).
        self.register_buffer("updated_strategy", torch.tensor(True))
        self._register_load_state_dict_pre_hook(_ensure_updated_strategy_flag_set)

    @cached(name="cholesky_factor")
    def _cholesky_factor(self, induc_induc_covar):
        # Cholesky of K_ZZ, computed in double precision for numerical
        # stability; cached across calls via @cached.
        L = psd_safe_cholesky(delazify(induc_induc_covar).double())
        return L

    @property
    @cached(name="prior_distribution_memo")
    def prior_distribution(self):
        # Under whitening, the prior over the (whitened) inducing values is a
        # standard Normal N(0, I) with the same shape as the variational mean.
        zeros = torch.zeros_like(self.variational_distribution.mean)
        ones = torch.ones_like(zeros)
        res = MultivariateNormal(zeros, DiagLazyTensor(ones))
        return res

    def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
        # Compute full prior distribution
        full_inputs = torch.cat([inducing_points, x], dim=-2)
        full_output = self.model.forward(full_inputs)
        full_covar = full_output.lazy_covariance_matrix

        # Covariance terms
        num_induc = inducing_points.size(-2)
        test_mean = full_output.mean[..., num_induc:]
        induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
        induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
        data_data_covar = full_covar[..., num_induc:, num_induc:]

        # Compute interpolation terms
        # K_ZZ^{-1/2} K_ZX
        # K_ZZ^{-1/2} \mu_Z
        L = self._cholesky_factor(induc_induc_covar)
        if L.shape != induc_induc_covar.shape:
            # Aggressive caching can cause nasty shape incompatibilies when evaluating with different batch shapes
            del self._memoize_cache["cholesky_factor"]
            L = self._cholesky_factor(induc_induc_covar)
        interp_term = torch.triangular_solve(induc_data_covar.double(), L, upper=False)[0].to(full_inputs.dtype)

        # Compute the mean of q(f)
        # k_XZ K_ZZ^{-1/2} (m - K_ZZ^{-1/2} \mu_Z) + \mu_X
        predictive_mean = (
            torch.matmul(
                interp_term.transpose(-1, -2), (inducing_values - self.prior_distribution.mean).unsqueeze(-1)
            ).squeeze(-1)
            + test_mean
        )

        # Compute the covariance of q(f)
        # K_XX + k_XZ K_ZZ^{-1/2} (S - I) K_ZZ^{-1/2} k_ZX
        middle_term = self.prior_distribution.lazy_covariance_matrix.mul(-1)
        if variational_inducing_covar is not None:
            middle_term = SumLazyTensor(variational_inducing_covar, middle_term)
        if trace_mode.on():
            # Dense path: evaluate everything eagerly (used when tracing).
            predictive_covar = (
                data_data_covar.add_jitter(1e-4).evaluate()
                + interp_term.transpose(-1, -2) @ middle_term.evaluate() @ interp_term
            )
        else:
            # Lazy path: keep the covariance as a structured lazy tensor.
            predictive_covar = SumLazyTensor(
                data_data_covar.add_jitter(1e-4),
                MatmulLazyTensor(interp_term.transpose(-1, -2), middle_term @ interp_term),
            )

        # Return the distribution
        return MultivariateNormal(predictive_mean, predictive_covar)

    def __call__(self, x, prior=False):
        # One-time migration: if this model was loaded from an old (unwhitened)
        # checkpoint, convert the variational parameters to whitened form.
        if not self.updated_strategy.item() and not prior:
            with torch.no_grad():
                # Get unwhitened p(u)
                prior_function_dist = self(self.inducing_points, prior=True)
                prior_mean = prior_function_dist.loc
                L = self._cholesky_factor(prior_function_dist.lazy_covariance_matrix.add_jitter())

                # Temporarily turn off noise that's added to the mean
                orig_mean_init_std = self._variational_distribution.mean_init_std
                self._variational_distribution.mean_init_std = 0.0

                # Change the variational parameters to be whitened
                variational_dist = self.variational_distribution
                whitened_mean = (
                    torch.triangular_solve((variational_dist.loc - prior_mean).unsqueeze(-1).double(), L, upper=False)[0]
                    .squeeze(-1)
                    .to(variational_dist.loc.dtype)
                )
                whitened_covar = RootLazyTensor(
                    torch.triangular_solve(
                        variational_dist.lazy_covariance_matrix.root_decomposition().root.evaluate().double(),
                        L,
                        upper=False,
                    )[0].to(variational_dist.loc.dtype)
                )
                whitened_variational_distribution = variational_dist.__class__(whitened_mean, whitened_covar)
                self._variational_distribution.initialize_variational_distribution(whitened_variational_distribution)

                # Reset the random noise parameter of the model
                self._variational_distribution.mean_init_std = orig_mean_init_std

                # Reset the cache
                if hasattr(self, "_memoize_cache"):
                    delattr(self, "_memoize_cache")
                    self._memoize_cache = dict()

                # Mark that we have updated the variational strategy
                self.updated_strategy.fill_(True)
        return super().__call__(x, prior=prior)
| [
"hiteshsapkota@gmail.com"
] | hiteshsapkota@gmail.com |
3fa376f3ef087cee256d7492675fdc21898a7b95 | 92c724afcc40c9e4d86af24b1b493e10fc8a994d | /src/figures/exploratory/exploratory_plots.py | f99cbafd230e2935a17d634a4cf0fd989b289b41 | [
"MIT"
] | permissive | willgdjones/GTEx | 48d7551c765700d0db34bb8f6e01f7f2a55bec6c | c56a5d548978545ab8a98e74236d52343113e9e6 | refs/heads/master | 2021-09-13T13:21:12.928226 | 2018-02-06T16:42:41 | 2018-02-06T16:42:41 | 90,028,785 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,241 | py | import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import h5py
GTEx_directory = '/hps/nobackup/research/stegle/users/willj/GTEx'
# Mean-aggregated image features from the retrained InceptioNet, keyed by
# patch size, plus the lung expression matrix and its sample IDs.
retrained_mean_features = {}
with h5py.File(GTEx_directory + '/small_data/new_retrained_inceptionet_aggregations.hdf5','r') as f:
    expression = f['lung']['256']['expression'].value
    for s in ['128','256','512','1024','2048','4096']:
        size_retrained_mean_features = f['lung'][s]['mean'].value
        retrained_mean_features[s] = size_retrained_mean_features
    expression_IDs = f['lung']['256']['expression_IDs'].value
# Same aggregation from the raw (not retrained) network; negative
# activations are clipped to zero before use.
raw_mean_features = {}
with h5py.File(GTEx_directory + '/small_data/new_raw_inceptionet_aggregations.hdf5','r') as f:
    for s in ['128','256','512','1024','2048','4096']:
        size_raw_mean_features = f['lung'][s]['mean'].value
        size_raw_mean_features[size_raw_mean_features < 0] = 0
        raw_mean_features[s] = size_raw_mean_features
# Comparing variation for each patch size
# f, a = plt.subplots(1,6, figsize=(35,5))
# f.suptitle("Image feature variation. Lung, patch-size 256",size=30)
# for (i,s) in enumerate(['128','256','512','1024','2048','4096']):
# a[i].hist(np.std(retrained_mean_features[s],axis=0),bins=100)
# a[i].set_title("Patch-size {}".format(s),size=20)
# plt.tight_layout()
# plt.subplots_adjust(top=0.80)
# plt.savefig('figures/exploratory/plots/feature_variation.eps',format='eps', dpi=600)
# Comparing variation when concatenating all features together
# plt.figure()
# concatenated_features = np.vstack([retrained_mean_features['128'], retrained_mean_features['256'], retrained_mean_features['512'], retrained_mean_features['1024'], retrained_mean_features['2048'], retrained_mean_features['4096']])
# plt.hist(np.std(concatenated_features,axis=0),bins=100)
# cutoff = min(np.std(concatenated_features[:,np.argsort(np.std(concatenated_features,axis=0))[-500:]],axis=0))
# plt.plot([cutoff, cutoff], [0, 300],c='red')
# plt.title("Histogram of variance from concatenated features across patch-sizes",size=11)
# plt.xlabel("Variance")
# plt.ylabel("Counts")
# plt.tight_layout()
# plt.savefig('figures/exploratory/plots/concatenated_feature_variation.eps',format='eps', dpi=600)
# Histogram of expression means.
# Include cutoff for top 500
# plt.figure()
# plt.hist(np.mean(expression,axis=0),bins=100)
# cutoff = min(np.mean(expression[:,np.argsort(np.mean(expression,axis=0))[-1000:]],axis=0))
# plt.plot([cutoff, cutoff], [0, 4500],c='red')
# plt.title("Histogram of mean gene expression")
# plt.xlabel("Mean expression")
# plt.ylabel("Count")
# plt.tight_layout()
# plt.savefig('figures/exploratory/plots/mean_expression_histogram.eps',format='eps', dpi=600)
#
# # Histogram of expression standard deviation.
# # Include cutoff for top 1000
# plt.figure()
# plt.hist(np.std(expression,axis=0),bins=100)
# cutoff = min(np.std(expression[:,np.argsort(np.std(expression,axis=0))[-1000:]],axis=0))
# plt.plot([cutoff, cutoff], [0, 2500],c='red')
# plt.title("Histogram of gene expression standard deviation")
# plt.xlabel("Expression standard devation")
# plt.ylabel("Count")
# plt.tight_layout()
# plt.savefig('figures/exploratory/plots/std_expression_histogram.eps',format='eps', dpi=600)
| [
"williamgdjones@gmail.com"
] | williamgdjones@gmail.com |
2e6d525f0693ba26ecf20429238d8ba878370522 | bc441bb06b8948288f110af63feda4e798f30225 | /resource_package_tools_sdk/model/container/ingress_rule_pb2.py | e7533ea4ef040c1f29394bd3dd0d9f6cdf9fbc34 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,319 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ingress_rule.proto
# Machine-generated by protoc for ingress_rule.proto -- do not edit by hand;
# regenerate from the .proto instead.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from resource_package_tools_sdk.model.container import http_ingress_path_pb2 as resource__package__tools__sdk_dot_model_dot_container_dot_http__ingress__path__pb2


# File-level descriptor; serialized_pb is the compiled ingress_rule.proto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='ingress_rule.proto',
  package='container',
  syntax='proto3',
  serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
  serialized_pb=_b('\n\x12ingress_rule.proto\x12\tcontainer\x1a\x42resource_package_tools_sdk/model/container/http_ingress_path.proto\"y\n\x0bIngressRule\x12\x0c\n\x04host\x18\x01 \x01(\t\x12)\n\x04http\x18\x02 \x01(\x0b\x32\x1b.container.IngressRule.Http\x1a\x31\n\x04Http\x12)\n\x05paths\x18\x01 \x03(\x0b\x32\x1a.container.HTTPIngressPathBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
  ,
  dependencies=[resource__package__tools__sdk_dot_model_dot_container_dot_http__ingress__path__pb2.DESCRIPTOR,])


# Descriptor for the nested message container.IngressRule.Http.
_INGRESSRULE_HTTP = _descriptor.Descriptor(
  name='Http',
  full_name='container.IngressRule.Http',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='paths', full_name='container.IngressRule.Http.paths', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=173,
  serialized_end=222,
)
# Descriptor for the top-level message container.IngressRule.
_INGRESSRULE = _descriptor.Descriptor(
  name='IngressRule',
  full_name='container.IngressRule',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='host', full_name='container.IngressRule.host', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='http', full_name='container.IngressRule.http', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_INGRESSRULE_HTTP, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=101,
  serialized_end=222,
)

# Wire up cross-references between the descriptors and register the file.
_INGRESSRULE_HTTP.fields_by_name['paths'].message_type = resource__package__tools__sdk_dot_model_dot_container_dot_http__ingress__path__pb2._HTTPINGRESSPATH
_INGRESSRULE_HTTP.containing_type = _INGRESSRULE
_INGRESSRULE.fields_by_name['http'].message_type = _INGRESSRULE_HTTP
DESCRIPTOR.message_types_by_name['IngressRule'] = _INGRESSRULE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes generated from the descriptors above.
IngressRule = _reflection.GeneratedProtocolMessageType('IngressRule', (_message.Message,), {
  'Http' : _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), {
    'DESCRIPTOR' : _INGRESSRULE_HTTP,
    '__module__' : 'ingress_rule_pb2'
    # @@protoc_insertion_point(class_scope:container.IngressRule.Http)
    })
  ,
  'DESCRIPTOR' : _INGRESSRULE,
  '__module__' : 'ingress_rule_pb2'
  # @@protoc_insertion_point(class_scope:container.IngressRule)
  })
_sym_db.RegisterMessage(IngressRule)
_sym_db.RegisterMessage(IngressRule.Http)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
bdae7154b5cea8e1017b5d3792285a3bbd6fc65a | e44e34bd7da991886fe72ca72376fc0e23d626c8 | /src/visualization/melbourne_risk_map.py | 354bcf394eb0480233fc3da00cb0ddf1693a0861 | [
"MIT"
] | permissive | delewis13/MelbourneCrashModel | dfe76e8b58ac4a6f3c3eed32bcdbf03b7aa1f02c | 82a01e5d33b4127e812f9a8ff374ac3d3ec88bd4 | refs/heads/master | 2020-04-28T14:16:30.301872 | 2019-03-13T03:40:33 | 2019-03-13T03:40:33 | 175,333,442 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,776 | py | """
Title: risk_map.py
Author: @bpben, @alicefeng
This script generates a map of the "risk estimates" from model predictions.
Usage:
--modelname: name of the models
these will be used as the name of the layers in the map so they must be unique
--filename: filename of predictions
predictions must be csvs with two columns; segment_id, prediction_column
--colname: name of the predictions column
--normalize: optional flag to indicate it predictions need to be normalized
Inputs:
csv files of model predictions
inter_and_non_int.shp - a Shapefile with both intersection and non-intersection segments and their segment_ids
Output:
risk_map.html - a Leaflet map with model predictions visualized on it
"""
import pandas as pd
import geopandas as gpd
import folium
import branca.colormap as cm
import argparse
import os
# all model outputs must be stored in the "data/processed/" directory
# Resolve paths relative to the repository root (three levels up).
BASE_DIR = os.path.dirname(
    os.path.dirname(
        os.path.dirname(
            os.path.abspath(__file__))))
DATA_FP = BASE_DIR + '/data/Melbourne/processed/'
MAP_FP = BASE_DIR + '/data/Melbourne/processed/maps/'

# Parse arguments (module-level: this script is meant to be run directly).
parser = argparse.ArgumentParser(description="Plot crash predictions on a map")
parser.add_argument("-m", "--modelname", nargs="+",
                    help="name of the model, must be unique")
parser.add_argument("-f", "--filename", nargs="+",
                    help="name of the file with the predictions to be plotted on the map, must specify at least 1")
parser.add_argument("-c", "--colname", nargs="+",
                    help="column name that has the predictions, must be specified in the same order as the filenames")
parser.add_argument("-n", "--normalize", help="normalize predictions", action="store_true")
args = parser.parse_args()

# Zip filenames and column names -- one (model, file, column) triple per layer.
if len(args.modelname) == len(args.filename) == len(args.colname):
    match = zip(args.modelname, args.filename, args.colname)
else:
    raise Exception("Number of models, files and column names must match")
def process_data(filename, colname):
    """Prepare one model's predictions for mapping.

    Loads the predictions CSV from DATA_FP, keeps only segments with a
    positive predicted risk, joins them onto the module-level ``streets``
    GeoDataFrame by segment id, and (when ``--normalize`` was passed)
    rescales the prediction column to [0, 1].

    Args:
        filename: name of the predictions file under DATA_FP.
        colname: name of the predictions column in that file.

    Returns:
        GeoDataFrame linking segment_ids, predictions and geometries.
    """
    predictions = pd.read_csv(DATA_FP + filename, dtype={'segment_id': 'str'})
    # Non-zero risk only -- keeps the map payload small.
    predictions = predictions[predictions[colname] > 0]

    # Attach geometries: road-network segments joined by segment id.
    merged = streets.merge(predictions, left_on='id', right_on='segment_id')

    if args.normalize:
        print("Normalizing predictions...")
        merged[colname] = merged[colname] / merged[colname].max()
    return merged
def add_layer(dataset, modelname, colname, mapname):
    """Add one model's predictions to the map as a GeoJSON layer.

    Args:
        dataset: dataframe with the geometries and predictions to plot.
        modelname: model name, used as the layer name.
        colname: name of the predictions column.
        mapname: folium map to draw onto.
    """
    def _risk_style(feature):
        # Color each segment by its predicted risk via the shared color scale.
        return {'color': color_scale(feature['properties'][colname])}

    layer = folium.GeoJson(dataset, name=modelname, style_function=_risk_style)
    layer.add_to(mapname)
# Read in shapefile as a GeoDataframe
streets = gpd.read_file(MAP_FP + 'inter_and_non_int.geojson')

# Make map
# First create basemap
melbourne_map = folium.Map([-37.814, 144.96332], tiles='Cartodb dark_matter', zoom_start=12)
folium.TileLayer('Cartodb Positron').add_to(melbourne_map)

# Create style function to color segments based on their risk score
color_scale = cm.linear.YlOrRd_09.scale(0, 1)
# color_scale = cm.linear.YlOrRd.scale(streets_w_risk[args.colname].min(),
#                                      streets_w_risk[args.colname].max())

# Plot model predictions as separate layers (one per --modelname triple).
for model in match:
    predictions = process_data(model[1], model[2])
    add_layer(predictions, model[0], model[2], melbourne_map)

# Add control to toggle between model layers
folium.LayerControl(position='bottomright').add_to(melbourne_map)

# Finally, add legend
color_scale.caption = "Risk Score"
melbourne_map.add_child(color_scale)

# Save map as separate html file
melbourne_map.save('melbourne_risk_map.html')
| [
"daniel.elliott.lewis@gmail.com"
] | daniel.elliott.lewis@gmail.com |
class Doctor():
    """A doctor with a name, coat (bata), sex, hospital service and salary."""

    def __init__(self, nombre, bata, sexo, servicio, sueldo):
        # Attribute names are part of the public interface -- keep them.
        self.nombre = nombre
        self.bata = bata        # expected to expose getTalla()
        self.sexo = sexo
        self.servicio = servicio
        self.sueldo = sueldo

    def curar(self):
        """Return a sentence describing the doctor's coat size."""
        return f"el doctor {self.nombre} lleva una bata de talla {self.bata.getTalla()}"

    def getBata(self):
        """Accessor kept for compatibility with existing callers."""
        return self.bata

    def setBata(self, bata):
        """Mutator kept for compatibility with existing callers."""
        self.bata = bata

    def getNombre(self):
        """Accessor kept for compatibility with existing callers."""
        return self.nombre

    def setNombre(self, nombre):
        """Mutator kept for compatibility with existing callers."""
        self.nombre = nombre
| [
"jsantamariap@unprg.edu.pe"
] | jsantamariap@unprg.edu.pe |
ba6f02e70e1a01a7f356e9a3c185e2cf3e8c12db | fa92171d92256022887c3460ed12b6124b42e21c | /doc/sphinxext/google_analytics.py | 219831dae12922be2ce0c549122b50445697b790 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | SimonBussy/tick | 667e5fe2fa8c9e4abfad69446591ba624a1589f1 | ea27359ed16f434adbc5a27549901c43953c4f7b | refs/heads/master | 2022-01-23T03:13:01.686981 | 2019-06-04T12:45:51 | 2019-06-04T12:45:51 | 108,631,034 | 0 | 0 | BSD-3-Clause | 2019-04-08T12:32:36 | 2017-10-28T08:02:11 | Python | UTF-8 | Python | false | false | 1,523 | py | """Add google analytics to sphinx documentation
imported from sphinxcontrib google analytics package
https://bitbucket.org/birkenfeld/sphinx-contrib/src/e758073384efd1ed5ed1e6286301b7bef71b27cf/googleanalytics/
"""
from sphinx.application import ExtensionError
def add_ga_javascript(app, pagename, templatename, context, doctree):
if not app.config.googleanalytics_enabled:
return
metatags = context.get('metatags', '')
metatags += """<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', '%s']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>""" % app.config.googleanalytics_id
context['metatags'] = metatags
def check_config(app):
if not app.config.googleanalytics_id:
raise ExtensionError("'googleanalytics_id' config value must be set "
"for ga statistics to function properly.")
def setup(app):
app.add_config_value('googleanalytics_id', '', 'html')
app.add_config_value('googleanalytics_enabled', True, 'html')
app.connect('html-page-context', add_ga_javascript)
app.connect('builder-inited', check_config)
return {'version': '0.1'}
| [
"mbompr@gmail.com"
] | mbompr@gmail.com |
19f990b8c95333d54d23e25a16d628b058b36667 | 853fc7cfb1d41d2f494e6bafd551ccc62e070c1f | /writelog_ora.py | 03b5a7e60805fad2ba98e70a1752a48be71eb7f6 | [] | no_license | AlbertoFro/mongodb | 923a62e4255163a8ff22fcffe42f44e74dfe93a5 | d72aaa8c3c33592515f49008d0448468363a61ee | refs/heads/master | 2021-01-02T22:51:54.267084 | 2015-12-09T15:07:44 | 2015-12-09T15:07:44 | 28,186,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py | '''
#-------------------------------------------------------------------------------
# Name: writelog mongodb
# Purpose:
#
# Author: alberto.frosi
#
# Created: 16/12/2014
# Copyright: (c) alberto.frosi 2014
# Licence: Use of this source code is governed by a BSD-style
#-------------------------------------------------------------------------------
#!/usr/bin/env python
'''
import re
from datetime import datetime
from subprocess import Popen, PIPE, STDOUT
from pymongo import Connection
from pymongo.errors import CollectionInvalid
import pymongo
HOST = 'AF-HP'
LOG_PATH = '/home/alberto/Documenti/Win7SP1/oracle_alert.txt'
DB_NAME = 'mydb'
COLLECTION_NAME = 'jasperok'
MAX_COLLECTION_SIZE = 5 # in megabytes
#today = datetime.date.today()
def main():
# open remote log file
cmd = 'tail -n 1000000 -f /home/alberto/Documenti/Win7SP1/oracle_alert.txt'
p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT)
while True:
line = p.stdout.readline()
loglinetxt = parse_line(line)
def parse_line(line):
# m = re.search('controls', line)
# ('(?<=abc)def', 'abcdef')
m = re.search('(?<=ORA-)', line)
if m:
print("found a match!")
#print(today)
print line.split(" ")
# a,b = line.split(": ")
ora = line.split(" ")
a,b,= ora[0],ora[1]
print a
print b
DB_NAME = 'mydb'
COLLECTION_NAME = 'jasperok'
mongo_conn = Connection()
mongo_db = mongo_conn[DB_NAME]
mongo_coll = mongo_db[COLLECTION_NAME]
new_posts = [{"loglinetxt": line,
"host": HOST,
"logpath": LOG_PATH,
"collection_name":COLLECTION_NAME,
"ORA": a,
"DESC": b}]
mongo_coll.insert(new_posts)
else:
print(" No found a match!")
if __name__ == '__main__':
main()
| [
"alberto.frosi@gmail.com"
] | alberto.frosi@gmail.com |
73a79c0a0c1c5e9d6c2fee8835e043ee5cd436e9 | c534ad195ac97575b84945e5cecd1835f869b7fb | /graph.py | 60642a6dc93bc4eca45dc37c1d61f685726c4b3f | [] | no_license | GilTeixeira/del | a8174253a54fc2f8fd558096f9cbf50dbbef69d8 | 987f8a7888e7cdc588533f95a029cd9656333472 | refs/heads/master | 2022-10-10T23:58:22.132595 | 2020-06-07T15:36:14 | 2020-06-07T15:36:14 | 266,888,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | def fibonacci(n):
a = 0
b = 1
if n < 0:
print("Invalid Input")
elif n == 0:
return a
elif n == 1:
return b
else:
for i in range(2,n):
c = a + b
a = b
b = c
return b | [
"noreply@github.com"
] | noreply@github.com |
239e3bfb27478f5c1eba4bf0fee205ff315931a8 | d17c3478940c0fbb4d670a87a6b8725314529a63 | /대화상자2.py | 68ba35d981f3be6579b79c7cc02c6011365db815 | [] | no_license | whchoi78/python | 60e9b347bab81b7c8fd8e1328443bd6fd3653b29 | d12929bfb2d55cd85c2fea55c2d64f9374839107 | refs/heads/master | 2023-04-06T12:52:08.814720 | 2021-03-29T23:38:24 | 2021-03-29T23:38:24 | 329,628,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | from tkinter import*
from tkinter.filedialog import *
window = Tk()
window.geometry("700x500")
window.title("대화상자 테스트")
label = Label(window, text="test")
label.pack()
save = asksaveasfile(parent=window, mode="w", defaultextension=".jpg", filetype=(("GIF파일", "*.gif"), ("모든파일","*.*")))
label.configure(text=save)
save.close()
window.mainloop() | [
"caliper78@naver.com"
] | caliper78@naver.com |
7c36ac1c024cf960649d2e0a49ddbbd0087fdc2f | a849caca4cc7b66bb3ca93552da873c1415f435d | /Lab Exercise 1.6.2020/fermi.py | 0fb3f6a5b5b8d5291e9c7c3a08e24662cec98290 | [] | no_license | nmessa/Python | 5215b957dc73ece422a0f4cc65752c387a437d34 | 1a32ca1f59aa5a3f89453b6e42d4336e6e8fb961 | refs/heads/master | 2021-07-11T04:45:08.222102 | 2020-09-17T17:32:07 | 2020-09-17T17:32:07 | 199,273,131 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | ## Game of Fermi Version 0.5
## Author:
## Date: 1/6/2020
## The goal of the game is for the player to guess the digits in
## the three positions in the least number of tries. For each guess,
## the player provides three digits for position 1, 2, and 3.
## The program replies with a hint consisting of Fermi, Pico, and Nano.
## If the digit guess for a given position is correct, then the reply is Fermi.
## If the digit guessed for a given position is in a different position, then
## the reply is Pico. If the digit guessed for a given position does not match
## any of the three digits, then the reply is Nano.
from random import *
#Create variables
numbers = [1,2,3,4,5,6,7,8,9]
again = True
while again:
win = False
#Build the secret number of 3 unique numbers from 1 to 9
secret = []
while len(secret) < 3:
temp = choice(numbers)
if temp not in secret:
secret.append(temp)
numGuesses = 0 #keep track of numbers guessed
#Play a round
while not win:
#initialize counter and phrases list
count = 0
phrases = []
#Get number guess from user
temp = input("Enter 3 numbers (1 - 9)seperated by spaces: ").split()
#Build a list that represents the number guessed
#Add code here
#update number of guesses
#Add code here
#Algorithm to test number and generate 3 phrases
#Add code here
#Print the result of algorithm execution
for p in phrases:
print(p, end = ' ')
print()
#Check to see if you won
if phrases.count('Fermi') == 3: #this means you won
print('You won in', numGuesses, 'guesses')
win = True
answer = input("Play again (y/n)? ")
if answer == 'n':
again = False
## Sample Output
## Enter 3 numbers (1 - 9): 6 3 5
## Nano Pico Nano
## Enter 3 numbers (1 - 9): 3 4 2
## Pico Pico Nano
## Enter 3 numbers (1 - 9): 4 3 7
## Fermi Pico Nano
## Enter 3 numbers (1 - 9): 4 8 3
## Fermi Fermi Fermi
## You won in 4 guesses
| [
"noreply@github.com"
] | noreply@github.com |
e64dc1af108b5ff6281dda5da8187a24124564d2 | 2ca6b3a82d30d67d609091e723dcf031f9fb7907 | /rents/models.py | 2ea787d96dd5f23ffd826f836f54b01565260a8a | [] | no_license | EKCodeDevAcc/finalproject | 81822ba531937a0cefe79d4d138bc4ddaa07f1e4 | 8314bb1485e3b3643025c15885b1ae9b8c0ffeff | refs/heads/master | 2020-03-25T05:14:30.343941 | 2018-08-09T20:37:36 | 2018-08-09T20:37:36 | 143,436,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,821 | py | from django.db import models
from django.conf import settings
# Create your models here.
class Location(models.Model):
location_name = models.CharField(max_length=64)
location_address = models.CharField(max_length=64)
def __str__(self):
return f"{self.location_name} {self.location_address}"
class Car(models.Model):
car_brand = models.CharField(max_length=64)
car_name = models.CharField(max_length=64)
car_type = models.CharField(max_length=64)
car_price = models.FloatField()
car_size = models.IntegerField()
car_detail = models.CharField(max_length=64)
car_status = models.CharField(max_length=64)
car_location = models.ForeignKey(Location, on_delete=models.CASCADE)
def __str__(self):
return f"{self.car_brand} {self.car_name} {self.car_type} {self.car_price} {self.car_size} {self.car_detail}"
class Reservation(models.Model):
reservation_user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
reservation_car = models.ForeignKey(Car, on_delete=models.CASCADE)
reservation_start_date = models.DateTimeField()
reservation_end_date = models.DateTimeField()
reservation_drop_off = models.ForeignKey(Location, on_delete=models.CASCADE)
reservation_protection = models.CharField(max_length=64)
reservation_total_price = models.FloatField()
reservation_status = models.CharField(max_length=64)
reservation_request = models.CharField(max_length=64)
def __str__(self):
return f"{self.reservation_user} {self.reservation_car} {self.reservation_protection} {self.reservation_total_price} {self.reservation_status} {self.reservation_status}"
class ReservedDate(models.Model):
reserved_date_car = models.ForeignKey(Car, on_delete=models.CASCADE)
reserved_date_reservation = models.ForeignKey(Reservation, on_delete=models.CASCADE)
reserved_date_start_date = models.DateTimeField()
reserved_date_end_date = models.DateTimeField()
def __str__(self):
return f"{self.reserved_date_car} {self.reserved_date_start_date} {self.reserved_date_end_date}"
class Request(models.Model):
request_reservation = models.ForeignKey(Reservation, on_delete=models.CASCADE)
request_reserved_date = models.ForeignKey(ReservedDate, on_delete=models.CASCADE)
request_start_date = models.DateTimeField(blank=True, null=True)
request_end_date = models.DateTimeField(blank=True, null=True)
request_drop_off = models.ForeignKey(Location, on_delete=models.CASCADE)
request_status = models.CharField(max_length=64)
request_approval = models.CharField(max_length=64)
def __str__(self):
return f"{self.request_reservation} {self.request_start_date} {self.request_end_date} {self.request_drop_off} {self.request_status} {self.request_approval}" | [
"edkang59@gmail.com"
] | edkang59@gmail.com |
59823f407dda26caab3487a38fd09aa7ffb051bb | 69aae40fbe697dc010b57f9ddcea300da3509824 | /lab2_7.py | 7a84737bc452cfa97485468a3c94f3d1a4e8cd3c | [] | no_license | eunseo5355/python_school | ad34336d37490169c4ff2a760f88e01697c9f083 | 695e070e915f4b312691cfb4ec12c26531b0b2ce | refs/heads/master | 2022-03-24T00:28:58.069406 | 2019-11-27T15:55:09 | 2019-11-27T15:55:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | # 이율, 기간, 원금을 입력받아서, 원리금 합계를 출력하라.
a = float(input("연이율:"))
b = int(input("기간(년):"))
c = int(input("원금(원):"))
print("원리금:", c*(1+a)**b) | [
"55755686+eunseo5355@users.noreply.github.com"
] | 55755686+eunseo5355@users.noreply.github.com |
7b9e3549a6ea1e1d8e6f67ce84745f3d7cd3ee9d | 28026dec7c84107d04a275ba8a4ef161d70fbd70 | /lightweighttranscribe.py | d256c074739e8434ec7faccc01159175b226844b | [] | no_license | krypted/lightweighttranscription | 7345386b2e2e60d7c41b38bf4790b38dbc5ace8d | f404cce6ae4f3c6244e539ba4c02dd2659d20fb0 | refs/heads/master | 2022-11-28T00:27:19.599481 | 2020-07-28T15:17:17 | 2020-07-28T15:17:17 | 283,249,724 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,478 | py | import sys
import ast
import uuid
import time
import boto3
import textwrap
import logging
from tqdm import tqdm
logging.getLogger().setLevel(logging.INFO)
output_file_name = str(sys.argv[-1])
input_file_name = sys.argv[1]
bucket_name = input('Enter the name of s3 bucket:') or 'krypted'
s3 = boto3.resource('s3')
client = boto3.client('transcribe')
file_identifier = str(uuid.uuid4())
def create_resources(bucket_name):
if s3.Bucket(bucket_name).creation_date is None:
s3.create_bucket(Bucket=bucket_name)
logging.info('Waiting for {} bucket creation...'.format(bucket_name))
time.sleep(5)
logging.info('Bucket {} created.'.format(bucket_name))
logging.info('Uploading {} file...'.format(input_file_name))
s3.Bucket(bucket_name).upload_file(input_file_name, 'input/{}'.format(input_file_name+'-'+file_identifier))
def convert_speech_to_text(job_name):
logging.info('Creating job defination.')
client.start_transcription_job(
TranscriptionJobName=job_name,
LanguageCode='en-US',
MediaFormat=input_file_name.split('.')[1],
Media={'MediaFileUri':'s3://{}/input/{}'.format(bucket_name, job_name)},
OutputBucketName=bucket_name
)
while True:
status = client.get_transcription_job(TranscriptionJobName=job_name)
if status['TranscriptionJob']['TranscriptionJobStatus'] in ['COMPLETED', 'FAILED']:
break
for _ in tqdm(range(100)):
time.sleep(.7)
logging.info('Job Ready and starting conversion process.')
if status['TranscriptionJob']['TranscriptionJobStatus'] == 'COMPLETED':
obj = s3.Object(bucket_name, job_name+'.json')
text = ast.literal_eval(obj.get()['Body'].read().decode('UTF-8'))['results']['transcripts'][0]['transcript']
return text
return
def write_to_file(speech_text):
with open(output_file_name, 'wt') as f:
logging.info('Writing data to {} file.'. format(output_file_name))
f.write(textwrap.fill(speech_text, width=130))
logging.info('Saving data to file.')
def resource_cleanup(job_name):
logging.info('Cleaning-up Resources.')
client.delete_transcription_job(TranscriptionJobName=job_name)
logging.info('Process complete.')
job_name = input_file_name+'-'+file_identifier
create_resources(bucket_name)
speech_text = convert_speech_to_text(input_file_name+'-'+file_identifier)
write_to_file(speech_text)
resource_cleanup(job_name)
| [
"noreply@github.com"
] | noreply@github.com |
21fcabff5a0b2ddbb8b5f1f51c925095120b9ba9 | bc4c6c63b7c4b652c102d9729b897e64fbddc826 | /setup.py | e085f7737ee0fbbc52dd9cf33e9420065627f549 | [
"MIT"
] | permissive | jgoodell/messenger | 442b3cbf26c43ec1b0fee19cdfb1d00dc44a4c6b | e2789121b0ee42629de2ec0630b0eb7cc826804b | refs/heads/master | 2021-01-22T02:52:50.994108 | 2014-09-11T19:21:38 | 2014-09-11T19:21:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | import os
import shutil
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# Move the files to a location where setuptools is not confused
try:
os.mkdir('package')
except OSError:
os.remove('package/models.py')
os.remove('package/views.py')
os.remove('package/tests.py')
os.remove('package/__init__.py')
os.rmdir('package')
os.mkdir('package')
shutil.copy2('models.py', 'package/models.py')
shutil.copy2('views.py', 'package/views.py')
shutil.copy2('tests.py', 'package/tests.py')
shutil.copy2('__init__.py', 'package/__init__.py')
#os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='messenger',
version='v0.1.0',
packages=['package'],
include_package_data=True,
license='MIT License',
description='A simple app, part of a Hudson Bay project.',
long_description=README,
url='',
author='Jason Goodell',
author_email='jason.goodell@harrison.edu',
)
| [
"jason.goodell@harrison.edu"
] | jason.goodell@harrison.edu |
80d5b183ea338606cf31e81d66bb09688ab48bc1 | 868dc7389792d77824a9bbe32afcee2a7188d8bf | /SportcenterApp/migrations/0006_auto_20180723_1411.py | dae16038e590c43e582dca297c3ee4b82ba81e9f | [] | no_license | Mobencl/Internship | cc00fc9e29b311de70299a9063cdd309a88c5f3c | f7e40c3784a0e2b0a52031ac320cef0324ba8e20 | refs/heads/master | 2020-03-23T22:58:37.720558 | 2018-07-24T20:58:59 | 2018-07-24T20:58:59 | 142,076,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-23 18:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('SportcenterApp', '0005_remove_terrain_totalterrains'),
]
operations = [
migrations.RenameField(
model_name='terrain',
old_name='Capacity',
new_name='maximumCapacity',
),
migrations.AddField(
model_name='terrain',
name='minimumCapacity',
field=models.IntegerField(default=0),
),
]
| [
"noreply@github.com"
] | noreply@github.com |
f35949841d7f53542b0440e85b402c3e15a4c2c8 | 1a1d872fd04050587a28f4da6b0062640afd99a4 | /aladin/url.py | 8b056d009b530bab15d9f20cdec22103fb231629 | [] | no_license | kashvats/ecom | d8f0fe1ff6ffc38e300db3ab8b66bcde010a3269 | a92d7f4d1c410e5313ba11f4c3736460b4cdb91f | refs/heads/master | 2023-08-24T17:31:45.414888 | 2021-10-05T18:16:25 | 2021-10-05T18:16:25 | 413,929,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | from . import views
from django.urls import path
urlpatterns = [
# path('curt', views.curtaclan,name='curta'),
path('login/', views.log_in,name='login'),
path('logout/', views.log_out,name='logout'),
path('register/', views.register,name='register'),
path('category', views.categoryview,name='category'),
path('', views.homeview,name='product'),
path('product/', views.productview,name='product_d'),
path('product/<str:name>', views.detailview,name='product_details'),
path('category/<str:name>', views.catprod,name='category_details'),
path('cart/', views.cartsview,name='cart'),
path('cart/<str:name>', views.carts_item, name='cart_add'),
path('cart_edit/<str:name>', views.cartsedit,name='cart_edit'),
path('cart_delete/<str:name>', views.cartsdelete,name='cart_delete'),
path('try/<str:name>', views.producttryview,name='tried'),
path('send/<str:name>', views.send_try,name='send'),
path('try/', views.triedview, name='try'),
path('payment', views.payme,name='payment'),
path('search/', views.search,name='search'),
path('checkout/', views.checkoutview,name='checkout'),
]
| [
"vatsakash168@gmail.com"
] | vatsakash168@gmail.com |
4b8151db96044034be066029bc33c2d661687670 | 87cabec126acb99572ed4832f6c9a7c55309417a | /simple/wishful_simple_controller | bf82b7707bee137b9f9add8fa6dee5457b2ae7c3 | [] | no_license | ECOAP/examples | a996a9762ffe429b1f7faba3ac0adfb0e93bac97 | 8f67b9334c7c21f7cd1443afd54c4c12ee260be5 | refs/heads/master | 2021-05-02T11:23:54.175327 | 2018-05-23T15:53:04 | 2018-05-23T15:53:04 | 120,776,617 | 0 | 1 | null | 2018-03-07T12:05:07 | 2018-02-08T15:06:32 | Python | UTF-8 | Python | false | false | 4,683 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
wishful_controller_simple.py: First implementation of WiSHFUL controller
Usage:
wishful_controller_simple.py [options] [-q | -v]
Options:
--logfile name Name of the logfile
--config configFile Config file path
Example:
./wishful_simple_local_controller -v --config ./config.yaml
Other options:
-h, --help show this help message and exit
-q, --quiet print less text
-v, --verbose print more text
--version show version and exit
"""
import sys
import datetime
import logging
import wishful_controller
import gevent
import yaml
import wishful_upis as upis
__author__ = "Piotr Gawlowicz, Mikolaj Chwalisz"
__copyright__ = "Copyright (c) 2015, Technische Universität Berlin"
__version__ = "0.1.0"
__email__ = "{gawlowicz, chwalisz}@tkn.tu-berlin.de"
log = logging.getLogger('wishful_agent.main')
controller = wishful_controller.Controller()
nodes = []
@controller.new_node_callback()
def new_node(node):
nodes.append(node)
print("New node appeared:")
print(node)
@controller.node_exit_callback()
def node_exit(node, reason):
if node in nodes:
nodes.remove(node);
print("NodeExit : NodeID : {} Reason : {}".format(node.id, reason))
@controller.set_default_callback()
def default_callback(group, node, cmd, data):
print("{} DEFAULT CALLBACK : Group: {}, NodeName: {}, Cmd: {}, Returns: {}".format(datetime.datetime.now(), group, node.name, cmd, data))
@controller.add_callback(upis.radio.set_channel)
def set_channel_reponse(group, node, data):
print("{} set_channel_reponse : Group:{}, NodeId:{}, msg:{}".format(datetime.datetime.now(), group, node.id, data))
controller.delay(3).node(node.id).net.create_packetflow_sink(port=1234)
@controller.add_callback(upis.radio.get_channel)
def get_channel_reponse(group, node, data):
print("{} get_channel_reponse : Group:{}, NodeId:{}, msg:{}".format(datetime.datetime.now(), group, node.id, data))
def print_response(group, node, data):
print("{} Print response : Group:{}, NodeIP:{}, Result:{}".format(datetime.datetime.now(), group, node.ip, data))
def main(args):
log.debug(args)
config_file_path = args['--config']
config = None
with open(config_file_path, 'r') as f:
config = yaml.load(f)
controller.load_config(config)
controller.start()
#control loop
while True:
gevent.sleep(10)
print("\n")
print("Connected nodes", [str(node.name) for node in nodes])
if nodes:
#execute non-blocking function immediately
controller.blocking(False).node(nodes[0]).radio.iface("wlan0").set_tx_power(12)
#execute non-blocking function immediately, with specific callback
controller.callback(print_response).node(nodes[0]).radio.iface("wlan0").get_tx_power()
#schedule non-blocking function delay
controller.delay(3).node(nodes[0]).net.create_packetflow_sink(port=1234)
#schedule non-blocking function exec time
exec_time = datetime.datetime.now() + datetime.timedelta(seconds=3)
controller.exec_time(exec_time).node(nodes[0]).radio.iface("wlan1").set_channel(channel=4)
#execute blocking function immediately
result = controller.node(nodes[0]).radio.iface("wlan1").get_channel()
print("{} Channel is: {}".format(datetime.datetime.now(), result))
#exception handling, clean_per_flow_tx_power_table implementation raises exception
try:
controller.node(nodes[0]).radio.iface("wlan1").clean_per_flow_tx_power_table()
except Exception as e:
print("{} !!!Exception!!!: {}".format(datetime.datetime.now(), e))
if __name__ == "__main__":
try:
from docopt import docopt
except:
print("""
Please install docopt using:
pip install docopt==0.6.1
For more refer to:
https://github.com/docopt/docopt
""")
raise
args = docopt(__doc__, version=__version__)
log_level = logging.INFO # default
if args['--verbose']:
log_level = logging.DEBUG
elif args['--quiet']:
log_level = logging.ERROR
logfile = None
if args['--logfile']:
logfile = args['--logfile']
logging.basicConfig(filename=logfile, level=log_level,
format='%(asctime)s - %(name)s.%(funcName)s() - %(levelname)s - %(message)s')
try:
main(args)
except KeyboardInterrupt:
log.debug("Controller exits")
finally:
log.debug("Exit")
controller.stop()
| [
"gawlowicz@tu-berlin.de"
] | gawlowicz@tu-berlin.de | |
ef31a019c6a45e981d10734a870eb4e44043c0d3 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/bokeh-0.11.1-py27_0/lib/python2.7/site-packages/bokeh/command/subcommands/tests/test_info.py | 42f6e6e775b0cc0b11df05470c21ff00bfa6d4cd | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 1,238 | py | from __future__ import absolute_import
import bokeh.command.subcommands.info as scinfo
from bokeh.command.bootstrap import main
def test_create():
import argparse
from bokeh.command.subcommand import Subcommand
obj = scinfo.Info(parser=argparse.ArgumentParser())
assert isinstance(obj, Subcommand)
def test_name():
assert scinfo.Info.name == "info"
def test_help():
assert scinfo.Info.help == "print information about Bokeh and Bokeh server configuration"
def test_args():
assert scinfo.Info.args == (
('--static', dict(
action='store_true',
help="Print the locations of BokehJS static files",
)),
)
def test_run(capsys):
main(["bokeh", "info"])
out, err = capsys.readouterr()
lines = out.split("\n")
assert len(lines) == 5
assert lines[0].startswith("Python version")
assert lines[1].startswith("IPython version")
assert lines[2].startswith("Bokeh version")
assert lines[3].startswith("BokehJS static")
assert lines[4] == ""
assert err == ""
def test_run_static(capsys):
main(["bokeh", "info", "--static"])
out, err = capsys.readouterr()
assert err == ""
assert out.endswith('/bokeh/server/static\n')
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
41f649e92b5eca74f9ec9f8b7e3b850ee74739a1 | 6703ccb01ec7ac9d3a1e8528d1a66f12ffb2a180 | /KennywoodEnv/bin/sqlformat | ccc94c4728753b77ba5b619e8ce13c3d3559a593 | [] | no_license | mister-michael/kennywoodapi | a36c4b7a811eb4fad857cc0d01fa5df5eef98c0b | c442411dc9da5a95eaf59ec1a687dff4358d3016 | refs/heads/master | 2023-08-23T15:22:58.728621 | 2020-05-19T20:33:00 | 2020-05-19T20:33:00 | 265,338,133 | 0 | 0 | null | 2021-09-22T19:03:19 | 2020-05-19T19:02:25 | Python | UTF-8 | Python | false | false | 278 | #!/Users/d.a.n.k.e./workspace/backEnd/book4/kennywood/KennywoodEnv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"michaelclarknashville@protonmail.com"
] | michaelclarknashville@protonmail.com | |
7769a729fa30e2520aea537678665b9927d2a569 | 130321044c6a0633dafdd8af0dce04df4c261bb0 | /get_data/split_json_list.py | 1dc2e16893c9e4db66ed89253aa6bc5e32d3341a | [] | no_license | etdej/twitter_ideal_point_estimation | d4fcf18d40ec67121b55ca5f0bcaa36c5e54bb59 | 72558a50a80d389e1a77da2525ce055acc941c3e | refs/heads/master | 2021-08-30T05:18:10.092049 | 2017-12-16T05:14:43 | 2017-12-16T05:14:43 | 105,032,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | import math
import utils
import random
# number of splits
N = 10
#file to split
fin = 'fr_elites_handles.json'
twitters = utils.json_load(fin)
random.shuffle(twitters)
M = len(twitters)
k = math.ceil(M/N)
separated = [twitters[i:i+k] for i in range(0, M, k)]
fname, extension = fin.split('.')
for i, accnts in enumerate(separated):
fout = "{}_{}.{}".format(fname, str(i+1), extension)
utils.json_dump(accnts, fout) | [
"d.mesquita285@gmail.com"
] | d.mesquita285@gmail.com |
4dab070f449410287de403e2ad634405becb5e22 | f6d52b8c660756e07763ffa88e07e6c02423c10f | /Python/interview/structure/stack.py | 136d905b5b9b5f893bb8332475ebdb27b744d770 | [
"MIT"
] | permissive | lxy1992/LeetCode | 4cf12eed77baed4b04755389fdb5e1133d994a07 | 9486d8611b169bca0cadc6cd83bbc12b0fa4f669 | refs/heads/master | 2021-01-13T17:25:08.145893 | 2017-06-06T09:45:09 | 2017-06-06T09:45:09 | 81,793,899 | 0 | 0 | null | 2017-02-13T06:47:56 | 2017-02-13T06:47:56 | null | UTF-8 | Python | false | false | 1,321 | py | """所有操作均为O(1)"""
import Empty
class ArrayStack:
"""LIFO Stack implementation using a Python list as underlying storage."""
def __init__(self):
"""Create an empty stack."""
self._data = [] # nonpublic list instance
def __len__(self):
"""Return the number of elements in the stack."""
return len(self._data)
def is_empty(self):
"""Return True if the stack is empty."""
return len(self._data) == 0
def push(self, e):
"""Add element e to the top of the stack."""
self._data.append(e) # new item stored at end of list
def top(self):
"""Return (but do not remove) the element at the top of the stack.
Raise Empty exception if the stack is empty."""
if self.is_empty():
raise Empty('Stack is empty')
return self._data[-1] # the last item in the list
def pop(self):
"""Remove and return the element from the top of the stack (i.e., LIFO).
Raise Empty exception if the stack is empty"""
if self.is_empty():
raise Empty("Stack is empty")
return self._data.pop() # remove last item from list
if __name__ == '__main__':
S = ArrayStack()
S.push(5)
S.push(3)
print (len(S))
print S.pop()
print S.is_empty() | [
"lxy@lvxinyandeMacBook-Pro.local"
] | lxy@lvxinyandeMacBook-Pro.local |
ca4b7e3b02e3f9d8bd800d4002d8a1a7aaa44271 | 0b7add5d8583ba3bb02faf4fd5c356fd578f2fcc | /compileProtobuf/dstPb/RightInputProto_pb2.py | 6c7f725c3da07982fafe4f3b3735e4d2df9ca053 | [] | no_license | cappuccino213/IMCIS2Performance | 281f052f1a5dddb4956b3e7127781d2395c07e04 | 74528e0606f78459f6f3bfcf38d4fdf176a36f90 | refs/heads/master | 2023-03-27T20:44:57.266345 | 2021-03-29T07:56:56 | 2021-03-29T07:56:56 | 352,560,398 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,095 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: RightInputProto.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='RightInputProto.proto',
package='',
syntax='proto3',
serialized_options=b'\252\002\037TomTaw.eWordIMCIS.WebAPI.Models',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x15RightInputProto.proto\"K\n\x0fRightInputProto\x12\x0f\n\x07roleUID\x18\x01 \x01(\t\x12\x0f\n\x07userUID\x18\x02 \x01(\t\x12\x16\n\x0eisSuperManager\x18\x03 \x01(\tB\"\xaa\x02\x1fTomTaw.eWordIMCIS.WebAPI.Modelsb\x06proto3'
)
_RIGHTINPUTPROTO = _descriptor.Descriptor(
name='RightInputProto',
full_name='RightInputProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='roleUID', full_name='RightInputProto.roleUID', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='userUID', full_name='RightInputProto.userUID', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='isSuperManager', full_name='RightInputProto.isSuperManager', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=25,
serialized_end=100,
)
DESCRIPTOR.message_types_by_name['RightInputProto'] = _RIGHTINPUTPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RightInputProto = _reflection.GeneratedProtocolMessageType('RightInputProto', (_message.Message,), {
'DESCRIPTOR' : _RIGHTINPUTPROTO,
'__module__' : 'RightInputProto_pb2'
# @@protoc_insertion_point(class_scope:RightInputProto)
})
_sym_db.RegisterMessage(RightInputProto)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"yeahcheung213@163.com"
] | yeahcheung213@163.com |
af2e54884f3e5f2acd073a099712200b9e6d0ae3 | 47d1d1b09594d1d22137fe188825a7bee58eae77 | /MasterEquipment/TradeGoods.py | 01c2c947af72e268b6b630833440fe53a9072441 | [] | no_license | CattMoker/Discord-Diamond-Dog | 554feb7072991779db337e6d366dd1f8635b3085 | 45b6a01f49acb2b19171ddddf8425c8928979332 | refs/heads/master | 2020-03-30T03:01:20.478250 | 2018-05-07T21:54:27 | 2018-05-07T21:54:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | from MasterEquipment.Equipment import Equipment as Equipment
class TradeGoods(Equipment):
    """An Equipment item that additionally carries a trade-goods description."""
    # Class-level default; instances overwrite this in __init__.
    goods = ""
    def __init__(self, inCategory, inCost, inWeight, inGoods):
        """Create a trade-goods item; category/cost/weight go to the Equipment base."""
        super().__init__(inCategory, inCost, inWeight)
        self.goods = inGoods
    def getGoods(self):
        """Return the goods description string."""
        return self.goods
    def setGoods(self, inGoods):
        """Replace the goods description string."""
        self.goods = inGoods
    def toString(self):
        """Print the base Equipment fields, then the goods line (console output)."""
        super().toString()
        print("Goods: " + self.getGoods())
def botMessage(self):
return "Cost: " + self.getCost() + " Goods: " + self.getGoods() | [
"alcoholicorn@gmail.com"
] | alcoholicorn@gmail.com |
5268c2e9a82702cd0d3934e974fa03bb8519fc99 | 2c599a057c064f6e2b6e3b6795551dc3c6e931df | /main.py | 4f040bb5669bd42e086e8a602f1c8b9a7f7fa0ca | [] | no_license | SomeJavaGuy/mintworthy.art | 8bd58776b38c8b66e8d68c66919cdd40d06ba0a0 | c5293a798946383fce428aab201f4d33d5ae0573 | refs/heads/main | 2023-03-24T04:21:29.802308 | 2021-03-21T22:05:25 | 2021-03-21T22:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,460 | py | from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from random import randint
from glitch_this import ImageGlitcher
import utils
import os
import uuid
from web3.auto.infura import w3
glitcher = ImageGlitcher()
assets_dir = "assets/"
extension = ".png"
# construct_path constructs the path for the assets.
# Reads the number of files on the given layer
# and uses one with equal probability
def construct_path(layer):
    """Pick one asset file of the given layer, uniformly at random.

    Counts the files in ``assets/L<layer>/`` and returns the path
    ``assets/L<layer>/l<layer>b<k>.png`` for a random k in [1, count].
    """
    layer_dir = assets_dir + "L" + str(layer)
    _, _, files = next(os.walk(layer_dir))
    variant = randint(1, len(files))
    return layer_dir + "/" + "l" + str(layer) + "b" + str(variant) + extension
# make_opaque takes an image and makes it opaque by the given factor
# e.g to make an image 20% opaque apply a factor of 0.2
def make_opaque(on_image, factor):
    """Set the alpha band of *on_image* so it is `factor` opaque (0.2 -> 20%).

    Returns the (mutated) image for convenience.
    """
    # BUG FIX: the original called `texture.putalpha(...)` -- `texture` is an
    # undefined global; the parameter `on_image` is clearly the intended target.
    # Also use 255 as the full-scale alpha: PIL alpha bands are 8-bit (0-255),
    # so the original's `int(256 * factor)` overflowed at factor == 1.0.
    on_image.putalpha(int(255 * factor))
    return on_image
# resize an image by a factor
# e.g to make it 40% of the orinal apply a factor of 0.4
def scale_image(on_image, factor):
    """Resize *on_image* by `factor` (0.4 -> 40% of the original dimensions)."""
    width, height = on_image.size
    scaled = (int(width * factor), int(height * factor))
    return on_image.resize(scaled)
# rnd_coordinates returns random coordinates based on the randint func
def rnd_coordinates():
    """Return random [x, y] coordinates on the global canvas ``bg``."""
    # edge factor regulates how close the layer is applied to the border:
    # only the top-left 80% of each dimension is eligible.
    edge_factor = 0.8
    max_x = int(bg.size[0] * edge_factor)
    max_y = int(bg.size[1] * edge_factor)
    return [randint(0, max_x), randint(0, max_y)]
# draw_words writes a word given the string to write,
# the layer to write it on, the size of the font and
# the coordinates
def draw_words(str_to_draw, layer, size, color, x, y):
    """Render `str_to_draw` at (x, y) on `layer` in the Fredoka One font.

    `color` is a hex string without the leading '#'.
    """
    surface = ImageDraw.Draw(layer)
    typeface = ImageFont.truetype("FredokaOne-Regular.ttf", size)
    surface.text((x, y), str_to_draw, "#" + color, font=typeface)
# draw_multiline draws a string and respects the newlines
def draw_multiline(str_to_draw, layer, size, x, y):
    """Draw a multi-line string (newlines respected) in white on `layer`."""
    draw = ImageDraw.Draw(layer)
    font = ImageFont.truetype("FredokaOne-Regular.ttf", size)
    # BUG FIX: RGB channel values are 8-bit (0-255). The original passed
    # (256, 256, 256), which overflows instead of producing the white text
    # used everywhere else in this script ("ffffff").
    draw.multiline_text((x, y),
                        str_to_draw,
                        font=font,
                        fill=(255, 255, 255),
                        spacing=100)
# draw_circle draws a circle on the given layer
def draw_circle(layer, x, y):
    """Paint a full circle (a 0-360 degree pie slice) in a random palette colour.

    The circle's diameter is 60% of the layer width; (x, y) is the top-left
    corner of its bounding box.
    """
    draw = ImageDraw.Draw(layer)
    circle_radius_factor = 0.6
    diameter = layer.size[0] * circle_radius_factor
    fill_colour = '#' + colors[randint(0, len(colors) - 1)]
    draw.pieslice([(x, y), (x + diameter, y + diameter)], 0, 360, fill=fill_colour)
# place_coord creates the coordinates needed to
# place an in image on the desired position
# needs some experimentation. See examples below.
# x,y grow from up to bottom
# if you want to place an item on the bottom right corner
# the percentages would be ~ 0.70, 0.90
def place_coord(on_layer, x_perc, y_perc):
    """Translate fractional positions (0..1 of width/height) into pixel [x, y].

    Example: bottom-right corner is roughly (0.70, 0.90).
    """
    width, height = on_layer.size
    return [int(width * x_perc), int(height * y_perc)]
# get latest block hash as string
# connected to infura
def get_block_hash():
    """Fetch the newest chain block via the Infura-backed `w3` and return its hash hex."""
    latest_block = w3.eth.get_block('latest')
    block_hash = latest_block.hash.hex()
    # Echo the hash so every generated image can be traced to a block.
    print("generated on ⏱ : " + block_hash)
    return block_hash
# --- Load source assets (background, two decorative layers, branding) ---
bg = Image.open(construct_path(1))
insect = Image.open(construct_path(2))
plant = Image.open(construct_path(5))
signature = Image.open("assets/signature.png")
logo = Image.open("assets/logo.png")
# Create image to be used as layer0 to become the borders of the final canvas
border = Image.new('RGB', (int(bg.size[0] * 1.1), int(bg.size[1] * 1.1)),
                   color='white')
# Pastel palette used for the background and the circle.
colors = [
    'a1cae2', 'c2b092', 'cfc5a5', 'eae3c8', 'ffaec0', 'ffd384', 'ffab73',
    'e4bad4', 'f6dfeb', 'caf7e3'
]
# Replace the loaded background with a flat, randomly-coloured canvas of the
# same size (the file loaded above only sets the dimensions).
colored_blank_image = Image.new('RGB', (int(bg.size[0]), int(bg.size[1])),
                                color='#' + colors[randint(0,
                                                           len(colors) - 1)])
bg = colored_blank_image
# --- Compose: circle, two insects, glitch pass, then a plant on top ---
x_circle, y_circle = place_coord(bg, 0.2, 0.2)
draw_circle(bg, x_circle, y_circle)
bg.paste(insect, (rnd_coordinates()), insect.convert("RGBA"))
insect = Image.open(construct_path(2))
bg.paste(insect, (rnd_coordinates()), insect.convert("RGBA"))
bg = glitcher.glitch_image(bg, 8, color_offset=True)
bg.paste(plant, (rnd_coordinates()), plant.convert("RGBA"))
# draw .word
words = [
    '.visualise', '.imagine', '.think', '.empower', '.explore', '.engage',
    '.motivate', '.inspire', '.aspire', '.belong', '.accept', '.defy',
    '.exist', '.transcend', '.catalyse', '.overcome', '.dismantle',
    '.meditate', '.reflect', '.express'
]
x, y = place_coord(bg, 0.05, 0.9)
draw_words(words[randint(0, len(words) - 1)], bg, 256, "ffffff", x, y)
# draw one letter
letters = [
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
    'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
]
x_letter, y_letter = place_coord(bg, 0.75, 0.02)
draw_words(letters[randint(0,
                           len(letters) - 1)], bg, 512, "ffffff", x_letter,
           y_letter)
#draw number next to letter
draw_words(str(randint(0, 100)), bg, 96, "ffffff", x_letter * 1.2,
           y_letter * 1.5)
# draw multiline text
x, y = place_coord(bg, 0.2, 0.4)
phrases = [
    'What\ninspires\nyou?', 'What\ninspires\nyou?',
    'Am I\nusing my\ntime wisely?', 'Am I taking \nanything for\ngranted?',
    'Am I employing\na healthy perspective?', 'Am I living\ntrue to myself?',
    'Am I waking up\nin the morning ready\nto take on the day?',
    'Am I taking\ncare of myself\nphysically?',
    'Am I achieving\nthe goals that I have\nset for myself?',
    'Who am I, really?'
]
x, y = place_coord(bg, 0.1, 0.1)
draw_multiline(phrases[randint(0, len(phrases) - 1)], bg, 128, x, y_letter * 2)
# --- Mount the composed canvas onto the white border and add branding ---
border.paste(bg, (int(border.size[0] * 0.045), int(border.size[1] * 0.035)),
             bg.convert("RGBA"))
signature = scale_image(signature, 0.4)
logo = scale_image(logo, 0.15)
border.paste(signature, place_coord(border, 0.71, 0.93),
             signature.convert("RGBA"))
border.paste(logo, place_coord(border, 0.68, 0.943), logo.convert("RGBA"))
# draw hash
x, y = place_coord(border, 0.045, 0.96)
draw_words(get_block_hash(), border, 32, "bbbbbb", x, y)
# border.show()
# reduce the generated image size. Reduce is not compress.
# Reduce touches the resolution.
# border = border.reduce(3)
border.save("generated/" + str(uuid.uuid1()) + ".png")
| [
"ktmfuji@gmail.com"
] | ktmfuji@gmail.com |
ef7250bd0abdff76776b5c47208d55fca1b57e6b | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/farm.py | 230a7720738a0d70a94d4b11e691cbfc733a27b7 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 1,923 | py | ii = [('EmerRN.py', 4), ('CookGHP3.py', 2), ('LyelCPG2.py', 1), ('MarrFDI.py', 4), ('GodwWSL2.py', 1), ('SadlMLP.py', 1), ('WilbRLW4.py', 1), ('RennJIT.py', 2), ('AubePRP2.py', 6), ('CookGHP.py', 3), ('MartHSI2.py', 22), ('LeakWTI2.py', 6), ('KembFJ1.py', 4), ('WilkJMC3.py', 5), ('WilbRLW5.py', 2), ('LeakWTI3.py', 16), ('MarrFDI3.py', 21), ('TennAP.py', 1), ('PeckJNG.py', 24), ('KnowJMM.py', 1), ('AubePRP.py', 18), ('ChalTPW2.py', 4), ('AdamWEP.py', 1), ('FitzRNS3.py', 1), ('WilbRLW2.py', 4), ('ClarGE2.py', 6), ('GellWPT2.py', 1), ('WilkJMC2.py', 7), ('CarlTFR.py', 3), ('SeniNSP.py', 47), ('CoopJBT2.py', 1), ('RoscTTI3.py', 2), ('KiddJAE.py', 1), ('AdamHMM.py', 2), ('BailJD1.py', 4), ('RoscTTI2.py', 5), ('CoolWHM.py', 3), ('ClarGE.py', 10), ('LandWPA.py', 1), ('IrviWVD.py', 2), ('LyelCPG.py', 2), ('GilmCRS.py', 3), ('DaltJMA.py', 11), ('CrocDNL.py', 10), ('MedwTAI.py', 1), ('LandWPA2.py', 3), ('WadeJEB.py', 8), ('FerrSDO2.py', 2), ('SoutRD2.py', 1), ('LeakWTI4.py', 13), ('LeakWTI.py', 15), ('MedwTAI2.py', 7), ('BachARE.py', 41), ('SoutRD.py', 1), ('WheeJPT.py', 4), ('MereHHB3.py', 9), ('HowiWRL2.py', 22), ('MereHHB.py', 42), ('WilkJMC.py', 4), ('MartHRW.py', 13), ('MackCNH.py', 6), ('WestJIT.py', 1), ('FitzRNS4.py', 13), ('CoolWHM3.py', 4), ('DequTKM.py', 1), ('BentJRP.py', 4), ('EdgeMHT.py', 3), ('FerrSDO.py', 3), ('RoscTTI.py', 3), ('ThomGLG.py', 1), ('KembFJ2.py', 4), ('LewiMJW.py', 4), ('MackCNH2.py', 6), ('JacoWHI2.py', 4), ('HaliTBC.py', 1), ('WilbRLW3.py', 5), 
('AinsWRR2.py', 3), ('MereHHB2.py', 24), ('BrewDTO.py', 1), ('JacoWHI.py', 17), ('ClarGE3.py', 22), ('RogeSIP.py', 3), ('MartHRW2.py', 4), ('DibdTRL.py', 1), ('FitzRNS2.py', 14), ('HogaGMM2.py', 1), ('MartHSI.py', 28), ('EvarJSP.py', 8), ('DwigTHH.py', 1), ('SadlMLP2.py', 1), ('BowrJMM2.py', 6), ('BowrJMM3.py', 1), ('BeckWRE.py', 2), ('TaylIF.py', 1), ('WordWYR.py', 3), ('KeigTSS.py', 7), ('WaylFEP.py', 28), ('ClarGE4.py', 23)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
048c074ff0fd7901b63ef7e725035b3503c24214 | 136b315c9573cbbd6d613e45c34c6e79e98c8b25 | /corehq/apps/case_search/xpath_functions/comparison.py | 64a4f5850102399d6adbc5b5364445e3bd35c904 | [
"BSD-3-Clause"
] | permissive | soitun/commcare-hq | 088074835dd9cb1507a3d57523e5065ae4533fd8 | 5ee0ecadc08ecd1691ee0a26f8907ae0728af28e | refs/heads/master | 2023-08-23T08:06:09.694400 | 2023-05-12T10:18:24 | 2023-05-12T10:18:24 | 124,774,234 | 0 | 0 | BSD-3-Clause | 2023-05-15T22:20:21 | 2018-03-11T16:08:49 | Python | UTF-8 | Python | false | false | 1,432 | py | from django.utils.translation import gettext
from eulxml.xpath import serialize
from eulxml.xpath.ast import Step
from corehq.apps.case_search.dsl_utils import unwrap_value
from corehq.apps.case_search.exceptions import CaseFilterError
from corehq.apps.case_search.const import RANGE_OP_MAPPING, EQ, NEQ
from corehq.apps.es import filters
from corehq.apps.es.case_search import case_property_query, case_property_range_query
def property_comparison_query(context, case_property_name_raw, op, value_raw, node):
    """Build an Elasticsearch query comparing a case property against a value.

    ``op`` is either an (in)equality operator or one of the range operators
    listed in ``RANGE_OP_MAPPING``.
    """
    if not isinstance(case_property_name_raw, Step):
        raise CaseFilterError(
            gettext("We didn't understand what you were trying to do with {}").format(serialize(node)),
            serialize(node)
        )

    property_name = serialize(case_property_name_raw)
    rhs_value = unwrap_value(value_raw, context)

    if op not in (EQ, NEQ):
        # Range comparison (<, <=, >, >=): key the range query off the
        # operator's mapped keyword argument.
        try:
            return case_property_range_query(property_name, **{RANGE_OP_MAPPING[op]: rhs_value})
        except (TypeError, ValueError):
            raise CaseFilterError(
                gettext("The right hand side of a comparison must be a number or date. "
                        "Dates must be surrounded in quotation marks"),
                serialize(node),
            )

    equality = case_property_query(property_name, rhs_value, fuzzy=context.fuzzy)
    return filters.NOT(equality) if op == NEQ else equality
| [
"skelly@dimagi.com"
] | skelly@dimagi.com |
c989ba0f008495f905558182fddc35d7568485b4 | 3d1ec51718c98505c41a94dc26c0cd35f46482ec | /3-5-2_英翻中字典.py | 10e9e5eaa04d518cd7a09d5001dbbf5f5d4cc59e | [] | no_license | a235689741023/python-learning | 0c772ef79f29ed53def858e253625a1230cccd59 | badb3bfa66348934cd63eee1eff7d4c7a0f02422 | refs/heads/master | 2022-01-13T20:04:19.989119 | 2019-08-01T01:32:55 | 2019-08-01T01:32:55 | 198,148,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | 字典 = {'dog':'狗', 'fish':'魚', 'cat':'貓', 'pig':'豬'}
# Show the searchable English words, then the full dictionary contents.
print('可檢索單字:', 字典.keys())
print('字典全部內容:', 字典)
# Ask the user for one English word to translate.
單字 = input('請輸入要找的英文單字:')
print(字典.get(單字, '找不到該單字')) | [
"helenhan25@gmail.com"
] | helenhan25@gmail.com |
d30e940f0c1fa6242abd3278f9b7bf1d9ec24ff8 | b677b5319f76466601521d47ed91b177f4bbaffb | /dittybox/vm_param_generator.py | 468dc35c78b68cbe512fd05224dcedd776b1ef1d | [] | no_license | matelakat/ditty-box | b74e7b823cf8a75fde14e71dcc852a0d2fd401ab | 64a274feefa2d319c6ef70c78cc6acd30b27ccbc | refs/heads/master | 2021-01-01T05:38:51.916016 | 2014-07-20T20:30:29 | 2014-07-20T20:31:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | class NoMoreResources(Exception):
pass
class NameGenerator(object):
    """Produces VM names of the form ``<prefix><id>`` for ids in a fixed range."""

    def __init__(self, prefix, first_id, last_id):
        self.prefix = prefix
        self.first_id = first_id
        self.last_id = last_id

    def new_name(self, vm_names):
        """Return the first unused name; raise NoMoreResources when exhausted."""
        taken = set(vm_names)
        for candidate_id in range(self.first_id, self.last_id + 1):
            candidate = self.prefix + str(candidate_id)
            if candidate not in taken:
                return candidate
        raise NoMoreResources('No more names are available for new VM')
class FakeNameGenerator(object):
    """Test double: records every name-list it was asked about and returns a canned name."""

    def __init__(self):
        self.fake_name = None
        self.vm_names = []

    def new_name(self, vm_names):
        # Remember what we were asked, then hand back the canned name.
        self.vm_names.append(vm_names)
        return self.fake_name
| [
"mlakat@mlakat.hu"
] | mlakat@mlakat.hu |
0cce2b2191fd7796e4fecb0a131df5fc58e86448 | 0a2cb84edea4df799ae3abd016a81ffe7bb22e45 | /caesar.py | ef65abeb3df94ba7f0aa691cbb78f50ee1698100 | [] | no_license | suhachakka/web-caesar | 4c7221e69bd1ccc4ab485a83c669f1006d699973 | ec6c80a5e59eb1bbe1782b98ec1b202e80b803b8 | refs/heads/master | 2020-03-29T01:28:17.545134 | 2018-09-22T04:34:09 | 2018-09-22T04:34:09 | 149,391,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | def alphabet_position(character):
alphabet = 'abcdefghijklmnopqrstuvwxyz'
lower = character.lower()
return alphabet.index(lower)
def rotate_string_13(text):
    """ROT13 `text`, preserving case.

    FIX: the original crashed with ValueError on any non-alphabetic
    character (space, punctuation, digits); those are now passed through
    unchanged, consistent with the sibling rotate_string().
    """
    rotated = ''
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    for char in text:
        if not char.isalpha():
            # Non-letters are copied verbatim.
            rotated = rotated + char
            continue
        rotated_idx = (alphabet.index(char.lower()) + 13) % 26
        if char.isupper():
            rotated = rotated + alphabet[rotated_idx].upper()
        else:
            rotated = rotated + alphabet[rotated_idx]
    return rotated
def rotate_character(char, rot):
    """Shift the letter `char` by `rot` places (wrapping), preserving its case."""
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    # Inlined case-insensitive alphabet lookup (a=0 ... z=25).
    shifted = alphabet[(alphabet.index(char.lower()) + rot) % 26]
    return shifted.upper() if char.isupper() else shifted
def rotate_string(text, rot):
    """Caesar-shift every letter of `text` by `rot`; other characters pass through."""
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    pieces = []
    for ch in text:
        if ch.isalpha():
            shifted = alphabet[(alphabet.index(ch.lower()) + rot) % 26]
            pieces.append(shifted.upper() if ch.isupper() else shifted)
        else:
            pieces.append(ch)
    return ''.join(pieces)
#text =input("enter text:")
#rot = int(input("enter rotation value:"))
#print(rotate_string(text,rot)) | [
"suhasinichakka@gmail.com"
] | suhasinichakka@gmail.com |
38a80c15f2ce13d2c78e5913a3b1aadf4fc2e70a | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/benchmarks/sieve-457.py | 99bc8c55e3be4471951f624dbaca0a87b6c3a62a | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,586 | py | # A resizable list of integers
class Vector(object):
    items: [int] = None
    size: int = 0
    def __init__(self:"Vector"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector", idx: int) -> object:
        # FIX: also reject idx >= size. The original decremented `size`
        # even for an out-of-range index, silently dropping the last element.
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    # Vector variant that doubles its backing storage to amortize resizes.
    doubling_limit:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        # FIX: the original read `$ID.capacity()` -- a leftover template
        # placeholder that is not valid syntax; `self` is the intended receiver.
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    # Collect the integers i, i+1, ..., j-1 into a DoublingVector.
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
    # For each remaining value k (scanning left to right), delete every
    # LATER element divisible by k. Starting from vrange(2, n) this leaves
    # exactly the primes below n; k itself survives because the inner scan
    # starts at i + 1.
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Input parameter
n:int = 50
# Data
v:Vector = None
i:int = 0
# Crunch
# Build [2, n) and strip composite numbers in place.
v = vrange(2, n)
sieve(v)
# Print
# One prime per line.
while i < v.length():
    print(v.get(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
9ffcf463440da067d9865ec658dd284d73cce73f | fac69bc589361d0f1a8470a0f6070d15b9577b30 | /hostmonitor/management/commands/pinghosts.py | 5bbde81eda5559f042745776df2983086b8df138 | [
"MIT"
] | permissive | pombredanne/vahti | ec2d4595b1e1349cb2cef69ac47ec260f590f4e9 | f6e130d08b959e96cae537b85871e3c3795cb4a0 | refs/heads/master | 2020-12-31T03:56:34.885272 | 2012-05-28T17:03:51 | 2012-05-28T17:03:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,462 | py | from datetime import datetime
from subprocess import Popen, PIPE
from time import sleep
from optparse import make_option
import socket
from django.core.management.base import BaseCommand, CommandError
from hostmonitor.models import Host
FPING = 'fping'
def ping_hosts(hosts):
    """Ping all `hosts` with one fping invocation.

    Returns (up, down): two sets of IP strings parsed from fping's stdout
    ("<ip> is alive" / "<ip> is unreachable").
    """
    args = [FPING] + hosts
    p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    (fping_out, fping_err) = p.communicate()
    out_lines = fping_out.split("\n")
    # NOTE(review): err_lines is collected but unused since the line below
    # was commented out -- fping errors are currently ignored.
    err_lines = fping_err.split("\n")
    # The first whitespace-delimited token of each report line is the IP.
    up = [x.split(" ",1)[0] for x in out_lines if ' is alive' in x]
    down = [x.split(" ",1)[0] for x in out_lines if ' is unreachable' in x]
    #down += [x.split(" ",1)[0] for x in err_lines]
    return (set(up), set(down))
def dns_lookup(hosts):
    """Reverse-resolve every IP in `hosts`; return {ip: hostname or None}."""
    if hasattr(socket, 'setdefaulttimeout'):
        # Set the default timeout on sockets to 5 seconds
        socket.setdefaulttimeout(5)
    names = []
    for ip in hosts:
        try:
            names.append(socket.gethostbyaddr(ip)[0])
        except (socket.herror, socket.gaierror):
            # FIX: also catch gaierror -- gethostbyaddr raises it for
            # malformed or unresolvable addresses, which previously aborted
            # the whole sweep. Log the failing IP and keep going.
            print(ip)
            names.append(None)
    return dict(zip(hosts, names))
class Command(BaseCommand):
    """Ping every monitored Host and persist up/down state plus DNS name."""
    args = ''
    # NOTE(review): this help text looks copied from an "add hosts" command;
    # this command pings, it does not add hosts -- confirm and reword.
    help = 'Add the specified hosts or CIDR networks (not network/broadcast)'
    option_list = BaseCommand.option_list + (
        make_option('--loop', action="store_true", dest="loop"),
    )
    def handle(self, *args, **options):
        """Run one ping sweep, or with --loop repeat forever, once per hour."""
        if len(args) > 0:
            self.stderr.write("This command does not take arguments\n")
            return
        while True:
            self.ping_once()
            if not options['loop']:
                break
            sleep(3600)
    def ping_once(self):
        """Ping all monitored hosts, then update each Host row's status fields."""
        hosts = []
        for host in Host.objects.filter(monitor=True):
            hosts.append(host.ip)
        self.stdout.write("Pinging all monitored hosts\n")
        (up, down) = ping_hosts(hosts)
        self.stdout.write("Resolving DNS names\n")
        names = dns_lookup(hosts)
        for ip in up:
            self.stdout.write("%s up\n" % ip)
            h = Host.objects.get(ip=ip)
            h.name = names[ip]
            h.up = True
            h.last_up = datetime.now()
            # up_since marks the start of the current uptime streak; only
            # (re)set it when the host was previously down.
            if not h.up_since:
                h.up_since = datetime.now()
            h.save()
        for ip in down:
            self.stdout.write("%s down\n" % ip)
            h = Host.objects.get(ip=ip)
            h.name = names[ip]
            h.up = False
            h.up_since = None
            h.save()
| [
"joneskoo@kapsi.fi"
] | joneskoo@kapsi.fi |
9e3333b2f64dc19a208bd5f1e9d3f314db688059 | 22744511e8a5683fb3c1f7ae32ceff72ae6052af | /DepthSynthesis/models/sync_batchnorm/batchnorm_reimpl.py | d0b6b6c085239ca83e9047d8d4033a5de771da5e | [
"MIT",
"BSD-2-Clause"
] | permissive | YanchaoYang/DCL-DepthSynthesis | a655db76eb4c70305ce566f37d6279aec1c4309e | daca03cad66836aae65ac4d73f25b02a06c98595 | refs/heads/main | 2023-06-26T00:52:34.981130 | 2021-07-26T08:16:26 | 2021-07-26T08:16:26 | 451,676,814 | 1 | 0 | NOASSERTION | 2022-01-25T00:07:46 | 2022-01-25T00:07:45 | null | UTF-8 | Python | false | false | 2,382 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : batchnorm_reimpl.py
# Author : acgtyrant
# Date : 11/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
__all__ = ['BatchNorm2dReimpl']
class BatchNorm2dReimpl(nn.Module):
    """Pure-PyTorch re-implementation of 2d batch normalization.

    Mirrors ``nn.BatchNorm2d`` in training mode: normalization uses the
    biased batch variance, while the running statistics are updated with
    the unbiased estimate. Used for numerical-stability testing.

    Author: acgtyrant
    See also:
        https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.weight = nn.Parameter(torch.empty(num_features))
        self.bias = nn.Parameter(torch.empty(num_features))
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_running_stats(self):
        # Running statistics restart at the identity transform.
        self.running_mean.zero_()
        self.running_var.fill_(1)

    def reset_parameters(self):
        self.reset_running_stats()
        init.uniform_(self.weight)
        init.zeros_(self.bias)

    def forward(self, input_):
        batch, chans, height, width = input_.size()
        count = batch * height * width
        # Flatten to (C, N*H*W) so each channel's statistics are row reductions.
        flat = input_.permute(1, 0, 2, 3).contiguous().view(chans, count)
        total = flat.sum(1)
        total_sq = flat.pow(2).sum(1)
        mean = total / count
        sumvar = total_sq - total * mean
        # Exponential moving averages; the running variance uses the
        # *unbiased* estimate (divide by count - 1), as BatchNorm does.
        self.running_mean = (
            (1 - self.momentum) * self.running_mean
            + self.momentum * mean.detach()
        )
        unbiased_var = sumvar / (count - 1)
        self.running_var = (
            (1 - self.momentum) * self.running_var
            + self.momentum * unbiased_var.detach()
        )
        # Normalization itself uses the *biased* variance.
        biased_var = sumvar / count
        inv_std = 1 / (biased_var + self.eps).pow(0.5)
        normalized = (
            (flat - mean.unsqueeze(1)) * inv_std.unsqueeze(1) *
            self.weight.unsqueeze(1) + self.bias.unsqueeze(1))
        return normalized.view(chans, batch, height, width).permute(1, 0, 2, 3).contiguous()
"jhonve@zju.edu.cn"
] | jhonve@zju.edu.cn |
1e75dd937e7a1e842472c37a23b9269408e82317 | c0340c511cff5b40b4681c4d3238d807624c0323 | /results/correlations/plot_byLanguage/plotByLanguage_Combined.py | 9578733a8e10cb798dc13acd08a5f49b86cbdb51 | [] | no_license | m-hahn/grammar-optim | 5fa7ade47d2ad91f517c887ee2c65af24059069d | 07a1a80692a504bcafc8120a21c4dc9066b495ee | refs/heads/master | 2022-08-30T06:54:42.749264 | 2022-08-05T12:09:28 | 2022-08-05T12:09:28 | 156,456,167 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,378 | py | source("./readGrammarsPerLanguage_Combined.py")
# Derive a numeric language index and abbreviated family labels for plotting.
D$LanguageNumeric = as.numeric(D$Language_Ordered)
D$FamilyPrint = as.character(D$Family)
D = D %>% mutate(FamilyPrint = ifelse(FamilyPrint == "Malayo-Sumbawan", "Mal.-Sum.", as.character(FamilyPrint)))
D = D %>% mutate(FamilyPrint = ifelse(FamilyPrint == "Sino-Tibetan", "Sin.-Tib.", as.character(FamilyPrint)))
D = D %>% mutate(FamilyPrint = ifelse(FamilyPrint == "Viet-Muong", "Viet-M.", as.character(FamilyPrint)))
# Per-family vertical extent (first/last/mean language index) plus a small
# per-family y-offset so the family brackets do not overlap.
DFam = D %>% group_by(FamilyPrint) %>% summarise(Start = min(LanguageNumeric), End = max(LanguageNumeric), Mean = mean(LanguageNumeric))
DFam$yOffset = 0.2*(1:(nrow(DFam)))
D$yOffset=NULL
D = merge(D, DFam %>% select(FamilyPrint, yOffset), by=c("FamilyPrint"))
DLang = unique(D %>% select(Language_Ordered, iso_Ordered, LanguageNumeric, yOffset))
# Map the eight dependency relations to fixed dodge positions 1..8.
D = D %>% mutate(CoarseDependency = recode(CoarseDependency, lifted_case=1, lifted_cop=2, aux=3, nmod=4, acl=5, lifted_mark=6, obl=7, xcomp=8))
# One dot column per condition: a dodged point per dependency relation,
# coloured by head direction (DirB), all axis furniture stripped so the
# panels can be butted together later.
plot_orders_real = ggplot(D %>% filter(Type == "Real Languages"), aes(x = 1, y = LanguageNumeric+yOffset, group=CoarseDependency)) +
        geom_point(aes(fill=DirB, colour = DirB, size =1), position = position_dodge(width=2.0)) +
       # scale_color_gradient() + #values=c("blue", "green")) +
        theme_classic() +
        #theme_bw() +
        theme(axis.text.x=element_blank(), #element_text(size=9, angle=0, vjust=0.3),
              axis.text.y=element_blank(),axis.ticks=element_blank(),
              plot.title=element_text(size=11)) +
        theme(axis.title=element_blank()) +
        theme(legend.position="none") + labs(x=NULL) +
        scale_x_continuous(breaks = NULL) +
        scale_y_continuous(breaks = NULL)
# Same layout for grammars optimized for overall efficiency.
plot_orders_eff = ggplot(D %>% filter(Type == "Efficiency"), aes(x = 1, y = LanguageNumeric+yOffset, group=CoarseDependency)) +
        geom_point(aes(fill=DirB, colour = DirB, size =1), position = position_dodge(width=2.0)) +
       # scale_color_gradient() + #values=c("blue", "green")) +
        theme_classic() +
        theme(axis.text.x=element_blank(), #element_text(size=9, angle=0, vjust=0.3),
              axis.text.y=element_blank(),axis.ticks=element_blank(),
              plot.title=element_text(size=11)) +
        theme(axis.title=element_blank()) +
        theme(legend.position="none") + labs(x=NULL) +
        scale_x_continuous(breaks = NULL) +
        scale_y_continuous(breaks = NULL)
# Same layout for predictability-optimized grammars.
plot_orders_surp = ggplot(D %>% filter(Type == "Predictability"), aes(x = 1, y = LanguageNumeric+yOffset, group=CoarseDependency)) +
        geom_point(aes(fill=DirB, colour = DirB, size =1), position = position_dodge(width=2.0)) +
       # scale_color_gradient() + #values=c("blue", "green")) +
        theme_classic() +
        theme(axis.text.x=element_blank(), #element_text(size=9, angle=0, vjust=0.3),
              axis.text.y=element_blank(),axis.ticks=element_blank(),
              plot.title=element_text(size=11)) +
        theme(axis.title=element_blank()) +
        theme(legend.position="none") + labs(x=NULL) +
        scale_x_continuous(breaks = NULL) +
        scale_y_continuous(breaks = NULL)
# Same layout for parseability-optimized grammars.
plot_orders_pars = ggplot(D %>% filter(Type == "Parseability"), aes(x = 1, y = LanguageNumeric+yOffset, group=CoarseDependency)) +
        geom_point(aes(fill=DirB, colour = DirB, size =1), position = position_dodge(width=2.0)) +
       # scale_color_gradient() + #values=c("blue", "green")) +
        theme_classic() +
        theme(axis.text.x=element_blank(), #element_text(size=9, angle=0, vjust=0.3),
              axis.text.y=element_blank(),axis.ticks=element_blank(),
              plot.title=element_text(size=11)) +
        theme(axis.title=element_blank()) +
        theme(legend.position="none") + labs(x=NULL) +
        scale_x_continuous(breaks = NULL) +
        scale_y_continuous(breaks = NULL)
# Left panel: language ISO codes plus bracketed family labels.
plot_langs = ggplot(DLang)
plot_langs = plot_langs + theme_classic()
plot_langs = plot_langs + theme(axis.text.x=element_blank(), #element_text(size=9, angle=0, vjust=0.3),
       axis.text.y=element_blank(),
       plot.title=element_text(size=11))
plot_langs = plot_langs + geom_text(aes(x=1.2 + 0.07, y=LanguageNumeric+yOffset, label=iso_Ordered), hjust=1, size=3, colour="grey30")
plot_langs = plot_langs + theme(axis.title=element_blank())
plot_langs = plot_langs + xlim(-2.0, 1.35)
# Square bracket per family: top tick, bottom tick, connecting vertical line.
plot_langs = plot_langs + geom_segment(data=DFam, aes(x=0, y=Start+yOffset, xend=0.5, yend=Start+yOffset))
plot_langs = plot_langs + geom_segment(data=DFam, aes(x=0, y=End+yOffset, xend=0.5, yend=End+yOffset))
plot_langs = plot_langs + geom_segment(data=DFam, aes(x=0, y=Start+yOffset, xend=0, yend=End+yOffset))
plot_langs = plot_langs + geom_text(data=DFam, aes(x=-0.1, y=Mean+yOffset , label=FamilyPrint), hjust=1, size=3, colour="grey30")
plot_langs = plot_langs + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
     panel.background = element_blank(), axis.line = element_blank(),
     plot.margin=unit(c(0,0,0,0), "mm"),
     axis.ticks = element_blank()) + labs(x=NULL)
library("gridExtra")
# Zero out margins so the five panels butt up against each other.
plot_orders_real = plot_orders_real + theme( plot.margin=unit(c(0,0,0,0), "mm"))
plot_orders_eff = plot_orders_eff + theme( plot.margin=unit(c(0,0,0,0), "mm"))
plot_orders_surp = plot_orders_surp + theme( plot.margin=unit(c(0,0,0,0), "mm"))
plot_orders_pars = plot_orders_pars + theme( plot.margin=unit(c(0,0,0,0), "mm"))
plot = grid.arrange(plot_langs, plot_orders_real, plot_orders_eff, plot_orders_surp, plot_orders_pars, nrow=1, widths=c(1, 1.2, 1.2, 1.2, 1.2))
ggsave(plot=plot, "../figures/pred-eff-pred-pars-families.pdf", width=6, height=8)
# Second figure version: add a column header and a numbered legend row
# (circles 1..8 = the recoded dependency relations) above each panel.
plot_langs2 = plot_langs + annotate("text", label="", x=1, y=58.5, size=6)
plot_orders_real2 = plot_orders_real + annotate("text", label="Real", x=1, y=58.5, size=6)
plot_orders_real2 = plot_orders_real2 + geom_point(data=data.frame(num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=NA, y=56.7, colour=NA, fill=NA), color="black", fill=NA, size=4.5, shape=21)
plot_orders_real2 = plot_orders_real2 + geom_text(data=data.frame(CoarseDependency=unique(D$CoarseDependency), num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=CoarseDependency, y=56.55, label=as.character(num)))
plot_orders_real2
plot_orders_eff2 = plot_orders_eff + annotate("text", label="Efficiency", x=1, y=58.5, size=5)
plot_orders_eff2 = plot_orders_eff2 + geom_point(data=data.frame(num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=NA, y=56.7, colour=NA, fill=NA), color="black", fill=NA, size=4.5, shape=21)
plot_orders_eff2 = plot_orders_eff2 + geom_text(data=data.frame(CoarseDependency=unique(D$CoarseDependency), num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=CoarseDependency, y=56.55, label=as.character(num)))
plot_orders_eff2
plot_orders_surp2 = plot_orders_surp + annotate("text", label="Predictability", x=1, y=58.5, size=5)
plot_orders_surp2 = plot_orders_surp2 + geom_point(data=data.frame(num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=NA, y=56.7, colour=NA, fill=NA), color="black", fill=NA, size=4.5, shape=21)
plot_orders_surp2 = plot_orders_surp2 + geom_text(data=data.frame(CoarseDependency=unique(D$CoarseDependency), num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=CoarseDependency, y=56.55, label=as.character(num)))
plot_orders_surp2
plot_orders_pars2 = plot_orders_pars + annotate("text", label="Parseability", x=1, y=58.5, size=5)
plot_orders_pars2 = plot_orders_pars2 + geom_point(data=data.frame(num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=NA, y=56.7, colour=NA, fill=NA), color="black", fill=NA, size=4.5, shape=21)
plot_orders_pars2 = plot_orders_pars2 + geom_text(data=data.frame(CoarseDependency=unique(D$CoarseDependency), num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=CoarseDependency, y=56.55, label=as.character(num)))
plot_orders_pars2
plot = grid.arrange(plot_langs2, plot_orders_real2, plot_orders_eff2, plot_orders_surp2, plot_orders_pars2, nrow=1, widths=c(1, 1.2, 1.2, 1.2, 1.2))
plot
ggsave(plot=plot, "../figures/pred-eff-pred-pars-families-2.pdf", width=6, height=8)
# Agreement between attested and efficiency-optimized directions, per
# (family, language, dependency) cell; mean(Agree) is the headline rate.
D2 = (D %>% select(Family, Language, CoarseDependency, DirB, Type) %>% spread(Type, DirB) %>% rename(Real = 'Real Languages') %>% rename(Predicted = Efficiency))
D2$Agree = (D2$Real == D2$Predicted)
#summary(glmer(Agree ~ (1|CoarseDependency) + (1|Family), data=D2, family="binomial"))
mean(D2$Agree)
| [
"mhahn29@gmail.com"
] | mhahn29@gmail.com |
a1ca652bbcbc6fe3ceebec0c3c56a8205ba2449f | 3597ecf8a014dbd6f7d998ab59919a94aff8011d | /front-web/src/www/application/modules/treatment/block/actions.py | 18cc3477c9e71c71e2a949ed2b6fbd5799dbce77 | [] | no_license | duytran92-cse/nas-genomebrowser | f42b8ccbb7c5245bde4e52a0feed393f4b5f6bf1 | d0240ad5edc9cfa8e7f89db52090d7d733d2bb8a | refs/heads/master | 2022-10-24T05:26:01.760241 | 2020-06-14T19:01:35 | 2020-06-14T19:01:35 | 272,264,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,214 | py | from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.conf import settings
from notasquare.urad_web import actions, page_contexts, widgets
from notasquare.urad_web_material import renderers
from application.modules.common import page_contexts, actions as common_actions, components as common_components
from application.themes.genopedia import renderers as genopedia_renderers
from application.themes.genopedia import widgets as genopedia_widgets
from application import constants
from . import components
class Update(actions.crud.UpdateAction, common_actions.BaseAction):
    """CRUD action that edits a single treatment block.

    The form layout is chosen from the stored block's ``kind`` field
    (text, publications or alias); saving stores a new version and
    redirects back to the owning treatment page.
    """
    def create_form(self):
        """Build the edit form whose fields match the block's kind."""
        # The kind is read from the persisted block record, not from the request.
        treatment_block = components.TreatmentBlockStore(self.get_container()).get(self.params['block_id'])
        kind = treatment_block['data']['record']['kind']
        form = widgets.form.Form()
        form.renderer = renderers.widgets.form.HorizontalFormRenderer()
        if kind == 'general_text':
            # Simple title + free-text block.
            form.add_field(widgets.field.Textbox('title'))
            form.add_field(widgets.field.Textarea('text'))
            form.renderer.add_section('General - Text')
            form.renderer.add_field('title', 'Title')
            form.renderer.add_field('text', 'Text', rows=15)
        if kind == 'general_publications':
            # Editable table of publications keyed by their identifiers.
            form.add_field(widgets.field.List('publications', {
                'pmid': widgets.field.Textbox('pmid'),
                'doi': widgets.field.Textbox('doi'),
                'pmc': widgets.field.Textbox('pmc'),
                'title': widgets.field.Textarea('title'),
                'authors': widgets.field.Textarea('authors'),
                'journal': widgets.field.Textarea('journal')
            }))
            form.renderer.add_section('General - Publications')
            form.renderer.add_field('publications', 'Publications', columns=[
                {'id': 'pmid', 'label': 'PMID', 'width': '10%'},
                {'id': 'doi', 'label': 'DOI', 'width': '10%'},
                {'id': 'pmc', 'label': 'PMC', 'width': '10%'},
                {'id': 'title', 'label': 'Title', 'width': '30%'},
                {'id': 'authors', 'label': 'Authors', 'width': '15%'},
                {'id': 'journal', 'label': 'Journal', 'width': '15%'},
            ])
        if kind == 'general_alias':
            # Show effect & risk
            # Editable list of alias names for this treatment.
            form.add_field(widgets.field.List('alias', {
                'id': widgets.field.Textbox('id'),
                'alias': widgets.field.Textbox('alias')
            }))
            form.renderer.add_section('Variation - Alias')
            form.renderer.add_field('alias', 'Alias', columns=[
                {'id': 'alias', 'label': 'Alias', 'width': '50%'}
            ])
        # Renderers are shared by every kind; registering all of them is harmless.
        form.renderer.set_field_renderer('textbox', renderers.widgets.field.TextboxRenderer())
        form.renderer.set_field_renderer('textarea', renderers.widgets.field.TextareaRenderer())
        form.renderer.set_field_renderer('combobox', renderers.widgets.field.ComboboxRenderer())
        form.renderer.set_field_renderer('list', renderers.widgets.field.ListRenderer())
        return form
    def load_form(self, form):
        """Populate the form from the stored block, or show an error banner."""
        result = components.TreatmentBlockStore(self.get_container()).get(self.params['block_id'])
        if result['status'] == 'ok':
            record = result['data']['record']
            form.set_things({
                'page': 'treatment',
                'page_title': record['treatment_title']
            })
            form.set_form_data(record)
        else:
            form.add_message('danger', "Can't load form")
    def process_form_data(self, data):
        """Persist the edited block as a new version.

        Also resolves the owning treatment's title (via the store helper)
        into ``self.params['page_title']`` for the success redirect.
        """
        # print "POST-Params-Update:", self.params
        data['new_version'] = True
        res = components.TreatmentBlockStore(self.get_container()).update(data, self.params['block_id'])
        rs = components.TreatmentBlockStore(self.get_container()).helper(res['data']['pk'])
        self.params['page_title'] = rs['data']['record']['title']
        return res
    def handle_on_success(self, messages):
        """Redirect back to the treatment page named during processing."""
        return HttpResponseRedirect('/treatment/%s' % (self.params["page_title"]))
| [
"thanh.tran@etudiant.univ-lr.fr"
] | thanh.tran@etudiant.univ-lr.fr |
ad4ea41c0d680a2d8809ce479448e93a9cd7d4a6 | 69e035ffa4e668e34ee4bb4f390d12a05ab8de51 | /stats.py | f16a23ab096a555fabfbf66fa2f47f87636bc12c | [] | no_license | bhavyakarki/useful-scripts | e2f5e7fa7ea2e8bd70b69ed3cbc81b77b1e92b21 | e61557283ed903bdfee8b79b85190ff1fe4d8b2a | refs/heads/master | 2021-04-27T10:00:33.756630 | 2018-04-25T19:59:40 | 2018-04-25T19:59:40 | 122,526,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | import os
import pandas as pd
# Input: the combined, segmented workload log (adjust per dataset).
file_path = './segmented_file/WL1_combined.csv'
# Load the comma-separated log into a DataFrame.
df = pd.read_table(file_path, sep=',')
# Dataset dimensions: number of log rows and number of feature columns.
log_num, feature_numbers = df.shape
# Partition the frame by database node, then summarise each partition
# with pandas' describe() (count/mean/std/quantiles per feature).
dbnode = df['dbNode'].unique()
subdf = {node: df[df['dbNode'] == node] for node in dbnode}
stats = {d: subdf[d].describe() for d in subdf}
| [
"noreply@github.com"
] | noreply@github.com |
69e0a9dba8e04c37c15b79176b50c4f4a85b68b6 | 114c853cc80877b542bb6d4cc3eabf09452f02a2 | /blockchain.py | 09b20846aa25ccca9e99c357a687337e099100f6 | [] | no_license | ashishbaghudana/blockchain | e5b3d8e6c7a5ddefabcff066c15c6b20ceb7d039 | 0e8dcd2b403f2024a48ad882e30fd4556fbd91d2 | refs/heads/master | 2022-12-10T17:54:01.079638 | 2018-11-15T17:34:33 | 2018-11-15T17:34:33 | 155,915,415 | 0 | 1 | null | 2022-12-08T01:17:54 | 2018-11-02T19:58:23 | Python | UTF-8 | Python | false | false | 4,844 | py | import hashlib
import json
from urllib.parse import urlparse
import requests
from block import Block
from transactions import Transaction
class Blockchain(object):
    """A minimal proof-of-work blockchain.

    The chain is a list of ``Block`` objects; pending transactions are
    buffered in ``current_transactions`` until the next block is mined.
    Consensus is naive longest-valid-chain over a set of peer nodes.
    """

    def __init__(self):
        self.chain = []
        self.current_transactions = []
        # Chain position up to which consensus was last agreed;
        # valid_chain() re-validates from here onwards.
        self.last_consensus = 0
        # Peer nodes polled by resolve_conflicts().
        self.nodes = set()
        # Voter nodes. Fix: this set was never initialised, so
        # register_voter_node() raised AttributeError on first use.
        self.voter_nodes = set()
        # Create the genesis block.
        self.new_block(proof=100, previous_hash=1)

    def new_block(self, proof, previous_hash=None):
        """Create a new block in the blockchain.

        Parameters
        ----------
        proof : int
            The proof given by the Proof of Work algorithm.
        previous_hash : str
            Hash of previous block; defaults to the hash of the current
            chain tip.

        Returns
        -------
        Block
            The new block (also appended to the chain).
        """
        block = Block(
            index=len(self.chain) + 1,
            transactions=self.current_transactions,
            proof=proof,
            previous_hash=(previous_hash or self.hash(self.chain[-1])),
            transaction_hashes=set(
                [Transaction.hash(t) for t in self.current_transactions]))
        # The pending transactions are now part of the chain.
        self.current_transactions = []
        self.chain.append(block)
        return block

    @staticmethod
    def hash(block):
        """Create a SHA-256 hash of a block.

        The block's dictionary form is serialised with sorted keys so the
        digest does not depend on key order.
        """
        block_string = json.dumps(
            block.to_dictionary(), sort_keys=True).encode()
        return hashlib.sha256(block_string).hexdigest()

    @property
    def last_block(self):
        """The current tip of the chain."""
        return self.chain[-1]

    def proof_of_work(self, last_proof):
        """Simple Proof of Work Algorithm.

        Find a number p' such that sha256(f"{p}{p'}{tip_hash}") has four
        leading zeros, where p is the previous proof.
        """
        proof = 0
        previous_hash = self.hash(self.chain[-1])
        while self.valid_proof(last_proof, proof, previous_hash) is False:
            proof += 1
        return proof

    @staticmethod
    def valid_proof(last_proof, proof, hash):
        """Return True if the candidate proof hashes to four leading zeros."""
        guess = f'{last_proof}{proof}{hash}'.encode()
        guess_hash = hashlib.sha256(guess).hexdigest()
        return guess_hash[:4] == "0000"

    def register_node(self, address):
        """Register a peer node, normalised to ``scheme://netloc``."""
        parsed_url = urlparse(address)
        node = f'{parsed_url.scheme}://{parsed_url.netloc}'
        self.nodes.add(node)

    def register_voter_node(self, address):
        """Register a voter node, normalised to ``scheme://netloc``."""
        parsed_url = urlparse(address)
        node = f'{parsed_url.scheme}://{parsed_url.netloc}'
        self.voter_nodes.add(node)

    def valid_chain(self, chain):
        """Validate hashes and proofs from the last consensus point onward."""
        last_block = chain[self.last_consensus]
        current_index = self.last_consensus + 1
        while current_index < len(chain):
            block = chain[current_index]
            # Check that the hash of the block is correct
            if block.previous_hash != self.hash(last_block):
                return False
            # Check that the Proof of Work is correct
            if not self.valid_proof(last_block.proof, block.proof,
                                    block.previous_hash):
                return False
            last_block = block
            current_index += 1
        return True

    def resolve_conflicts(self):
        """Longest-valid-chain consensus.

        Polls every registered peer for its chain and adopts the longest
        one that validates. Returns True if our chain was replaced.
        """
        neighbours = self.nodes
        new_chain = None
        # We're only looking for chains longer than ours
        max_length = len(self.chain)
        # Grab and verify the chains from all the nodes in our network
        for node in neighbours:
            response = requests.get(f'{node}/chain')
            if response.status_code == 200:
                length = response.json()['length']
                chain = Blockchain.deserialize_chain(response.json()['chain'])
                # Check if the length is longer and the chain is valid
                if length > max_length and self.valid_chain(chain):
                    max_length = length
                    new_chain = chain
        # Replace our chain if we discovered a new, valid chain longer
        # than ours
        if new_chain:
            self.chain = new_chain
            # Fix: previously assigned to ``self.consensus``, an attribute
            # nothing else reads; the rest of the class uses last_consensus.
            self.last_consensus = new_chain[-1].index
            return True
        return False

    @staticmethod
    def deserialize_chain(chain_obj):
        """Rebuild Block objects from their JSON dictionary forms."""
        chain = []
        for block_obj in chain_obj:
            block = Block.from_json(block_obj)
            chain.append(block)
        return chain

    def serialize_chain(self):
        """Dump the chain as a list of JSON-serialisable dictionaries."""
        chain_obj = []
        for block in self.chain:
            chain_obj.append(block.to_dictionary())
        return chain_obj
| [
"ashish.baghudana26@gmail.com"
] | ashish.baghudana26@gmail.com |
2acbb3b79b0a4861189cb1c43f2d7fd5049f0132 | fc2447b91cbee82e74e939092ec1903678f3217a | /PythonPractice/hm_py/hm_oop/oop_single.py | 4b09ef3d411106af86bc146dc8c60d1ee2a315ee | [] | no_license | yglj/learngit | 0eac654e7c49f2ede064b720e6ee621a702193b4 | 74fb4b93d5726c735b64829cafc99878d8082121 | refs/heads/master | 2022-12-24T10:01:56.705046 | 2019-05-27T21:04:08 | 2019-05-27T21:04:08 | 146,157,116 | 0 | 1 | null | 2022-12-12T07:01:25 | 2018-08-26T06:28:20 | HTML | UTF-8 | Python | false | false | 924 | py | # 单例设计模式
# 类只有创建唯一个对象实例
# 应用场景: 打印机,回收站,音乐播放对象
# __new__() object提供的内置静态方法,作用:为对象分配空间,返回对象引用
class MusicPlayer:
    """Singleton music player: only one instance is ever allocated.

    ``__new__`` allocates the object exactly once and afterwards always
    hands back the cached instance; the body of ``__init__`` runs only
    once, guarded by a class-level flag.
    """
    __init_flag = False   # becomes True after the first __init__ run
    instance = None       # the single shared instance
    def __new__(cls, *args):
        # Fast path: hand back the already-allocated singleton.
        if cls.instance is not None:
            return cls.instance
        print('创建对象时,自动分配空间')
        cls.instance = super().__new__(cls)
        return cls.instance
    def __init__(self):
        # One-time initialisation; later constructor calls are no-ops.
        if not MusicPlayer.__init_flag:
            print('初始化对象,分配实例对象属性')
            MusicPlayer.__init_flag = True
# Demo: both calls yield the same instance; allocation/initialisation
# messages print only for the first call.
m = MusicPlayer()
print('-' * 30)
m2 = MusicPlayer()
| [
"2365952530@qq.com"
] | 2365952530@qq.com |
e0f35f41455126b5d8fad7e0a5ea444fa821a098 | ae73157165bce648fce14bb77df19c20f3cd1027 | /setup.py | f25ea985f92182701b2134e62e7bc69eca2cfc7a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | honzamach/pyzenkit | 870ab1ff7b2dca8b13af57cc44bc3ac17a2d8ce9 | ef344bee2f4df6c08595b98b7f830852cca63bed | refs/heads/master | 2022-01-26T02:23:07.924087 | 2022-01-20T08:51:16 | 2022-01-20T08:51:16 | 84,448,268 | 0 | 2 | NOASSERTION | 2021-01-10T19:36:05 | 2017-03-09T14:00:44 | Python | UTF-8 | Python | false | false | 2,180 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# This file is part of PyZenKit package.
#
# Copyright (C) since 2016 CESNET, z.s.p.o (http://www.ces.net/)
# Copyright (C) since 2015 Honza Mach <honza.mach.ml@gmail.com>
# Use of this package is governed by the MIT license, see LICENSE file.
#
# This project was initially written for personal use of the original author. Later
# it was developed much further and used for project of author`s employer.
#-------------------------------------------------------------------------------
# Resources:
# https://packaging.python.org/en/latest/
# https://python-packaging.readthedocs.io/en/latest/index.html
import sys
import os
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
#
# Import local version of pyzenkit library, so that we can insert correct version
# number into documentation.
#
sys.path.insert(0, os.path.abspath('.'))
import pyzenkit
here = os.path.abspath(os.path.dirname(__file__))
#-------------------------------------------------------------------------------
# Get the long description from the README file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name = 'pyzenkit',
    # Single-sourced from pyzenkit/__init__.py (imported above).
    version = pyzenkit.__version__,
    description = 'Python 3 script and daemon toolkit',
    long_description = long_description,
    classifiers = [
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3 :: Only',
        'Environment :: Console',
    ],
    keywords = 'library console script daemon',
    url = 'https://github.com/honzamach/pyzenkit',
    author = 'Honza Mach',
    author_email = 'honza.mach.ml@gmail.com',
    license = 'MIT',
    packages = find_packages(),
    # Unit tests are collected by nose from the package tree.
    test_suite = 'nose.collector',
    tests_require = [
        'nose'
    ],
    install_requires=[
        'python-dotenv',
        'jsonschema',
        'pydgets'
    ],
    include_package_data = True,
    zip_safe = False
)
| [
"honza.mach.ml@gmail.com"
] | honza.mach.ml@gmail.com |
ea8bcdc0b183def68c8745950edbbf13533c588d | 65b708f0646ea090a4e9bc615cd37fd799bd9bce | /venv/Scripts/pip3-script.py | 307f938a7427296d42bf18912a97aeee71dc9f96 | [] | no_license | chrisna2/python-web-scrapping | af803079586c7b798365d23f5667a24d0c6633e8 | 92e74b4985006246f543de87ff26673b94e8c0a8 | refs/heads/master | 2020-07-08T14:40:32.959560 | 2019-08-23T03:19:47 | 2019-08-23T03:19:47 | 203,703,270 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!D:\tyn_dev\workspace_pycham\web-scrapping\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"chrisna2@hanmail.net"
] | chrisna2@hanmail.net |
2c4770fe31758b3032479cc4fd7393a4fa97b0d9 | 89012c1f0873f787e2cdb2306216508f759ed153 | /python/python-request/pyreq/lib/python3.7/codecs.py | 3e7810769ba9d240fcc55119421e3a7cbd4bf541 | [] | no_license | mazharul-miraz/summer-mid-night-019 | 4727aa7ec70fc22f8870b1ce63cdd19637c0f6aa | ba73ab575fd24a6307d5a0421b514600c2d1e93f | refs/heads/master | 2020-05-26T23:20:08.874374 | 2019-06-13T07:36:09 | 2019-06-13T07:36:09 | 188,410,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | /home/miraz/miniconda3/lib/python3.7/codecs.py | [
"mazharul.miraz@outlook.com"
] | mazharul.miraz@outlook.com |
33098e32a62e5b220295bdfe9ae5aa02f0f87789 | 39abd398b6198cfff0744e535eb1491a8bb15e01 | /geektext/shoppingcart/migrations/0001_initial.py | 8c824b532f342b6b36b68d4475da33168be6bab6 | [] | no_license | Dhern251/GeekText | 62f067aac41473b1ec9b3913ad4fbcd8dee5cbf6 | 3b4ea603e5407d8c61af22ac2adbae90ef5be286 | refs/heads/master | 2020-05-07T19:53:50.419075 | 2019-04-12T18:43:51 | 2019-04-12T18:43:51 | 180,834,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,585 | py | # Generated by Django 2.1.5 on 2019-04-05 19:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the shoppingcart app:
    # Cart (one per user id), Product, and the CartItems join table.
    # Generated migrations should not be edited by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('userId', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='CartItems',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField()),
                ('cartId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shoppingcart.Cart')),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('author', models.CharField(max_length=250)),
                ('price', models.DecimalField(decimal_places=2, max_digits=6)),
            ],
        ),
        # Added after CreateModel('CartItems') because Product is created later.
        migrations.AddField(
            model_name='cartitems',
            name='productId',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shoppingcart.Product'),
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
589d99eaa85b21d2d2cd4f07fe756024239ee12f | e3b2c43265611599389ea0a97ab50a8b7d7d7657 | /firstproject/firstapp/urls.py | 92ba5fccb5923e724108c5c0e349ee5bac0283d0 | [] | no_license | HosneMubarak/prectice-rest-framework-version-1 | 979d1faf1d09c4e070ce8b638bc1261afb630c52 | 01291c85c947e77d90b61af8a83251e2002245f0 | refs/heads/master | 2023-07-09T22:55:24.883705 | 2021-08-24T19:55:39 | 2021-08-24T19:55:39 | 399,588,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from django.urls import path
from firstapp import views
# Route GET/POST /employees to the function-based employee view.
urlpatterns = [
    path('employees', views.employeeview),
]
| [
"h.m.tasmir@gmail.com"
] | h.m.tasmir@gmail.com |
a86a0154482e16cf8c3571c48245f99aa3682c3a | 3929bd90b9c82108b064e8b63494f6959cc7c4e1 | /ke/images/python/replication_controller.py | 75ed95f1209707eb848207d9d93a500d12008d77 | [
"Apache-2.0"
] | permissive | justasabc/kubernetes-ubuntu | 32ab2b84dbedd9950ae1f992b1edce1437d86d41 | afc670297a5becb2fcb4404c3ee1e02c99b5eaf4 | refs/heads/master | 2016-09-11T10:57:51.936077 | 2015-05-31T14:42:33 | 2015-05-31T14:42:33 | 31,883,281 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | """
Class Hierarchy
G{classtree: BaseController}
Package tree
G{packagetree: controller}
Import Graph
G{importgraph: controller}
"""
from cluster_tool import KubernetesTool
from controller_param import ControllerParam
class BaseController:
    """Base wrapper around a Kubernetes replication controller.

    Holds the controller id and the path to its JSON spec, and delegates
    start/stop to a KubernetesTool instance.
    """

    def __init__(self, controller_id, config_path):
        # Identifier used by the cluster tooling and path of the JSON spec.
        self.controller_id = controller_id
        self.config_path = config_path
        # Optional extra parameters, populated by subclasses (ControllerParam).
        self.controller_param = None
        # Shared helper that drives the kubernetes cluster.
        self.tool = KubernetesTool()

    def get_controller_id(self):
        return self.controller_id

    def get_config_path(self):
        return self.config_path

    def get_controller_param(self):
        return self.controller_param

    def get_tool(self):
        return self.tool

    def start(self):
        # Create the controller from its JSON description.
        self.tool.create_controller(self.config_path)

    def stop(self):
        # Delete the controller by id.
        self.tool.delete_controller(self.controller_id)
class ApacheController(BaseController):
def __init__(self):
print "[ApacheController] init ..."
self.create_controller_param()
print "[ApacheController] OK"
def create_controller_param(self):
controller_id = 'apache-controller'
config_path = 'json/apache-controller.json'
BaseController.__init__(self,controller_id,config_path)
def start(self):
print "[ApacheController] start..."
BaseController.start(self)
class MysqlController(BaseController):
def __init__(self):
print "[MysqlController] init ..."
self.create_controller_param()
print "[MysqlController] OK"
def create_controller_param(self):
controller_id = 'mysql-controller'
config_path = 'json/mysql-controller.json'
BaseController.__init__(self,controller_id,config_path)
def start(self):
print "[MysqlController] start..."
BaseController.start(self)
class RobustController(BaseController):
def __init__(self):
print "[RobustController] init ..."
self.create_controller_param()
print "[RobustController] OK"
def create_controller_param(self):
controller_id = 'robust-controller'
config_path = 'json/robust-controller.json'
BaseController.__init__(self,controller_id,config_path)
def start(self):
print "[RobustController] start..."
BaseController.start(self)
class ControllerTesting(ApacheController, MysqlController, RobustController):
    """Aggregate of every concrete controller, used for ad-hoc testing;
    method resolution order favours ApacheController's implementations."""
    pass
| [
"zunlin1234@gmail.com"
] | zunlin1234@gmail.com |
88d7f07934a63b614d966b992377ec9e7127692d | 08bad9043ac209a954b4f014d578001ea2cb2330 | /boards/tests/test_view_topic_posts.py | ac02a80bb210afec3c8c4ada1954eb2efce1f2c2 | [] | no_license | vathsanvk/mydjangoproject | dddb1456b0a0edb92f754781227fe25b06edeca4 | 35d96413384f7bdd5915dabf2ccf42a130b62051 | refs/heads/master | 2021-05-09T05:48:01.402210 | 2018-02-12T00:22:48 | 2018-02-12T00:22:48 | 119,319,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import resolve, reverse
from ..models import Board, Post, Topic
from ..views import topic_posts
class TopicPostsTests(TestCase):
    """Tests for the topic-posts listing view."""

    def setUp(self):
        # Minimal fixture: one board, one user, one topic with one post,
        # then GET the topic's post listing once for all assertions.
        board = Board.objects.create(name='Django', description='Django board.')
        user = User.objects.create_user(username='john', email='john@doe.com', password='123')
        topic = Topic.objects.create(subject='Hello, world', board=board, starter=user)
        Post.objects.create(message='Lorem ipsum dolor sit amet', topic=topic, created_by=user)
        url = reverse('topic_posts', kwargs={'pk': board.pk, 'topic_pk': topic.pk})
        self.response = self.client.get(url)

    def test_status_code(self):
        self.assertEquals(self.response.status_code, 200)

    def test_view_function(self):
        # NOTE(review): the module imports `topic_posts` but this asserts a
        # class-based PostListView — confirm which view is actually wired up.
        view = resolve('/boards/1/topics/1/')
        # Fix: the original source was missing the closing parenthesis here.
        self.assertEquals(view.func.view_class, PostListView)
| [
"vathsan.vk@gmail.com"
] | vathsan.vk@gmail.com |
398f050c5dd184fd74799218637e14792cb91eac | c166ede88bb38447b661bc3f30bcd4a1a68c0637 | /post/models.py | ae8b72c77afcdc1489137504533aadf88c344af7 | [] | no_license | Sherlock527/blog- | f432604af3d68ac43c8f30d9d0f35336f53564fc | f6c9872fd6c409460f7cf89ef3bfedde31d2377d | refs/heads/master | 2020-05-04T00:33:14.442394 | 2020-02-12T05:00:20 | 2020-02-12T05:00:20 | 178,886,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | from django.db import models
# Create your models here.
from user.models import User
class Post(models.Model):
    """Blog post: title, publication date and author; body lives in Content."""
    class Meta:
        db_table='post'
    id=models.AutoField(primary_key=True)
    title=models.CharField(max_length=256,null=False)
    postdate=models.DateTimeField(null=False)
    # Foreign key to the author; migrate generates an author_id column.
    # NOTE(review): Django >= 2.0 requires an explicit on_delete here — verify
    # the targeted Django version before upgrading.
    author=models.ForeignKey(User)
    # self.content resolves to the related Content instance (one-to-one);
    # its text is self.content.content.
    def __repr__(self):
        return '<Post id={} title={} author_id={} content={}>'.format(self.id,self.title,self.author_id, self.content)
    __str__=__repr__
class Content(models.Model):
    """Body text of a Post, stored one-to-one in its own table."""
    class Meta:
        db_table='content'
    # No explicit primary key: Django auto-creates an auto-increment id.
    # One-to-one relation: this side holds a post_id foreign key to post.id.
    post=models.OneToOneField(Post)
    content=models.TextField(null=False)
    def __repr__(self):
        # Repr shows only the first 20 characters of the body.
        return '<Content id={} content={} >'.format(self.post.id,self.content[:20])
    __str__ = __repr__
| [
"1971474563@qq.com"
] | 1971474563@qq.com |
557dd823f5842595cf89e5913043fe2de6b19efb | 96d56ea8559a82b7709f108751d234611abfd346 | /Guess_number.py | f02d75052846b07e4578221891421a9dff7d33c7 | [] | no_license | vishwasbeede/Python3-Programs-examples | 10b9800232a1dcdebb119fc69be918ab0b1298a5 | 6c9f54cf5dda8bae8abdf981737ddfa6705ab298 | refs/heads/master | 2023-05-14T12:30:17.716385 | 2023-05-08T06:36:33 | 2023-05-08T06:36:33 | 221,274,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | #Used random module to select a number from your input and this will match to yours input
import random
# Pool of candidate numbers the computer can pick from.
# NOTE(review): range(0, 6) yields 0..5, but the banner below tells the
# player to guess 1..5 — confirm which range is intended.
list_int = []
char_sep='-'
for i in range(0,6):
    list_int.append(i)
# Banner explaining how to play and how to stop (non-numeric input exits).
print ("{:<30} {} ".format(" ",(char_sep*100)))
print ("\n\n{:<30} Enter charecters for input to stop execution\n\n".format(" "))
print ("{:<30} Enter numbers in range of 1 to 5 \n\n".format(" "))
print ("{:<30} {} \n\n".format(" ",(char_sep*100)))
while True:
    # A fresh number is drawn on every round.
    user_number = random.choice(list_int)
    try:
        user_input_guess=int(input("Enter the number you guess: "))
    # Bare except doubles as the exit path: any non-numeric input
    # (or Ctrl-C) falls through here and terminates the program.
    except:
        print ("{:<30}Enter only numbers as input charecters".format(""))
        print ("{:<30}Stopping execution".format(""))
        exit(1)
    if user_number == user_input_guess:
        print ("{:<30}Congratulations guessed number correctly!!!!".format(" "))
    else:
        print ("{:<30}Sorry!!, you need to guess {}".format(" ",user_number))
| [
"noreply@github.com"
] | noreply@github.com |
e6cc8b2f9f4f193759e2a16a4b7d84f28a162423 | d87483a2c0b50ed97c1515d49d62c6e9feaddbe0 | /.history/get_positions_20210205021452.py | db0322e005440ad0d993a22856f8587be75cdf25 | [
"MIT"
] | permissive | HopperKremer/hoptrader | 0d36b6e33922414003cf689fb81f924da076a54b | 406793c10bc888648290fd15c7c2af62cf8c6c67 | refs/heads/main | 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | # Buy top tickers from Financhill
import requests
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import os, sys
import time
from selenium import webdriver
import json
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config # stored in parent directory for security
# Authenticate against TD Ameritrade using the cached OAuth token.
token_path = "token"
c = auth.client_from_token_file(token_path, config.api_key)

# Fetch the account together with its order history and dump it so the
# raw order structure can be inspected.
orders = client.Client.Account.Fields.ORDERS
r = c.get_account(config.tda_acct_num, fields=orders)
print(json.dumps(r.json(), indent=4))  # queued orders would appear here, if not blank list

# Collect the ids of every CANCELED order strategy on the account.
# (Fix: removed a stray bare `id` expression left behind here — it
# evaluated the builtin and did nothing.)
l = r.json()['securitiesAccount']['orderStrategies']
canceled_orders = [i['orderId'] for i in l if i['status'] == 'CANCELED']
print('canceled', canceled_orders)

# Dump each canceled order in full for inspection.
for order_id in canceled_orders:
    g = c.get_order(order_id, config.tda_acct_num)
    print(json.dumps(g.json(), indent=4))
"hopperkremer@gmail.com"
] | hopperkremer@gmail.com |
a10fce460d77a793e1bf0d8880ecf7cde15f4dbf | 222fdb05a31160b3039a8c3d192bedf5fd4fb122 | /Exercise_07:Chapter3 problem 3.12 实验报告.py | 8a80bcf5e99c4206081d9fb8113156aca1e2efcc | [] | no_license | XiaoxiaTao/compuational_physics_N2015301020157 | 0e0431ca981aef7261a8e8e858499a8867b7f68f | d30e9d5925e873f792d6adf7d38fb6dbe939efed | refs/heads/master | 2021-09-02T09:32:20.832192 | 2018-01-01T13:15:21 | 2018-01-01T13:15:21 | 103,544,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | from pylab import *
import numpy as np
l=9.8  # pendulum length (m); with g below, g/l = 1 s^-2
g=9.8  # gravitational acceleration (m/s^2)
dt=0.04  # integration time step (s)
q=0.5  # damping coefficient
F=1.2  # driving force amplitude
D=2.0/3.0  # driving force angular frequency
class pendulum:
    # Damped, sinusoidally driven pendulum integrated with the
    # Euler-Cromer scheme; the state history is kept in parallel lists.
    def __init__(self,w,x,t):
        # Initial angular velocity w, angle x (rad) and time t.
        self.w=[w]
        self.x=[x]
        self.t=[t]
    def update(self):
        global g,dt,l
        current_w=self.w[-1]
        current_x=self.x[-1]
        current_t=self.t[-1]
        # Euler-Cromer step: velocity from gravity, damping (q) and the
        # drive (amplitude F, frequency D); the angle then uses the
        # *updated* velocity. (np.sin vs bare pylab sin are equivalent here.)
        self.next_w=current_w-(g/l*np.sin(current_x)+q*current_w-F*sin(D*current_t))*dt
        self.next_x=current_x+self.next_w*dt
        self.next_t=current_t+dt
    def fire(self):
        # Integrate until t = 5000, wrapping the angle into (-pi, pi],
        # then plot the phase-space trajectory (angle vs. velocity).
        while (self.t[-1]<=5000):
            self.update()
            if self.next_x>np.pi:self.next_x+=-2*np.pi
            else:
                if self.next_x<-np.pi:self.next_x+=2*np.pi
                else:self.next_x=self.next_x
            self.w.append(self.next_w)
            self.x.append(self.next_x)
            self.t.append(self.next_t)
        plot(self.x,self.w,',')
# Run the first experiment: pendulum released at x = 3 rad with w = 0.
a=pendulum(0,3,0)
a.fire()
show()
# --- Second experiment: same model, but only phase-space points sampled
# --- at selected times are kept (Poincare-section style plot).
from pylab import *
import numpy as np
l=9.8
g=9.8
dt=0.04
q=0.5
F=1.2
D=2.0/3.0
class pendulum:
    # Same driven, damped pendulum as above, but additionally records a
    # sub-sampled set of phase-space points in the chosen_* lists.
    def __init__(self,w,x,t):
        self.w=[w]
        self.x=[x]
        self.t=[t]
        # Phase-space samples retained by the selection rule in fire().
        self.chosen_w=[]
        self.chosen_x=[]
        self.chosen_t=[]
    def update(self):
        global g,dt,l
        current_w=self.w[-1]
        current_x=self.x[-1]
        current_t=self.t[-1]
        # Euler-Cromer step (see the first pendulum class above).
        self.next_w=current_w-(g/l*np.sin(current_x)+q*current_w-F*sin(D*current_t))*dt
        self.next_x=current_x+self.next_w*dt
        self.next_t=current_t+dt
    def fire(self):
        # Integrate until t = 50000, wrap the angle into (-pi, pi], and
        # keep only points passing the time-selection test below.
        while (self.t[-1]<=50000):
            self.update()
            if self.next_x>np.pi:self.next_x+=-2*np.pi
            else:
                if self.next_x<-np.pi:self.next_x+=2*np.pi
                else:self.next_x=self.next_x
            self.w.append(self.next_w)
            self.x.append(self.next_x)
            self.t.append(self.next_t)
            # Selection rule: keep points where t*D is (nearly) a multiple
            # of pi and t mod pi is small — appears intended to sample in
            # phase with the drive (Poincare section); verify thresholds.
            test=((self.t[-1]*D)%np.pi)/np.pi
            test2=self.t[-1]-int(self.t[-1]/np.pi)*np.pi
            if (test<=0.01):
                if (test2<=1):
                    self.chosen_x.append(self.next_x)
                    self.chosen_w.append(self.next_w)
                    self.chosen_t.append(self.next_t)
                else:
                    pass
            else:
                pass
        # Plot only the selected points of the attractor.
        plot(self.chosen_x,self.chosen_w,',')
        #plot(self.x,self.w,',',label='Chaos')
        #plot(self.chosen_x,self.chosen_w)
# Run the second experiment with the same initial conditions.
a=pendulum(0,3,0)
a.fire()
#legend(loc='best')
show()
| [
"noreply@github.com"
] | noreply@github.com |
e5fe6c7970e5ab3c1ec59d4823406e86e6ad60d6 | b192fa5cca5faaace36b3cac52d67bd4b50d881c | /libs/myflick/routes.py | 06f6d1e655e7141283fcb14fb1c63707776faa59 | [] | no_license | mitemitreski/myflicks | f9ae3501607e3d321f7812731766c73f6fa6df3e | c95addd564d71c2267fbf029060d14848c225c7e | refs/heads/master | 2021-01-18T02:11:51.298371 | 2013-11-09T16:35:20 | 2013-11-09T16:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,757 | py | from werkzeug.routing import Map, Rule, EndpointPrefix
routes = [
EndpointPrefix('index|', [
Rule('/', endpoint='index')]),
EndpointPrefix('login|', [
Rule('/login/g', defaults = {'original_url': ''}, endpoint = 'g_request', methods = ['GET']),
Rule('/login/g/<path:original_url>', endpoint = 'g_request', methods = ['GET']),
Rule('/login/callback/g', endpoint = 'g_callback', methods = ['GET']),
Rule('/login/twitter', defaults = {'original_url': ''}, endpoint = 'twitter_request', methods = ['GET']),
Rule('/login/twitter/<path:original_url>', endpoint = 'twitter_request', methods = ['GET']),
Rule('/login/callback/twitter', defaults = {'original_url': ''}, endpoint = 'twitter_callback', methods = ['GET']),
Rule('/login/callback/twitter/<path:original_url>', endpoint = 'twitter_callback', methods = ['GET']),
Rule('/login/fb', defaults = {'original_url': ''}, endpoint = 'fb_request', methods = ['GET']),
Rule('/login/fb/<path:original_url>', endpoint = 'fb_request', methods = ['GET']),
Rule('/login/callback/fb', defaults = {'original_url': ''}, endpoint = 'fb_callback', methods = ['GET']),
Rule('/login/callback/fb/<path:original_url>', endpoint = 'fb_callback', methods = ['GET']),
Rule('/login/logout', defaults = {'original_url': ''}, endpoint = 'logout', methods = ['GET']),
Rule('/login/logout/<path:original_url>', endpoint='logout', methods=['GET'])]),
EndpointPrefix('mgmt|', [
Rule('/mgmt/update/movies', endpoint='update_movies', methods=['GET'])]),
EndpointPrefix('search|', [
Rule('/autocomplete/movie/<path:q>', endpoint='auto_movie', methods=['GET'])]),
EndpointPrefix('rating|', [
Rule('/user/rate', endpoint='user_rate', methods=['POST'])]),
EndpointPrefix('movie|', [
Rule('/movie/partial/<int:movie_id>', endpoint='partial', methods=['GET']),
Rule('/movie/<int:movie_id>-<path:dummy>', endpoint='show', methods=['GET']),
Rule('/movie/missing', endpoint='missing', methods=['GET']),
Rule('/movie/missing', endpoint='fill_missing', methods=['POST'])]),
EndpointPrefix('user|', [
Rule('/user/<int:user_id>-<path:dummy>', endpoint='show', methods=['GET']),
Rule('/home', endpoint='home', methods=['GET'])]),
EndpointPrefix('cast|', [
Rule('/director/<path:fullname>', defaults = {'crew': 'director'}, endpoint='show', methods=['GET']),
Rule('/screenwriter/<path:fullname>', defaults = {'crew': 'screenwriter'}, endpoint='show', methods=['GET']),
Rule('/actor/<path:fullname>', defaults = {'crew': 'actor'}, endpoint='show', methods=['GET'])])
]
url_map = Map(routes, strict_slashes = False)
| [
"b.petrushev@gmail.com"
] | b.petrushev@gmail.com |
7a3f9d1a7437cf258fd93efcfdfa3f3a3316d099 | 45ca434bdb9e48fdbb2cda0e7fdd9a76474117b0 | /aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetFileCacheExpiredConfigRequest.py | 55bd0c22970cd64217840720fb797559c0c97d7f | [
"Apache-2.0"
] | permissive | wanyanzhenjiang/aliyun-openapi-python-sdk | e41e9937ad3f851e5a58f6bea95663e88f7fee13 | 4a5bf1b35f2395d047ead4444ea46721976bdd24 | refs/heads/master | 2020-12-30T10:37:55.789911 | 2017-07-27T06:55:15 | 2017-07-27T06:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,989 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetFileCacheExpiredConfigRequest(RpcRequest):
    """RPC request object for CDN ``SetFileCacheExpiredConfig`` (API 2014-11-11).

    Each getter/setter pair below reads or writes one query parameter of
    the outgoing request.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'SetFileCacheExpiredConfig')

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_SecurityToken(self):
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self, SecurityToken):
        self.add_query_param('SecurityToken', SecurityToken)

    def get_DomainName(self):
        return self.get_query_params().get('DomainName')

    def set_DomainName(self, DomainName):
        self.add_query_param('DomainName', DomainName)

    def get_CacheContent(self):
        return self.get_query_params().get('CacheContent')

    def set_CacheContent(self, CacheContent):
        self.add_query_param('CacheContent', CacheContent)

    def get_TTL(self):
        return self.get_query_params().get('TTL')

    def set_TTL(self, TTL):
        self.add_query_param('TTL', TTL)

    def get_Weight(self):
        return self.get_query_params().get('Weight')

    def set_Weight(self, Weight):
        self.add_query_param('Weight', Weight)
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
a1e3abb3deba3ff508edaacfaadc44769ef7c1c3 | 3fd5a7b768be44f45fa535c124ab35df064e5eba | /python/template/thread_list.py | 00b9e7383aa15d8225234a70ee18640424004f04 | [] | no_license | thermitegod/shell-scripts | c5898ad2a877d4d007c33e645fd45528a1a35daf | 1fa14cd2c742097e95e3b44bfda445c4dbf5c136 | refs/heads/master | 2023-07-23T10:36:21.479669 | 2023-07-06T01:02:52 | 2023-07-06T01:03:44 | 152,808,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2018-2022 Brandon Zorn <brandonzorn@cock.li>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# SCRIPT INFO
# 1.1.0
# 2020-12-20
# copy to python/private/thread_list.py to use
from collections import namedtuple
class _ThreadList:
def __init__(self):
Threads = namedtuple('Threads', ['board', 'thread_number', 'save_dir'])
self.THREADS_4CHAN = (
# /a/
Threads('a', '', ''),
# Threads('a', '', ''),
# Threads('a', '', ''),
# /w/
Threads('w', '', ''),
# Threads('w', '', ''),
# Threads('w', '', ''),
)
self.THREADS_8KUN = (
# /tech/
# Threads('tech', '', ''),
)
ThreadList = _ThreadList()
| [
"thermitethegod@gmail.com"
] | thermitethegod@gmail.com |
b06a84b8b264cc5bc663484e33e4776b9343a656 | d1d02da85c052f4dd6909b51936b49d277bd564b | /backend/celery.py | 5fcb32598d860a6991b39a9d059fb2692fb19638 | [] | no_license | Inbaroth/AssistanceProgramsWatcher | 24c406fd04e050a8781dd9d658b3f5d3464ba9c1 | 99b62c025b1d6a97345c1f2c2a3180dc3d6f2fc3 | refs/heads/master | 2023-06-29T13:32:39.989496 | 2021-08-08T14:23:44 | 2021-08-08T14:23:44 | 382,018,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | import os
from celery import Celery
# Point Celery at the Django settings module before it loads configuration.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
# Celery application instance for the "backend" project.
app = Celery('backend')
# Read all CELERY_* options from Django settings.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Discover tasks.py modules in every installed Django app.
app.autodiscover_tasks()
"inbaroth1993@gmail.com"
] | inbaroth1993@gmail.com |
10b53f27063148b525f2477dbcd38b2f9b3b7bd4 | c97a15335e80e402b8b12f4ce24376ee5bb76e75 | /htu21d.py | 8a40b4842d116e79b593b8f49be279448d8481bb | [
"MIT"
] | permissive | kevinkk525/htu21d-esp8266 | cced054d6efdd83372f52c9781213339eaf19122 | 04563a1c9aff3f6dbad6665024cdac881eaa332f | refs/heads/master | 2021-09-16T22:04:35.820414 | 2018-06-25T11:48:44 | 2018-06-25T11:48:44 | 108,892,044 | 0 | 0 | null | 2017-10-30T18:31:49 | 2017-10-30T18:31:49 | null | UTF-8 | Python | false | false | 2,104 | py | from machine import I2C, Pin
import time
class HTU21D(object):
    """Driver for the HTU21D temperature/humidity sensor on an I2C bus."""

    ADDRESS = 0x40             # 7-bit I2C address of the sensor
    ISSUE_TEMP_ADDRESS = 0xE3  # trigger temperature measurement (hold master)
    ISSUE_HU_ADDRESS = 0xE5    # trigger humidity measurement (hold master)

    def __init__(self, scl=None, sda=None, i2c=None):
        """Initiate the HTU21D.

        Args:
            scl (int): Pin id where the scl pin is connected to
            sda (int): Pin id where the sda pin is connected to
            i2c: an already-configured I2C bus; when given, scl/sda are ignored.
        """
        if i2c is not None:
            self.i2c = i2c
        else:
            self.i2c = I2C(scl=Pin(scl), sda=Pin(sda), freq=100000)

    def _crc_check(self, value):
        """CRC-check sensor data.

        Notes:
            adapted from https://github.com/sparkfun/HTU21D_Breakout

        Args:
            value (bytearray): 3 bytes - the 16-bit reading plus its CRC byte

        Returns:
            True if valid, False otherwise
        """
        remainder = ((value[0] << 8) + value[1]) << 8
        remainder |= value[2]
        # Polynomial x^8 + x^5 + x^4 + 1 (0x131), pre-shifted to bit 23.
        divisor = 0x988000
        for i in range(0, 16):
            if remainder & 1 << (23 - i):
                remainder ^= divisor
            divisor >>= 1
        # A zero remainder means data and checksum are consistent.
        return remainder == 0

    def _issue_measurement(self, write_address):
        """Issue a measurement and return the raw 14-bit reading.

        Args:
            write_address (int): command register to trigger (temp or humidity)

        Raises:
            ValueError: if the CRC check on the sensor response fails.
        """
        self.i2c.start()
        # NOTE(review): passing '' as the payload relies on the MicroPython
        # port accepting a str buffer here -- confirm on the target board.
        self.i2c.writeto_mem(int(self.ADDRESS), int(write_address), '')
        self.i2c.stop()
        data = bytearray(3)
        time.sleep_ms(50)  # allow the conversion to complete
        self.i2c.readfrom_into(self.ADDRESS, data)
        if not self._crc_check(data):
            raise ValueError()
        raw = (data[0] << 8) + data[1]
        raw &= 0xFFFC  # mask out the two status bits
        return raw

    def temperature(self):
        """Return the temperature in degrees Celsius."""
        raw = self._issue_measurement(self.ISSUE_TEMP_ADDRESS)
        return -46.85 + (175.72 * raw / 65536)

    def humidity(self):
        """Return the relative humidity in percent."""
        raw = self._issue_measurement(self.ISSUE_HU_ADDRESS)
        return -6 + (125.0 * raw / 65536)
| [
"noreply@github.com"
] | noreply@github.com |
2cd2ec93edcfebd515d0dd45bdf1acdd8df14937 | 69b238a8a332033e9a595cb684ef46fc57fb33c2 | /home/migrations/0001_initial.py | df58552bb438ce1841d15acbf361e7f5f04c7e01 | [] | no_license | sebastianbila/electronic_museum | 0688cd76f51a123a5592c73d3093d815b888d829 | 118ba7fb5a6657e52d5af6790f28a4fcb644ca16 | refs/heads/master | 2022-11-15T22:59:53.544338 | 2020-07-14T06:37:42 | 2020-07-14T06:37:42 | 279,503,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # Generated by Django 3.0.5 on 2020-05-03 16:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Feedback`` table."""

    # First migration for this app, so no dependencies on earlier ones.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Feedback',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('email', models.CharField(max_length=250)),
                ('message', models.TextField()),
                # Timestamp set once when the row is first saved.
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"sebii_sw@Sebastians-MacBook-Pro.local"
] | sebii_sw@Sebastians-MacBook-Pro.local |
fb7d5bf4a453cf483c36820ec233a56926f63930 | c7e765a9bed33d3bfb21774e3995bf4a09e04add | /adminmgr/media/code/A2/python/task/BD_1117_1375_1419_1525.py | ea86b27f17cbd91f3957294e879438d4f68c005f | [
"Apache-2.0"
] | permissive | IamMayankThakur/test-bigdata | 13dd2ac7fb76c9baed6c3a0aa943057a22e2d237 | 7f507918c7bec31c92eedcd94491a83486623049 | refs/heads/master | 2022-05-03T00:59:44.127494 | 2022-02-10T19:50:16 | 2022-02-10T19:50:16 | 201,585,028 | 10 | 4 | Apache-2.0 | 2022-04-22T23:39:45 | 2019-08-10T05:34:09 | Python | UTF-8 | Python | false | false | 2,562 | py | from __future__ import print_function
import re
import sys
from operator import add
from pyspark.sql import *
def calcRank(BatBowl, rank):
    """Yield (node, share) pairs distributing *rank* evenly over *BatBowl*."""
    node_count = len(BatBowl)
    for node in BatBowl:
        yield node, float(rank) / float(node_count)
checking = 1  # NOTE(review): appears unused in this script -- confirm before removing
def batbowlKeyValue(x):
    """Split a CSV line and return its first two fields as a (key, value) pair."""
    fields = x.split(',')
    key, value = fields[0], fields[1]
    return key, value
def batbowlRank(x):
    """Split a CSV line and return (field[1], field[2]/field[3]) as an initial rank."""
    fields = x.split(',')
    ratio = float(fields[2]) / float(fields[3])
    return fields[1], ratio
if __name__ == "__main__" :
    # Usage: <input file> <iterations; <=0 means run to convergence> <damping percent>
    if len(sys.argv) != 4:
        sys.exit(-1)
    spark = SparkSession.builder.appName("Bowlerrank").getOrCreate()
    # One RDD element per input line.
    lol = spark.read.text(sys.argv[1]).rdd.map(lambda x : x[0])
    # (key, value) adjacency pairs, grouped by key.
    lol2 = lol.map(lambda x: batbowlKeyValue(x)).distinct().groupByKey().cache()
    lol_temp = lol.map(lambda x: batbowlRank(x)).distinct().groupByKey()
    # Initial rank per key, floored at 1.0.
    bowr = lol_temp.map(lambda x : (x[0], max(sum(x[1]),1.00)))
    itcount = 0
    bowr_temp = bowr
    noi = int(sys.argv[2])
    if (noi <= 0) :
        # Iterate until every rank changes by less than 1e-4 between rounds.
        while True:
            lol3 = lol2.join(bowr).flatMap(lambda x : calcRank(x[1][0], x[1][1]))
            perc = int(sys.argv[3])
            if(perc!=0):
                # NOTE(review): float(perc/100) truncates to 0 for perc < 100 under
                # Python 2 integer division, whereas the fixed-iteration branch below
                # uses float(perc)/100.00 -- confirm which damping factor is intended.
                bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*(float(perc/100)) + 1-(float(perc/100)))
            else:
                bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*0.8 + 0.2)
            #for wolverine, iron_man in bowr.collect():
            #    print("%s has rank: %s." % (wolverine, iron_man))
            temp = bowr.join(bowr_temp)
            temp2 = temp.collect()
            flag = 0
            # Count keys whose rank is effectively unchanged this round.
            for i in temp2:
                if(abs(i[1][0]-i[1][1])<0.0001):
                    flag = flag + 1
                else:
                    break
            itcount = itcount + 1
            bowr_temp = bowr
            if flag==len(temp2):
                break
    else:
        # Run a fixed number of iterations.
        t = int(sys.argv[2])
        for _ in range(t):
            lol3 = lol2.join(bowr).flatMap(lambda x : calcRank(x[1][0], x[1][1]))
            perc = int(sys.argv[3])
            if(perc!=0):
                bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*(float(perc)/100.00) + 1-(float(perc)/100.00))
            else:
                bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*0.8 + 0.2)
    # Sort by rank descending (ties broken by name) and print "name,rank".
    bowr = bowr.sortBy(lambda x : (-x[1],x[0]))
    for wolverine, iron_man in bowr.collect():
        print("%s,%.12f" % (wolverine, iron_man))
    #print("...................................",itcount,"...............................................")
    spark.stop()
| [
"ubuntu@ip-172-31-18-251.ap-south-1.compute.internal"
] | ubuntu@ip-172-31-18-251.ap-south-1.compute.internal |
a05dd3fb251fe56180bf527dd125d49e78ed2f08 | 9fba88f0f177ec82e8ad2a37fbbd3a73ce761bd5 | /llord_core_app/urls.py | 226236ad8921a0eedb8ea54588ada5088c4584ed | [] | no_license | dasysad/llord | fc5d36077eb781e964871898b472a66790981260 | c421ededfa7dee2caa6da9ad9b17503280cb20c3 | refs/heads/master | 2021-01-18T14:28:41.535329 | 2014-09-26T08:56:21 | 2014-09-26T08:56:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | # llord_core_app.urls
from django.conf.urls import patterns, url
from llord_core_app import views
# URL routes for the core app: index, per-user page, per-property page.
urlpatterns = patterns('',
    # ex: /
    url(r'^$', views.index, name='index'),
    # ex: /username/
    url(r'^user/(?P<username>\w+)/$', views.user, name='user'),
    # ex: /property/832NorthWoodstockStreet/
    url(r'^property/(?P<property_slug>\w+)/$', views.property, name='property'),
)
| [
"jkdevops@gmail.com"
] | jkdevops@gmail.com |
de584c4eb19bb366f817c87559e42f7c262ffe1d | 64a2e19c11929e9077a8c99e8d388de279e512e9 | /testRunner/runner.py | 0278655d701a52dba205cd68ffe58b706039b381 | [] | no_license | wallaceok/appiumn_auto | 60f8a2b152a27c39cabf12529345909979527115 | e543a662897c0eedfafdea64297947aa6de45539 | refs/heads/master | 2020-12-24T06:00:26.338592 | 2016-11-08T10:04:04 | 2016-11-08T10:04:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,471 | py | __author__ = 'Administrator'
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
import datetime
import xlsxwriter
import time
import unittest
from common import reportPhone
from testRunner.runnerBase import TestInterfaceCase, ga
from testCase.Home import testHome
from testCase.work import testContact
from testCase.web.comment import testComment
from testBLL import email as b_email
from testBLL import server
from testBLL import adbCommon
from testMode import email as memail
from testBLL import report as b_report
from testBLL import appBase
from testBLL import apkBase
from testMode import report as m_report
from common.variable import GetVariable as common
from common import dataToString
import os
def PATH(p):
    """Resolve *p* relative to this script's directory to an absolute path.

    Converted from a lambda assigned to a name (PEP 8 / E731) with
    identical behaviour for all callers.
    """
    return os.path.abspath(os.path.join(os.path.dirname(__file__), p))
def get_email():
    """Build the e-mail settings object from email.ini next to the project root."""
    m_email = memail.GetEmail()
    m_email.file = PATH( '../email.ini' )
    email = b_email.read_email(m_email)
    return email
def get_app_msg(f=r"D:\app\appium_study\img\t.apk"):
    """Return the APK metadata tuple (name, size, version) as consumed by get_common_report."""
    return apkBase.apkInfo(f).get_app_msg()
def get_common_report(start_test_time, endtime, starttime):
    """Collect app/phone/test metrics into a report object and store it.

    Args:
        start_test_time: formatted wall-clock start time string.
        endtime/starttime: datetimes bounding the suite run.
    """
    mreport = m_report.GetReport()
    b_get_hp_info = appBase.get_phone_info()
    raw = appBase.get_men_total(r"d:\men.log")
    app_msg = get_app_msg(PATH( '../img/t.apk'))
    mreport.test_sum = common.test_sum
    mreport.test_failed = common.test_failed
    mreport.test_success = common.test_success
    # NOTE(review): the 6-second offset presumably discounts server startup -- confirm.
    mreport.test_sum_date = str((endtime - starttime).seconds-6) +"秒"
    mreport.app_name = app_msg[0]
    mreport.app_size = app_msg[1]
    mreport.phone_name = b_get_hp_info["phone_name"] +" " + b_get_hp_info["phone_model"]
    mreport.phone_rel =b_get_hp_info["release"]
    mreport.phone_pix = appBase.get_app_pix()
    mreport.phone_raw = reportPhone.phone_raw(raw/1024)
    print(common.MEN)
    avg_men = appBase.get_avg_raw(common.MEN)  # average memory used per sample
    mreport.phone_avg_use_raw = avg_men
    mreport.phone_max_use_raw = reportPhone.phone_max_use_raw(common.MEN)
    mreport.phone_cpu = appBase.get_cpu_kel()
    mreport.phone_avg_use_cpu = reportPhone.phone_avg_use_cpu(common.CPU)
    mreport.phone_avg_max_use_cpu = reportPhone.phone_avg_max_use_cpu(common.CPU)
    mreport.app_version = app_msg[2]
    mreport.test_date = start_test_time
    mreport.fps_max = reportPhone.fps_max(common.FPS)
    mreport.fps_avg = reportPhone.fps_avg(common.FPS)
    b_report.OperateReport().set_report(mreport)
def get_common_web_report(start_test_time, endtime, starttime):
    """Placeholder for the web-test report; not implemented yet."""
    pass
def runnerCaseWeb():
    """Run the web (selenium) test suite."""
    suite = unittest.TestSuite()
    starttime = datetime.datetime.now()  # NOTE(review): unused -- the web run records no duration
    suite.addTest(TestInterfaceCase.parametrize(testComment))
    unittest.TextTestRunner(verbosity=2).run(suite)
def runnerCaseApp():
    """Run the app (appium) test suite, then build and send the report."""
    start_test_time = dataToString.getStrTime(time.localtime(), "%Y-%m-%d %H:%M %p")
    suite = unittest.TestSuite()
    starttime = datetime.datetime.now()
    suite.addTest(TestInterfaceCase.parametrize(testHome))
    # suite.addTest(TestInterfaceCase.parametrize(testContact))
    unittest.TextTestRunner(verbosity=2).run(suite)
    endtime = datetime.datetime.now()
    get_common_report(start_test_time, endtime, starttime)
    report()
def report():
    """Write the collected report to GetReport.xlsx and e-mail it."""
    workbook = xlsxwriter.Workbook('GetReport.xlsx')
    worksheet = workbook.add_worksheet("测试总况")
    worksheet2 = workbook.add_worksheet("测试详情")
    print(common.RRPORT)
    b_OperateReport = b_report.OperateReport(wd=workbook, data=common.RRPORT)
    b_OperateReport.init(worksheet)
    b_OperateReport.detail(worksheet2)
    b_OperateReport.close()
    b_email.send_mail(get_email())
if __name__ == '__main__':
    # Appium/Android run: requires an attached device and a running Appium server.
    if ga.selenium_appium == common.APPIUM and ga.platformName == common.ANDROID :
        if adbCommon.attached_devices():
            appium_server = server.AppiumServer(ga.appiumJs, ga.Remote,ga.selenium_appium)
            appium_server.start_server()
            # Poll until the server reports it is running.
            while not appium_server.is_runnnig():
                time.sleep(2)
            runnerCaseApp()
            appium_server.stop_server()
        else:
            print(u"设备不存在")
    # Selenium/web run: start the selenium server, run the web suite, stop it.
    if ga.selenium_appium == common.SELENIUM:
        appium_server = server.AppiumServer(ga.selenium_jar, ga.sel_remote, ga.selenium_appium)
        appium_server.start_server()
        while not appium_server.is_runnnig():
            time.sleep(2)
        runnerCaseWeb()
        appium_server.stop_server()
| [
"284772894@qq.com"
] | 284772894@qq.com |
70a52ddad8d7f3c331c89b56dda190218f419e88 | befe40af7fcd3fc56f794357ab5cb75fc96fe3db | /blog/forms.py | 7e1fa1aaf84ccaf5cb0518e601ccd92f475db77d | [] | no_license | cacheson/my-first-blog | 63d11daaeb5d4f34740a72d50adeabf109e61fd0 | 7a0f5da3d65f9afe6ae4a81582b9ae681cb225ea | refs/heads/master | 2020-04-21T14:32:04.615646 | 2019-02-13T18:04:23 | 2019-02-13T18:04:23 | 169,638,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from django import forms
from .models import Post
class PostForm(forms.ModelForm):
    """Model form for creating/editing a blog Post (title and body text)."""

    class Meta:
        model = Post
        fields = ('title', 'text')
| [
"colin@nullptr.com"
] | colin@nullptr.com |
990f33373020f071da1a6715454e1c6db5635576 | 96c259497ca0430893782f5a735b86aec2b4858b | /linkedlist/single-linkedlist/delet-oparation2/delete_any_node.py | 9d8fdf48a89d3366cd99db17c4d2dc4a19411bf9 | [] | no_license | Dhavade/Data-structure | 08415d4b432d15ddbdd5ed5dd2f8fcb87cc4555a | 176f13804bb7bb66794137858878213543a3441b | refs/heads/main | 2023-07-12T23:22:34.864789 | 2021-08-15T11:02:50 | 2021-08-15T11:02:50 | 396,316,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,936 | py | class Node:
    def __init__(self,data):
        # Payload value stored in this node.
        self.data=data
        # Link to the next node; None marks the tail.
        self.ref=None
class linkedlist:
    """A singly linked list supporting insertion and deletion at both ends."""

    def __init__(self):
        self.head = None  # first node; None means the list is empty

    def print_ll(self):
        """Print every element, separated by '----->'."""
        if self.head is None:
            print("linked list is empty !")
        else:
            n = self.head
            while n is not None:
                print(n.data ,end="----->")
                n = n.ref

    def add_beginnning(self, data):
        """Insert *data* at the front of the list."""
        new_node = Node(data)
        new_node.ref = self.head
        self.head = new_node

    def add_end(self, data):
        """Append *data* at the end of the list."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        else:
            n = self.head
            while n.ref is not None:
                n = n.ref
            n.ref = new_node

    def delete_begin(self):
        """Remove the first node, if any."""
        if self.head is None:
            print("LL is empty so we cant delete node !")
        else:
            self.head = self.head.ref

    def delete_last(self):
        """Remove the last node, if any.

        Bug fix: the original dereferenced ``n.ref.ref`` unconditionally and
        raised AttributeError on a single-element list; that case now empties
        the list instead.
        """
        if self.head is None:
            print("LL is empty so we cant delete node !")
        elif self.head.ref is None:
            # Exactly one node: the list becomes empty.
            self.head = None
        else:
            n = self.head
            while n.ref.ref is not None:
                n = n.ref
            n.ref = None

    def delete_any(self, x):
        """Delete the first node whose data equals *x*, if present."""
        if self.head is None:
            print("LL is empty so we cant delete node !")
            return
        if x == self.head.data:
            self.head = self.head.ref
            return
        n = self.head
        # Stop with n just before the node to delete (if found).
        while n.ref is not None:
            if x == n.ref.data:
                break
            n = n.ref
        if n.ref is None:
            print("Node is not present !")
        else:
            n.ref = n.ref.ref
# Build a small demo list and exercise the delete operations.
ll1=linkedlist()
ll1.add_beginnning(18)
ll1.add_beginnning(15)
ll1.add_beginnning(58)
#ll1.delete_begin()
#ll1.delete_last()
ll1.delete_any(15)
#ll1.delete_last()
ll1.print_ll()
"noreply@github.com"
] | noreply@github.com |
dc816389c06442347a202791e2f3ecfc4e43a317 | 2cd06e44dd79b45708ddf010c31289458d850b94 | /test/functional/feature_maxuploadtarget.py | b5a44cbc6b5cb6b89aca3c4c47d2ce7ef4634a00 | [
"MIT"
] | permissive | adymoloca/flocoin | bc66233e5b3b1af294ca6719b4a26f8829d682e4 | d9244577577dede975c852f6fcfe1afba4d71a57 | refs/heads/master | 2023-08-21T23:51:28.266695 | 2021-10-06T01:40:10 | 2021-10-06T01:40:10 | 408,609,250 | 0 | 0 | MIT | 2021-09-30T10:11:53 | 2021-09-20T21:45:28 | C++ | UTF-8 | Python | false | false | 6,653 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata
from test_framework.p2p import P2PInterface
from test_framework.test_framework import FlocoinTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
    """P2P connection that counts how often each block hash is received."""

    def __init__(self):
        super().__init__()
        # Maps block sha256 -> number of times that block was received.
        self.block_receive_map = defaultdict(int)

    def on_inv(self, message):
        # Ignore inventory announcements; this test only pulls blocks via getdata.
        pass

    def on_block(self, message):
        message.block.calc_sha256()
        self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(FlocoinTestFramework):
    """Functional test for the -maxuploadtarget limit (see the module docstring)."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[
            "-maxuploadtarget=800",
            "-acceptnonstdtxn=1",
            "-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise
        ]]
        self.supports_cli = False

        # Cache for utxos, as the listunspent may take a long time later in the test
        self.utxo_cache = []

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2*60*60*24*7)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # p2p_conns[0] will only request old blocks
        # p2p_conns[1] will only request new blocks
        # p2p_conns[2] will test resetting the counters
        p2p_conns = []

        for _ in range(3):
            p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))

        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)

        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # p2p_conns[0] will test what happens if we just keep requesting the
        # the same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block))

        max_bytes_per_day = 800*1024*1024
        daily_buffer = 144 * 4000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 576MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~235 tries.
        for i in range(success_count):
            p2p_conns[0].send_and_ping(getdata_request)
            assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for _ in range(3):
            p2p_conns[0].send_message(getdata_request)
        p2p_conns[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info("Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on p2p_conns[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 800 times
        getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
        for i in range(800):
            p2p_conns[1].send_and_ping(getdata_request)
            assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if p2p_conns[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
        p2p_conns[1].send_message(getdata_request)
        p2p_conns[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and p2p_conns[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        p2p_conns[2].sync_with_ping()
        p2p_conns[2].send_and_ping(getdata_request)
        assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)

        self.log.info("Peer 2 able to download old block")

        self.nodes[0].disconnect_p2ps()

        self.log.info("Restarting node 0 with download permission and 1MB maxuploadtarget")
        self.restart_node(0, ["-whitelist=download@127.0.0.1", "-maxuploadtarget=1"])

        # Reconnect to self.nodes[0]
        peer = self.nodes[0].add_p2p_connection(TestP2PConn())

        #retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
        for i in range(20):
            peer.send_and_ping(getdata_request)
            assert_equal(peer.block_receive_map[big_new_block], i+1)

        getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
        peer.send_and_ping(getdata_request)

        self.log.info("Peer still connected after trying to download old block (download permission)")
        peer_info = self.nodes[0].getpeerinfo()
        assert_equal(len(peer_info), 1) # node is still connected
        assert_equal(peer_info[0]['permissions'], ['download'])
if __name__ == '__main__':
    # Standard functional-test entry point.
    MaxUploadTest().main()
| [
"adymoloca91@gmail.com"
] | adymoloca91@gmail.com |
7af5ac811bb16f3ee4877eb9643bb8ebc1f166c1 | 2b2d3b1fccccab25e381f142f74a933bb188a3a6 | /messages_generator.py | 0f918a3c5489fda2f0534ef1ab97472e80222380 | [
"MIT"
] | permissive | gilwo/rabbitmq_messaging_using_kombu | b3d45456b9ebee17ce1e0d1dec29c605dd95098f | 82f48a66c62b2fedef1c384777db5690be454ef9 | refs/heads/master | 2021-01-10T11:23:02.575937 | 2016-01-23T17:30:30 | 2016-01-23T17:30:30 | 48,984,229 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | from enum import Enum
import time
class lvl(Enum):
    """Message levels; names double as routing keys ('_' replaced by '.' below)."""
    # Log levels
    log_error = 1
    log_warning = 2
    log_info = 3
    log_debug = 4
    log_trace = 5
    # Event levels
    event_alert = 6
    event_activity = 7
    event_trace = 8
#lvl_list = [(x.name, x.value) for x in lvl]
lvl_list = [x.name for x in lvl]

count = 0
# Emit one numbered message per level, cycling through all levels forever;
# pause two seconds after every 20 messages.
while True:
    print("msg (%s : %s) route %s" % (count,
                                      lvl_list[count % len(lvl_list)],
                                      lvl_list[count % len(lvl_list)].replace('_', '.')))
    count += 1
    if count % 20 == 0:
        time.sleep(2)
| [
"gilwo@users.noreply.github.com"
] | gilwo@users.noreply.github.com |
fc1b4270b320bbc07d5577cbc2b0f37f06a14176 | 8e56e6b926db7e3465b9887a399db9b378fa08a6 | /tests/run_flake8/oneline.py | 2ba224328019a70169f9405f8eb9f2d38f093a4f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | keisheiled/flake8-no-implicit-concat | 308573f918e9932ec8d8c282adc881b58fc25692 | b565e990eed61b6918f910e96167d2c844e5d03a | refs/heads/master | 2022-12-07T11:11:38.243270 | 2020-08-18T20:19:38 | 2020-08-18T20:19:38 | 290,342,412 | 0 | 0 | null | 2020-08-25T23:04:08 | 2020-08-25T23:04:07 | null | UTF-8 | Python | false | false | 16 | py | a = "aaa" "bbb"
| [
"8.slashes@gmail.com"
] | 8.slashes@gmail.com |
81c62b50755df3ada006f5fa494ea575c97bbf2d | c199c5954092cb714b7e1e780d6b3d9d246034fd | /2019141470414雒彬赫/代码部分/text(gui).py | 7cac60151a371bf1fa9d947ce339ed9cbd17e781 | [] | no_license | guan-yunxiao/Homework01 | 62c14f94d2d005c00a508b7030cf686f15e72d92 | 2aaa899b58b84cccc0f514b6e913bdb5147a1e83 | refs/heads/main | 2023-06-17T18:02:00.782859 | 2021-07-07T04:07:39 | 2021-07-07T04:07:39 | 380,497,605 | 0 | 0 | null | 2021-06-27T07:18:44 | 2021-06-26T12:33:51 | null | UTF-8 | Python | false | false | 2,689 | py | #!/usr/bin/env python3
#coding: utf-8
import smtplib
from tkinter import *
from email.mime.text import MIMEText
from email.header import Header
mail_host = "smtp.163.com"  # SMTP server
mail_user = "luobinhe_vip.com"  # account user name
mail_pass = "OTJZCUFHJIEZNWVH"  # SMTP authorization code  # NOTE(review): secret committed in source -- move to config
me = "luobinhe_vip" + "<" + "luobinhe_vip@163.com" + ">"  # From header: display name <address>
'''发送函数'''
def sendmail(mail_receiver, mail_subject, mail_content):
    """Send a plain-text mail through the configured 163.com SMTP account.

    Args:
        mail_receiver: one address (str) or a list of addresses.
        mail_subject: subject line.
        mail_content: plain-text body.
    """
    # 'utf-8' is required for Chinese text; single-byte charsets would not need it.
    msg = MIMEText(mail_content, 'plain', 'utf-8')
    msg['Subject'] = mail_subject
    msg['From'] = me
    # Bug fix: joining a plain string inserted ';' between every character;
    # only join real recipient lists.
    if isinstance(mail_receiver, str):
        msg['To'] = mail_receiver
    else:
        msg['To'] = ";".join(mail_receiver)
    try:
        smtp = smtplib.SMTP()
        # Consistency fix: use the configured host instead of a duplicated literal.
        smtp.connect(mail_host)
        smtp.login(mail_user, mail_pass)
        smtp.sendmail(mail_user, mail_receiver, msg.as_string())
        smtp.quit()
    except smtplib.SMTPException:
        print("Error: 邮件发送错误")
'''可视化界面'''
def client():
    """Build and run the Tkinter GUI for composing and sending a mail."""
    top = Tk()
    top.title("邮件发送客户端")
    top.geometry('600x700')
    '''发送人'''
    # Sender row (fixed account).
    Label(top, text="发送人:", bg="yellow",font="等线", width=10, height=1).place(x=30, y=30)
    Label(top, text="luobinhe_vip@163.com",font="等线",bg="white", width=20, height=1).place(x=170, y=30)
    '''接收人'''
    # Receiver entry.
    Label(top, text="接收人:", bg="yellow",font="等线",width=10, height=1).place(x=30,y=70)
    receiver_entry = Entry(top,width=50)
    receiver_entry.place(x=170,y=70)
    '''主题'''
    # Subject entry.
    Label(top, text="主题:", bg="yellow",font="等线",width=10, height=1).place(x=30,y=110)
    subject_entry = Entry(top, width=50)
    subject_entry.place(x=170, y=110)
    '''内容'''
    # Message body text area.
    Label(top, text="内容:", bg="yellow",font="等线",width=10, height=1).place(x=30,y=150)
    content_text = Text(top,width=60,height=20)
    content_text.place(x=30,y=190)
    def clearcontent():
        # Clear the body text widget.
        content_text.delete('0.0','end')
    def send():
        # Collect the form fields and hand off to sendmail().
        receiver = receiver_entry.get()
        subject = subject_entry.get()
        content = content_text.get('0.0','end')
        # Crude address validation: require an '@'.
        if "@" in receiver:
            try:
                sendmail(receiver,subject,content)
                print("邮件已发送")
            except IOError:
                print("发送失败")
        else:
            print("邮箱格式不对\n请确认接收人邮箱")
    '''按钮'''
    # Buttons: clear the body / send the mail.
    Button(top,text="清空",bd=5,font="等线",width=10,command=clearcontent).place(x=30,y=460)
    Button(top,text="发送",bd=5,font="等线",width=10,command=send).place(x=170,y=460)
    top.mainloop()
if __name__ == '__main__':
    # Launch the GUI when run as a script.
    client()
| [
"noreply@github.com"
] | noreply@github.com |
4e1103b9de170a9e585a6f4fad3737a42afb29b4 | 4712c51f51fa6a6d6afad4fb0fcfceaecbedac64 | /main.py | 5c218d1fadd527d43e57caebaa51f297930ce2e4 | [] | no_license | kitowskiv/build-a-blog | e19de8b6ff99129d3e9a5f51ef501cae62581ab5 | 895d7a59a4832c9826ac6fa00d485d8a9b443a5f | refs/heads/master | 2020-03-10T03:29:15.921559 | 2018-04-15T21:22:22 | 2018-04-15T21:22:22 | 129,166,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,265 | py | from flask import Flask, request, redirect, render_template, session, flash
from flask_sqlalchemy import SQLAlchemy
# Flask application and database configuration.
app = Flask(__name__)
app.config['DEBUG'] = True
# MySQL connection for the build-a-blog schema (PyMySQL driver).
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:build-a-blog@localhost:8889/build-a-blog'
# Echo generated SQL to the console (debugging aid).
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# NOTE(review): secret key committed in source -- move to configuration.
app.secret_key = 'A3s5g7jdgw'
#creates blog table in database
class Blog(db.Model):
    """A blog post: auto-increment id, title (max 120 chars), and body text."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120))
    body = db.Column(db.Text)

    def __init__(self, title, body):
        self.title = title
        self.body = body
@app.route('/blog', methods=['POST', 'GET'])
def index():
    """Blog listing page; with ?id=<n> it renders that single post instead."""
    blogid = request.args.get('id')
    if blogid:
        blog = Blog.query.get(blogid)
        return render_template('singlepost.html',title="Post",blog=blog)
    # Newest posts first.
    blogs = Blog.query.filter_by().order_by(Blog.id.desc()).all()
    return render_template('blog.html',title="Build-a-blog", blogs=blogs)
def input_error(input):
    """Return True when the submitted form field is empty, else False.

    Returns an explicit bool; the original returned True or implicitly None,
    which is equivalent under the truthiness checks used by callers.
    """
    return input == ""
@app.route('/blog?id={{blog.id}}', methods=['POST', 'GET'])
def singlepost():
    # NOTE(review): the route above is a literal string -- Flask performs no
    # template substitution on '{{blog.id}}' and route rules do not match
    # query strings, so this view is likely never reached as intended.
    # It also returns None for GET requests.
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        return render_template('singlepost.html',title_error=title,body_error=body)
@app.route('/newpost', methods=['POST', 'GET'])
def newpost():
    """Render the new-post form; on POST, validate both fields and persist.

    On success, redirect to the newly created post's single-post page.
    """
    title_error = ""
    body_error = ""
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        newblog = Blog(title, body)
        if input_error(title):
            title_error = "Title Required"
        if input_error(body):
            body_error = 'Body Required'
        if not input_error(title) and not input_error(body):
            db.session.add(newblog)
            db.session.commit()
            # commit() assigns the auto-increment id used to build the URL.
            singlepost = "/blog?id=" + str(newblog.id)
            return redirect(singlepost)
    return render_template('newpost.html', title="Add a New Post",
                            title_error=title_error,
                            body_error=body_error)
@app.route('/')
def reroute():
    """Redirect the site root to the blog listing."""
    return redirect('/blog')
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
"vincent.kitowski@gmail.com"
] | vincent.kitowski@gmail.com |
2883cab45c71d7b3cbcdf254f7b13f0a93302c69 | 4e7d38e15be88396b8e581ba07b9b006af93b92c | /Python3/Studying/Lesson_1/alphabet_and_numbers.py | d01ebce2466d7feea65fe59f004ab889288b6402 | [] | no_license | KathrynMorgan/sort | f24287d3cdf5508b42728696e97a19c50b236296 | 0d25b65a88e3df3af57a6c29ae6ede26e049f380 | refs/heads/master | 2020-04-04T17:10:52.579786 | 2019-01-03T07:24:05 | 2019-01-03T07:24:05 | 156,110,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | #!/usr/bin/env python3
# Make a variable into a multi line string; where does this variable get used?
first = ''' Tell me the alphabet!
Please please please?
What are the first 4?
'''
second = '''
But wait, that looks funny!\n\
Can we put them in order?\n\
'''
# We are going to count and play with the alphabet!
print(first, '\n c, a, b, d!')
# Can we do better?
print(second)
# Set your base number & echo the value to screen
# What happens here when you use the semicolon?
# What commentary can you provide on this syntax?
c = 1; print('The starting count is:',c)
# Now let's keep count and say them in order
# What's the difference between the first, second, and third counter sum syntax?
# Do all syntax(es) work?
print(c,'= a')
c = c + 1
print(c,'= b')
c = \
c + 1
print(c,'= c')
c = \
c + 1
print(c, '= d')
# Remove the "#" from the last line (do not remove the leading space)
# Try to run the program, what happens?
# Repeat the 4th count!
# print(c, '= d')
| [
"kathryn.morgan@braincraft.io"
] | kathryn.morgan@braincraft.io |
af55d7f605f4b9d9e2e61aa0dd31a3b47dce5974 | 20be530509901864ff0b26185ad761b34fb074ee | /untitled/Helloworldmethods.py | c1682db4a07bb394e434514e7ff684dd50347b5e | [] | no_license | Shehab-Magdy/PycharmProjects | cd3f6fe5969dbc76fddaf05cacbec237633a4ec7 | 531e5f4e6c070c8b67eb57e357c075d4258b880f | refs/heads/master | 2020-07-08T12:27:18.191190 | 2019-09-04T21:15:43 | 2019-09-04T21:15:43 | 203,671,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | def addnum(x, y):
return x + y
def subnum(x, y):
return x - y
def mulnum(x, y):
return x * y
def divnum(x, y):
if y == 0:
print('You can not dived by zero')
else:
return x / y
| [
"cegres1@hotmail.com"
] | cegres1@hotmail.com |
79ea1a80639a702a5cfc9746da6bbb6585267637 | f3b32b7bffa362cfac1d5bdba213f72b6818ef75 | /ruby.py | 2a4a87fbca2a291e18f35e94819614c5456528bd | [
"MIT"
] | permissive | pombredanne/rubypython-1 | ec3c635e5e07eaf74d2b8f0e3c304ce0b7daccb3 | c7252fbd188253d6ca92616d72914a21550a97cb | refs/heads/master | 2020-12-30T15:41:25.624816 | 2009-06-13T17:22:58 | 2009-06-13T17:22:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,619 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.31
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _ruby
import new
new_instancemethod = new.instancemethod
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'PySwigObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
rb_eval = _ruby.rb_eval
rb2py = _ruby.rb2py
| [
"valentin.mihov@gmail.com"
] | valentin.mihov@gmail.com |
dc4e498d5e94244fea4ccc62a2671836d7858c62 | 438f8490be1fa3818daad38254a77bb11ba367b3 | /project/settings.py | 6b500d837b395a5d1f67ad16fa8a0d8088cd6b65 | [] | no_license | n7ey233/maximanat | 2ea0b3c80729dd10e6023b053523ebe7e6ba22d8 | 812d7396fe64af85f86e2dd5e257935bde8719e2 | refs/heads/master | 2020-04-13T10:13:01.382912 | 2018-12-28T06:31:19 | 2018-12-28T06:31:19 | 163,133,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-@xf882li3g_x28_oqt5(=fj8b$*2*9*$hm3(17g^#(klc7pgg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'maximanat.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"you@example.com"
] | you@example.com |
e599701513fbd7c6bddc7060f99be29af6a44ae9 | 2dc9fe3283b8da25ae76dfd05f971f47b6b8061f | /univariate/univar1a_LearnApply_rt/levels12.py | be4d123d518699b3cc9af6aa38ec1a940190f6ff | [
"MIT"
] | permissive | dpaniukov/RulesFPC | bf773c76463b701f09a03714dcf7a91021e6f4d3 | 3de3fcfd899ac45fe399a805942e17784d99515b | refs/heads/master | 2021-01-11T14:13:37.320455 | 2017-12-01T18:51:42 | 2017-12-01T18:51:42 | 81,146,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,713 | py | #!/usr/bin/env python
import os, sys # system functions
import nipype.interfaces.io as nio # Data i/o
from nipype.interfaces.io import DataSink
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.ants as ants
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.utility as util # utility
import nipype.algorithms.modelgen as model # model generation
import errno
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
"""
Project info
"""
# Subject to run
subj_list = str(sys.argv[1])
project_dir = "/mnt/net/LaCie/Analysis/RuleSwitch/"
work_dir = "/Users/dmitrii/scratch/RuleSwitch/"
model_id = '_univar1a_LearnApply_rt'
task_id = 1
TR = 2.0
# fwhm_thr=5.999541516002418 #to compare with FSL
fwhm_thr = 8.0
hpcutoff = 100
film_thr = 1000 # default in FSL
film_ms = 5 # this should be Susan mask size, not fwhm
template_brain = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
template_mask = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
wf = pe.Workflow(name='wf')
wf.base_dir = os.path.join(work_dir, "wdir" + str(model_id) + "lvl12")
wf.config = {"execution": {"crashdump_dir": os.path.join(project_dir, 'crashdumps')}}
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = ('subject_id', [subj_list])
def get_subjectinfo(subject_id, base_dir, task_id, model_id):
import os
import numpy as np
if subject_id == "Subject003" or subject_id == "Subject011" or subject_id == "Subject016" or subject_id == "Subject020":
run_id = [2, 3, 4, 5, 6, 7, 8]
itk_id = list(np.array(run_id) - 2)
evs_l2 = dict(ev001=[1, 1, 1, 0, 0, 0, 0], ev002=[0, 0, 0, 1, 1, 1, 1])
elif subject_id == "Subject019":
run_id = [1, 2, 3, 4, 5, 6]
itk_id = list(np.array(run_id) - 1)
evs_l2 = dict(ev001=[1, 1, 1, 1, 0, 0], ev002=[0, 0, 0, 0, 1, 1])
else:
run_id = [1, 2, 3, 4, 5, 6, 7, 8]
itk_id = list(np.array(run_id) - 1)
evs_l2 = dict(ev001=[1, 1, 1, 1, 0, 0, 0, 0], ev002=[0, 0, 0, 0, 1, 1, 1, 1])
# Conditions for level 1
condition_info = []
cond_file = os.path.join(base_dir, 'models', 'model%s' % model_id,
'condition_key.txt')
with open(cond_file, 'rt') as fp:
for line in fp:
info = line.strip().split()
condition_info.append([info[0], info[1], ' '.join(info[2:])])
if len(condition_info) == 0:
raise ValueError('No condition info found in %s' % cond_file)
taskinfo = np.array(condition_info)
n_tasks = len(np.unique(taskinfo[:, 0]))
conds = []
if task_id > n_tasks:
raise ValueError('Task id %d does not exist' % task_id)
for idx in range(n_tasks):
taskidx = np.where(taskinfo[:, 0] == 'task%03d' % (idx + 1))
conds.append([condition.replace(' ', '_') for condition
in taskinfo[taskidx[0], 2]])
# Conditions for level 2
condition_info_l2 = []
cond_file_l2 = os.path.join(base_dir, 'models', 'model%s' % model_id,
'condition_key_l2.txt')
with open(cond_file_l2, 'rt') as fp_l2:
for line in fp_l2:
info_l2 = line.strip().split()
condition_info_l2.append([info_l2[0], info_l2[1], ' '.join(info_l2[2:])])
if len(condition_info_l2) == 0:
raise ValueError('No condition info found in %s' % cond_file_l2)
taskinfo_l2 = np.array(condition_info_l2)
n_tasks_l2 = len(np.unique(taskinfo_l2[:, 0]))
conds_l2 = []
if task_id > n_tasks_l2:
raise ValueError('Task id %d does not exist' % task_id)
for idx in range(n_tasks_l2):
taskidx_l2 = np.where(taskinfo_l2[:, 0] == 'task%03d' % (idx + 1))
conds_l2.append([condition_l2.replace(' ', '_') for condition_l2
in taskinfo_l2[taskidx_l2[0], 2]])
return subject_id, model_id, task_id, run_id, conds[task_id - 1], itk_id, evs_l2, conds_l2[task_id - 1]
subjinfo = pe.Node(util.Function(input_names=['subject_id', 'base_dir', 'task_id', 'model_id'],
output_names=['subject_id', 'model_id', 'task_id', 'run_id', 'conds', 'itk_id',
'evs_l2', 'conds_l2'],
function=get_subjectinfo),
name='subjectinfo')
subjinfo.inputs.base_dir = project_dir
subjinfo.inputs.task_id = task_id
subjinfo.inputs.model_id = model_id
datasource = pe.Node(nio.DataGrabber(infields=['subject_id', 'model_id', 'task_id', 'run_id', 'itk_id'],
outfields=['func', 'struct', 'behave', 'contrasts', 'contrasts_l2', 'confound',
'itk_transform', 'composite_transform']), name='grabber')
datasource.inputs.base_directory = project_dir
datasource.inputs.template = '*'
datasource.inputs.field_template = dict(func='%s/bold/run%d/run*_mcf_brain.nii.gz',
struct='%s/anatomy/highres001_BrainExtractionBrain.nii.gz',
behave='%s/model/model%s/onsets/task%03d_run%d/ev*.txt',
contrasts='models/model%s/task_contrasts.txt',
contrasts_l2='models/model%s/task_contrasts_l2.txt',
confound='%s/bold/run%d/confound.txt',
itk_transform='reg/%s/bold/func2standard_mat/_subject_id_%s/_convert2itk%d/affine.txt',
composite_transform='reg/%s/anatomy/anat2standard_mat/_subject_id_%s/output_Composite.h5')
datasource.inputs.template_args = dict(func=[['subject_id', 'run_id']],
struct=[['subject_id']],
behave=[['subject_id', 'model_id', 'task_id', 'run_id']],
contrasts=[['model_id']],
contrasts_l2=[['model_id']],
confound=[['subject_id', 'run_id']],
itk_transform=[['subject_id', 'subject_id', 'itk_id']],
composite_transform=[['subject_id', 'subject_id']])
datasource.inputs.sort_filelist = True
def check_behav_list(behav, run_id):
import numpy as np
run_num = len(run_id)
if isinstance(behav, (str, bytes)):
behav = [behav]
behav_array = np.array(behav).flatten()
num_elements = behav_array.shape[0]
behav_array = behav_array.reshape(run_num, int(num_elements / run_num)).tolist()
return behav_array
reshape_behav = pe.Node(util.Function(input_names=['behav', 'run_id'],
output_names=['behav'],
function=check_behav_list),
name='reshape_behav')
wf.connect([(infosource, subjinfo, [('subject_id', 'subject_id')]), ])
wf.connect(subjinfo, 'subject_id', datasource, 'subject_id')
wf.connect(subjinfo, 'model_id', datasource, 'model_id')
wf.connect(subjinfo, 'task_id', datasource, 'task_id')
wf.connect(subjinfo, 'run_id', datasource, 'run_id')
wf.connect(subjinfo, 'itk_id', datasource, 'itk_id')
wf.connect(datasource, 'behave', reshape_behav, 'behav')
wf.connect(subjinfo, 'run_id', reshape_behav, 'run_id')
"""
Setup preprocessing workflow
----------------------------
Set up a node to define all inputs required for the preprocessing workflow
"""
inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
'struct', ]),
name='inputspec')
wf.connect([(datasource, inputnode, [('struct', 'struct'), ('func', 'func'), ]), ])
"""
Convert functional images to float representation. Since there can be more than
one functional run we use a MapNode to convert each run.
"""
prefiltered_func_data = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
op_string='',
suffix='_dtype'),
iterfield=['in_file'],
name='prefiltered_func_data')
wf.connect(inputnode, 'func', prefiltered_func_data, 'in_file')
"""
Determine the 2nd and 98th percentile intensities of each functional run
"""
getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'),
iterfield=['in_file'],
name='getthreshold')
wf.connect(prefiltered_func_data, 'out_file', getthresh, 'in_file')
"""
Threshold the first run of the functional data at 10% of the 98th percentile
"""
threshold = pe.MapNode(interface=fsl.ImageMaths(out_data_type='char',
suffix='_thresh'),
iterfield=['in_file'],
name='threshold')
"""
Define a function to get 10% of the intensity
"""
def getthreshop(thresh):
return '-thr %.10f -Tmin -bin' % (0.1 * thresh[0][1])
wf.connect(prefiltered_func_data, 'out_file', threshold, 'in_file')
wf.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string')
"""
Determine the median value of the functional runs using the mask
"""
medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
iterfield=['in_file', 'mask_file'],
name='medianval')
wf.connect(prefiltered_func_data, 'out_file', medianval, 'in_file')
wf.connect(threshold, 'out_file', medianval, 'mask_file')
"""
Dilate the mask
"""
dilatemask = pe.MapNode(interface=fsl.ImageMaths(suffix='_dil',
op_string='-dilF'),
iterfield=['in_file'],
name='dilatemask')
wf.connect(threshold, 'out_file', dilatemask, 'in_file')
"""
Mask the motion corrected functional runs with the dilated mask
"""
prefiltered_func_data_thresh = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file', 'in_file2'],
name='prefiltered_func_data_thresh')
wf.connect(prefiltered_func_data, 'out_file', prefiltered_func_data_thresh, 'in_file')
wf.connect(dilatemask, 'out_file', prefiltered_func_data_thresh, 'in_file2')
"""
Determine the mean image from each functional run
"""
meanfunc2 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
suffix='_mean'),
iterfield=['in_file'],
name='meanfunc2')
wf.connect(prefiltered_func_data_thresh, 'out_file', meanfunc2, 'in_file')
"""
Merge the median values with the mean functional images into a coupled list
"""
# Yes, it is Node with iterfield! Not MapNode.
mergenode = pe.Node(interface=util.Merge(2, axis='hstack'),
iterfield=['in1', 'in2'],
name='merge')
wf.connect(meanfunc2, 'out_file', mergenode, 'in1')
wf.connect(medianval, 'out_stat', mergenode, 'in2')
"""
Smooth each run using SUSAN with the brightness threshold set to 75% of the
median value for each run and a mask constituting the mean functional
"""
smooth = pe.MapNode(interface=fsl.SUSAN(),
iterfield=['in_file', 'brightness_threshold', 'usans'],
name='smooth')
smooth.inputs.fwhm = fwhm_thr
"""
Define a function to get the brightness threshold for SUSAN
"""
def getbtthresh(medianvals):
return [0.75 * val for val in medianvals]
def getusans(x):
return [[tuple([val[0], 0.75 * val[1]])] for val in x]
wf.connect(prefiltered_func_data_thresh, 'out_file', smooth, 'in_file')
wf.connect(medianval, ('out_stat', getbtthresh), smooth, 'brightness_threshold')
wf.connect(mergenode, ('out', getusans), smooth, 'usans')
"""
Mask the smoothed data with the dilated mask
"""
maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file', 'in_file2'],
name='maskfunc3')
wf.connect(smooth, 'smoothed_file', maskfunc3, 'in_file')
wf.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')
"""
Scale each volume of the run so that the median value of the run is set to 10000
"""
intnorm = pe.MapNode(interface=fsl.ImageMaths(suffix='_intnorm'),
iterfield=['in_file', 'op_string'],
name='intnorm')
"""
Define a function to get the scaling factor for intensity normalization
"""
def getinormscale(medianvals):
return ['-mul %.10f' % (10000. / val) for val in medianvals]
wf.connect(maskfunc3, 'out_file', intnorm, 'in_file')
wf.connect(medianval, ('out_stat', getinormscale), intnorm, 'op_string')
"""
Create tempMean
"""
tempMean = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
suffix='_mean'),
iterfield=['in_file'],
name='tempMean')
wf.connect(intnorm, 'out_file', tempMean, 'in_file')
"""
Perform temporal highpass filtering on the data
"""
highpass = pe.MapNode(
interface=fsl.ImageMaths(op_string='-bptf %d -1 -add' % (hpcutoff / (2 * TR)), suffix='_tempfilt'),
iterfield=['in_file', 'in_file2'],
name='highpass')
wf.connect(tempMean, 'out_file', highpass, 'in_file2')
wf.connect(intnorm, 'out_file', highpass, 'in_file')
"""
Set up LEVEL 1
--------------
"""
"""
Setup a basic set of contrasts
"""
def get_contrasts(contrast_file, task_id, conds):
import numpy as np
contrast_def = np.genfromtxt(contrast_file, dtype=object)
if len(contrast_def.shape) == 1:
contrast_def = contrast_def[None, :]
contrasts = []
for row in contrast_def:
if row[0] != 'task%03d' % task_id:
continue
con = [row[1], 'T', ['ev%03d' % (i + 1) for i in range(len(conds))],
row[2:].astype(float).tolist()]
contrasts.append(con)
return contrasts
contrastgen = pe.Node(util.Function(input_names=['contrast_file',
'task_id', 'conds'],
output_names=['contrasts'],
function=get_contrasts),
name='contrastgen')
wf.connect(subjinfo, 'conds', contrastgen, 'conds')
wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file')
wf.connect(subjinfo, 'task_id', contrastgen, 'task_id')
"""
Set up model fitting workflow
-----------------------------
Use :class:`nipype.algorithms.modelgen.SpecifyModel` to generate design information.
"""
modelspec = pe.MapNode(interface=model.SpecifyModel(), iterfield=['event_files', 'functional_runs'], name="modelspec")
modelspec.inputs.input_units = 'secs'
modelspec.inputs.high_pass_filter_cutoff = hpcutoff
modelspec.inputs.time_repetition = TR
wf.connect(reshape_behav, 'behav', modelspec, 'event_files')
wf.connect(highpass, 'out_file', modelspec, 'functional_runs')
"""
Use :class:`nipype.interfaces.fsl.Level1Design` to generate a run specific fsf
file for analysis
"""
level1design = pe.MapNode(interface=fsl.Level1Design(), iterfield=['session_info'], name="level1design")
level1design.inputs.interscan_interval = TR
level1design.inputs.bases = {'dgamma': {'derivs': True}}
level1design.inputs.model_serial_correlations = True
wf.connect(contrastgen, 'contrasts', level1design, 'contrasts')
wf.connect(modelspec, 'session_info', level1design, 'session_info')
"""
Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat
file for use by FILMGLS
"""
modelgen = pe.MapNode(interface=fsl.FEATModel(), name='modelgen',
iterfield=['fsf_file', 'ev_files', 'args'])
wf.connect(level1design, 'ev_files', modelgen, 'ev_files')
wf.connect(level1design, 'fsf_files', modelgen, 'fsf_file')
wf.connect(datasource, 'confound', modelgen, 'args')
"""
Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a
mat file and a functional run
"""
modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True,
mask_size=film_ms,
threshold=film_thr),
name='modelestimate',
iterfield=['design_file', 'in_file', 'tcon_file'])
wf.connect([(highpass, modelestimate, [('out_file', 'in_file')]),
(modelgen, modelestimate, [('design_file', 'design_file')]),
])
wf.connect(modelgen, 'con_file', modelestimate, 'tcon_file')
"""
Level 2
-----------------------------
Apply Registration
Here we merge copes, varcopes, masks and transformation matrices for each run to register them appropriately.
Then we split them back to merge in time and use in flameo.
"""
"""
Merge transforms
"""
merge_mat = pe.MapNode(util.Merge(2), iterfield=['in2'], name='merge_mat')
wf.connect(datasource, 'itk_transform', merge_mat, 'in2')
wf.connect(datasource, 'composite_transform', merge_mat, 'in1')
def warp_files(copes, varcopes, mat, template_brain):
import nipype.interfaces.ants as ants
out_copes = []
out_varcopes = []
warp = ants.ApplyTransforms()
warp.inputs.input_image_type = 0
warp.inputs.interpolation = 'Linear'
warp.inputs.invert_transform_flags = [False, False]
warp.inputs.terminal_output = 'file'
warp.inputs.reference_image = template_brain
warp.inputs.transforms = mat
for cope in copes:
warp.inputs.input_image = cope
res = warp.run()
out_copes.append(str(res.outputs.output_image))
for varcope in varcopes:
warp.inputs.input_image = varcope
res = warp.run()
out_varcopes.append(str(res.outputs.output_image))
return out_copes, out_varcopes
warpfunc = pe.MapNode(util.Function(input_names=['copes', 'varcopes', 'mat', 'template_brain'],
output_names=['out_copes', 'out_varcopes'],
function=warp_files),
iterfield=['copes', 'varcopes', 'mat'],
name='warpfunc')
warpfunc.inputs.template_brain = template_brain
wf.connect(modelestimate, 'copes', warpfunc, 'copes')
wf.connect(modelestimate, 'varcopes', warpfunc, 'varcopes')
wf.connect(merge_mat, 'out', warpfunc, 'mat')
"""
Setup a set of contrasts for level 2.
"""
def sort_copes(files):
numelements = len(files[0])
outfiles = []
for i in range(numelements):
outfiles.insert(i, [])
for j, elements in enumerate(files):
outfiles[i].append(elements[i])
return outfiles
def get_contrasts_l2(contrast_file, task_id, conds, evs_l2, copes):
import numpy as np
contrast_def = np.genfromtxt(contrast_file, dtype=object)
if len(contrast_def.shape) == 1:
contrast_def = contrast_def[None, :]
contrasts = []
for row in contrast_def:
if row[0] != 'task%03d' % task_id:
continue
con = [row[1], 'T', ['ev%03d' % (i + 1) for i in range(len(conds))],
row[2:].astype(float).tolist()]
contrasts.append(con)
# create EVs for the next MapNode
evs_l2 = [evs_l2] * len(copes)
return contrasts, evs_l2
contrastgen_l2 = pe.Node(util.Function(input_names=['contrast_file', 'task_id', 'conds', 'evs_l2', 'copes'],
output_names=['contrasts', 'evs_l2'],
function=get_contrasts_l2),
name='contrastgen_l2')
wf.connect(subjinfo, 'conds_l2', contrastgen_l2, 'conds')
wf.connect(datasource, 'contrasts_l2', contrastgen_l2, 'contrast_file')
wf.connect(subjinfo, 'task_id', contrastgen_l2, 'task_id')
wf.connect(subjinfo, 'evs_l2', contrastgen_l2, 'evs_l2')
wf.connect(warpfunc, ('out_copes', sort_copes), contrastgen_l2, 'copes')
"""
Flameo has a bug. If one level-1 cope is empty (0 0 0 in EV), flameo produces all copes. Check for the bug and fix it.
"""
def zero_copes_check(evs_l2, copes, varcopes):
import numpy as np
from nipype.interfaces.fsl import ImageStats
# check if copes are empty and fix them
concat_copes = []
concat_varcopes = []
evs_l2_new_dict = {}
for j in range(len(evs_l2)):
evs_l2_new_dict[list(evs_l2)[j]] = []
for i in range(len(copes)):
stats = ImageStats(in_file=copes[i], op_string='-R')
res = stats.run()
if res.outputs.out_stat != [0., 0.]:
concat_copes.append(copes[i])
concat_varcopes.append(varcopes[i])
for j in range(len(evs_l2)):
evs_l2_new_dict[list(evs_l2)[j]].append(evs_l2[list(evs_l2)[j]][i])
return evs_l2_new_dict, concat_copes, concat_varcopes
empty_copes_check = pe.MapNode(util.Function(input_names=['evs_l2', 'copes', 'varcopes'],
output_names=['evs_l2', 'copes', 'varcopes'],
function=zero_copes_check),
iterfield=['evs_l2', 'copes', 'varcopes'],
name='empty_copes_check')
wf.connect(contrastgen_l2, 'evs_l2', empty_copes_check, 'evs_l2')
wf.connect(warpfunc, ('out_copes', sort_copes), empty_copes_check, 'copes')
wf.connect(warpfunc, ('out_varcopes', sort_copes), empty_copes_check, 'varcopes')
"""
Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and
varcopes for each condition
"""
copemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
iterfield=['in_files'],
name="copemerge")
varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
iterfield=['in_files'],
name="varcopemerge")
wf.connect(empty_copes_check, 'copes', copemerge, 'in_files')
wf.connect(empty_copes_check, 'varcopes', varcopemerge, 'in_files')
"""
Use MultipleRegressDesign to generate subject and condition
specific level 2 model design files
"""
level2model = pe.MapNode(interface=fsl.MultipleRegressDesign(),
iterfield=['regressors'],
name='l2model')
wf.connect(contrastgen_l2, 'contrasts', level2model, 'contrasts')
wf.connect(empty_copes_check, 'evs_l2', level2model, 'regressors')
"""
Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model
"""
flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe'), name="flameo",
iterfield=['cope_file', 'var_cope_file', 'design_file', 't_con_file','cov_split_file'])
pickfirst = lambda x: x[0]
wf.connect([(copemerge, flameo, [('merged_file', 'cope_file')]),
(varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
(level2model, flameo, [('design_mat', 'design_file'),
('design_con', 't_con_file'),
('design_grp', 'cov_split_file')]),
])
flameo.inputs.mask_file = template_mask
"""
Saving
"""
datasink = pe.Node(nio.DataSink(), name='sinker')
datasink.inputs.base_directory = os.path.join(project_dir, "level2s", "model" + model_id)
wf.connect(infosource, 'subject_id', datasink, 'container')
wf.connect(highpass, 'out_file', datasink, 'filtered_func_data')
wf.connect([(flameo, datasink, [('stats_dir', 'stats_dir')])])
"""
RUN
"""
# outgraph = wf.run()
outgraph = wf.run(plugin='MultiProc')
| [
"dpanyukov@gmail.com"
] | dpanyukov@gmail.com |
2084462af2fa1dad98ccd92d6717403220a68ca4 | 216111be5a87140b04af1166bc3e50494f15a1c6 | /BDA/bda_algorithm.py | f3b3e485dfe392ed0ae37af6736588b3d65433d2 | [] | no_license | onelieV/pyREE | bcda31dd172dc2ee3cc85c3c604304677a3403c4 | a16f0b8b65152f62a24f52dadba69c28b945329c | refs/heads/master | 2023-07-12T12:36:15.789971 | 2021-08-14T14:49:05 | 2021-08-14T14:49:05 | 396,035,581 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,314 | py | # -*- coding: utf-8 -*-
"""
Created by: Veloci
Created on: 2021/3/11
"""
import numpy as np
from copy import deepcopy
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from Handler.evaluation import Evaluator
from Handler.loadDS import load_ds_name
import warnings
warnings.filterwarnings('ignore')
class BDA(object):
def __init__(self, X, y, clf, cv, outpath="dataset"):
self.X, self.y, self.clf, self.cv = X, y, clf, cv
self.evar = Evaluator(X, y, clf, cv, outpath)
self.maxEva = max(3000, 100 * int(X.shape[1] ** 0.5))
print("\n最大评估次数:", self.maxEva)
self.A = np.arange(X.shape[1])
def _score_sub(self, ind):
"""
:param ind: ind must be [0,1,..,0,1] type array.
:return:
"""
ind = np.array(ind)
subset = self.A[np.where(ind == 1)]
eva = self.evar.evaluate(subset)
return 0.99 * (1 - eva) + 0.01 * sum(ind) / len(self.A)
def fit(self):
"""
https://github.com/JingweiToo/Binary-Dragonfly-Algorithm-for-Feature-Selection/blob/main/jBDA.m
https://ww2.mathworks.cn/matlabcentral/fileexchange/51032-bda-binary-dragonfly-algorithm
:return:
"""
# self.evar.evaluate(self.A.tolist())
dim = self.X.shape[1] # 特征维数
max_Iter = 100 # 最大迭代次数
N = int(self.maxEva / max_Iter) # 种群数量 >3000/100
X = np.random.randint(0, 2, size=(N, dim)) # 初始化
DX = np.zeros_like(X)
fitF, fitE = np.inf, -np.inf
fits = np.zeros(N)
Xnew = np.zeros_like(X)
Dmax = 6
for t in range(max_Iter):
for i in range(N):
fits[i] = self._score_sub(X[i])
if fits[i] < fitF:
fitF = fits[i]
Xf = X[i]
if fits[i] > fitE:
fitE = fits[i]
Xe = X[i]
w = 0.9 - t * ((0.9 - 0.4) / max_Iter)
rate = 0.1 - t * ((0.1 - 0) / (max_Iter / 2))
if rate < 0:
rate = 0
s = 2 * np.random.rand() * rate
a = 2 * np.random.rand() * rate
c = 2 * np.random.rand() * rate
f = 2 * np.random.rand()
e = rate
Xn, DXn = deepcopy(X), deepcopy(DX)
for i in range(N):
S = -np.sum(Xn - X[i], axis=0)
A = (np.sum(DXn, axis=0) - DXn[i]) / (N - 1)
C = (np.sum(Xn, axis=0) - Xn[i]) / (N - 1) - X[i]
F = Xf - X[i]
E = Xe + X[i]
DX[i] = s * S + a * A + c * C + f * F + e * E + w * DX[i]
DX[i][DX[i] > Dmax] = Dmax
DX[i][DX[i] < -Dmax] = -Dmax
TF = np.abs(DX[i] / (DX[i] ** 2 + 1) ** 0.5)
Xnew[i] = X[i]
index = np.random.rand(*TF.shape) < TF
Xnew[i][index] = 1 - X[i][index]
X = deepcopy(Xnew)
if __name__ == '__main__':
dataset = 'Yale'
X, y = load_ds_name(dataset)
clf = KNeighborsClassifier(n_neighbors=5)
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
slctr = BDA(X, y, clf, cv, outpath=dataset)
slctr.fit()
| [
"veileno@163.com"
] | veileno@163.com |
5b161e0a9d07b0bddab72ace77e7c27caff8d41a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/89/usersdata/202/62093/submittedfiles/matriz1.py | f2273f8e67d6acee35c26497ab06b264203a4c29 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # -*- coding: utf-8 -*-
import numpy as np
def cortel1(a):
for i in range(0,a.shape[0],1):
for i in range (0,a.shape[1],1):
if a[i,j]==1:
return i
def cortel2(a):
for j in range(0,a.shape[1],1):
for i in range (0,a.shape[0],1):
if a[i,j]==1:
return j
def cortec1(a):
for j in range (0,a.shape[1],1):
for i in range(0,a.shape[0],1):
if a[i,j]==1:
c2=j
return c2
linhas=int(input('linhas:'))
colunas=int(input('colunas:'))
a=np.zeros((linhas,colunas))
for i in range(0,a.shape[0],1):
for j in range (0,a.shape[1],1):
a[i,j]=int(input('valor:'))
l1=cortel1(a)
l2=cortel2(a)
c1=cortec1(a)
c2=cortec2(a)
print(a[l1:l2+1,c1:c2+1])
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
baf30ad7daf3aed7b41444d3901edb49ed8366f9 | c5e96f413274d91c948e034d1638f1ff05a53df2 | /tests_static.py | e8884b934d79a7b3756b034618b19856b3a69745 | [] | no_license | TransHealthInformationOttawa/backend | e85ac27531d6381e28c15fd83848d464ec88cd19 | 7315f0ed139d80efb742aab060861aaef4274a82 | refs/heads/master | 2021-07-12T02:33:52.345921 | 2017-10-15T16:09:01 | 2017-10-15T16:09:01 | 106,946,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,792 | py | from send_message import upcomingMessages
adrian = {
"id" : "22313131",
"name" : "Adrian",
"lastMessageSent" : "3",
"enabled" : "true"
,
"phone" : "+12267482821",
"schedules" : [{
"id" : "1",
"dayOfWeek": 1, # Monday
"hour": 12,
"minute": 30
},
{
"id" : "2",
"dayOfWeek": 2, # Tuesday
"hour": 12,
"minute": 30
}
,{
"id" : "3",
"dayOfWeek": 6, # Saturday
"hour": 12,
"minute": 30
}
,{
"id" : "4",
"dayOfWeek": 6, # Saturday
"hour": 17,
"minute": 50
}
,{
"id" : "5",
"dayOfWeek": 6, # Saturday
"hour": 17,
"minute": 46
}
,{
"id" : "6",
"dayOfWeek": 6, # Saturday
"hour": 17,
"minute": 12
}
,{
"id" : "7",
"dayOfWeek": 6, # Saturday
"hour": 15,
"minute": 6
}
,{
"id" : "8",
"dayOfWeek": 6, # Saturday
"hour": 15,
"minute": 0
}
,{
"id" : "9",
"dayOfWeek": 6, # Saturday
"hour": 16,
"minute": 7
}],
"messages" : [
{
"id" : "1",
"message" : "You're awesome"
},
{
"id" : "2",
"message" : "You're great"
},
{
"id" : "3",
"message" : "You're super"
},
{
"id" : "4",
"message" : "You're special"
},
{
"id" : "5",
"message" : "You're fantastic"
},
{
"id" : "6",
"message" : "You're loved"
},
{
"id" : "7",
"message" : "You're fun"
},
{
"id" : "8",
"message" : "You're sweet"
},
]
}
evan = {
"id" : "3257535",
"name" : "Evan",
"lastMessageSent" : "0",
"enabled" : "true"
,
"phone" : "+12267482821",
"schedules" : [{
"id" : "1",
"year": 2017,
"dayOfWeek": 1, # Monday
"dayOfMonth": 14
},
{
"id" : "2",
"year": 2017,
"dayOfWeek": 2, # Tuesday
#"month": null,
#"dayOfMonth": null,
"hour": 12,
"minute": 30
}
,{
"id" : "3",
"year": 2018,
"dayOfWeek": 6, # Saturday
"hour": 12,
"minute": 30
}
,{
"id" : "4",
"dayOfWeek": 6, # Saturday
"month": 2,
"hour": 13,
"minute": 30
}
,{
"id" : "5",
"dayOfMonth": 2,
"hour": 15,
"minute": 16
}
,{
"id" : "6",
"minute": 0
}
,{
"id" : "7",
"dayOfMonth": 6, # Saturday
"hour": 15,
"minute": 6
}
,{
"id" : "8",
"dayOfWeek": 6, # Saturday
"month": 4,
"hour": 16,
"minute": 15
}
,{
"id" : "9",
"month": 2,
"dayOfMonth": 29
}],
"messages" : [
{
"id" : "1",
"message" : "You gots this!"
},
{
"id" : "2",
"message" : "Keep up the great work!"
}
]
}
upcomingMessages(evan)
upcomingMessages(evan)
upcomingMessages(adrian)
upcomingMessages(adrian)
upcomingMessages(adrian)
| [
"rhartviksen@industrialagency.ca"
] | rhartviksen@industrialagency.ca |
02dc84b6704747ecf7200f764a9e9413a4ba1696 | 1118fb238b9e933c16ffd41d95e04443d4cc037d | /codechef/python/appyandchef.py | 630cb83b43bd7315b9574f766227a308ba378404 | [] | no_license | Poojavpatel/PythonCodes | 8a38d02594be1dd9ef618cdb796041e693c0f00a | b4c4c7e700d0aaae750120c612b908360037748f | refs/heads/master | 2020-03-28T11:21:53.874927 | 2019-02-28T16:20:54 | 2019-02-28T16:20:54 | 148,205,576 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | # import sys
# t= int(sys.stdin.readline())
t=int(input())
# print(t)
while(t):
n, a, b, k= map(int, input().split())
c=0
# print(n,a,b,k)
for i in range(1,n+1):
if((i%a==0 and i%b!=0) or (i%b==0 and i%a!=0)):
c=c+1
print("Win") if(c>=k) else print("Lose")
t=t-1 | [
"poojavpatel71@gmail.com"
] | poojavpatel71@gmail.com |
a69cb72e82394c61b79df9557717090f8713f8ba | bc20757d76e5550575417b4fa0d0afeafd4f074f | /lab3_2020/venv/bin/easy_install | f29852d6d878d1f1b6982f5ae7befb35933f9d62 | [
"MIT"
] | permissive | mihaimusat/ASC-labs-2020 | 9d4aa08d83bed55d20bfc3d8b22ca8c09b8bbf23 | b6a2b9c4238bb5f81fdedbc078d4ae3d5b3ddc41 | refs/heads/master | 2022-05-30T23:13:15.035950 | 2020-05-05T06:58:10 | 2020-05-05T06:58:10 | 256,525,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | #!/home/mihaimusat/Desktop/lab3_2020/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
# Auto-generated console-script shim: resolves the `easy_install` entry point
# from the pinned setuptools distribution and invokes it.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip "-script.py"/".pyw"/".exe" suffixes from argv[0] so usage/help
    # text shows the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"mihai_robert.musat@stud.acs.upb.ro"
] | mihai_robert.musat@stud.acs.upb.ro | |
81677e2a018f5302ece7a50d88dc468123fed354 | 6204f7d3b98631c13b1312fdde2d858a09830bde | /run_console.py | 301d549c0ba8e207657105baf6eddc4c89a02ae2 | [
"BSD-3-Clause"
] | permissive | php-technolog/flask-uniteller | dc35423988e3d5dced2107798c2d70cbf5566c88 | 8a3d7b6bc2693582292058e0d7a0aca27f3f34d8 | refs/heads/master | 2020-12-31T05:56:15.320180 | 2013-08-24T12:46:25 | 2013-08-24T12:46:25 | 18,766,046 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # -*- coding: utf-8 -*-
"""
Входной скрипт для запуска консольных приложений
:copyright: (c) 2013 by Pavel Lyashkov.
:license: BSD, see LICENSE for more details.
"""
from flask.ext.script import Manager
from console import app
from console.view.test import TestCommand
manager = Manager(app)
manager.add_command('test', TestCommand())
manager.run()
| [
"bilbo.kem@gmail.com"
] | bilbo.kem@gmail.com |
0e4d4b52b7acc97954100fb225045d2d21940425 | a59fb5691d03114b5c18e8293353e49d467a992d | /pyscript/ping.py | 8c385831d12df1218305e249fc28ba0b5ae46f38 | [] | no_license | conechan/TTMS | a1f9988699177c047c564d2d205d3ca1a4186147 | 6d53693860e65a173e2cccc5514b3ce51c6b2d1b | refs/heads/master | 2020-08-08T20:18:32.853831 | 2014-05-08T16:40:21 | 2014-05-08T16:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | #!/usr/bin/env python
import pxssh
import sys
# Liveness probe (Python 2): print "1" if an SSH login to the host given as
# argv[1] succeeds within 10 seconds, otherwise print "0".
try:
    s = pxssh.pxssh()
    hostname = sys.argv[1]
    # SECURITY: hard-coded root login with an empty password.
    username = 'root'
    password = ''
    s.login (hostname, username, password, login_timeout = 10)
    s.logout()
    print "1"
except:
    # Bare except is deliberate here: any failure (missing argv[1], DNS,
    # timeout, auth refusal) is reported uniformly as "0".
    print "0"
| [
"conechan@gmail.com"
] | conechan@gmail.com |
28aab459e7d05dd69f3d821f9390bf83c4fd6dc7 | 88e1ae575f7bdf4c569de217306a379906deb5fe | /src/pt.py | aa51c6ea2369a8e7d70ed68e2dc4ce3a1065f28b | [] | no_license | Yuting-Gao/DSM-decoupling-scene-motion | 884637413642d8c7929c0602e50683f1f22f98c8 | e97976505bd04db1a823c192eeb21fb4b4c24c80 | refs/heads/main | 2023-01-24T01:21:19.504260 | 2020-12-06T04:11:53 | 2020-12-06T04:11:53 | 319,202,713 | 1 | 0 | null | 2020-12-07T04:22:55 | 2020-12-07T04:22:55 | null | UTF-8 | Python | false | false | 14,582 | py | from __future__ import print_function
import sys
import time
import torch
import torch.backends.cudnn as cudnn
from utils.utils import AverageMeter
from data.config import pt_data_config, pt_augmentation_config
from data.dataloader import pt_data_loader_init
import torch.nn as nn
from augment.gen_positive import GenPositive
from augment.gen_negative import GenNegative
from utils.learning_rate_adjust import pt_adjust_learning_rate
from utils.moment_update import moment_update
from model.config import pt_model_config
from loss.config import pt_optim_init
from utils.load_weights import pt_load_weight
from utils.utils import Timer
from datetime import datetime
from loss.tcr import tcr
def flip(x, dim):
    """Return x with the order of elements reversed along dimension `dim`
    (equivalent to torch.flip(x, dims=[dim]))."""
    reversed_idx = torch.arange(x.size(dim) - 1, -1, -1,
                                dtype=torch.long, device=x.device)
    selector = [slice(None)] * x.dim()
    selector[dim] = reversed_idx
    return x[tuple(selector)]
def shift(x, dim):
    """Roll x by one position toward lower indices along `dim`:
    output[i] = x[i + 1], with the first element wrapping to the end
    (equivalent to torch.roll(x, shifts=-1, dims=dim))."""
    n = x.size(dim)
    rolled_idx = torch.arange(1, n + 1, dtype=torch.long, device=x.device) % n
    selector = [slice(None)] * x.dim()
    selector[dim] = rolled_idx
    return x[tuple(selector)]
def get_shuffle_ids(bsz):
    """Generate shuffle ids for ShuffleBN.

    Returns (forward, backward) index tensors on the GPU such that
    backward[forward[i]] == i, i.e. indexing with `backward` undoes the
    permutation applied by `forward`.
    """
    fwd = torch.randperm(bsz).long().cuda()
    bwd = torch.zeros(bsz).long().cuda()
    bwd.index_copy_(0, fwd, torch.arange(bsz).long().cuda())
    return fwd, bwd
def pretext_train(args, recorder):
    """Step 1: self-supervised pretraining entry point.

    Builds the data loaders, the model (plus an EMA copy for momentum-based
    methods), the contrast/criterion/optimizer, then runs the epoch loop for
    the method selected by args.pt_method ('moco', 'dsm' or 'dsm_triplet'),
    recording a checkpoint through `recorder` every epoch.

    Returns the checkpoint path held by `recorder.pt_checkpoint`.
    """
    if args.gpus is not None:
        print("Use GPU: {} for pretext training".format(args.gpus))
    num_class, data_length, image_tmpl = pt_data_config(args)
    # print("tp_length is: ", data_length)
    train_transforms, test_transforms, eval_transforms = pt_augmentation_config(args)
    train_loader, val_loader, eval_loader, train_samples, val_samples, eval_samples = pt_data_loader_init(args,
                                                                                                          data_length,
                                                                                                          image_tmpl,
                                                                                                          train_transforms,
                                                                                                          test_transforms,
                                                                                                          eval_transforms)
    n_data = len(train_loader)
    model, model_ema = pt_model_config(args, num_class)
    # == optim config==
    contrast, criterion, optimizer = pt_optim_init(args, model, n_data)
    model = model.cuda()
    # == load weights ==
    model, model_ema = pt_load_weight(args, model, model_ema, optimizer, contrast)
    if args.pt_method in ['dsm', 'moco']:
        model_ema = model_ema.cuda()
        # copy weights from `model' to `model_ema'
        moment_update(model, model_ema, 0)
    cudnn.benchmark = True
    # optionally resume from a checkpoint
    args.start_epoch = 1
    # ==================================== our data augmentation method=================================
    if args.pt_method in ['dsm', 'dsm_triplet']:
        pos_aug = GenPositive()
        neg_aug = GenNegative()
    # =======================================add message =====================
    recorder.record_message('a', '='*100)
    recorder.record_message('a', '-'*40+'pretrain'+'-'*40)
    recorder.record_message('a', '='*100)
    # ====================update lr_decay from str to numpy=========
    iterations = args.pt_lr_decay_epochs.split(',')
    args.pt_lr_decay_epochs = list([])
    for it in iterations:
        args.pt_lr_decay_epochs.append(int(it))
    timer = Timer()
    # routine
    print('*'*70+'Step1: pretrain'+'*'*20 + '*'*50)
    for epoch in range(args.pt_start_epoch, args.pt_epochs + 1):
        timer.tic()
        pt_adjust_learning_rate(epoch, args, optimizer)
        print("==> training...")
        time1 = time.time()
        if args.pt_method == "moco":
            loss, prob = train_moco(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, args, recorder)
        elif args.pt_method == "dsm":
            loss, prob = train_dsm(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, args, pos_aug,
                                   neg_aug, recorder)
            # loss, prob = epoch * 0.01, 0.02*epoch
        elif args.pt_method == "dsm_triplet":
            loss = train_dsm_triplet(epoch, train_loader, model, optimizer, args, pos_aug, neg_aug, recorder)
        else:
            # BUG FIX: the exception was previously constructed but never
            # raised, so unknown methods fell through and crashed later with
            # an unrelated NameError on `loss`.
            raise Exception("Not support method now!")
        recorder.record_pt_train(loss)
        time2 = time.time()
        print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
        timer.toc()
        left_time = timer.average_time * (args.pt_epochs - epoch)
        message = "Step1: pretrain now loss is: {} left time is : {} now is: {}".format(loss, timer.format(left_time), datetime.now())
        print(message)
        recorder.record_message('a', message)
        state = {
            'opt': args,
            'model': model.state_dict(),
            'contrast': contrast.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch,
        }
        recorder.save_pt_model(args, state, epoch)
    print("finished pretrain, the trained model is record in: {}".format(recorder.pt_checkpoint))
    return recorder.pt_checkpoint
def train_moco(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, opt, recorder):
    """
    One epoch of MoCo-style instance-discrimination training.

    Returns (average loss, average positive-pair score) for the epoch.
    `model_ema` is the momentum (EMA) key encoder; it is never trained by
    backprop, only updated via moment_update.
    """
    print("==> (MoCo) training...")
    model.train()
    model_ema.eval()
    # Keep the EMA encoder in eval mode EXCEPT its BatchNorm layers, which are
    # put back in train mode so their running statistics keep updating.
    def set_bn_train(m):
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
            m.train()
    model_ema.apply(set_bn_train)
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    prob_meter = AverageMeter()
    end = time.time()
    for idx, (inputs, _, index) in enumerate(train_loader):
        data_time.update(time.time() - end)
        # print(inputs[0].size())
        bsz = inputs[0].size(0)
        # fixed args.batch_size
        # NOTE(review): the message hard-codes "16" but the actual threshold
        # is opt.pt_batch_size — confirm they agree.
        if bsz < opt.pt_batch_size:
            print("batch less than 16, continue")
            continue
        inputs[0] = inputs[0].float()
        inputs[1] = inputs[1].float()
        inputs[2] = inputs[2].float()
        inputs[0] = inputs[0].cuda()
        inputs[1] = inputs[1].cuda()
        inputs[2] = inputs[2].cuda()
        index = index.cuda(non_blocking=True)
        # ===================forward=====================
        anchor, positive, negative = inputs
        # here a series of data augmentation
        # ====================================================postive operation=======================
        # NOTE(review): Shuffle-BN is currently disabled — the shuffle ids are
        # computed but only used by the commented-out block below.
        shuffle_ids, reverse_ids = get_shuffle_ids(bsz)
        feat_q, _ = model(anchor)
        feat_k, _ = model_ema(positive)
        # with torch.no_grad():
        #     positive = positive[shuffle_ids]
        #     feat_k = model_ema(positive)
        #     feat_k = feat_k[reverse_ids]
        feat_n, _ = model(negative)
        out = contrast(feat_q, feat_k, feat_n, index)
        contrast_loss = criterion(out)
        loss = contrast_loss
        # out[:, 0] is presumably the positive-pair score; its mean is logged
        # as a training-progress signal — confirm against the contrast module.
        prob = out[:, 0].mean()
        # ===================backward=====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # ===================meters=====================
        loss_meter.update(loss.item(), bsz)
        prob_meter.update(prob.item(), bsz)
        # Momentum (EMA) update of the key encoder with coefficient opt.pt_alpha.
        moment_update(model, model_ema, opt.pt_alpha)
        torch.cuda.synchronize()
        batch_time.update(time.time() - end)
        end = time.time()
        message = ('MoCo Train: [{0}][{1}/{2}]\t'
                   'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                   'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                   'loss {loss.val:.3f} ({loss.avg:.3f})\t'
                   'prob {prob.val:.3f} ({prob.avg:.3f})'.format(
            epoch, idx + 1, len(train_loader), batch_time=batch_time,
            data_time=data_time, loss=loss_meter, prob=prob_meter))
        # print info
        if (idx + 1) % opt.pt_print_freq == 0:
            print(message)
            recorder.record_message('a', message)
        # print(out.shape)
        sys.stdout.flush()
    return loss_meter.avg, prob_meter.avg
def train_dsm_triplet(epoch, train_loader, model, optimizer, opt, pos_aug, neg_aug, recorder):
    """
    One epoch of DSM training with a triplet-margin loss.

    Combines an intra-sample triplet (anchor/positive/negative from the same
    clip) with an inter-sample triplet (negatives flipped across the batch).
    Returns the average loss for the epoch.
    """
    print("==> (DSM triplet) training...")
    model.train()
    # NOTE(review): unlike the MoCo/DSM loops, set_bn_train is defined here
    # but never applied to any module — confirm whether that is intentional.
    def set_bn_train(m):
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
            m.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    triplet_loss = nn.TripletMarginLoss(margin=0.5, p=2)
    end = time.time()
    for idx, (inputs, _, index) in enumerate(train_loader):
        data_time.update(time.time() - end)
        bsz = inputs[0].size(0)
        # fixed args.batch_size
        # NOTE(review): the message hard-codes "16" but the actual threshold
        # is opt.pt_batch_size — confirm they agree.
        if bsz < opt.pt_batch_size:
            print("batch less than 16, continue")
            continue
        for i in range(len(inputs)):
            inputs[i] = inputs[i].float()
            inputs[i] = inputs[i].cuda()
        # ===================forward=====================
        anchor_old, positive, negative = inputs
        # here a series of data augmentation
        # ====================================================postive operation=======================
        anchor = pos_aug(anchor_old)
        feat_q = model(anchor)
        feat_k = model(positive)
        feat_n = model(negative)
        # Intra-sample triplet: each anchor vs its own positive/negative.
        intra_loss = triplet_loss(feat_q, feat_k, feat_n)
        # Inter-sample triplet: negatives reversed along the batch dimension,
        # pairing each anchor with another sample's negative.
        inter_loss = triplet_loss(feat_q, feat_k, flip(feat_n, 0))
        # for j in range(bsz-2):
        #     inter_loss += triplet_loss(feat_q, feat_k, shift(feat_n, 0))
        alpha_1 = 1
        alpha_2 = 1
        loss = alpha_1 * intra_loss + alpha_2 * inter_loss
        # print(loss)
        # ===================backward=====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # ===================meters=====================
        loss_meter.update(loss.item(), bsz)
        torch.cuda.synchronize()
        batch_time.update(time.time() - end)
        end = time.time()
        message = ('DSM triplet Train: [{0}][{1}/{2}]\t'
                   'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                   'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                   'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
            epoch, idx + 1, len(train_loader), batch_time=batch_time,
            data_time=data_time, loss=loss_meter))
        # print info
        if (idx + 1) % opt.pt_print_freq == 0:
            print(message)
            recorder.record_message('a', message)
        # print(out.shape)
        sys.stdout.flush()
    return loss_meter.avg
def train_dsm(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, opt, pos_aug, neg_aug, recorder):
    """
    One epoch of DSM (decoupled scene-motion) contrastive training.

    Like train_moco, but the anchor is passed through the positive
    augmentation (pos_aug) before encoding. Returns (average loss, average
    positive-pair score). `model_ema` is updated only via moment_update.
    """
    print("==> (DSM) training...")
    model.train()
    model_ema.eval()
    # Keep the EMA encoder in eval mode EXCEPT its BatchNorm layers, which are
    # put back in train mode so their running statistics keep updating.
    def set_bn_train(m):
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
            m.train()
    model_ema.apply(set_bn_train)
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    prob_meter = AverageMeter()
    end = time.time()
    for idx, (inputs, _, index) in enumerate(train_loader):
        data_time.update(time.time() - end)
        # print(inputs[0].size())
        bsz = inputs[0].size(0)
        # fixed args.batch_size
        # NOTE(review): the message hard-codes "16" but the actual threshold
        # is opt.pt_batch_size — confirm they agree.
        if bsz < opt.pt_batch_size:
            print("batch less than 16, continue")
            continue
        inputs[0] = inputs[0].float()
        inputs[1] = inputs[1].float()
        inputs[2] = inputs[2].float()
        inputs[0] = inputs[0].cuda()
        inputs[1] = inputs[1].cuda()
        inputs[2] = inputs[2].cuda()
        index = index.cuda(non_blocking=True)
        # ===================forward=====================
        anchor_old, positive, negative = inputs
        # print(anchor_old.size())
        # here a series of data augmentation
        # ====================================================postive operation=======================
        anchor = pos_aug(anchor_old)
        # positive = flip(anchor, 2)
        # shuffle_ids, reverse_ids = get_shuffle_ids(bsz)
        feat_q, map_q = model(anchor)
        feat_k, map_k = model_ema(positive)
        # tcr_loss = tcr(map_q, map_k)
        # with torch.no_grad():
        #     positive = positive[shuffle_ids]
        #     feat_k = model_ema(positive)
        #     feat_k = feat_k[reverse_ids]
        feat_n, _ = model(negative)
        out = contrast(feat_q, feat_k, feat_n, index)
        contrast_loss = criterion(out)
        # Only the contrastive term is active; the auxiliary losses listed in
        # the trailing comment are experiments that were disabled.
        loss = contrast_loss # + tcr_loss # + sample_loss # + contrast_loss2 # + cls_loss + mixup_loss
        # print(contrast_loss, tcr_loss)
        # out[:, 0] is presumably the positive-pair score; its mean is logged
        # as a training-progress signal — confirm against the contrast module.
        prob = out[:, 0].mean()
        # ===================backward=====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # ===================meters=====================
        loss_meter.update(loss.item(), bsz)
        prob_meter.update(prob.item(), bsz)
        # Momentum (EMA) update of the key encoder with coefficient opt.pt_alpha.
        moment_update(model, model_ema, opt.pt_alpha)
        torch.cuda.synchronize()
        batch_time.update(time.time() - end)
        end = time.time()
        message = ('DSM Train: [{0}][{1}/{2}]\t'
                   'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                   'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                   'loss {loss.val:.3f} ({loss.avg:.3f})\t'
                   'prob {prob.val:.3f} ({prob.avg:.3f})'.format(
            epoch, idx + 1, len(train_loader), batch_time=batch_time,
            data_time=data_time, loss=loss_meter, prob=prob_meter))
        # print info
        if (idx + 1) % opt.pt_print_freq == 0:
            print(message)
            recorder.record_message('a', message)
        # print(out.shape)
        sys.stdout.flush()
    return loss_meter.avg, prob_meter.avg
if __name__ == '__main__':
pretext_train() | [
"awiny@macdeMac-mini.local"
] | awiny@macdeMac-mini.local |
27848bac64853c24e8728415a18bda9f7d36536d | 42cee88f0d7ed6c9ffcece4409406cde35a4d0d3 | /src/stations/stations.py | 4b0c4ac2628312a900556495612a4970998d9e6e | [
"MIT"
] | permissive | ganemone/SublimeBart | 36cfb923228fe24246f7a1cee9d1d31430a2b3e5 | 1fcd72062914891cffac840d814eb129ebd43edf | refs/heads/master | 2021-01-18T19:27:24.659351 | 2015-03-06T19:18:49 | 2015-03-06T19:18:49 | 31,030,064 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | stations = [
    # Alphabetical display names of BART stations, as shown to the user.
    '12th St. Oakland City Center',
    '16th St. Mission (SF)',
    '19th St. Oakland',
    '24th St. Mission (SF)',
    'Ashby (Berkeley)',
    'Balboa Park (SF)',
    'Bay Fair (San Leandro)',
    'Castro Valley',
    'Civic Center (SF)',
    'Coliseum/Oakland Airport',
    'Colma',
    'Concord',
    'Daly City',
    'Downtown Berkeley',
    'Dublin/Pleasanton',
    'El Cerrito del Norte',
    'El Cerrito Plaza',
    'Embarcadero (SF)',
    'Fremont',
    'Fruitvale (Oakland)',
    'Glen Park (SF)',
    'Hayward',
    'Lafayette',
    'Lake Merritt (Oakland)',
    'MacArthur (Oakland)',
    'Millbrae',
    'Montgomery St. (SF)',
    'North Berkeley',
    'North Concord/Martinez',
    'Orinda',
    'Pittsburg/Bay Point',
    'Pleasant Hill',
    'Powell St. (SF)',
    'Richmond',
    'Rockridge (Oakland)',
    'San Bruno',
    'San Francisco Int\'l Airport',
    'San Leandro',
    'South Hayward',
    'South San Francisco',
    'Union City',
    'Walnut Creek',
    'West Dublin',
    'West Oakland'
]
| [
"ganemone@gmail.com"
] | ganemone@gmail.com |
9a4bb9e6199c5e9910b99b9c6280d592ddcbbdc5 | 131957d80f1b0ede2259ae699723f789362c4b3c | /rules.py | a3fad2f10db669d6c2fd418cd3a9a579132caef3 | [] | no_license | Harkphoenix/python------1 | 0e6861bb5a49af1a1e73bde225019df43dcc0c49 | 667151730767fb0fe2cce6792b841802829968da | refs/heads/master | 2020-05-29T16:06:48.671945 | 2014-06-05T13:44:22 | 2014-06-05T13:44:22 | 20,526,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | #coding=utf-8
class Rule:
    """
    Base class for all markup rules.

    Every rule exposes an `action` method, and since all rules invoke the
    handler in almost the same way, the default implementation lives here in
    the parent. Subclasses define `condition` to decide whether a text block
    matches the rule, and set a class attribute `type`, which this shared
    `action` uses to dispatch the handler's start/feed/end callbacks.
    """
    def action(self, block, handler):
        handler.start(self.type)
        handler.feed(block)
        handler.end(self.type)
        return True
class HeadingRule(Rule):
    """A heading is a single line of at most 70 characters that does not
    end with a colon."""
    type = 'heading'
    def condition(self, block):
        # Guard the empty string first: block[-1] would raise IndexError.
        return bool(block) and '\n' not in block and len(block) <= 70 and block[-1] != ':'
class TitleRule(HeadingRule):
    """The first heading of the document is treated as its title."""
    type = 'title'
    first = True
    def condition(self, block):
        if not self.first: return False
        # BUG FIX: this previously read `self.firt = False` (typo), so the
        # flag never flipped and every later heading also matched as a title.
        self.first = False
        return HeadingRule.condition(self, block)
class ListItemRule(Rule):
    """A list item is a block that starts with a hyphen; the hyphen is
    stripped before the block is fed to the handler."""
    type = 'listitem'
    def condition(self, block):
        # startswith returns False for an empty block instead of raising
        # IndexError as block[0] would.
        return block.startswith('-')
    def action(self, block, handler):
        handler.start(self.type)
        handler.feed(block[1:].strip())
        handler.end(self.type)
        return True
class ListRule(ListItemRule):
    """Wraps consecutive list items in a list element: opens the list when
    the first item appears and closes it at the first non-item block.

    `condition` always matches, and `action` always returns False so the
    following rules still get a chance to process the block.
    """
    type = 'list'
    inside = False
    def condition(self, block):
        return True
    def action(self, block, handler):
        if not self.inside and ListItemRule.condition(self, block):
            handler.start(self.type)
            self.inside = True  # consistency: was the int 1; use a boolean
        elif self.inside and not ListItemRule.condition(self, block):
            handler.end(self.type)
            self.inside = False
        return False
class ParagraphRule(Rule):
    # Fallback rule: matches any block that no earlier rule claimed.
    type = 'paragraph'
    def condition(self, block):
        return True
"296597641@qq.com"
] | 296597641@qq.com |
943046ca83cc744a00369e1e7ddfec263a7dc795 | ad23b164febd12d5c6d97cfbcd91cf70e2914ab3 | /webtestdata/wsgi.py | 882a4bbee8f33c55053afc3819608ab439306db9 | [] | no_license | wawj901124/webtestdata | 9eedf9a01dec2c157725299bda9a42e8d357ef0b | 54f6412566fce07ece912760c5caea73ede819cb | refs/heads/master | 2022-12-09T14:18:38.125191 | 2021-04-25T07:54:07 | 2021-04-25T07:54:07 | 175,773,318 | 1 | 1 | null | 2022-12-08T02:39:15 | 2019-03-15T07:49:16 | Python | UTF-8 | Python | false | false | 399 | py | """
WSGI config for webtestdata project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webtestdata.settings")
# Module-level WSGI callable that servers (gunicorn/uWSGI/mod_wsgi) look up.
application = get_wsgi_application()
| [
"410287958@qq.com"
] | 410287958@qq.com |
28771bd59d9da4ee947ab6016c1d38117cd78f90 | 715a96cc7977b818471770d66f2ef4723bd31723 | /anonim1.py | a826bf328a2232674dace11f3108a82f325ce148 | [] | no_license | karimmkamal/autobotupgrade | 565692b2d784589b808ad926547a28d1feef760d | f6251be79580103c7d897b619c34b237f5abfadf | refs/heads/main | 2023-04-04T08:36:12.360405 | 2021-04-04T19:25:19 | 2021-04-04T19:25:19 | 354,543,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,046 | py | from telethon import TelegramClient, events
import os
import json
import telethon
# SECURITY: Telegram API credentials are hard-coded in source control; they
# should be rotated and loaded from the environment instead.
api_id = 3388504
api_hash = 'fcb26adccf95496b5b258a3f51f6ab59'
client = TelegramClient('unfban', api_id, api_hash)
# crypto winner - crypto auto signals
# Map of source channel id -> list of destination channel ids to mirror into.
source_target = { # this is not your channel, right?yes
-1001316654669: [-1001474202691], # source_target = {-1001408408426: [-1001285034799]} wait because it will be on the other anonim not this oneç yes
}
#AlphaTradeZone® Premium | 1316654669
#CryptoAutoZignals - Premium | 1376355233
#Crypto winner | 1285034799
#CryptoAlpha - Premium | 1474202691
# messages.json persists {source_channel: {target_channel: {src_msg_id:
# dst_msg_id}}} so edits/deletes can be propagated across restarts.
with open('messages.json', encoding='UTF-8') as json_file:
    messages = json.load(json_file)
@client.on(events.NewMessage(chats=list(source_target.keys())))
async def incoming(event: telethon.events.newmessage.NewMessage.Event):
    """Mirror a new source-channel message (text or media) to every target
    channel and persist the source->target message-id mapping."""
    global messages
    channel = event.chat_id
    print(event.message.id)
    # First message seen from this channel: initialise its id-mapping table.
    if str(channel) not in messages:
        messages[str(channel)] = {}
        for i in source_target[channel]:
            messages[str(channel)][str(i)] = {}
    message_in = event.message
    if event.media:
        media_path = await message_in.download_media()
    else:
        media_path = ''
    text_of_post = message_in.text
    if len(media_path) > 0:
        # Media message: re-upload the downloaded file with the original text
        # as caption, then remove the temporary local copy.
        try:
            for i in source_target[channel]:
                message_out = await client.send_file(entity=i, file=media_path, caption="{}".format(text_of_post))
                messages[str(channel)][str(i)][str(event.message.id)] = str(message_out.id)
            os.remove(media_path)
        except Exception as e:
            print(e)
    else:
        for i in source_target[channel]:
            message_out = await client.send_message(entity=i, message="{}".format(text_of_post))
            messages[str(channel)][str(i)][str(event.message.id)] = str(message_out.id)
    # Persist the full mapping after every message so a restart can resume.
    with open('messages.json', 'w', encoding='UTF-8') as file:
        json.dump(messages, file)
@client.on(events.MessageEdited(chats=list(source_target.keys())))
async def editing(event: telethon.events.messageedited.MessageEdited):
    """Propagate an edit in a source channel to the mirrored copies,
    using the persisted source->target message-id mapping."""
    global messages
    channel = event.chat_id
    # NOTE(review): raises KeyError if the edited message was never mirrored
    # (e.g. posted before this bot started) — confirm that is acceptable.
    for i in source_target[channel]:
        await client.edit_message(entity=i, message=int(messages[str(channel)][str(i)][str(event.message.id)]),
                                  text=event.message.text)
@client.on(events.MessageDeleted(chats=list(source_target.keys())))
async def deleting(event: telethon.events.messagedeleted.MessageDeleted.Event):
    """Delete the mirrored copies of messages removed from a source channel."""
    global messages
    channel = event.chat_id
    for i in source_target[channel]:
        ids_to_delete = []
        for j in event.deleted_ids:
            # NOTE(review): raises KeyError if a deleted id was never
            # mirrored (e.g. posted before this bot started) — confirm.
            ids_to_delete.append(messages[str(channel)][str(i)][str(j)])
        await client.delete_messages(entity=i, message_ids=ids_to_delete)
# Connect (prompting for login on first run) and block forever, dispatching
# the event handlers registered above.
client.start()
client.run_until_disconnected()
| [
"noreply@github.com"
] | noreply@github.com |
dcb9a544cce84c43cf9b3d7b349db60e8139ccde | 9ce822c07edef943dc519a7ab3916f5a667e114a | /location_and_settings/location_and_settings/doctype/location_list/test_location_list.py | 28da20eda77448d48672ee27fa1509ed7b30bbe6 | [
"MIT"
] | permissive | hrgadeha/location_and_setting | 250fec37c2e63ce7c31b41ac52e50bea1e333392 | 770a7a719ce66bfe699dc805839a972063ff8ab6 | refs/heads/master | 2020-04-25T03:43:09.423485 | 2019-02-28T12:09:31 | 2019-02-28T12:09:31 | 172,486,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Hardik Gadesha and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# Placeholder test case for the Location List doctype; no tests implemented yet.
class TestLocationList(unittest.TestCase):
	pass
| [
"you@example.com"
] | you@example.com |
1dfdb0663b213da63d247a05390a8588915edb6d | 6156d7b73431af55e7163d0259aae928707d8b74 | /code/pyscraper/scrape_opensecret_company.py | 4467d95a8c37b40b9428c5b7ac5653c94177aae2 | [] | no_license | surajsakaram/Quartz_Database_Project | 7a01d00da37f14fe5459a262ce27ea432ec74943 | 589f9ef11a2d0fd1acd6057018c89c7f3477e3f8 | refs/heads/master | 2020-05-04T13:34:54.362288 | 2019-04-26T05:57:11 | 2019-04-26T05:57:11 | 179,165,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | from bs4 import BeautifulSoup as bs
import urllib3 as ul
import sys, csv, os
import re
base_url = "https://www.opensecrets.org/orgs/recips.php?id="
cycle_arr = [cycle for cycle in range(1998, 2020, 2)]
org_data_header = ["Recipient", "Office", "Contributions(Total)", "Cycle"]
# supress urllib3 warning
ul.disable_warnings(ul.exceptions.InsecureRequestWarning)
http = ul.PoolManager()
def swap_column_arr(arr, a, b):
    """Swap the elements at indices `a` and `b` of `arr` in place."""
    # Idiomatic tuple swap replaces the temporary-variable dance.
    arr[a], arr[b] = arr[b], arr[a]
def read_org_id():
    """Read (name, id) rows from opensecrets_org_id.csv, skipping the
    header row; prints and returns the list of rows."""
    with open('opensecrets_org_id.csv', 'r') as f:
        reader = csv.reader(f)
        next(reader, None)  # drop the header row (no-op on an empty file)
        rows = [row for row in reader]
        print(rows)
        return rows
def get_org_data(org_id, cycle):
    """Scrape all recipient rows for one organization and election cycle.

    Pages through the OpenSecrets recipients table until no <tbody> is
    found, returning a list of rows where each row is the cell texts plus
    the cycle year appended. Exits the whole process on a non-200 response.
    """
    page = 1
    loop = True
    all_data = []
    ## scrape data based on <tbody>
    while loop:
        url = base_url+org_id+"&page="+str(page)+"&cycle="+str(cycle)
        # get a response from http request
        response = http.request('GET', url)
        print("page", page)
        # if request is not successful, quit the program
        if response.status != 200:
            print("status:",response.status)
            sys.exit(1)
        # convert html to beautiful soup object
        soup = bs(response.data, "html.parser")
        # stop looping if no more result can be found
        tbody = soup.find("tbody")
        if not tbody:
            if page == 1:
                print("no result found...")
            else:
                print("no more record...")
            loop = False
            continue
        # iterate all table row
        for tr in tbody.find_all("tr"):
            this_row = []
            # iterate all table data
            for td in tr.find_all("td"):
                # Normalise cell text: the replace chain removes newlines and
                # any pre-existing tabs, then runs of spaces are collapsed.
                this_row.append(re.sub(' +',' ',td.text.strip().replace("\n","\t").replace("\t","")))
            this_row.append(cycle)
            #print(this_row)
            all_data.append(this_row)
        page += 1
    return all_data
def write_org_data_csv(org_data_list, org_id):
    """Write the scraped rows for one organization to its summary CSV and
    print a completion message."""
    path_dir = "../../data/opensecrets_company_csv_dump/"
    out_path = path_dir + 'opensecrets_org_summary_' + org_id + '.csv'
    with open(out_path, 'w') as f:
        csv.writer(f).writerows(org_data_list)
    print(org_id, "done...")
if __name__ == '__main__':
    # For every organization id, scrape all election cycles (1998-2018) and
    # dump one CSV per organization, headed by org_data_header.
    for item in read_org_id():
        [orgname, orgid] = item
        org_data_list = [org_data_header]
        for cycle in cycle_arr:
            print(orgname, cycle)
            org_data_list.extend(get_org_data(orgid, cycle))
            print(len(org_data_list))
        write_org_data_csv(org_data_list, orgid)
| [
"clarencecc88@gmail.com"
] | clarencecc88@gmail.com |
899be08fcd62614210b61f051519e01e1e445197 | fb7a7a85e52748458be58e7a73c42477822cfbd9 | /HotelModel/Input_variable_specialday.py | 7f2eb2a80ebde3007d85abdb6806e0006f53ee5d | [] | no_license | gbarrios318/Hotel | 6991d36531aad28562b232e56872ac9e9461c0d9 | 44f95038cc7735631852032673355420f64875ce | refs/heads/master | 2021-01-01T17:48:22.303564 | 2015-03-19T14:46:17 | 2015-03-19T14:46:17 | 22,519,583 | 0 | 0 | null | 2014-10-05T21:37:55 | 2014-08-01T17:12:02 | Java | UTF-8 | Python | false | false | 1,330 | py | from buildingspy.io.outputfile import Reader
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
rcParams['legend.loc'] = 'best'
# Load simulation results (Python 2 script): cooling/heating load plus guest
# room and kitchen hot-water demand from the annual Dymola result file.
r=Reader('HotelModel_annual.mat','dymola')
(t1,y1)=r.values('Loa.Loa')
(t2,y2)=r.values('GueRooDomWatDem.y[1]')
(t3,y3)=r.values('KitHotWatDem.y[1]')
y1=y1/1000
t1d=[]
t2d=[]
t3d=[]
y1d=[]
y2d=[]
y3d=[]
# Keep only samples inside one 86 400 s (one-day) window: 3 715 200 s is the
# start of day 43 of the simulated year (3 715 200 / 86 400 = 43).
for i in range(len(y1)):
    if t1[i]>3715200 and t1[i]<3801600:
        t1d.append(t1[i])
        y1d.append(y1[i])
for i in range(len(y2)):
    if t2[i]>3715200 and t2[i]<3801600:
        t2d.append(t2[i])
        y2d.append(y2[i])
for i in range(len(y3)):
    if t3[i]>3715200 and t3[i]<3801600:
        t3d.append(t3[i])
        y3d.append(y3[i])
# Build hourly x-axis tick positions/labels covering the selected day.
xlabind=[]
xlab=[]
x=(t1d[len(t1d)-1]-t1d[0])/3600
for i in range(int(round(x+1))):
    xlabind.append(t1d[0]+3600*i)
    xlab.append(str(i))
print xlabind
print xlab
# Top panel: load; bottom panel: hot-water demand for guest rooms vs kitchen.
ax1 = plt.subplot(2,1,1)
ax1.plot(t1d,y1d,color='black')
ax1.set_ylabel('Cooling\Heating Load(kW)')
xtic=[1]
xticks(xtic,'')
ax2 = plt.subplot(2,1,2)
ax2.plot(t2d,y2d,color='black',label='Guest room',linestyle='--')
ax2.plot(t3d,y3d,color='black',label='Kitchen')
ax2.set_ylabel('Hot Water Demand(kg/s)')
ax2.set_xlabel('Hours')
ax2.legend(fontsize='10')
xticks(xlabind,xlab,fontsize='10')
plt.savefig('power.png',bbox_inches='tight')
print 'the optimization is completed'
"jhy1987@gmail.com"
] | jhy1987@gmail.com |
f20db49417e89075fd79a012fb21dcd8d1588862 | 97f7f4a1e46887d4c8ce2b525b3725bcf04d034f | /vote/models.py | 692bdb230a6ccce49401603b5cc3ed7ae5cee07c | [] | no_license | gusibi/Django-Vote | a11b2c01d8120ab13ab6ad7b3994a058d9a34994 | 54b024500ddcaf020ed83624ef69a5ffef9b5547 | refs/heads/master | 2020-04-15T13:42:49.271375 | 2012-09-15T11:39:10 | 2012-09-15T11:39:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.db import models
from django_openid_auth.models import User
class New(models.Model):
    # A votable news item with cached vote tallies.
    title = models.CharField(max_length=20)
    pubtime = models.DateTimeField(auto_now_add=True)
    ups = models.IntegerField(default=0)
    downs = models.IntegerField(default=0)
    # presumably a ranking score derived from votes/age — confirm where
    # it is computed (nothing in this model updates it).
    hot = models.FloatField(default=0)
    def __unicode__(self):
        return u'%d' % self.id
class UserVote(models.Model):
    # Records one user's vote on one news item.
    user = models.ForeignKey(User)
    new = models.ForeignKey(New)
    up = models.IntegerField(default=0)
    down = models.IntegerField(default=0)
    # NOTE(review): despite the name, this looks like it stores the voter's
    # IP address rather than a device — confirm against callers.
    ipad = models.CharField(max_length=130)
| [
"czongxiao@gmail.com"
] | czongxiao@gmail.com |
a8f2b0a915a0ce17516075ff24af8ae63282afac | b143b5e49d0fc05e2677312ee495810ac2ba8ecb | /lsnc_website/lsnc_website/wsgi.py | 011b5d582071d87efcd88b81a4c011ed34da7e14 | [] | no_license | suse-grit/shopping_mall | d5c8d7748f124c51004489b9434c1c7aef64c8c0 | 0b363fcd472d182711dc0027a0bafd0d48a92e8b | refs/heads/master | 2022-12-01T19:14:13.345617 | 2020-08-07T03:34:03 | 2020-08-07T03:34:03 | 285,711,701 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for lsnc_website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lsnc_website.settings')
# Module-level WSGI callable that servers (gunicorn/uWSGI/mod_wsgi) look up.
application = get_wsgi_application()
| [
"1129212285@qq.com"
] | 1129212285@qq.com |
500a055718ce7e47f17d04abf91b4efc3b508abf | d251e0e4ee63f7d45e22899c190741b359014074 | /day_06/apps/user/model.py | dc52ca29e7d6dfee74bc03e59ddf056191d84178 | [] | no_license | forwardhuan/flask_learning | e3c3acf66bae3a3ca912f02345f2813d556830cc | ad461f90036c8fe8a64393c7fbc111951302659e | refs/heads/master | 2023-01-20T04:36:57.856849 | 2020-10-30T09:33:10 | 2020-10-30T09:33:10 | 307,320,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
from datetime import datetime
from exts import db
class User(db.Model):
    """User account table for the Flask app."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(15), nullable=False)
    password = db.Column(db.String(64), nullable=False)  # presumably a hash — confirm
    phone = db.Column(db.String(11), nullable=True)
    isdelete = db.Column(db.BOOLEAN, default=False)  # soft-delete flag
    # BUG FIX: pass the callable itself, not its result. `datetime.now()`
    # was evaluated once at import time, freezing the same timestamp onto
    # every new row; SQLAlchemy calls `datetime.now` per insert instead.
    rdatetime = db.Column(db.DateTime, default=datetime.now)

    def __str__(self):
        return self.username
| [
"forward_m@126.com"
] | forward_m@126.com |
6d304899c960e6aba1fd14191bad52280778cb3b | f3d3ab1fd1fc4551b9b123e6c3da81e4e59eb4b1 | /4.图像像素读写.py | 4a1414cd03952496eba80f70af4d8c3ff3cce2e1 | [] | no_license | Aismarter/OpencvExample | 95aa1447cb96f5121feb27dbc122590e63c1f7ff | 504e1f29d7df29f8feb3f85aaf4317738581f5fd | refs/heads/master | 2021-03-10T15:02:48.595695 | 2020-03-09T15:03:28 | 2020-03-09T15:03:28 | 246,463,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | import cv2 as cv
src = cv.imread("./pictures/factory.jpg")
cv.namedWindow("input", cv.WINDOW_AUTOSIZE)
cv.imshow("input", src)
h, w, ch = src.shape # shape 返回数组的函数与列数
print("h, w, ch", h, w, ch)
for row in range(h):
for col in range(w):
b, g, r = src[row, col]
b = 255 - b
g = 255 - g
r = 255 - r # 取反以后可以获得不同的像素色彩值
src[row, col] = [b, g, r]
cv.namedWindow("output", cv.WINDOW_AUTOSIZE)
cv.imshow("output", src)
cv.waitKey(0)
cv.destroyAllWindows()
| [
"zhangzhiyong@cmsr.chinamobile.com"
] | zhangzhiyong@cmsr.chinamobile.com |
4acbd6f8080c6891ed31cd0f7d753359cf7cc165 | ca4a5bbbd1b26d529a8a59ad1f6408a711433396 | /GenerateInputsElec.py | 1932061f0beb6971fd4e3aef4d39cdb97cb24856 | [] | no_license | twood1/LongTermForecasting | 9196bcfd7e7c65a0f63305cd794ea09745d16c00 | 3bf5c56214e2d0ed2bcfefdd0eb1eb625581f89b | refs/heads/master | 2021-09-05T16:31:13.500396 | 2018-01-29T16:33:26 | 2018-01-29T16:33:26 | 117,894,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | import numpy as np
def getZeroIndex(array):
    """Return the index of the first zero in `array`, or len(array) when
    no element is zero (including the empty array)."""
    for idx, value in enumerate(array):
        if value == 0:
            return idx
    return len(array)
def getNums():
    """Compute per-channel means and standard deviations of the training CSV.

    The CSV's first row is a header; every later row holds values in groups
    of four (electricity reading plus three features), with a zero acting as
    an end-of-data sentinel within the row.

    Returns ([mean1..mean4], [std1..std4]) as numpy scalars.
    """
    print("opening file")
    lines = []
    # BUG FIX: the file handle was previously never closed; `with` guarantees
    # it is released even if parsing raises.
    with open('./Data/TrainingElectrictyDS.csv', 'r') as myfile:
        for lineno, line in enumerate(myfile):
            if lineno == 0:
                continue  # skip the header row
            lines.append(list(map(float, line.split(","))))
    arr1, arr2, arr3, arr4 = [], [], [], []
    for line in lines:
        i = 0
        # Walk the row in strides of 4 until the zero sentinel (or the end).
        while i < len(line) - 1:
            if line[i] == 0:
                break
            arr1.append(line[i])
            arr2.append(line[i + 1])
            arr3.append(line[i + 2])
            arr4.append(line[i + 3])
            i += 4
    return [np.mean(arr1), np.mean(arr2), np.mean(arr3), np.mean(arr4)], \
           [np.std(arr1), np.std(arr2), np.std(arr3), np.std(arr4)]
def getInputs():
    """Build normalized inputs and targets from the training CSV.

    Each row interleaves four series (electricity + three features) in
    groups of 4 columns, zero-padded after the real data, with the
    prediction target in the last column.  Values before the first zero
    are z-normalized with the stats from getNums(); the padded tail is
    kept as raw zeros.

    Returns (inputs, targets) where inputs[i] = [elec, f1, f2, f3] and
    targets[i] = [[norm_target], t1, t2, t3].
    """
    means,stds = getNums()
    print("opening file")
    myfile = open('./Data/TrainingElectrictyDS.csv','r')
    lines = []
    i = 0
    print("got here.elec1")
    for line in myfile:
        if i == 0:
            # Skip the header row only; i stays 1 afterwards.
            i += 1
            continue
        toadd = list(map(float,line.split(",")))
        lines.append(toadd)
    i = 0
    inputs,targets = [],[]
    while i < len(lines):
        inputElec,f1,f2,f3 = [],[],[],[]
        t1,t2,t3 = [],[],[]
        arr2 = lines[i]
        # Last column is the target; everything before it is features.
        arr = arr2[:-1]
        zeroIdx = getZeroIndex(arr)  # first zero = start of padding
        j = 0
        while j < len(arr):
            if j >= zeroIdx:
                # Padded region: keep the raw (zero) values.
                inputElec.append(arr[j])
                f1.append(arr[j + 1])
                f2.append(arr[j + 2])
                f3.append(arr[j + 3])
            else:
                # Real data: z-normalize each of the four series.
                inputElec.append((arr[j] - means[0]) / stds[0])
                f1.append((arr[j + 1] - means[1]) / stds[1])
                f2.append((arr[j + 2] - means[2]) / stds[2])
                f3.append((arr[j + 3] - means[3]) / stds[3])
            j += 4
        if zeroIdx != 516:
            # Padding found before position 516: take the feature
            # targets from the NEXT row at the padding boundary.
            # TODO confirm the 516 magic number (appears to mean
            # "row fully populated, no zero marker found").
            nextArr = lines[i+1][:-1]
            t1.append((nextArr[zeroIdx + 1] - means[1]) / stds[1])
            t2.append((nextArr[zeroIdx + 2] - means[2]) / stds[2])
            t3.append((nextArr[zeroIdx + 3] - means[3]) / stds[3])
        else:
            # Full row: fall back to the last observed values.
            # NOTE(review): the stats indices here (3,2,1) are the
            # reverse of the branch above (1,2,3) -- verify intentional.
            t1.append((arr[zeroIdx - 3] - means[3]) / stds[3])
            t2.append((arr[zeroIdx - 2] - means[2]) / stds[2])
            t3.append((arr[zeroIdx - 1] - means[1]) / stds[1])
        inputs.append([inputElec, f1, f2, f3])
        toadd = float(arr2[-1])
        # Normalize the electricity target with the series-0 stats.
        toadd = [(toadd - means[0]) / stds[0]]
        targets.append([toadd, t1, t2, t3])
        i += 1
    return inputs,targets
#vals,targets = getInputs()
#print(retval[0],retval[1]) | [
"goeagles4life5@gmail.com"
] | goeagles4life5@gmail.com |
ab6ff50c83eec5211789c2aff0d895d1483a143c | 01f157e11ed5c4a5d520d9c57d63f94b51bf9e20 | /resnet_caffe.py | 13d198fb93d9b8c31808eaae35474fb4f6cca615 | [
"MIT"
] | permissive | noobgrow/pointing_game | 0461bfc8d251734577cec3045273edd4cb235520 | a167f9ab358413d6503753dd91eacec7fdfa0794 | refs/heads/master | 2022-04-10T02:56:49.055617 | 2019-06-11T18:10:37 | 2019-06-11T18:10:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,075 | py | import copy
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torchvision import models
class ResNetCaffe(models.resnet.ResNet):
    """torchvision ResNet tweaked to match the Caffe reference model.

    Differences from stock torchvision:
      * conv1 carries a bias term (torchvision uses bias=False),
      * maxpool uses padding=0 with ceil_mode=True (Caffe rounds up),
      * in layers 2-4 the stride-2 downsampling is moved from conv2 to
        conv1 of each layer's first block (Caffe-style striding).
    """
    def __init__(self, block, layers, num_classes=1000):
        super(ResNetCaffe, self).__init__(block, layers, num_classes)
        # Replace conv1/maxpool created by the parent with Caffe-style ones.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2),
                               padding=(3, 3), bias=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0,
                                    ceil_mode=True)
        # Move the downsampling stride from conv2 to conv1 in the first
        # block of layer2..layer4.
        for i in range(2, 5):
            getattr(self, 'layer%d'%i)[0].conv1.stride = (2,2)
            getattr(self, 'layer%d'%i)[0].conv2.stride = (1,1)
class Scale(nn.Module):
    """Per-channel affine layer: y[:, c] = weight[c] * x[:, c] + bias[c].

    Mirrors Caffe's Scale layer, used together with an affine-less
    BatchNorm2d to reproduce Caffe's BatchNorm + Scale pair.
    """
    def __init__(self, channels):
        super(Scale, self).__init__()
        self.channels = channels
        # Learnable per-channel scale and shift (uninitialized; expected
        # to be filled in from a checkpoint).
        self.weight = Parameter(torch.Tensor(channels))
        self.bias = Parameter(torch.Tensor(channels))

    def __repr__(self):
        return 'Scale(channels = %d)' % self.channels

    def forward(self, x):
        # Expect NCHW input; broadcast the parameters over N, H and W.
        batch, num_channels, height, width = x.shape
        scaled = x * self.weight.view(1, num_channels, 1, 1)
        return scaled + self.bias.view(1, num_channels, 1, 1)
def replace_every_module(parent_module, orig_module_class, replace_func):
    """Recursively rebuild `parent_module`, swapping every descendant of
    type `orig_module_class` for replace_func(descendant).

    Sequential containers are rebuilt from an OrderedDict of their
    (possibly replaced) children; any other nn.Module is deep-copied and
    patched via setattr, so the original module object is not modified.
    Note: unreplaced leaf children are shared, not copied.
    """
    if isinstance(parent_module, nn.Sequential):
        # Collect children first; a fresh Sequential is built at the end.
        module_dict = OrderedDict()
    elif isinstance(parent_module, nn.Module):
        new_parent_module = copy.deepcopy(parent_module)
    else:
        assert (False)
    for (k, v) in parent_module._modules.items():
        # print v
        if isinstance(v, orig_module_class):
            # Target type: replace this child outright.
            child_module = replace_func(v)
        elif len(v._modules.items()) > 0:
            # Container with children of its own: recurse into it.
            child_module = replace_every_module(v,
                                                orig_module_class,
                                                replace_func)
        else:
            # Leaf module of another type: keep the original reference.
            child_module = v
        if isinstance(parent_module, nn.Sequential):
            module_dict[k] = child_module
        elif isinstance(parent_module, nn.Module):
            setattr(new_parent_module, k, child_module)
    if isinstance(parent_module, nn.Sequential):
        return nn.Sequential(module_dict)
    elif isinstance(parent_module, nn.Module):
        return new_parent_module
def batchnorm_replace_func(x):
    """Turn a BatchNorm2d into Sequential(BatchNorm2d(affine=False), Scale).

    Normalization statistics and the learned affine transform are split
    into two separate layers, mirroring Caffe's BatchNorm + Scale pair.
    """
    assert isinstance(x, nn.BatchNorm2d)
    channels = x.num_features
    plain_bn = nn.BatchNorm2d(channels, momentum=0.9, affine=False)
    return nn.Sequential(plain_bn, Scale(channels))
def convert_batchnorm(model):
    """Replace every BatchNorm2d in `model` with an affine-less
    BatchNorm2d followed by a separate Scale layer (Caffe layout)."""
    model = replace_every_module(model, nn.BatchNorm2d, batchnorm_replace_func)
    return model
def load_resnet50(checkpoint_path=None):
    """Build a Caffe-style ResNet-50 and load weights from a checkpoint.

    checkpoint_path: path to a state_dict saved with torch.save().
        NOTE(review): the default of None makes torch.load fail --
        callers are evidently expected to always pass a path.

    Returns the model in eval() mode.
    """
    state_dict = torch.load(checkpoint_path)
    assert 'fc.weight' in state_dict
    # Infer the classifier size from the checkpoint itself.
    num_classes, _ = state_dict['fc.weight'].shape
    model = ResNetCaffe(models.resnet.Bottleneck, [3, 4, 6, 3], num_classes)
    # Split BatchNorms into (BatchNorm, Scale) pairs so the parameter
    # layout matches checkpoints converted from Caffe.
    model = convert_batchnorm(model)
    model.load_state_dict(state_dict)
    model.eval()
    return model
| [
"ruthcfong@gmail.com"
] | ruthcfong@gmail.com |
07ccbaa13946f30e8d2d81bdcc3c948f8adb3036 | 5eff9df4d276e83c68ce843d58868499858f701a | /Interview/Trees/binary_tree_traversal.py | e5a7ce276633e535f5c96cfc7a75b9b0cfffea65 | [] | no_license | arunraman/Code-Katas | b6723deb00caed58f0c9a1cafdbe807e39e96961 | 7fe3582fa6acf59a2620fe73e1e14bd8635bbee8 | refs/heads/master | 2023-03-04T17:27:44.037145 | 2023-03-02T21:09:53 | 2023-03-02T21:09:53 | 25,232,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | from binarytree import Node as Treenode
class Solution():
    """Recursive depth-first traversals of a binary tree (Python 2)."""
    def preOrder(self, root):
        # Visit order: node, left subtree, right subtree.
        if root == None:
            return
        print root.value,
        self.preOrder(root.left)
        self.preOrder(root.right)
    def inOrder(self, root):
        # Visit order: left subtree, node, right subtree.
        if root == None:
            return
        self.inOrder(root.left)
        print root.value,
        self.inOrder(root.right)
    def postOrder(self, root):
        # Visit order: left subtree, right subtree, node.
        if root == None:
            return
        self.postOrder(root.left)
        self.postOrder(root.right)
        print root.value,
# Build a small sample tree and demonstrate the three traversals.
S = Solution()
root = Treenode(1)
root.left = Treenode(2)
root.right = Treenode(3)
root.left.left = Treenode(8)
root.left.right = Treenode(12)
root.right.left = Treenode(3)
root.right.right = Treenode(25)
# binarytree's Node renders the tree as ASCII art when printed.
print root
S.preOrder(root)
print "\n"
S.inOrder(root)
print "\n"
S.postOrder(root)
"arunraman.19@gmail.com"
] | arunraman.19@gmail.com |
297c24423544d24e0d62626c1e8c9d65622d9986 | 9c5083a7d1d95a80917d6a30d501e67a1dc01aaf | /Lesson2/problem1/problem1.py | 09993693ad77e1763d34cb9c67b6b55b08ac56ff | [] | no_license | emilieaviles/otherlesson | 3e9d93f732bd3a12f1a7580199fb8ec87e4f57f1 | abd90e8982d4dd364c6bae6e35ad4f72e7a23361 | refs/heads/master | 2020-04-19T19:24:31.559183 | 2019-01-30T17:46:42 | 2019-01-30T17:46:42 | 168,387,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | classes = ["english","math","history","science","gym","health","advisory","production"]
for thing in classes:
print("You have been enrolled in " + thing + ".")
print() | [
"noreply@github.com"
] | noreply@github.com |
345e69a557ad41d9aae7895d883495769eee2017 | 41b4702e359e3352116eeecf2bdf59cb13c71cf2 | /full_model_walker_param/utils/env_utils.py | 110ef30017c069549d041f0bfb487b464dfec838 | [] | no_license | CaralHsi/Multi-Task-Batch-RL | b0aad53291c1713fd2d89fa4fff4a85c98427d4d | 69d29164ab7d82ec5e06a929ed3b96462db21853 | refs/heads/master | 2022-12-22T19:23:45.341092 | 2020-10-01T00:05:36 | 2020-10-01T00:05:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,899 | py | import os
from gym import Env
from gym.spaces import Box, Discrete, Tuple
import numpy as np
from env.hopper import HopperVelEnv
from env.half_cheetah import HalfCheetahVelEnv
from env.ant_goal import AntGoalEnv
from env.ant_dir import AntDirEnv
from env.humanoid_dir import HumanoidDirEnv
from env.humanoid_dir_openai import HumanoidDirEnvOpenAI
from env.humanoid_goal_ndone import HumanoidGoalEnvNDone
from env.walker_param import Walker2DRandParamsEnv
def get_dim(space):
    """Return the flattened dimensionality of a gym space."""
    if isinstance(space, Box):
        return space.low.size
    if isinstance(space, Discrete):
        return space.n
    if isinstance(space, Tuple):
        # Tuple spaces flatten to the sum of their members' dims.
        total = 0
        for subspace in space.spaces:
            total += get_dim(subspace)
        return total
    if hasattr(space, 'flat_dim'):
        # rllib-style spaces expose their flat size directly.
        return space.flat_dim
    raise TypeError("Unknown space: {}".format(space))
class ProxyEnv(Env):
    """Gym Env wrapper that forwards everything to an inner env.

    Attribute lookups that miss on the wrapper fall through to the
    wrapped env via __getattr__, so subclasses only override what they
    actually change.
    """
    def __init__(self, wrapped_env):
        self._wrapped_env = wrapped_env
        # Mirror the inner env's spaces so gym-style code can use the
        # wrapper directly.
        self.action_space = self._wrapped_env.action_space
        self.observation_space = self._wrapped_env.observation_space
    @property
    def wrapped_env(self):
        # Read-only access to the inner environment.
        return self._wrapped_env
    def reset(self, **kwargs):
        return self._wrapped_env.reset(**kwargs)
    def step(self, action):
        return self._wrapped_env.step(action)
    def render(self, *args, **kwargs):
        return self._wrapped_env.render(*args, **kwargs)
    @property
    def horizon(self):
        return self._wrapped_env.horizon
    def terminate(self):
        # Best-effort: only some envs implement terminate().
        if hasattr(self.wrapped_env, "terminate"):
            self.wrapped_env.terminate()
    def __getattr__(self, attr):
        # Raise for '_wrapped_env' itself to avoid infinite recursion
        # (e.g. during unpickling, before __init__ has set it).
        if attr == '_wrapped_env':
            raise AttributeError()
        return getattr(self._wrapped_env, attr)
    def __getstate__(self):
        """
        This is useful to override in case the wrapped env has some funky
        __getstate__ that doesn't play well with overriding __getattr__.

        The main problematic case is/was gym's EzPickle serialization scheme.
        :return:
        """
        return self.__dict__
    def __setstate__(self, state):
        self.__dict__.update(state)
    def __str__(self):
        return '{}({})'.format(type(self).__name__, self.wrapped_env)
class NormalizedBoxEnv(ProxyEnv):
    """
    Normalize actions to [-1, 1].

    Optionally normalize observations and scale reward.
    """

    def __init__(
            self,
            env,
            reward_scale=1.,
            obs_mean=None,
            obs_std=None,
    ):
        ProxyEnv.__init__(self, env)
        # Observation normalization is enabled iff stats were supplied.
        self._should_normalize = not (obs_mean is None and obs_std is None)
        if self._should_normalize:
            if obs_mean is None:
                obs_mean = np.zeros_like(env.observation_space.low)
            else:
                obs_mean = np.array(obs_mean)
            if obs_std is None:
                obs_std = np.ones_like(env.observation_space.low)
            else:
                obs_std = np.array(obs_std)
        self._reward_scale = reward_scale
        self._obs_mean = obs_mean
        self._obs_std = obs_std
        # Advertise a symmetric [-1, 1] action space; step() rescales
        # actions back into the inner env's bounds.
        ub = np.ones(self._wrapped_env.action_space.shape)
        self.action_space = Box(-1 * ub, ub)

    def estimate_obs_stats(self, obs_batch, override_values=False):
        # Guard against silently replacing previously-set statistics.
        if self._obs_mean is not None and not override_values:
            raise Exception("Observation mean and std already set. To "
                            "override, set override_values to True.")
        self._obs_mean = np.mean(obs_batch, axis=0)
        self._obs_std = np.std(obs_batch, axis=0)

    def _apply_normalize_obs(self, obs):
        # Epsilon keeps the division finite for constant dimensions.
        return (obs - self._obs_mean) / (self._obs_std + 1e-8)

    def step(self, action):
        # Rescale [-1, 1] actions to the inner env's [lb, ub] and clip.
        lb = self._wrapped_env.action_space.low
        ub = self._wrapped_env.action_space.high
        scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)
        scaled_action = np.clip(scaled_action, lb, ub)

        wrapped_step = self._wrapped_env.step(scaled_action)
        next_obs, reward, done, info = wrapped_step
        if self._should_normalize:
            next_obs = self._apply_normalize_obs(next_obs)
        return next_obs, reward * self._reward_scale, done, info

    def __str__(self):
        return "Normalized: %s" % self._wrapped_env
def domain_to_env(name):
    """Map a domain name to its environment class.

    gym's mujoco envs are imported inside the function, presumably so
    that importing this module does not require a working mujoco
    installation unless one of these domains is actually used.
    """
    from gym.envs.mujoco import HalfCheetahEnv, \
        InvertedPendulumEnv, HumanoidEnv, \
        HopperEnv, AntEnv, Walker2dEnv

    return {
        'invertedpendulum': InvertedPendulumEnv,
        'humanoid': HumanoidEnv,
        'halfcheetah': HalfCheetahEnv,
        'halfcheetah-vel': HalfCheetahVelEnv,
        'hopper': HopperEnv,
        'hopper-vel': HopperVelEnv,
        'ant': AntEnv,
        'ant-goal': AntGoalEnv,
        'ant-dir': AntDirEnv,
        'humanoid-dir':HumanoidDirEnv,
        'humanoid-openai-dir': HumanoidDirEnvOpenAI,
        'humanoid-ndone-goal': HumanoidGoalEnvNDone,
        'walker2d': Walker2dEnv,
        'walker-param': Walker2DRandParamsEnv,
    }[name]
def domain_to_epoch(name):
    """Return the number of training epochs configured for `name`.

    Raises KeyError for unknown domains.
    """
    epochs_by_domain = {
        'invertedpendulum': 300,
        'humanoid': 9000,
        'halfcheetah': 5000,
        'halfcheetah-vel': 50,
        'hopper': 50,
        'hopper-vel': 50,
        'ant-goal': 590,
        'ant-dir': 590,
        'ant': 5000,
        'humanoid-dir':590,
        'humanoid-openai-dir':590,
        'humanoid-ndone-goal': 590,
        'walker2d': 5000,
        'walker-param': 390,
    }
    return epochs_by_domain[name]
def domain_to_num_goals(name):
    """Return how many goals (tasks) are sampled for `name`.

    Only multi-task domains are listed; raises KeyError otherwise.
    """
    goals_by_domain = {
        'halfcheetah-vel': 32,
        'hopper-vel': 16,
        'ant-goal': 32,
        'ant-dir': 32,
        'humanoid-dir': 32,
        'humanoid-openai-dir': 10,
        'humanoid-ndone-goal': 10,
        'walker-param': 32,
    }
    return goals_by_domain[name]
def env_producer(domain, seed, goal=None):
    """Create a seeded, action-normalized environment for `domain`.

    `goal` is forwarded to the env constructor as the task parameter
    (e.g. target velocity/direction).  NOTE(review): the stock gym
    classes in domain_to_env (e.g. HopperEnv) may not accept a `goal`
    kwarg -- verify against the constructors used.
    """
    env = domain_to_env(domain)(goal=goal)
    env.seed(seed)
    # Wrap so actions are taken in [-1, 1] and rescaled internally.
    env = NormalizedBoxEnv(env)
    return env
| [
"jil021@eng.ucsd.edu"
] | jil021@eng.ucsd.edu |
3973203794a335401a2e5cfa6e3206483a4d7116 | d26b3bbf0192cc334e5ac431c753ebcbf2baeb1a | /l10n_cn_hr_payroll/__init__.py | 6adc439b170cc365b31453ea0481a8ba0709b7a9 | [] | no_license | davgit/Xero-2 | 1d566357174d15d4f3b15cc849ce9f32f0c9ef3a | 6477d844fde3f3b8f91d21b15ee7f8986a505de5 | refs/heads/master | 2021-01-21T20:49:47.585328 | 2013-02-16T08:13:22 | 2013-02-16T08:13:22 | 22,778,180 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import l10n_cn_hr_payroll
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"wangsong1233276@sina.com"
] | wangsong1233276@sina.com |
12df22b99d69a82d6d320852bbfe46620ef47780 | 8e552c996efc06db8cf243066d96f9c8f88e76f3 | /modules/profile/files/hcloud/usr/lib/python2.7/site-packages/cloudinit/sources/helpers/hetzner.py | 1eba5e08c58fb2f18726a1528a82de8129c4bbd9 | [] | no_license | elconas/hetznercloud-control | 66575d9aee131913ff26e2685d7648349649a18e | 193a297cd49e20b005897ad86fc4800644d8a2cc | refs/heads/master | 2021-04-03T09:33:38.092523 | 2018-03-20T07:04:31 | 2018-03-20T07:04:31 | 124,956,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | # Author: Jonas Keidel <jonas.keidel@hetzner.de>
#
# This file is part of cloud-init. See LICENSE file for license information.
import json
import random
from cloudinit import log as logging
from cloudinit import net as cloudnet
from cloudinit import url_helper
from cloudinit import util
LOG = logging.getLogger(__name__)
def read_metadata(url, timeout=2, sec_between=2, retries=30):
    """Fetch instance metadata from `url` and parse it as YAML.

    Retries the HTTP read (`retries` attempts, `sec_between` seconds
    apart); raises RuntimeError if the final response is not OK.
    """
    response = url_helper.readurl(url, timeout=timeout,
                                  sec_between=sec_between, retries=retries)
    if not response.ok():
        raise RuntimeError("unable to read metadata at %s" % url)
    return util.load_yaml(response.contents.decode())
def read_userdata(url, timeout=2, sec_between=2, retries=30):
    """Fetch raw user-data from `url`.

    Retries the HTTP read (`retries` attempts, `sec_between` seconds
    apart); raises RuntimeError if the final response is not OK.
    Returns the decoded response body as a string.
    """
    response = url_helper.readurl(url, timeout=timeout,
                                  sec_between=sec_between, retries=retries)
    if not response.ok():
        # Bug fix: the original message said "metadata" (copied from
        # read_metadata()); this endpoint serves user-data.
        raise RuntimeError("unable to read userdata at %s" % url)
    return response.contents.decode()
def add_local_ip(nic=None):
    """Assign link-local 169.254.0.1/16 to a physical NIC and bring it
    up so the metadata service can be reached; returns the NIC name.

    NOTE(review): the `nic` parameter is ignored -- it is immediately
    overwritten by get_local_ip_nic().
    """
    nic = get_local_ip_nic()
    LOG.debug("selected interface '%s' for reading metadata", nic)
    if not nic:
        # NOTE(review): "droplet" here looks copied from the
        # DigitalOcean datasource; this is the Hetzner helper.
        raise RuntimeError("unable to find interfaces to access the"
                           "meta-data server. This droplet is broken.")

    ip_addr_cmd = ['ip', 'addr', 'add', '169.254.0.1/16', 'dev', nic]
    ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']

    if not util.which('ip'):
        raise RuntimeError("No 'ip' command available to configure local ip "
                           "address")

    try:
        (result, _err) = util.subp(ip_addr_cmd)
        LOG.debug("assigned local ip to '%s'", nic)

        (result, _err) = util.subp(ip_link_cmd)
        LOG.debug("brought device '%s' up", nic)
    except Exception:
        util.logexc(LOG, "local ip address assignment to '%s' failed.", nic)
        raise

    return nic
def get_local_ip_nic():
    """Return the physical NIC with the smallest ifindex, or None if
    there are no physical interfaces."""
    physical = list(filter(cloudnet.is_physical, cloudnet.get_devicelist()))
    if not physical:
        return None
    by_ifindex = sorted(
        physical, key=lambda dev: cloudnet.read_sys_net_int(dev, 'ifindex'))
    return by_ifindex[0]
def remove_local_ip(nic=None):
    """Flush all addresses from `nic` and bring the link down.

    Failures are logged (with traceback) but not raised, mirroring the
    best-effort cleanup behavior of the original code.
    """
    ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
    ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'down']

    try:
        (result, _err) = util.subp(ip_addr_cmd)
        LOG.debug("removed all addresses from %s", nic)

        (result, _err) = util.subp(ip_link_cmd)
        LOG.debug("brought device '%s' down", nic)
    except Exception:
        # Bug fix: the original passed the exception object as an extra
        # format argument to logexc() whose message has only one %s,
        # breaking the log formatting.  logexc() already records the
        # active exception's traceback, so `e` is unnecessary.
        util.logexc(LOG, "failed to remove all address from '%s'.", nic)
"reg@elconas.de"
] | reg@elconas.de |
d8886e88937323eb625f4951e4a73b8b82235212 | 8a42be3f930d8a215394a96ad2e91c95c3b7ff86 | /Build/Instalation/GeneralDb/Marathon/MarathonTests_1.1/LargeFile_Edit/TestCases/V65_Changes/Diff_TwoLayouts1.py | f446d02edef1afedb4cb381315227b3bf6fde9a1 | [] | no_license | java-tools/jrec | 742e741418c987baa4350390d126d74c0d7c4689 | 9ece143cdd52832804eca6f3fb4a1490e2a6f891 | refs/heads/master | 2021-09-27T19:24:11.979955 | 2017-11-18T06:35:31 | 2017-11-18T06:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,811 | py | useFixture(default)
def test():
from Modules import commonBits
java_recorded_version = '1.6.0_03'
if window('Record Editor'):
commonBits.selectOldFilemenu(select_menu, 'Utilities', 'Compare Menu')
click('*2')
click('Choose File')
if window('Open'):
select(commonBits.selectPane(), 'Ams_LocDownload_20041228_Extract.txt')
click('Open')
close()
commonBits.setRecordLayout(select, 'ams Store')
click('Right')
select('TabbedPane', '')
click('Choose File')
if window('Open'):
select(commonBits.selectPane(), 'Ams_LocDownload_20041228_Extract2.txt')
click('Open')
close()
commonBits.setRecordLayout(select, 'ams Store')
click('Right')
select('TabbedPane', '')
click('Right')
select('TabbedPane', '')
click('Compare')
select('Table', 'cell:Loc Name,11(Highpoint City)')
assert_p('Table', 'Text', 'St Marys', 'Loc Name,12')
select('Table', 'cell:Loc Name,14(Bass Hill)')
assert_p('Table', 'Content', '[[, , , , , , , , , , , , ], [, Inserted, 1, TAR, 5839, DC, DC - Taras Ave, , 30-68 Taras Ave, Altona North, 3025, VIC, A], [, , , , , , , , , , , , ], [, Inserted, 2, TAR, 5850, DC, VIC West Ad Support, , Lot 2 Little Boundary Rd, Laverton, 3028, VIC, A], [, Old, 4, TAR, 5035, ST, Rockdale, Building B, Portside DC, 2-8 Mc Pherson Street, Botany, 2019, NSW, A], [, New, 6, , 5096, , Canberra Civic, Target Canberra, Canberra City Centre, Akuna Ave, Canberra, 2601, ACT, ], [, Old, 5, TAR, 5037, ST, Miranda, Westfield Shoppingtown, Cnr. Urunga Pde & The Kingsway, Miranda, 2228, NSW, A], [, New, 7, , 5012, , Ringwood, Ringwood, Seymour Street, Ringwood, 3134, VIC, ], [, Old, 6, TAR, 5052, ST, Eastwood, Marayong Offsite Reserve, 11 Melissa Place, Marayong, 2148, NSW, A], [, New, 8, , 5030, , Epping, Epping Plaza Shopping Centre, Cnr. High & Cooper Streets, Epping, 3076, VIC, ], [, Old, 7, TAR, 5055, ST, Leichhardt, Marketown, Marion Street, Leichhardt, 2040, NSW, A], [, New, 9, , 5054, , Highpoint City, Laverton, Lot 2, Cnr Lt Boundry & Old Geelong Road, Laverton, 3028, VIC, ], [, Old, 8, TAR, 5060, ST, St Marys, St. Mary\'s, Charles Hackett Drive, St Mary\'s, 2760, NSW, A], [, New, 10, , 5062, , Castletown, Townsville, Cnr. Woolcock St. & Kings Road, Townsville, 4810, QLD, ], [, Old, 9, TAR, 5070, ST, Bass Hill, Bass Hill Plaza, 753 Hume Highway, Bass Hill, 2197, NSW, A], [, New, 11, , 5138, , Cairns Central, Cairns, Cnr. 
McLeod & Aplin Streets, Cairns, 4870, QLD, ], [, Old, 10, TAR, 5074, ST, Campbelltown, Campbelltown Mall, 303 Queen Street, Campbelltown, 2560, NSW, A], [, New, 12, , 5141, , The Willows, Thuringowa Central, Cnr Thuringowa Drive & Range Rd, Thuringowa Central, 4817, QLD, ], [, Old, 11, TAR, 5078, ST, Warringah Mall, Frenchs Forest, Units 2-3, 14 Aquatic Drive, Frenchs Forest, 2086, NSW, A], [, New, 13, , 5146, , Palmerston, Palmerston Shopping Centre, Temple Terrace, Palmerston, 0830, NT, ], [, Old, 12, TAR, 5081, ST, Ashfield, Ashfield Mall, Knox Street, Ashfield, 2131, NSW, A], [, New, 14, , 5002, , Coffs Harbour, Coffs Harbour, Cnr. Park Beach Road & Pacific Hwy, Coffs Harbour, 2450, , ], [, Old, 13, TAR, 5085, ST, Roselands, Condell park, Unit 2, 39-41 Allingham Street, Condell Park, 2200, NSW, A], [, New, 15, , 5966, DC, Huntingwood DC, Huntingwood DC, 35 Huntingwood Drive, Huntingwood, 2148, , ], [, , , , , , , , , , , , ], [, Inserted, 16, TAR, 5967, DC, Hendra DC, Hendra DC, Cnr Headly Ave & Nudgee Road, Hendra, 4011, QLD, A], [, , , , , , , , , , , , ], [, Inserted, 17, TAR, 5968, DC, Beverly DC, Beverly DC, 117 Main Street, Beverly, 5009, SA, A]]')
select('Table', 'cell:Loc Name,14(Bass Hill)')
click('All Included Lines')
select('Table', 'cell:Loc Addr Ln1,8(Marayong)')
assert_p('Table', 'Content', '[[, , , , , , , , , , , , ], [, Inserted, 1, TAR, 5839, DC, DC - Taras Ave, , 30-68 Taras Ave, Altona North, 3025, VIC, A], [, , , , , , , , , , , , ], [, Inserted, 2, TAR, 5850, DC, VIC West Ad Support, , Lot 2 Little Boundary Rd, Laverton, 3028, VIC, A], [, Old, 1, TAR, 5015, ST, Bankstown, Bankstown, Unit 2, 39-41 Allingham Street, Condell Park, 2200, NSW, A], [, New, 3, , , , , , , , , , ], [, Old, 2, TAR, 5019, ST, Penrith, Penrith, 58 Leland Street, Penrith, 2750, NSW, A], [, New, 4, , , , , , , , , , ], [, Old, 3, TAR, 5033, ST, Blacktown, Marayong, Dock 2, 11 Melissa Place, Marayong, 2148, NSW, A], [, New, 5, , , , , , , , , , ], [, Old, 4, TAR, 5035, ST, Rockdale, Building B, Portside DC, 2-8 Mc Pherson Street, Botany, 2019, NSW, A], [, New, 6, , 5096, , Canberra Civic, Target Canberra, Canberra City Centre, Akuna Ave, Canberra, 2601, ACT, ], [, Old, 5, TAR, 5037, ST, Miranda, Westfield Shoppingtown, Cnr. Urunga Pde & The Kingsway, Miranda, 2228, NSW, A], [, New, 7, , 5012, , Ringwood, Ringwood, Seymour Street, Ringwood, 3134, VIC, ], [, Old, 6, TAR, 5052, ST, Eastwood, Marayong Offsite Reserve, 11 Melissa Place, Marayong, 2148, NSW, A], [, New, 8, , 5030, , Epping, Epping Plaza Shopping Centre, Cnr. High & Cooper Streets, Epping, 3076, VIC, ], [, Old, 7, TAR, 5055, ST, Leichhardt, Marketown, Marion Street, Leichhardt, 2040, NSW, A], [, New, 9, , 5054, , Highpoint City, Laverton, Lot 2, Cnr Lt Boundry & Old Geelong Road, Laverton, 3028, VIC, ], [, Old, 8, TAR, 5060, ST, St Marys, St. Mary\'s, Charles Hackett Drive, St Mary\'s, 2760, NSW, A], [, New, 10, , 5062, , Castletown, Townsville, Cnr. Woolcock St. & Kings Road, Townsville, 4810, QLD, ], [, Old, 9, TAR, 5070, ST, Bass Hill, Bass Hill Plaza, 753 Hume Highway, Bass Hill, 2197, NSW, A], [, New, 11, , 5138, , Cairns Central, Cairns, Cnr. 
McLeod & Aplin Streets, Cairns, 4870, QLD, ], [, Old, 10, TAR, 5074, ST, Campbelltown, Campbelltown Mall, 303 Queen Street, Campbelltown, 2560, NSW, A], [, New, 12, , 5141, , The Willows, Thuringowa Central, Cnr Thuringowa Drive & Range Rd, Thuringowa Central, 4817, QLD, ], [, Old, 11, TAR, 5078, ST, Warringah Mall, Frenchs Forest, Units 2-3, 14 Aquatic Drive, Frenchs Forest, 2086, NSW, A], [, New, 13, , 5146, , Palmerston, Palmerston Shopping Centre, Temple Terrace, Palmerston, 0830, NT, ], [, Old, 12, TAR, 5081, ST, Ashfield, Ashfield Mall, Knox Street, Ashfield, 2131, NSW, A], [, New, 14, , 5002, , Coffs Harbour, Coffs Harbour, Cnr. Park Beach Road & Pacific Hwy, Coffs Harbour, 2450, , ], [, Old, 13, TAR, 5085, ST, Roselands, Condell park, Unit 2, 39-41 Allingham Street, Condell Park, 2200, NSW, A], [, New, 15, , 5966, DC, Huntingwood DC, Huntingwood DC, 35 Huntingwood Drive, Huntingwood, 2148, , ], [, , , , , , , , , , , , ], [, Inserted, 16, TAR, 5967, DC, Hendra DC, Hendra DC, Cnr Headly Ave & Nudgee Road, Hendra, 4011, QLD, A], [, , , , , , , , , , , , ], [, Inserted, 17, TAR, 5968, DC, Beverly DC, Beverly DC, 117 Main Street, Beverly, 5009, SA, A]]')
select('Table', 'cell:Loc Addr Ln1,8(Marayong)')
close()
| [
"bruce_a_martin@b856f413-25aa-4700-8b60-b3441822b2ec"
] | bruce_a_martin@b856f413-25aa-4700-8b60-b3441822b2ec |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.