blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c09d5c966445fbb9e33b591d8fbe85a5f91dbd13 | afacdea8ec0d47cf011171b118fbac16edad235f | /collective/youtube_rst/tests.py | b063f7d89baa5854f14c79a313265a00b34e923a | [] | no_license | collective/collective.youtube_rst | 9736b3cd79c914a756ffa6d1f6cca0357526d322 | 73d7aaff5d8ba3f283c39eee51cdbbe2b0f65820 | refs/heads/master | 2023-03-22T10:57:19.948190 | 2011-07-05T13:35:23 | 2011-07-05T13:35:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | import unittest
#from zope.testing import doctestunit
#from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
ptc.setupPloneSite()
import collective.youtube_rst
class TestCase(ptc.PloneTestCase):
class layer(PloneSite):
@classmethod
def setUp(cls):
fiveconfigure.debug_mode = True
ztc.installPackage(collective.youtube_rst)
fiveconfigure.debug_mode = False
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='collective.youtube_rst',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='collective.youtube_rst.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
#ztc.ZopeDocFileSuite(
# 'README.txt', package='collective.youtube_rst',
# test_class=TestCase),
#ztc.FunctionalDocFileSuite(
# 'browser.txt', package='collective.youtube_rst',
# test_class=TestCase),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| [
"guido.stevens@cosent.nl"
] | guido.stevens@cosent.nl |
417a9c86d6cf0e60446d13fbaa43104cd89c1a44 | b0f4b12ec6b14659b252f19776eb297366c9f330 | /代码/day3-5/A.FileDemo.py | 1bfc45d54864ee1dccb3618fe339ea82646998b0 | [] | no_license | vothin/code | a77259db4a3c4630bed293f979a49b676a1bd7c4 | d2b7819fd3687e0a011988fefab3e6fd70bb014a | refs/heads/master | 2020-08-31T15:48:28.155535 | 2020-01-09T08:21:57 | 2020-01-09T08:21:57 | 218,725,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,389 | py | '''
open
r 以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。
rb 以二进制格式打开一个文件用于只读。文件指针将会放在文件的开头。这是默认模式。
r+ 打开一个文件用于读写。文件指针将会放在文件的开头。
rb+ 以二进制格式打开一个文件用于读写。文件指针将会放在文件的开头。
w 打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
wb 以二进制格式打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
w+ 打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
wb+ 以二进制格式打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。
a 打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。
ab 以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。
a+ 打开一个文件用于读写。如果该文件已存在,文件指针将会放在文件的结尾。文件打开时会是追加模式。如果该文件不存在,创建新文件用于读写。
ab+ 以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。如果该文件不存在,创建新文件用于读写。
'''
'''
函数语法
open(name[, mode[, buffering]]) 文件句柄 = open('文件路径', '模式',编码方式)。
name : 一个包含了你要访问的文件名称的字符串值。
mode : mode 决定了打开文件的模式:只读,写入,追加等。所有可取值见如下的完全列表。这个参数是非强制的,默认文件访问模式为只读(r)。
buffering : 如果 buffering 的值被设为 0,就不会有寄存。如果 buffering 的值取 1,访问文件时会寄存行。
如果将 buffering 的值设为大于 1 的整数,表明了这就是的寄存区的缓冲大小。如果取负值,寄存区的缓冲大小则为系统默认。
示例: f = open('test.txt',"r")
file 对象方法
file.read([size]) size未指定则返回整个文件,如果文件大小>2倍内存则有问题.f.read()读到文件尾时返回""(空字串)
file.readline() 返回一行
file.readlines([size]) 返回包含size行的列表,size 未指定则返回全部行
for line in f: print line #通过迭代器访问
f.write("hello\n") #如果要写入字符串以外的数据,先将他转换为字符串.
f.tell() 返回一个整数,表示当前文件指针的位置(就是到文件头的比特数).
f.seek(偏移量,[起始位置]) 用来移动文件指针.
f.close() 打开文件之后一定要关闭,否则文件内容会丢失:
'''
| [
"zy757161350@qq.com"
] | zy757161350@qq.com |
59f6a967c5e7cee149d584504368c8e5f98f0ec7 | 80fdbb5a1fd8815b7f343451c61456b38d635bfe | /regression.py | a8570660a74ccb2c390cb1fcaf73759a5b9cb6dd | [] | no_license | donnate/Financial-Networks | 8416df19f457bc73db161e52e473c3356c169125 | 9eedb24590f04d6a9c2f2d620011d64a8e5371bb | refs/heads/master | 2021-08-23T06:31:01.908940 | 2017-12-03T22:33:34 | 2017-12-03T22:33:34 | 112,968,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,682 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 30 00:35:49 2017
@author: cdonnat
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
def split_OIS(data,k=5,size_blocks=1):
if size_blocks==1:
#test=data.index.tolist()
test=range(data.shape[0])
np.random.shuffle(test)
K=int(np.floor(data.shape[0]/k))
test_ind={}
for t in range(k):
if t<k-1:
test_ind[t]=test[K*t:K*(t+1)]
else:
test_ind[k]=test[K*t:]
else:
test=np.arange(0,data.shape[0],size_blocks)
end=test[-1]
np.random.shuffle(test)
K=int(np.floor(len(test)/k))
test_ind={}
for t in range(k):
l=[]
if t==k-1:
end=len(test)
else:
end=K*(t+1)
for tt in test[K*t:end]:
add=tt+np.arange(size_blocks)
if add[-1]>data.shape[0]-1:
add=np.arange(tt,data.shape[0])
l+=list(add)
test_ind[t]=l
#data.index[l]
return test_ind
def regress_stock_against_market(stock_return,r_m,K=5):
test_split=split_OIS(stock_return,k=K,size_blocks=5)
MSE=pd.DataFrame(np.zeros((K,stock_return.shape[1])),index=range(K),columns=stock_return.columns)
R2=pd.DataFrame(np.zeros((K,stock_return.shape[1])),index=range(K),columns=stock_return.columns)
coeff=pd.DataFrame(np.zeros((K,stock_return.shape[1])),index=range(K),columns=stock_return.columns)
intercept=pd.DataFrame(np.zeros((K,stock_return.shape[1])),index=range(K),columns=stock_return.columns)
for k in range(K):
model=LinearRegression()
test_set=np.array([False]*stock_return.shape[0])
test_set[test_split[k]]=True
X=r_m.as_matrix()[~test_set]
X_test=r_m.as_matrix()[test_set]
for u in stock_return.columns:
Y=np.array(stock_return[u].tolist())[~test_set]
Y_test=np.array(stock_return[u].tolist())[test_set]
Y[np.isnan(Y)]=0
Y_test[np.isnan(Y_test)]=0
model.fit(X.reshape([-1,1]),Y.reshape([-1,1]))
coeff.loc[k,u]=model.coef_[0][0]
intercept.loc[k,u]=model.intercept_[0]
R2.loc[k,u]=model.score(X.reshape([-1,1]),Y.reshape([-1,1]))
pred=model.predict(X_test.reshape([-1,1]))
MSE.loc[k,u]=mean_squared_error(Y_test, pred)
return coeff,intercept,MSE,R2 | [
"claire.donnat@gmail.com"
] | claire.donnat@gmail.com |
d10b7b01d6011c2dbc30798e45dd994837446420 | 2493ee7de77a2686d6445f965a164f0a633fcad7 | /dealingProbs/python/workspace/shows.py | 6e13b2982eca2c7d18e3f0e06e6a7073a651838e | [] | no_license | Ashishpurbey/warHammer | 9eaec4e0ac5cb30bf251a6610390192c4a8f62bb | 2cf4069fa0f7c8afbfa3f3bf91e51783526080b5 | refs/heads/main | 2023-03-24T13:51:37.003361 | 2021-03-13T19:03:29 | 2021-03-13T19:03:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | a = input().lower()
s = a.count('danil')+a.count('olya')+a.count('salva')+a.count('ann')+a.count('nikita')
print("YNEOS"[(s!=1)::2]) | [
"games.princeraj@gmail.com"
] | games.princeraj@gmail.com |
43ec1ff0a8346bfbc781977b51f2246423709f81 | f7f9aa0081e83f8caa1d62fb1b5ab6877318c73c | /backend/stardy/migrations/0004_auto_20171208_0250.py | b2a3e48ef17ef6c98a99c9e21642f86b3e02db48 | [] | no_license | melodyjs/stardy | 5718bb1e9d5af4851319aef987aa14cbe7ee5632 | e6698f9e848beae722bbc8976f6e4af360164d09 | refs/heads/master | 2021-08-31T17:07:25.451366 | 2017-12-22T05:18:54 | 2017-12-22T05:18:54 | 106,075,347 | 0 | 0 | null | 2017-12-22T05:02:39 | 2017-10-07T06:04:40 | Uno | UTF-8 | Python | false | false | 573 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-08 02:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stardy', '0003_groupplc_material'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='profile_address',
),
migrations.AddField(
model_name='user',
name='profile_image',
field=models.ImageField(null=True, upload_to=''),
),
]
| [
"rmh0313@gmail.com"
] | rmh0313@gmail.com |
73d211c48ec15c7864cf6044fdda6124495b1f9d | e823451826156ea96d83ae8d2b4518987761e371 | /ukb/weak_supervision/numbskull/numbskull/inference.py | 7afc3bb28d6245f24c7f65429012d5cdc2f4c60f | [
"Apache-2.0"
] | permissive | ProWorkNR/snow-cardiac | f12e972b593d66ef55ba8c8ea4cb77de16297373 | 3177dde898a65b1d7f385b78e4f134de3852bea5 | refs/heads/master | 2023-03-15T20:29:35.603573 | 2019-09-13T20:34:05 | 2019-09-13T20:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,625 | py | """TODO."""
from __future__ import print_function, absolute_import
import numba
from numba import jit
import numpy as np
import math
from numbskull.udf import *
@jit(nopython=True, cache=True, nogil=True)
def gibbsthread(shardID, nshards, var_copy, weight_copy, weight, variable,
factor, fmap, vmap, factor_index, Z, cstart,
count, var_value, weight_value, sample_evidence, burnin):
"""TODO."""
# Indentify start and end variable
nvar = variable.shape[0]
start = (shardID * nvar) // nshards
end = ((shardID + 1) * nvar) // nshards
# TODO: give option do not store result, or just store tally
for var_samp in range(start, end):
if variable[var_samp]["isEvidence"] == 0 or sample_evidence:
v = draw_sample(var_samp, var_copy, weight_copy, weight, variable,
factor, fmap, vmap, factor_index, Z[shardID],
var_value, weight_value)
var_value[var_copy][var_samp] = v
if not burnin:
if variable[var_samp]["dataType"] == 0:
count[cstart[var_samp]] += v
else:
count[cstart[var_samp] + v] += 1
@jit(nopython=True, cache=True, nogil=True)
def draw_sample(var_samp, var_copy, weight_copy, weight, variable, factor,
fmap, vmap, factor_index, Z, var_value, weight_value):
"""TODO."""
cardinality = variable[var_samp]["cardinality"]
for value in range(cardinality):
Z[value] = np.exp(potential(var_samp, value, var_copy, weight_copy,
weight, variable, factor, fmap,
vmap, factor_index, var_value,
weight_value))
for j in range(1, cardinality):
Z[j] += Z[j - 1]
z = np.random.rand() * Z[cardinality - 1]
return np.argmax(Z[0:cardinality] >= z)
@jit(nopython=True, cache=True, nogil=True)
def potential(var_samp, value, var_copy, weight_copy, weight, variable, factor,
fmap, vmap, factor_index, var_value, weight_value):
"""TODO."""
p = 0.0
varval_off = value
if variable[var_samp]["dataType"] == 0:
varval_off = 0
vtf = vmap[variable[var_samp]["vtf_offset"] + varval_off]
start = vtf["factor_index_offset"]
end = start + vtf["factor_index_length"]
for k in range(start, end):
factor_id = factor_index[k]
p += weight_value[weight_copy][factor[factor_id]["weightId"]] * \
eval_factor(factor_id, var_samp, value, var_copy, variable,
factor, fmap, var_value)
return p
FACTORS = {
# Factor functions for boolean variables
"IMPLY_NATURAL": 0,
"OR": 1,
"EQUAL": 3,
"AND": 2,
"ISTRUE": 4,
"LINEAR": 7,
"RATIO": 8,
"LOGICAL": 9,
"IMPLY_MLN": 13,
# Factor functions for categorical variables
"AND_CAT": 12,
"OR_CAT": 14,
"EQUAL_CAT_CONST": 15,
"IMPLY_NATURAL_CAT": 16,
"IMPLY_MLN_CAT": 17,
# Factor functions for generative models for data programming.
#
# These functions accept two types of categorical variables:
#
# y \in {1, -1} corresponding to latent labels, and
# l \in {1, 0, -1} corresponding to labeling function outputs.
#
# The values of y are mapped to Numbskull variables y_index
# via {-1: 0, 1: 1}, and
# the values of l are mapped to Numbskull variables l_index
# via {-1: 0, 0: 1, 1: 2}.
# h(y) := y
"DP_GEN_CLASS_PRIOR": 18,
# h(l) := l
"DP_GEN_LF_PRIOR": 19,
# h(l) := l * l
"DP_GEN_LF_PROPENSITY": 20,
# h(y, l) := y * l
"DP_GEN_LF_ACCURACY": 21,
# h(l) := y * l * l
"DP_GEN_LF_CLASS_PROPENSITY": 22,
# l_2 fixes errors made by l_1
#
# h(y, l_1, l_2) := if l_1 == 0 and l_2 != 0: -1,
# elif l_1 == -1 * y and l_2 == y: 1,
# else: 0
"DP_GEN_DEP_FIXING": 23,
# l_2 reinforces the output of l_1
#
# h(y, l_1, l_2) := if l_1 == 0 and l_2 != 0: -1,
# elif l_1 == y and l_2 == y: 1,
# else: 0
"DP_GEN_DEP_REINFORCING": 24,
# h(l_1, l_2) := if l_1 != 0 and l_2 != 0: -1, else: 0
"DP_GEN_DEP_EXCLUSIVE": 25,
#h(l_1, l_2) := if l_1 == l_2: 1, else: 0
"DP_GEN_DEP_SIMILAR": 26,
"CORAL_GEN_DEP_SIMILAR": 27,
}
for (key, value) in FACTORS.items():
exec("FUNC_" + key + " = " + str(value))
@jit(nopython=True, cache=True, nogil=True)
def eval_factor(factor_id, var_samp, value, var_copy, variable, factor, fmap,
var_value):
"""TODO."""
####################
# BINARY VARIABLES #
####################
fac = factor[factor_id]
ftv_start = fac["ftv_offset"]
ftv_end = ftv_start + fac["arity"]
if fac["factorFunction"] == FUNC_IMPLY_NATURAL:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) else \
var_value[var_copy][fmap[l]["vid"]]
if v == 0:
# Early return if body is not satisfied
return 0
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) else \
var_value[var_copy][fmap[l]["vid"]]
if head:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_OR:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) else \
var_value[var_copy][fmap[l]["vid"]]
if v == 1:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_EQUAL:
v = value if (fmap[ftv_start]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_start]["vid"]]
for l in range(ftv_start + 1, ftv_end):
w = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != w:
return -1
return 1
elif factor[factor_id]["factorFunction"] == FUNC_AND \
or factor[factor_id]["factorFunction"] == FUNC_ISTRUE:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == 0:
return -1
return 1
elif factor[factor_id]["factorFunction"] == FUNC_LINEAR:
res = 0
head = value if (fmap[ftv_end - 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_end - 1]["vid"]]
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == head:
res += 1
# This does not match Dimmwitted, but matches the eq in the paper
return res
elif factor[factor_id]["factorFunction"] == FUNC_RATIO:
res = 1
head = value if (fmap[ftv_end - 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_end - 1]["vid"]]
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == head:
res += 1
# This does not match Dimmwitted, but matches the eq in the paper
return math.log(res) # TODO: use log2?
elif factor[factor_id]["factorFunction"] == FUNC_LOGICAL:
head = value if (fmap[ftv_end - 1]["vid"] == var_samp) \
else var_value[var_copy][fmap[ftv_end - 1]["vid"]]
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == head:
return 1
return 0
elif factor[factor_id]["factorFunction"] == FUNC_IMPLY_MLN:
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == 0:
# Early return if body is not satisfied
return 1
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][l]
if head:
return 1
return 0
#########################
# CATEGORICAL VARIABLES #
#########################
elif factor[factor_id]["factorFunction"] == FUNC_AND_CAT \
or factor[factor_id]["factorFunction"] == FUNC_EQUAL_CAT_CONST:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != fmap[l]["dense_equal_to"]:
return 0
return 1
elif factor[factor_id]["factorFunction"] == FUNC_OR_CAT:
for l in range(ftv_start, ftv_end):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v == fmap[l]["dense_equal_to"]:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_IMPLY_NATURAL_CAT:
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != fmap[l]["dense_equal_to"]:
# Early return if body is not satisfied
return 0
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][l]
if head == fmap[l]["dense_equal_to"]:
return 1
return -1
elif factor[factor_id]["factorFunction"] == FUNC_IMPLY_MLN_CAT:
for l in range(ftv_start, ftv_end - 1):
v = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][fmap[l]["vid"]]
if v != fmap[l]["dense_equal_to"]:
# Early return if body is not satisfied
return 1
# If this point is reached, body must be true
l = ftv_end - 1
head = value if (fmap[l]["vid"] == var_samp) \
else var_value[var_copy][l]
if head == fmap[l]["dense_equal_to"]:
return 1
return 0
#####################
# DATA PROGRAMMING #
# GENERATIVE MODELS #
#####################
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_CLASS_PRIOR:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
return 1 if y_index == 1 else -1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_LF_PRIOR:
l_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
if l_index == 0:
return -1
elif l_index == 1:
return 0
else:
return 1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_LF_PROPENSITY:
l_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
return 0 if l_index == 1 else 1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_LF_ACCURACY:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if l_index == 1:
return 0
# First part of below condition is simpler because
# the index for value -1 is 0 for both variables
elif y_index == l_index or (y_index == 1 and l_index == 2):
return 1
else:
return -1
elif factor[factor_id]["factorFunction"] == \
FUNC_DP_GEN_LF_CLASS_PROPENSITY:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
if l_index == 1:
return 0
elif y_index == 1:
return 1
else:
return -1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_FIXING:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l1_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
l2_index = value if fmap[ftv_start + 2]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 2]["vid"]]
if l1_index == 1:
return -1 if l2_index != 1 else 0
elif l1_index == 0 and l2_index == 2 and y_index == 1:
return 1
elif l1_index == 2 and l2_index == 0 and y_index == 0:
return 1
else:
return 0
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_REINFORCING:
y_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l1_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
l2_index = value if fmap[ftv_start + 2]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 2]["vid"]]
if l1_index == 1:
return -1 if l2_index != 1 else 0
elif l1_index == 0 and l2_index == 0 and y_index == 0:
return 1
elif l1_index == 2 and l2_index == 2 and y_index == 1:
return 1
else:
return 0
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_EXCLUSIVE:
l1_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l2_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
return 0 if l1_index == 1 or l2_index == 1 else -1
elif factor[factor_id]["factorFunction"] == FUNC_DP_GEN_DEP_SIMILAR:
l1_index = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
l2_index = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
return 1 if l1_index == l2_index else 0
elif factor[factor_id]["factorFunction"] == FUNC_CORAL_GEN_DEP_SIMILAR:
v1 = value if fmap[ftv_start]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start]["vid"]]
v2 = value if fmap[ftv_start + 1]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + 1]["vid"]]
card1 = variable[fmap[ftv_start]["vid"]]["cardinality"]
card2 = variable[fmap[ftv_start + 1]["vid"]]["cardinality"]
assert(card1 == 2 or card1 == 3)
assert(card2 == 2 or card2 == 3)
if (card1 == card2):
return 1 if v1 == v2 else 0
if card2 == 2:
v1, v2 = v2, v1
return 1 if ((v1 == 0) and (v2 == 0)) or ((v1 == 1) and (v2 == 2)) else 0
else:
for i in range(UdfStart.shape[0] - 1):
if (factor[factor_id]["factorFunction"] >= UdfStart[i]) and (factor[factor_id]["factorFunction"] < UdfStart[i + 1]):
# This is a valid UDF
fid = factor[factor_id]["factorFunction"] - UdfStart[i]
if fid < LfCount[i]:
# LF Accuracy
u = udf(UdfMap[UdfCardinalityStart[i] + fid], var_samp, value, var_copy, var_value, fmap, ftv_start)
y = value if fmap[ftv_start + UdfCardinality[UdfCardinalityStart[i] + fid]]["vid"] == var_samp else \
var_value[var_copy][fmap[ftv_start + UdfCardinality[UdfCardinalityStart[i] + fid]]["vid"]]
y = 2 * y - 1
return u * y
else:
# Correlation
pass
# FUNC_UNDEFINED
print("Error: Factor Function", factor[factor_id]["factorFunction"],
"( used in factor", factor_id, ") is not implemented.")
raise NotImplementedError("Factor function is not implemented.")
| [
"varma.paroma@gmail.com"
] | varma.paroma@gmail.com |
3b38693fcb860bf230f3477c11266a0c39046c6e | ed30d695e6e598888148170f6a92a31ff49dbcef | /Lesson3-Particle_Filters/quizzes/moving_robot.py | ead8a9914d45ef5211393076dd906dfe053bb52d | [] | no_license | archie1983/CS373_AI_for_robotics_Udacity | 828152407a63f75c3b111d6f5fc006f040881806 | 83b4f5adfe3250b427f18ec3f948aa82f4127143 | refs/heads/master | 2020-06-26T17:32:17.412072 | 2020-02-10T17:46:15 | 2020-02-10T17:46:15 | 199,700,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,649 | py | #!/usr/bin/python
#
# Make a robot called myrobot that starts at
# coordinates 30, 50 heading north (pi/2).
# Have your robot turn clockwise by pi/2, move
# 15 m, and sense. Then have it turn clockwise
# by pi/2 again, move 10 m, and sense again.
#
# Your program should print out the result of
# your two sense measurements.
#
# Don't modify the code below. Please enter
# your code at the bottom.
from math import *
import random
landmarks = [[20.0, 20.0], [80.0, 80.0], [20.0, 80.0], [80.0, 20.0]]
world_size = 100.0
class robot:
def __init__(self):
self.x = random.random() * world_size
self.y = random.random() * world_size
self.orientation = random.random() * 2.0 * pi
self.forward_noise = 0.0;
self.turn_noise = 0.0;
self.sense_noise = 0.0;
def set(self, new_x, new_y, new_orientation):
if new_x < 0 or new_x >= world_size:
raise ValueError, 'X coordinate out of bound'
if new_y < 0 or new_y >= world_size:
raise ValueError, 'Y coordinate out of bound'
if new_orientation < 0 or new_orientation >= 2 * pi:
raise ValueError, 'Orientation must be in [0..2pi]'
self.x = float(new_x)
self.y = float(new_y)
self.orientation = float(new_orientation)
def set_noise(self, new_f_noise, new_t_noise, new_s_noise):
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.forward_noise = float(new_f_noise);
self.turn_noise = float(new_t_noise);
self.sense_noise = float(new_s_noise);
def sense(self):
Z = []
for i in range(len(landmarks)):
dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
dist += random.gauss(0.0, self.sense_noise)
Z.append(dist)
return Z
def move(self, turn, forward):
if forward < 0:
raise ValueError, 'Robot cant move backwards'
# turn, and add randomness to the turning command
orientation = self.orientation + float(turn) + random.gauss(0.0, self.turn_noise)
orientation %= 2 * pi
# move, and add randomness to the motion command
dist = float(forward) + random.gauss(0.0, self.forward_noise)
x = self.x + (cos(orientation) * dist)
y = self.y + (sin(orientation) * dist)
x %= world_size # cyclic truncate
y %= world_size
# set particle
res = robot()
res.set(x, y, orientation)
res.set_noise(self.forward_noise, self.turn_noise, self.sense_noise)
return res
def Gaussian(self, mu, sigma, x):
# calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
return exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / sqrt(2.0 * pi * (sigma ** 2))
def measurement_prob(self, measurement):
# calculates how likely a measurement should be
prob = 1.0;
for i in range(len(landmarks)):
dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
prob *= self.Gaussian(dist, self.sense_noise, measurement[i])
return prob
def __repr__(self):
return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))
def eval(r, p):
sum = 0.0;
for i in range(len(p)): # calculate mean error
dx = (p[i].x - r.x + (world_size/2.0)) % world_size - (world_size/2.0)
dy = (p[i].y - r.y + (world_size/2.0)) % world_size - (world_size/2.0)
err = sqrt(dx * dx + dy * dy)
sum += err
return sum / float(len(p))
#### DON'T MODIFY ANYTHING ABOVE HERE! ENTER CODE BELOW ####
def play_around_with_robot():
myrobot = robot()
# Make a robot called myrobot that starts at
# coordinates 30, 50 heading north (pi/2).
# Have your robot turn clockwise by pi/2, move
# 15 m, and sense. Then have it turn clockwise
# by pi/2 again, move 10 m, and sense again.
myrobot.set(30, 50, pi / 2)
print myrobot.sense()
myrobot = myrobot.move(-pi / 2, 15)
print myrobot.sense()
myrobot = myrobot.move(-pi / 2, 10)
print myrobot.sense()
# Now add noise to your robot as follows:
# forward_noise = 5.0, turn_noise = 0.1,
# sense_noise = 5.0.
# Once again, your robot starts at 30, 50,
# heading north (pi/2), then turns clockwise
# by pi/2, moves 15 meters, senses,
# then turns clockwise by pi/2 again, moves
# 10 m, then senses again.
myrobot = robot()
myrobot.set_noise(5.0, 0.1, 5.0)
myrobot.set(30, 50, pi / 2)
print myrobot.sense()
myrobot = myrobot.move(-pi / 2, 15)
print myrobot.sense()
myrobot = myrobot.move(-pi / 2, 10)
print myrobot.sense()
def move_and_sense(robot, movement):
# Now our main robot moves and senses it's position relative to the landmarks.
robot = robot.move(movement[0], movement[1])
Z = robot.sense()
return (Z, robot)
def move_particles(particles, movement):
# Now we want to simulate robot
# motion with our particles.
# Each particle should turn by 0.1
# and then move by 5 - same as myrobot.
p2 = []
for i in range(len(particles)):
r = particles[i]
p2.append(r.move(movement[0], movement[1]))
return p2
def get_weights_of_particles(particles, base_measurement):
# Now we want to give weight to our
# particles. This code will assign weights
# to 1000 particles in the list.
w = []
for i in range(len(particles)):
measurement_probability = particles[i].measurement_prob(base_measurement)
w.append(measurement_probability)
return w
def resample_particles(particles, weights):
# In this exercise, try to write a program that
# will resample particles according to their weights.
# Particles with higher weights should be sampled
# more frequently (in proportion to their weight).
p3 = []
w_total = sum(weights) # total W
norm_w = [wn / w_total for wn in weights] # normalized weights
#from numpy.random import choice
#p3 = choice(p, len(p), p=norm_w, replace=True)
# Now let's implement a choice based on weights, but not with numpy. We don't even need to normalize for that.
max_w = max(weights)
index = random.randrange(0, len(particles), 1) # or index = int(random.random() * N)
beta = 0.0
p3 = []
for i in range(len(particles)):
beta = beta + random.uniform(0, 2 * max_w) # or beta += random.random() * 2.0 * mw
while weights[index] < beta:
beta = beta - weights[index]
index = (index + 1) % N
p3.append(particles[index])
return p3
# Evaluate the quality of the given particle set.
# It calculates the average Euclidian distances between the particles
# and the actual robot and then returns that as a measure for quality.
# The lower the number, the better the quality.
#
# It also takes into account that the world is cyclic (what falls off
# the left side, appear on the right and similar with other borders.)
def eval(r, p):
sum = 0.0;
for i in range(len(p)): # calculate mean error
dx = (p[i].x - r.x + (world_size/2.0)) % world_size - (world_size/2.0)
dy = (p[i].y - r.y + (world_size/2.0)) % world_size - (world_size/2.0)
err = sqrt(dx * dx + dy * dy)
sum += err
return sum / float(len(p))
#play_around_with_robot()
# Having played with the robot, we'll now create one that we'll work with. We will also
# create 1000 points (other - virtual - robots) at random coordinates. We'll move those
# points same as our main robot and then see how their distances to the landmarks match
# our main robot's distances.
# We will take a measurement from the landmarks and compare that measurement with
# 1000 other random points that have moved by the same amount.
myrobot = robot()
# This is how we'll move our robot (and the points - or particles)
default_movement = (0.1, 5)
# Generating 1000 random points (particles) - initial possible robot locations
N = 1000
p = []
Z = []
for i in range(N):
r = robot()
r.set_noise(0.05, 0.05, 5.0) # we need some measurement, move and turn noise, otherwise weight calculation with measurement_prob(...) will give division by 0
p.append(r)
# Now we'll move, sense, weight and re-sample particles a few times
T = 10
print "quality of model before work: ", eval(myrobot, p)
for i in range(T):
# Now our main robot moves and senses it's position relative to the landmarks.
(Z, myrobot) = move_and_sense(myrobot, default_movement)
# Now we want to simulate robot
# motion with our particles.
# Each particle should turn by 0.1
# and then move by 5 - same as myrobot.
p = move_particles(p, default_movement)
#print p
# Now we want to give weight to our
# particles. This code will assign weights
# to 1000 particles in the list.
w = get_weights_of_particles(p, Z)
#print w # we see that most of the particles have a very low (to the power of -<large number>) probability. We'll need to drop those and keep ones with higher probability.
# In this exercise, try to write a program that
# will resample particles according to their weights.
# Particles with higher weights should be sampled
# more frequently (in proportion to their weight).
p = resample_particles(p, w)
print "quality of work so far: ", eval(myrobot, p)
#print p
| [
"arturs.elksnis@ggtg.net"
] | arturs.elksnis@ggtg.net |
79abefc6d94f617db0cb32e061e348c200d0fa78 | c501d5ec838fc8ee745f8eb0a2478ceeaebb6d7d | /budget_app/urls.py | 933e9cb0a25549dc53f48aafd7b57e0992773de7 | [
"MIT"
] | permissive | MikeTheCanuck/TB-playground | c07ff5e29a0f24bb75fc92a4e310e7a043bcf11b | f063a4d198bae2f1164449d491a0d38c3d8e61be | refs/heads/master | 2021-01-22T22:16:22.836844 | 2017-03-20T02:05:05 | 2017-03-20T02:05:05 | 85,524,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | from django.conf.urls import url
from . import views
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='hackoregon_budget')
# Listed in alphabetical order.
urlpatterns = [
url(r'^$', schema_view),
url(r'^kpm/$', views.ListKpm.as_view()),
url(r'^ocrb/$', views.ListOcrb.as_view()),
url(r'^history/$', views.ListBudgetHistory.as_view()),
url(r'^code/$', views.ListLookupCode.as_view()),
] | [
"mikethecanuck@gmail.com"
] | mikethecanuck@gmail.com |
932820f602f4b01aa0257ca4d4967626ae474638 | 52c7d6896d904eff2953872c547544273b45c694 | /excersise_comprehension_cretor.py | a4d70afd62d0c7cd8b4474aaa8c262f11ef3bbf4 | [] | no_license | dhamejanishivam/Python-Backup | d1530acc1500b58e5b50a08d468a115a6321072b | ed78b51653d5ea3136302392f12b897356475a4c | refs/heads/main | 2023-02-03T02:00:06.509549 | 2020-12-24T19:10:57 | 2020-12-24T19:10:57 | 324,220,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | # Not able to make
# List or Dictionary or set
# List = []
# Dictionary = {}
# Set =
print("How many items you want to insert ")
a = int(input())
print("What comprehension do you want to make \n"
"Press 1 for list \n"
"Press 2 for Dictionary \n"
"Press 3 for Set \n")
e = int(input())
if e==1 :
f = []
for c in range(a):
print("Enter item \n")
d = input()
f.append(d)
print(f)
elif e==2 :
f={}
for c in range(a):
print("Enter item \n")
d = input()
print(f)
| [
"dhamejanishivam@gmail.com"
] | dhamejanishivam@gmail.com |
59e9115e1564e5314ef296a5606b8d037d91aa53 | 734372d7601bae8fafdd592f1c21a919e27032d7 | /gym/gym/envs/classic_control/rayleigh_without_cache.py | 60b125b6deb96cb995c530dba2c84c2299785791 | [] | no_license | zhangxr-wspn/RLforBeamforming | 1e31d62d83ebc7f133aca4b58663bc4a300180eb | 2f9e389f842d9b17b83b31b017f51c07599050b8 | refs/heads/master | 2020-03-25T04:10:54.182491 | 2018-08-20T06:44:56 | 2018-08-20T06:44:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,964 | py | from __future__ import division
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from os import path
class RayleighEnvWithoutCache(gym.Env):
metadata = {
'render.modes' : ['human', 'rgb_array'],
'video.frames_per_second' : 30
}
def __init__(self):
self.Nt = 2
self.K_User = 2
self.M_Group = 2
# self.max_length=10**6
# self.max_distance=3 # maximum distance 3km
self.high_fading = np.array(10*np.ones(2*self.Nt*self.K_User*self.M_Group),dtype='float32') # fading
# self.high_length = np.array([self.max_length, self.max_length]) # length
# self.low_length = np.array([0,0]) #l1_packet,l2_packet
self.high_action = np.array(np.append([np.ones(self.Nt*self.M_Group*2)],[1])) #w1(Nt*2),w2(Nt*2),rho
self.low_action = np.array(np.append([-1*np.ones(self.Nt*self.M_Group*2)],[0]))
self.observation_space = spaces.Box(low=-self.high_fading,high=self.high_fading,dtype='float32')
self.action_space = spaces.Box(low=self.low_action, high=self.high_action, dtype='float32')
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self,u):
# l1_packet, l2_packet = self.state[-2:] # number of packet
u_beamformer = u[0:self.Nt*self.M_Group*2]/((np.sum(u[0:self.Nt*self.M_Group*2]**2))**0.5) # beamformer unified power
u_unify = np.append(u_beamformer,u[-1]) # added power ratio
# self.last_u = u_unify # for rendering
E_transmit = 10 # transmission power
EtoN = E_transmit/self.Nt # transmission power
# sigma = 0.01
sigma = 10**(-5.4) # noise power
###########################Path loss########################
distance = np.array([0.02,0.05,0.02,0.05],dtype=float) # distance for user_m,k (11,12,21,22) , in km
# beta = np.array([10,20,30,40],dtype=float) # path loss for user_m,k (11,12,21,22)
beta = np.array(10**((-128.1-37.6*np.log10(distance))/10))
##########################Beamformer########################
w = np.array(np.zeros([self.Nt,self.M_Group]),dtype=complex) # Nt*M complex beamformer matrix
for m in range(self.M_Group):
for nt in range(self.Nt):
w[nt,m] = np.array([u_beamformer[m*2*self.Nt+nt*2] + u_beamformer[m*2*self.Nt+nt*2+1]*1j])
r1 = u_unify[-1] # power ratio
r2 = 1-r1
r=np.array([r1,r2])
########################Channel matrix#########################
# h=np.random.randn(self.Nt,self.K_User*self.M_Group)\
# +np.multiply(1j,np.random.randn(self.Nt,self.K_User*self.M_Group)) # channel fading for 2 users in 2 groups
h=np.zeros([self.Nt,self.K_User*self.M_Group], dtype='complex')# channel fading for 2 users in 2 groups
index_temp=0
for m in range(self.M_Group):
for k in range(self.K_User):
for nt in range(self.Nt):
h[nt,m*self.M_Group+k]=\
np.array(self.state[index_temp]+1j*self.state[index_temp+1])
index_temp+=2
g=np.array(h)
for nt in range(self.Nt):
g[nt,:]=np.multiply(h[nt,:],beta) # fast fading multiply path loss
#g=np.array([np.multiply(h[0,:],beta), np.multiply(h[1,:],beta)]) # 2*1 channel matrix, g11,g12,g21,g22
sinr=np.array(np.zeros([2,2]),dtype='float32')
sinr[0,0]=r[0]*EtoN*np.linalg.norm(np.matmul(g[:,0:1].conj().T,w[:,0:1]))\
/(r[1]*EtoN*np.linalg.norm(np.matmul(g[:,0:1].conj().T,w[:,1:2]))+sigma**2)
sinr[0,1]=r[0]*EtoN*np.linalg.norm(np.matmul(g[:,1:2].conj().T,w[:,0:1]))\
/(r[1]*EtoN*np.linalg.norm(np.matmul(g[:,1:2].conj().T,w[:,1:2]))+sigma**2)
sinr[1,0]=r[1]*EtoN*np.linalg.norm(np.matmul(g[:,2:3].conj().T,w[:,1:2]))\
/(r[0]*EtoN*np.linalg.norm(np.matmul(g[:,2:3].conj().T,w[:,0:1]))+sigma**2)
sinr[1,1]=r[1]*EtoN*np.linalg.norm(np.matmul(g[:,3:4].conj().T,w[:,1:2]))\
/(r[0]*EtoN*np.linalg.norm(np.matmul(g[:,3:4].conj().T,w[:,0:1]))+sigma**2)
# print("############\n")
# print("state:", self.state,"u:",u,"w:",w,"r:",r,"g:",g,"sinr:",sinr)
# B=10**7 #bandwith
# QAM=4 #QAM Order
# mu=np.array([B*np.log2(1+sinr[0,:2].min()),B*np.log2(1+sinr[1,:2].min())])/np.log2(QAM) # Blog2(1+SINR) [bps] / (bit/packet)
# lmbda = np.array([5*10**6,5*10**6],dtype=float) # Arrival rate
# print(mu)
# costs = (0.5*l1_packet/lmbda[0]+0.5*l2_packet/lmbda[1])*10**6 # delay [us]
# if sinr.min() == 0:costs=100000
# else: costs = sinr.min()**(-1)
costs = 10**(-sinr.min())
# rnd = np.random.rand()
# newl1_packet = (l1_packet+1)*np.bool(rnd<=lmbda[0]/(lmbda[0]+mu[0]))+(l1_packet-1)*np.bool(rnd>lmbda[0]/(lmbda[0]+mu[0]))
# newl2_packet = (l2_packet+1)*np.bool(rnd<=lmbda[1]/(lmbda[1]+mu[1]))+(l2_packet-1)*np.bool(rnd>lmbda[1]/(lmbda[1]+mu[1]))
# if newl1_packet>=self.max_length:newl1_packet=self.max_length
# elif newl1_packet<=0:newl1_packet=0
# if newl2_packet>=self.max_length:newl2_packet=self.max_length
# elif newl2_packet<=0:newl2_packet=0
# self.state = np.array([newl1_packet, newl2_packet])
self.state = self.np_random.randn(2*self.Nt*self.K_User*self.M_Group)
return self.state, -costs, False, {}
###########################pass in the Nt number##################
def reset(self):
self.state = self.np_random.randn(2*self.Nt*self.K_User*self.M_Group)# Initial length
self.last_u = None
return self.state
def close(self):
if self.viewer: self.viewer.close()
| [
"zhangxr.wspn@gmail.com"
] | zhangxr.wspn@gmail.com |
2ab6da6665b221d173fcea2342ba2b33a443534a | 157e53c0ec95a8e01b12fdf3fd085a8939a53384 | /pruebas5.py | d43828f4821c83eaad1f7bd048683c327633c6c8 | [] | no_license | GersonJor/codigos-python | c2ab0916774da76d1c1f707b26c4360ef960b712 | 48920b66850c728372ff43d73d5f66915f656891 | refs/heads/main | 2023-05-02T13:38:10.849861 | 2021-05-24T05:26:09 | 2021-05-24T05:26:09 | 370,234,212 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | def fun(n):
return lambda a:a*n
def run():
dobler=fun(2)
print(dobler(11))
if __name__=='__main__':
run() | [
"74192534+GersonJor@users.noreply.github.com"
] | 74192534+GersonJor@users.noreply.github.com |
ab8e0228a51da210a55b6cbcb2ca25bc6d5d24ae | 5565b5340983fa4a6a6c6a35d427128542421aed | /filexfergui.py | 904e73edafe64ac0e48e31e702eb846a5fbec67c | [] | no_license | raineGriffin/Tech-Academy-Work | 1516d06efe24403c25ac8725d4aedffcdda47c65 | 0b901b1625693b3dfd66e8ac007fff3e13a40e55 | refs/heads/master | 2020-05-29T09:16:52.816601 | 2016-09-28T18:05:10 | 2016-09-28T18:05:10 | 69,042,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | from Tkinter import *
from ttk import *
import filexfer
import tkFileDialog
import sqlite3
class Transfertool:
def reader(self):
for row in self.c.execute(self.lastDate): self.strdate.set("Last check was on " + str(row))
def __init__(self, root):
self.conn = sqlite3.connect('xfertimes.db')
self.c = self.conn.cursor()
root.resizable(False, False)
self.c.execute("CREATE TABLE IF NOT EXISTS cTimes(ID INT, date TEXT, checktime REAL)")
self.lastDate = "SELECT date FROM cTimes GROUP BY date HAVING MAX(checktime)"
self.strdate = StringVar()
self.reader()
self.frame1 = Frame(root, width = 256, height = 128, padding = 10)
self.frame2 = Frame(root, width = 256, height = 128, padding = 10)
self.frame1.grid(row = 1, column = 0, rowspan = 2, columnspan = 2)
self.frame2.grid(row = 4, column = 0, rowspan = 2, columnspan = 2)
label_header = Label(root, text = "File Transfer Tool").grid(row=0,column=0,columnspan=2)
label_from = Label(self.frame1, text = "Choose the folder to be scanned for movable files.")
label_from.grid(row=1,column=0, columnspan=2)
self.filestring1 = StringVar()
self.filloc1 = Entry(self.frame1, width = 50, textvariable = self.filestring1)
self.filloc1.grid(row = 2, column = 0, columnspan = 2)
browse1 = Button(self.frame1, text = "Browse", command = lambda: self.Openfilepath(self.filloc1))
browse1.grid(row = 3, column = 0,pady=5, columnspan=2)
label_to = Label(self.frame2, text = "Choose the folder that the files are to go to.")
label_to.grid(row=4,column=0,pady=5, columnspan=2)
self.filestring2 = StringVar()
self.filloc2 = Entry(self.frame2, width = 50, textvariable = self.filestring2)
self.filloc2.grid(row = 5, column = 0, columnspan = 2)
browse2 = Button(self.frame2, text = "Browse", command = lambda: self.Openfilepath(self.filloc2))
browse2.grid(row =6, column = 0,pady=5, columnspan=2)
self.lastCheck = Label(root, textvariable = self.strdate)
self.lastCheck.grid(row = 9, column = 0, pady=5, columnspan=2)
commit = Button(root, text = "Commit Transfer", command = self.Commit)
commit.grid(row=10, column=0, columnspan=2, pady=15)
def Openfilepath(self,filloc):
self.path = tkFileDialog.askdirectory()
filloc.delete(0, END)
filloc.insert(0, self.path)
def Commit(self):
filexfer.checkMTime(self.filloc1.get(),self.filloc2.get(),self.conn)
self.reader()
def main():
root = Tk()
transfertool = Transfertool(root)
root.mainloop()
if __name__ == "__main__": main()
| [
"noreply@github.com"
] | raineGriffin.noreply@github.com |
716cc2c81ec577e777a6a3cfc47ba680a6cadfc7 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_plectrums.py | 0d3dda010f944bbbef6409f78aeac191753a0607 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.nouns._plectrum import _PLECTRUM
#calss header
class _PLECTRUMS(_PLECTRUM, ):
def __init__(self,):
_PLECTRUM.__init__(self)
self.name = "PLECTRUMS"
self.specie = 'nouns'
self.basic = "plectrum"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6bc50086ac2db0c3d2baf48708d73ad07aea0aed | ac76b08c0fe545a23e78acdb42b3273b24ccb544 | /tweets/migrations/0001_initial.py | 21b7fda7cf4b10e2882cd069f8fbae7df20bc6f3 | [] | no_license | HarshPandita/tweetLikef | 09f40d90dbc7c45a4b0bb620d51b6ecf29752d10 | 7e405a1f576fbd1c7d7b50589cad67d9fa596471 | refs/heads/master | 2023-02-07T15:53:52.086067 | 2020-12-26T19:17:25 | 2020-12-26T19:17:25 | 324,621,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | # Generated by Django 3.0.5 on 2020-12-11 12:26
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Tweet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(blank=True, null=True)),
('image', models.FileField(blank=True, null=True, upload_to='images/')),
],
),
]
| [
"harsh.gp2000@gmail.com"
] | harsh.gp2000@gmail.com |
7815604a4051af01935361e7b7859ccd85e3e71b | ea393959886a5cd13da4539d634f2ca0bbcd06a2 | /283.py | b2b4f2cad4536764cd733094eaf98757b705c7b1 | [] | no_license | zhangchizju2012/LeetCode | f605f35b82f16282559af71e4e61ec2629a90ebc | 0c4c38849309124121b03cc0b4bf39071b5d1c8c | refs/heads/master | 2020-04-05T12:12:14.810639 | 2018-08-09T10:24:52 | 2018-08-09T10:24:52 | 81,021,830 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 14 00:24:22 2017
@author: zhangchi
"""
class Solution(object):
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
indexList = []
for index, item in enumerate(nums):
if item == 0:
indexList.append(index)
indexList.append(len(nums)) # 相当于最后也有个0,配合一下后面的处理
count = 0
for i in xrange(len(indexList)-1):
nums[indexList[i]-count:indexList[i+1]-count-1] = nums[indexList[i]+1:indexList[i+1]]
count += 1 #每次往后挪动一次,相当于每次有个0的位置被空出来了,所以前面要减掉count,且count每次加一
for i in xrange(indexList[-1]-count,len(nums)):
nums[i] = 0
#return nums
s = Solution()
print s.moveZeroes([]) | [
"zhangchizju2012@zju.edu.cn"
] | zhangchizju2012@zju.edu.cn |
c2891505a4a211beadb3847ea5dcf959546934c2 | 2f4f412201626e9d1e64fd3d3f6c93f077bcc94b | /PacketSniffer/mail_sniffer.py | ec2eec1c93d7528c00bf83906f8ee048cb6c9d8b | [] | no_license | OtsukaTomoaki/HackSecurityPython | 8de0d7264652a31d822a1b9a7e2f01923c5a5579 | 2814d5ddd7592ae473b10d932112bde10c346af0 | refs/heads/main | 2023-05-02T07:20:38.787317 | 2021-03-21T06:03:07 | 2021-03-21T06:03:07 | 341,581,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from scapy.all import *
#パケット処理用コールバック関数
def packet_callback(packet):
if packet[TCP].payload:
mail_packet = str(packet[TCP].payload)
if 'user' in mail_packet.lower() or 'pass' in mail_packet.lower():
print(f'[*]Server: {packet[IP].dst}')
print(f'[*]{packet[TCP].payload}')
#print(packet.show())
#スニッファーを起動
sniff(prn=packet_callback, count=1) | [
"ootsuka.ootsuka.ootsuka.26@gmail.com"
] | ootsuka.ootsuka.ootsuka.26@gmail.com |
02055222a2d2da4ddc31f27d0224086ca163fd89 | fdc1d6a47cba0cd7ef44a344eb04a7e0c7013d95 | /manage.py | 7fba6490425fbc4b8cfe97107bd07dd8a25a2dff | [] | no_license | VascoMonteiroNeto/WebApp_Music_Room | 05e8ecc3f7ea05c7c4ab61125102661772aa00cf | 274a83d08e869b960cadca45a6f89a5db41b7fa0 | refs/heads/main | 2023-08-29T23:30:50.363536 | 2021-10-27T04:33:55 | 2021-10-27T04:33:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'music_room.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"vascomonteironeto@gmail.com"
] | vascomonteironeto@gmail.com |
6cad723431e386e4106f0a12faf151bc4287c355 | 8356d48b650049c058fdc2161982088e5fe37c77 | /Spark-Example-Word-Count/WordCountAll.py | 29987c584453d5b7e9a35bd260f986fab714fc04 | [
"BSD-3-Clause"
] | permissive | AvantikaDG/MET-CS777 | 810dfa5dbd55cf8eec8fe829776817d2bed0cfd0 | 6dd20ffaa2fa55f08671f07565ed975ce947055c | refs/heads/master | 2021-06-18T04:37:43.519580 | 2021-05-25T17:20:49 | 2021-05-25T17:20:49 | 209,424,351 | 0 | 0 | BSD-3-Clause | 2019-09-18T23:55:51 | 2019-09-18T23:55:51 | null | UTF-8 | Python | false | false | 480 | py | from __future__ import print_function
import sys
from operator import add
from pyspark import SparkContext
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: wordcount <file> <output> ", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="PythonWordCount")
lines = sc.textFile(sys.argv[1])
counts = lines.flatMap(lambda x: x.split(' ')).map(lambda x: (x, 1)).reduceByKey(add)
counts.saveAsTextFile(sys.argv[2])
sc.stop() | [
"kiat@bu.edy"
] | kiat@bu.edy |
900bbc907bb10a759b672147517f8448c7ef5e21 | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/fusion/tests/wpst_crm/feature_tests/C7000/Supershaw_TAA_FA_DA/validate.py | d1b1f709416576fdb725e7dd9fe4c24c42439338 | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py |
'''
This module contains the code to get the IP's of the
ethernet networks. Using the IP's it can login to the
server and execute the diskspd commands to start the traffic.
Diskspd results or ouput will be redirected to the log file
'''
import paramiko
import os
import time
import re
import threading
import Queue
def execute_diskspd(ip, username, passwd, diskspd_cmd):
'''
Execute the diskSPD tool Command
'''
try:
single_cmd = "psexec \\\\" + ip + " -u " + username + " -p " + passwd + " " +\
diskspd_cmd
output = os.system(single_cmd)
return (output)
except Exception as e:
return (e)
def validate_windows_lun_count(ip, username, passwd, diskspd_cmd):
output = execute_diskspd(ip,
username, passwd, diskspd_cmd)
with open("C:\\WINDOWSLUN.txt") as f:
lines = f.readlines()
print lines
count = 0
for i in lines:
if "3PARdata" in i:
count = count + 1
print count
return count
| [
"akul@SAC0MKUVCQ.asiapacific.hpqcorp.net"
] | akul@SAC0MKUVCQ.asiapacific.hpqcorp.net |
c0434733549778bcecc5885783042c73ee14eacc | 3715424dac30fb81a73381bad36d2195f68fe630 | /code_kata/kata02/kata02.py | bd4e3cb3c361ea657a57c03bda5f69946b8fe991 | [] | no_license | digorithm/coding_practice | 788ce70b4dc90f1a07a01a06777d099ddc742824 | e7689c938161d7fc3ed55feefa1aecedccc9e65a | refs/heads/master | 2016-08-05T20:57:42.470956 | 2016-01-13T00:29:55 | 2016-01-13T00:29:55 | 38,053,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | """Write a binary chop method that takes an integer search target and a sorted array of integers. It should return the integer index of the target in the array, or -1 if the target is not in the array. The signature will logically be:
chop(int, array_of_int) -> int
You can assume that the array has less than 100,000 elements. For the purposes of this Kata, time and memory performance are not issues (assuming the chop terminates before you get bored and kill it, and that you have enough RAM to run it)."""
# for profiling line by line, on terminal:
# kernprof -l -v kata02.py
# remove comments on @profile
@profile
def chop(l, value):
low = 0
high = len(l)-1
while low <= high:
mid = (low+high)//2
if l[mid] > value:
high = mid-1
elif l[mid] < value:
low = mid+1
else:
return mid
return -1
@profile
def recursive_chop(l, value, low = 0, high = -1):
if not l:
return -1
if(high == -1):
high = len(l)-1
if low == high:
if l[low] == value:
return low
else:
return -1
mid = (low+high)//2
if l[mid] > value:
return recursive_chop(l, value, low, mid-1)
elif l[mid] < value:
return recursive_chop(l, value, mid+1, high)
else:
return mid
if __name__ == '__main__':
l = [x for x in xrange(10000000)]
value = 6000
print recursive_chop(l,value)
print chop(l, value)
| [
"rodrigo.araujo@jusbrasil.com.br"
] | rodrigo.araujo@jusbrasil.com.br |
b39de8ac76a254b4e8291b224d42dd1a005694ed | 906e04f77ab90b61238b016590c89e42adb202f2 | /Clases/calibration.py | 98f4367ce5a44fe7872b44cdc20e0509ddc18422 | [] | no_license | Kolark/ProyectoFinalVisionArtificial | 9db6ac757f32590b459533bd89edccb357db78d9 | 6b59105fa799993728636afb54309db5cf97f486 | refs/heads/main | 2023-05-03T23:32:49.427765 | 2021-05-25T20:17:56 | 2021-05-25T20:17:56 | 369,999,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | # import cv2
# import numpy as np
# fotograma = cv2.imread("testimage2.png")
# # captura = cv2.VideoCapture(0)
# while(True):
# # disponible, fotograma = captura.read()
# height,width,channels = fotograma.shape
# # if disponible == True:
# if True:
# # cv2.rectangle(fotograma,(0,0),(height,width),(0,255,0),20)
# # r = cv2.selectROI(im)
# cut = fotograma[0:500,0:500]
# hsv = cv2.cvtColor (cut, cv2.COLOR_BGR2HSV)
# h, s, v = cv2.split (hsv)
# mean1 = h.mean()
# mean2 = s.mean()
# mean3 = v.mean()
# stdevm1 = np.std(h)
# print("h " + str(mean1) + " - stdev: " + str(stdevm1))
# print("s" + str(mean2))
# print("v " + str(mean3))
# cv2.imshow("Segmentado",cut)
# cv2.imshow("Segmentado3",fotograma)
# ch = 0xFF & cv2.waitKey(1)
# if ch == ord('q'):
# break
# cv2.destroyAllWindows()
import cv2
import numpy as np
class CalibrationClass:
@staticmethod
def Calibrate(ROI):
hsv = cv2.cvtColor (ROI, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split (hsv)
HueMean = h.mean()
SatMean = s.mean()
ValMean = v.mean()
HueSTD = np.std(h)
SatSTD = np.std(s)
ValSTD = np.std(v)
HueMIN = (HueMean-HueSTD*3) % 179
SatMIN = np.clip(SatMean-SatSTD*5,0,255)
ValMIN = np.clip(ValMean-ValSTD*5,0,255)
HueMAX = (HueMean+HueSTD*3) % 179
SatMAX = np.clip(SatMean+SatSTD*5,0,255)
ValMAX = np.clip(ValMean+ValSTD*5,0,255)
return np.array((HueMIN,SatMIN,ValMIN)),np.array((HueMAX,SatMAX,ValMAX))
| [
"47009873+Kolark@users.noreply.github.com"
] | 47009873+Kolark@users.noreply.github.com |
e83a1bb49aaba6e0a6d02b5b2b54b2d0c40de627 | fc375ac455be07c99c11665f8844faed4729b853 | /survey.py | d9c507cf96a9e396e82322ce8a8f8b3b0203e555 | [] | no_license | IAmSherbet/python-project | 9af9d7c35e215e66c76b9492dd9b6f391a70d8bd | db97e715a17351d82d1e3285641868e20818d68e | refs/heads/master | 2021-07-15T18:51:25.631558 | 2017-10-23T00:48:01 | 2017-10-23T00:48:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
Base = declarative_base()
class Question(Base):
_tablename_ = 'QUESTION'
questionId = Column(Integer, primary_key=True)
title = Column(String)
class Survey(object):
def create_table(self):
engine = create_engine('sqlite:///surveys.db')
Base.metadata.create_all(engine)
def insert_question(self, id, question):
engine = create_engine('sqlite:///surveys.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
newQuestion = Question(id=id, question=question)
session.add(newQuestion)
session.commit()
session.close()
survey = Survey()
try:
survey.create_table()
except:
print("Survey already there.")
#Insert records into the table
#survey.insert_question('003','What is your name?')
#Search the tables in the database
#library.search_quesiton('Agile Design')
| [
"sbajracharya@trioxis.com"
] | sbajracharya@trioxis.com |
7d0fa9b4b4f4b3082220c3ee9b07b146fdbbd204 | 9cbd088a0f7288acee3c1d736ef85e516b86d8fe | /twitter_tools.py | f3b7643e42816d3d937696a696eca0c0ddfeb875 | [] | no_license | fjccoin/twitbots | 91ba75a8123c9c21cf20d3e235075f5e7b0ebd5d | 513a6df705034aeb61b0d7ea2fccfe6c722160d9 | refs/heads/master | 2020-04-08T02:42:31.602419 | 2015-11-12T08:38:01 | 2015-11-12T08:38:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,719 | py | import nltk
from collections import OrderedDict, defaultdict
import re
import requests
from bs4 import BeautifulSoup
from urlparse import urlparse
SLEEP_COMMAND = ' go to sleep'
WAKE_COMMAND = ' wake up'
QUIET_COMMAND = ' no reply'
LOUD_COMMAND = ' reply on'
ADMIN_ID = 21455761
def filter_tweet(tweet, userid, botname, friends=None):
skip = False
sleep = False
wake = False
debug = False
end_debug = False
# filter RTs
if tweet.get('retweet_count') > 0:
skip = True
# only reply to target user
sender = None
""" tweets to reply to:
if sender is owner and not a reply
if sender if owner's friend and mentions my name
"""
try:
sender = tweet.get('user').get('id')
if sender not in [userid, ADMIN_ID] + friends:
skip = True
except:
sender = None
skip = True
t = tweet.get('text')
if not t:
skip = True
else:
t = t.lower()
if t[:3] == "rt ":
skip = True
if sender in [userid, ADMIN_ID]:
if SLEEP_COMMAND in t:
sleep = True
elif WAKE_COMMAND in t:
wake = True
if QUIET_COMMAND in t:
debug = True
elif LOUD_COMMAND in t:
end_debug = True
if tweet.get('in_reply_to_status_id') and botname not in t:
skip = True
if t[0] == "@" and botname not in t:
skip = True
elif botname not in t:
skip = True
elif tweet.get('in_reply_to_status_id'):
skip = True
return skip, sleep, wake, debug, end_debug
def word_count(sentence, words):
s = nltk.word_tokenize(sentence)
return len(set(s) & set(words))
def ok_tweet(c, minlen, maxlen):
if c.endswith(':') or c.endswith(','):
return False
if len(c) > maxlen or len(c) < minlen:
return False
else:
return True
GARBAGE = [",", "--", "\'s", ".", "``","n\'t","\'\'",")","(","%","!","\'","?","percent",":"]
# semantic tools
def remove_stopwords(documents, sents=False):
texts = []
for d in documents:
if sents:
doc = d #d[0]+d[1]
else:
doc = documents[d]
doc = clean_str(doc)
tokens = nltk.word_tokenize(doc.lower())
tokens = [t for t in tokens if t not in nltk.corpus.stopwords.words('english')]
tokens = [t for t in tokens if t not in GARBAGE]
texts.append(tokens)
return texts
def clean_str(text):
# remove words that start with @
# remove urls
y = " ".join(filter(lambda x:(x[0]!='@' and x[:4]!='http'), text.split()))
return re.sub('[#$*|]', '', y)
def remove_infreq(inputs, minfreq):
frequency = defaultdict(int)
for text in inputs:
for token in text:
frequency[token] += 1
texts = [[token for token in text if frequency[token] > minfreq]
for text in inputs]
return texts
NEWS_DOMAINS = "thenewyorktimes moneybeat"
""" deal with urls in tweets """
def pull_headlines(tweet):
ent = tweet.get('entities')
urls = ent.get('urls')
t = ""
if urls:
for u in urls:
try:
url = u.get('expanded_url')
r = requests.get(url)
headlines = BeautifulSoup(r.content).find('title')
if not headlines:
headlines = BeautifulSoup(r.content).find('h1')
# remove domain
domain = '{uri.netloc}'.format(uri=urlparse(url)) + NEWS_DOMAINS
hwords = [h for h in headlines.getText().split() if h.lower() not in domain]
t = "%s %s" % (t,' '.join(hwords))
except:
continue
# also pull quoted tweets
if tweet.get('is_quote_status'):
try:
quote = tweet.get('quoted_status').get('text')
except:
quote = ''
t+=quote
return t
""" break and chunk tweets """
def send_tweet(api, tweet, id_orig=None, username=None):
twit = api.request('statuses/update', {'status': username + tweet, 'in_reply_to_status_id': id_orig})
# if too long, break it up
r = twit.response.json()
if username:
maxlen = 139-len(username)
else:
maxlen = 139
if r.get('errors'):
tweets = break_tweet(tweet, maxlen)
id_str = id_orig
for rt in tweets:
t = api.request('statuses/update', {'status': username + rt, 'in_reply_to_status_id': id_str})
rt_resp = t.response.json()
if rt_resp.get('errors'):
continue
else:
id_str = rt_resp.get('id_str')
def chunks(l, n):
"""Yield successive n-sized chunks from l.
Chunks prioritize commas. after that, spaces
"""
q = []
total = 0
remainder = l
while len(remainder) > 0:
if len(remainder) <= n:
q.append(remainder[:idx])
break
x = remainder[:n]
idx = x.rfind(',')
if idx > 0:
if idx > 50:
q.append(remainder[:idx+1])
remainder = remainder[idx+1:]
continue
idx = x.rfind(' ')
q.append(remainder[:idx])
remainder = remainder[idx+1:]
#for i in xrange(0, len(l), n):
# yield l[i:i+n]
return q
def break_tweet(tweet, n):
# first break into sentences
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
rtweets = sent_detector.tokenize(tweet.strip())
for idx, rt in enumerate(rtweets):
if len(rt) > n:
clauses = rt.split('\n')
for cdx, c in enumerate(clauses):
d = '?'
commas = [e+d for e in c.split(d) if e != '']
commas[-1] = commas[-1][:-1]
clauses[cdx:cdx+len(commas)] = commas
rtweets[idx:idx+len(clauses)] = clauses
for idx, rt in enumerate(rtweets):
if len(rt) > n:
chunkt = chunks(rt, n)
rtweets[idx:idx+len(chunkt)] = chunkt
return rtweets
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def create_tweet(text, username):
""" create a tweet from mult long sentences
This process will vary by user.
"""
# up to 2 tweets
#maxlen = 263-2*len(username)
maxlen = 139-len(username)
for t in text:
if ok_tweet(t, 40, maxlen):
return t
# go through again and break them up
else:
sents = sent_detector.tokenize(t)
for s in sents:
if ok_tweet(s, 40, maxlen):
return s
return None | [
"elaine.ou@gmail.com"
] | elaine.ou@gmail.com |
16f5f3e683d884969d7b2a96646d43ae6d346d91 | 91b2fb1fb6df216f2e365c3366bab66a567fc70d | /Week06/每日一题/857. 雇佣 K 名工人的最低成本.py | a439d49b3ad77a70ab1c5a3a7846aa901ac77d1d | [] | no_license | hrz123/algorithm010 | d17aee642f03f607a7984beb099eec18f2de1c8e | 817911d4282d2e226518b3533dff28282a91b3d4 | refs/heads/master | 2022-12-20T14:09:26.365781 | 2020-10-11T04:15:57 | 2020-10-11T04:15:57 | 270,178,423 | 1 | 0 | null | 2020-06-07T03:21:09 | 2020-06-07T03:21:09 | null | UTF-8 | Python | false | false | 5,322 | py | # 857. 雇佣 K 名工人的最低成本.py
import heapq
from typing import List
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
v = list(zip(quality, wage))
v.sort(key=lambda t: t[1] / t[0])
priority_queue = []
ans = float('inf')
total = 0
for q, w in v:
total += q
heapq.heappush(priority_queue, -q)
if len(priority_queue) > K:
total += heapq.heappop(priority_queue)
if len(priority_queue) == K:
ans = min(ans, total * w / q)
return ans
# 给工资的钱取决于两点,与最大的工资质量比成正比,这些人的质量总和成正比
# 我们要同时减小这两个元素
# 我们沿着工资质量比,和这些人总体的质量这条曲线的边界,找最小值
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
v = list(zip(quality, wage))
v.sort(key=lambda e: e[1] / e[0])
heap = []
res = float('inf')
_sum_q = 0
for q, w in v:
_sum_q += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, _sum_q * w / q)
_sum_q += heapq.heappop(heap)
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = list(zip(quality, wage))
zv.sort(key=lambda x: x[1] / x[0])
heap = []
res = float('inf')
q_sum = 0
for q, w in zv:
q_sum += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, q_sum * w / q)
q_sum += heapq.heappop(heap)
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = list(zip(quality, wage))
zv.sort(key=lambda x: x[1] / x[0])
heap = []
q_sum = 0
res = float('inf')
for q, w in zv:
q_sum += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, q_sum * w / q)
q_sum += heapq.heappop(heap)
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = list(zip(quality, wage))
zv.sort(key=lambda x: x[1] / x[0])
heap = []
res = float('inf')
q_sum = 0
for q, w in zv:
q_sum += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, q_sum * w / q)
q_sum += heapq.heappop(heap)
return res
class Solution:
def mincostToHireWorkers(self, quality: List[int], wage: List[int],
K: int) -> float:
zv = list(zip(wage, quality))
zv.sort(key=lambda x: x[0] / x[1])
heap = []
res = float('inf')
qs = 0
for w, q in zv:
qs += q
heapq.heappush(heap, -q)
if len(heap) == K:
res = min(res, w / q * qs)
qp = -heapq.heappop(heap)
qs -= qp
return res
# Duplicate practice rewrite of LeetCode 857 using (wage, quality) tuples.
class Solution:
    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        """Min cost to hire exactly K workers at proportional pay."""
        zv = list(zip(wage, quality))
        zv.sort(key=lambda x: x[0] / x[1])
        heap = []
        res = float('inf')
        qs = 0
        for w, q in zv:
            qs += q
            heapq.heappush(heap, -q)
            if len(heap) == K:
                # w/q is the pay rate forced by the current worker.
                res = min(res, w / q * qs)
                qp = -heapq.heappop(heap)
                qs -= qp
        return res
# Duplicate practice rewrite of LeetCode 857 (same ratio-sort + max-heap greedy).
class Solution:
    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        """Min cost to hire exactly K workers at proportional pay."""
        zv = [*zip(quality, wage)]
        zv.sort(key=lambda x: x[1] / x[0])
        heap = []
        q_sum = 0
        res = float('inf')
        for q, w in zv:
            heapq.heappush(heap, -q)
            q_sum += q
            if len(heap) == K:
                res = min(res, q_sum * w / q)
                # Negated pop removes the largest quality from the running sum.
                q_sum += heapq.heappop(heap)
        return res
# Duplicate practice rewrite of LeetCode 857 (same ratio-sort + max-heap greedy).
class Solution:
    def mincostToHireWorkers(self, quality: List[int], wage: List[int],
                             K: int) -> float:
        """Min cost to hire exactly K workers at proportional pay."""
        zv = [*zip(quality, wage)]
        zv.sort(key=lambda x: x[1] / x[0])
        q_sum = 0
        heap = []
        res = float('inf')
        for q, w in zv:
            q_sum += q
            heapq.heappush(heap, -q)
            if len(heap) == K:
                res = min(res, q_sum * w / q)
                # Drop the largest quality before trying the next pay rate.
                q_sum += heapq.heappop(heap)
        return res
def main():
    """Smoke-test the (last defined) Solution on the two LeetCode 857 examples."""
    sol = Solution()
    quality = [10, 20, 5]
    wage = [70, 50, 30]
    K = 2
    res = sol.mincostToHireWorkers(quality, wage, K)
    print(res)  # expected 105.0 (problem example 1)
    quality = [3, 1, 10, 10, 1]
    wage = [4, 8, 2, 2, 7]
    K = 3
    res = sol.mincostToHireWorkers(quality, wage, K)
    print(res)  # expected ~30.66667 (problem example 2)
# Run the example checks only when this file is executed directly.
if __name__ == '__main__':
    main()
| [
"2403076194@qq.com"
] | 2403076194@qq.com |
fe3dab0ec46f2d9d468c1bfb23131d74b61c7c20 | 4985a202523466fdd691db65f1ac89599103fb71 | /projects/hackathon_2021/api/hack_api/models/base.py | bf055dcd494726d0b4fd6c6776301e21919801d3 | [] | no_license | nikeethr/data_engineering | be949d6c9f94863b33a33422e690077d260cde02 | 2b3e06ffa39ab4a9a3b5ecabc3aeeb21f9fa9fbd | refs/heads/master | 2023-06-09T15:34:11.120882 | 2023-05-28T09:32:07 | 2023-05-28T09:32:07 | 239,729,052 | 0 | 0 | null | 2021-03-22T09:36:13 | 2020-02-11T09:58:07 | Python | UTF-8 | Python | false | false | 80 | py | # define mongo db here:
from flask_pymongo import PyMongo
# Shared module-level Mongo handle; presumably bound to the Flask app later
# via mongo_db.init_app(app) — TODO confirm at the application factory.
mongo_db = PyMongo()
| [
"nikeeth.ramanathan@gmail.com"
] | nikeeth.ramanathan@gmail.com |
d751e9b7d38adf03bc3309f3b6b37f91dd074ff4 | 3ddbc9d43da6f51c3c4f82e7811253985b133185 | /3.py | da5166ec5f74234b4cb7ad8a4ea2918f976b2daf | [
"MIT"
] | permissive | kavinduaj12/High-School-ICT | 814ed5881bb26b1bfdf27c8d7d1d15849d728def | 7d011c2f31010ac9ef992f1257b87874c173956a | refs/heads/main | 2023-08-31T12:10:37.949630 | 2021-10-28T01:54:54 | 2021-10-28T01:54:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from turtle import*
# Output 1
# Draw a pink disc, then fill a 12-fold rosette of octagons in purple on gray.
color("gray")
dot(220,"pink")
begin_fill()
for i in range(12):
    right(30)
    for i in range(8):
        forward(40)
        right(45)
color("purple")
end_fill()
# Output 2
# Same rosette again with a blue pen and black fill.
color("blue")
dot(220,"pink")
begin_fill()
for i in range(12):
    right(30)
    for i in range(8):
        forward(40)
        right(45)
color("black")
end_fill()
| [
"noreply@github.com"
] | kavinduaj12.noreply@github.com |
f8627c33660ec23c33b0b79825a2d68e8cebf6c9 | d69b868f7ffebd3b005ccf9fb5dca10d9e1035cb | /model.py | 2f8ecc92685b802995a5eb73b3c27757ac0ea77b | [] | no_license | srmorgan1/DRLND_P2_Continuous-Control | a99ea4ba6dfe79dc112b49ea5f3b5e91582bc3d5 | ada4d69b02bebf135c3a9167429cc7f52ec0f17e | refs/heads/master | 2020-04-19T13:16:21.882630 | 2019-02-06T21:37:28 | 2019-02-06T21:37:28 | 168,213,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,693 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
    """Symmetric uniform init range (-1/sqrt(fan_in), 1/sqrt(fan_in)) for a linear layer."""
    fan_in = layer.weight.data.size(0)
    bound = 1.0 / np.sqrt(fan_in)
    return -bound, bound
class Actor(nn.Module):
    """Actor (Policy) Model: deterministic policy mapping states to actions in [-1, 1]."""

    def __init__(self, state_size, action_size, seed, fc1_units=256, fc2_units=128):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)
        self.reset_parameters()

    def reset_parameters(self):
        """Hidden layers ~ U(-1/sqrt(fan_in), 1/sqrt(fan_in)); output layer small."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state):
        """Build an actor (policy) network that maps states -> actions."""
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        # torch.tanh replaces the long-deprecated F.tanh; numerically identical.
        return torch.tanh(self.fc3(x))
class Critic(nn.Module):
    """Critic (Value) Model."""
    def __init__(self, state_size, action_size, seed, fcs1_units=256, fc2_units=128):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fcs1_units (int): Number of nodes in the first hidden layer
            fc2_units (int): Number of nodes in the second hidden layer
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        # The state is embedded first; the action joins at the second layer.
        self.fcs1 = nn.Linear(state_size, fcs1_units)
        self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units)
        self.fc3 = nn.Linear(fc2_units, 1)
        self.reset_parameters()
    def reset_parameters(self):
        # Hidden layers ~ U(-1/sqrt(fan_in), ...); final layer near zero.
        self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)
    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        xs = F.relu(self.fcs1(state))
        # Concatenate the action with the state embedding along the feature dim.
        x = torch.cat((xs, action), dim=1)
        x = F.relu(self.fc2(x))
        return self.fc3(x)
| [
"srmorgan1@gmail.com"
] | srmorgan1@gmail.com |
80fdc39e33b15f159f03bb4a86900a93b049a811 | f22219d709b6837f55fb101f4bfaf38e14998c3d | /venv/Scripts/easy_install-script.py | 7828271d108bf712747709737402fd3d21f27a1f | [] | no_license | sauravraj1/hostel | 93b96b945223d40e286712cfe2d0fa260c2f1c3e | 9455f651350dd57f61e6cad13ca477c59cf16da9 | refs/heads/master | 2022-02-25T09:27:49.494727 | 2019-10-02T09:12:02 | 2019-10-02T09:12:02 | 212,297,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | #!C:\Users\saura\PycharmProjects\hostelallotment\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
# Auto-generated setuptools console-script wrapper; regenerate rather than edit.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"50795506+sauravraj1@users.noreply.github.com"
] | 50795506+sauravraj1@users.noreply.github.com |
1e722c8b3d71456db9c90dd4ee5c9bde1a02f8c7 | 7dfb5942ae4721b7084bde958d632dd90096328a | /function_generator/error_models.py | b75cd3ae00e6daae1112f1a68f604e5b2ace591f | [
"Apache-2.0"
] | permissive | blackwer/function_generator | f392ae0041f56d235a959ce3e54c1f865baf3cca | 91025e67a2c64009f0384ee35466bb04f0819fce | refs/heads/master | 2021-11-26T03:20:49.104389 | 2021-11-10T14:23:34 | 2021-11-10T14:23:34 | 219,051,758 | 9 | 2 | Apache-2.0 | 2021-11-09T13:34:12 | 2019-11-01T19:42:48 | C++ | UTF-8 | Python | false | false | 291 | py | import numpy as np
def standard_error_model(coefs, f):
    """Largest magnitude of the two trailing coefficients, scaled by max(1, |c0|).

    `f` is accepted for interface parity with the other error models but unused.
    """
    tail = np.abs(coefs[-2:]).max()
    scale = max(1, np.abs(coefs[0]))
    return tail / scale
def relative_error_model(coefs, f):
    """Largest magnitude of the two trailing coefficients relative to |c0|.

    `f` is accepted for interface parity with the other error models but unused.
    """
    tail = np.abs(coefs[-2:]).max()
    return tail / np.abs(coefs[0])
def new_error_model(coefs, f):
    """Largest magnitude of the two trailing coefficients relative to min |f|."""
    tail = np.abs(coefs[-2:]).max()
    return tail / np.abs(f).min()
| [
"dstein@flatironinstitute.org"
] | dstein@flatironinstitute.org |
52a4d27d2d45abfa176ad4c8edd1e8e1b6e7298c | 1b126876948b3d05f89e058d4642405f192fb858 | /src/strava_api/Client.py | ff70413ffc370f22346a23c172543126be8f72e8 | [
"MIT"
] | permissive | yknot/strava_api | 6ecc972132156432cdc4e19ffe23fd5045fa765a | b31080b8718a6c26399cfc7c36b77f36a2bed1d3 | refs/heads/master | 2023-05-25T04:51:02.822053 | 2020-07-18T04:44:35 | 2020-07-18T04:44:35 | 279,205,963 | 0 | 0 | MIT | 2023-05-23T00:04:21 | 2020-07-13T04:01:33 | Python | UTF-8 | Python | false | false | 1,091 | py | """Main module."""
import requests
from .Athlete import Athlete
class Client:
    """Class to manage your Strava API Client"""
    def __init__(
        self, client_id: str, client_secret: str, auth_token: str, refresh_token: str
    ) -> None:
        """initialize client with application attributes"""
        self.client_id = client_id
        self.client_secret = client_secret
        self.auth_token = auth_token
        self.refresh_token = refresh_token
        # create variables
        # Populated by set_athlete() after the OAuth code exchange succeeds.
        self.athlete = None
    def set_athlete(self, auth_code: str) -> None:
        # Exchange the OAuth authorization code for tokens and wrap the JSON
        # response in an Athlete; request failures are printed, not raised.
        try:
            response = requests.post(
                url="https://www.strava.com/oauth/token",
                params={
                    "client_id": self.client_id,
                    "client_secret": self.client_secret,
                    "code": auth_code,
                    "grant_type": "authorization_code",
                },
            )
            self.athlete = Athlete(response.json())
        except requests.exceptions.RequestException:
            print("HTTP Request failed")
| [
"a.yale9@gmail.com"
] | a.yale9@gmail.com |
8b9668570559776e162f73e7e1af576424f0f3af | 57d84d2046d8f39cfc8552424c6df07779a723ea | /2_DateStructure/7.2.py | 64826ad831de136629725607c2ef85c2f662c9ce | [] | no_license | gaomc66/Py4e | 21457d19912d1981f3e528e581c63c2cd52ca0eb | bfaaf0d2b3b4e68bd84780a225d05accad9a9160 | refs/heads/master | 2021-04-03T08:37:04.023215 | 2018-03-10T15:13:00 | 2018-03-10T15:13:04 | 124,666,202 | 0 | 0 | null | 2018-03-10T17:49:14 | 2018-03-10T15:00:54 | Python | UTF-8 | Python | false | false | 347 | py | fname = input("Enter file name: ")
# Average the X-DSPAM-Confidence values in a mail file (Py4e exercise 7.2).
# `fname` is read with input() on the preceding line.
fh = open(fname)
count = 0
total = 0  # renamed from `sum` to stop shadowing the builtin
for line in fh:
    # Header lines look like "X-DSPAM-Confidence: 0.8475".
    if line.startswith("X-DSPAM-Confidence:"):
        index = line.find(":")
        line = line.rstrip()
        value = line[index+1:]
        fval = float(value)
        total = total + fval
        count = count + 1
# Bug fix: the original `print("Average spam confidence:"sum/count)` was a
# SyntaxError (missing comma between the string and the expression).
print("Average spam confidence:", total/count)
| [
"gaomc@MengchenGao.lan"
] | gaomc@MengchenGao.lan |
1bf262face9118bd16196e2038b52111eba67778 | 410113ecc55fdaefc4adfd6b9740fa78f188397b | /blue_steganography.py | 610181a86fa0fd2dee282178bcca6b3d367eb4ee | [] | no_license | joncoop/blue-steganography | a25ea4b713b433c6e73bd1d62f68250414262358 | 07acd00c5447d8b9dab5430381217c969cde9787 | refs/heads/master | 2021-12-15T09:07:18.142677 | 2021-12-09T14:43:57 | 2021-12-09T14:43:57 | 122,228,467 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,921 | py | import pygame
from itertools import product
pygame.init()
# config
# Sentinel appended to every message so the decoder knows where the text ends.
stop_flag = "[[:stop:]]"
def text_to_binary(text):
    '''
    Converts ASCII text to a string binary digits. Each character will be
    represented as 8 bits.
    '''
    # format(code, "08b") zero-pads each code point to (at least) 8 bits,
    # matching the original manual bin()/padding loop.
    return "".join(format(ord(ch), "08b") for ch in text)
def binary_to_text(binary_str):
    '''
    Converts a string binary digits to ASCII text. Each character will be
    represented as 8 bits.
    '''
    result = ""
    # Walk the bit string in 8-bit steps and decode each chunk.
    for start in range(0, len(binary_str), 8):
        code = int(binary_str[start:start + 8], 2)
        # Codes above 126 are outside printable ASCII and are dropped.
        if code <= 126:
            result += chr(code)
    return result
def hide_message(message_file_path, original_image_path, encoded_image_path):
    '''
    Hides a secret message inside an image file.

    The message text (plus the stop_flag sentinel) is encoded one bit per
    pixel in the parity of the blue channel, scanning row by row.
    '''
    # read message from text file and append stop flag
    with open(message_file_path, 'r') as f:
        message = f.read()
    message += stop_flag
    # convert message to binary
    binary_str = text_to_binary(message)
    # load the original image file as a surface
    surf = pygame.image.load(original_image_path)
    width = surf.get_width()
    height = surf.get_height()
    # loop through message and adjust pixels
    i = 0
    x = 0
    y = 0
    while i < len(binary_str):
        loc = [x, y]
        color = surf.get_at(loc)
        blue = color.b
        bit = int(binary_str[i])
        even = blue % 2 == 0
        # Only touch the pixel when its parity disagrees with the target bit.
        if (even and bit == 1) or (not even and bit == 0):
            blue += 1
            if blue > 255:
                blue -= 2
            color.b = blue
            surf.set_at(loc, color)
        i += 1
        x += 1
        if x == width:
            x = 0
            y += 1
    # save the new image
    pygame.image.save(surf, encoded_image_path)
    print("Success! Your secret message was hidden in '" + encoded_image_path + "'.")
def extract_message(encoded_image_path, extracted_message_path):
    '''
    Extracts a secret message from an image file.

    Reads the blue-channel parity of every pixel (row by row), decodes the
    bits to text, and truncates at the stop_flag sentinel.
    '''
    # load image as surface
    surf = pygame.image.load(encoded_image_path)
    width = surf.get_width()
    height = surf.get_height()
    # build binary digit string from image
    binary_str = ""
    for y in range(height):
        for x in range(width):
            loc = [x, y]
            color = surf.get_at(loc)
            blue = color.b
            binary_str += str(blue % 2)
    # convert binary string to text
    message = binary_to_text(binary_str)
    # truncate any characters after stop flag
    # NOTE(review): if the flag is absent, find() returns -1 and the last
    # character is silently dropped — presumably acceptable for this tool.
    end = message.find(stop_flag)
    message = message[:end]
    # write extracted message to file
    with open(extracted_message_path, 'w') as f:
        f.write(message)
    print("Success! Your secret message was extracted to '" + extracted_message_path + "'.")
| [
"noreply@github.com"
] | joncoop.noreply@github.com |
b33759539b2bc335df52bacedf3a8424c3ec86c0 | c8da3539397dbd49388719fb6d8720db61e859a7 | /catkin_ws/build/hector_slam/hector_geotiff_plugins/catkin_generated/pkg.develspace.context.pc.py | b98873d668c4614483b338c03fe900e4a959193b | [] | no_license | pinkpinkheart/ROS | a465c9e967cd1c71da7648a62d1cc8af342b70df | bd91772e24b72d466a90d2dd65f54be4be49ce99 | refs/heads/master | 2023-03-12T17:55:40.650415 | 2021-03-03T09:20:00 | 2021-03-03T09:20:00 | 344,137,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin pkg-config context (from pkg.context.pc.in); do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_geotiff_plugins"
PROJECT_SPACE_DIR = "/home/cy/workspace/ROS/catkin_ws/devel"
PROJECT_VERSION = "0.3.5"
| [
"123456789@qq.com"
] | 123456789@qq.com |
53b1c7d3220b8bc9a71718816c8d902ef0db02b4 | 15693b1346ae9c1e73d2dc94996abbf6dc8ed0d0 | /microblog/commons/logging.py | c1f3495ea27e64e279b046c6ec940b64dced5dc5 | [] | no_license | artem-artiukhov/bym | f1482bb604a0f77a974c4606583de0c44cd06705 | 83ff72c47b406ce7e5c5314109c90221a09e6a8c | refs/heads/master | 2021-01-03T06:42:45.350423 | 2020-02-18T12:35:56 | 2020-02-18T12:35:56 | 239,962,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | import logging
from logging.config import dictConfig
from flask.logging import default_handler
from microblog.config import LOG_LEVEL
# Root logger; attach Flask's default handler so records are visible even
# before setup_logging() installs the dictConfig below.
log = logging.getLogger()
log.addHandler(default_handler)
def setup_logging():
    """Route root and alembic loggers to Flask's WSGI error stream at LOG_LEVEL."""
    dictConfig({
        'version': 1,
        'formatters': {
            'default': {
                'format': '[%(asctime)s.%(msecs)03d] [%(levelname)s] '
                          '[%(module)s:%(lineno)s] [%(name)s] %(message)s',
                'datefmt': '%b/%d/%Y %H:%M:%S'
            }},
        'handlers': {
            'wsgi': {
                'class': 'logging.StreamHandler',
                'stream': 'ext://flask.logging.wsgi_errors_stream',
                'formatter': 'default'
            }},
        'root': {
            'level': LOG_LEVEL,
            'handlers': ['wsgi']
        },
        'loggers': {
            'alembic': {
                'level': LOG_LEVEL,
                'handlers': ['wsgi']
            }}
    })
| [
"artem.artiukhov@chromeriver.com"
] | artem.artiukhov@chromeriver.com |
8948694eb6302a0c7f31884b3092f6dbf76baa48 | f85f18d4c252c70cde9be5a3b3052ccac4e7202c | /en250/test.py | d33e706eca432b8658f1d416dc75380d75346302 | [] | no_license | tbjag/spring_2020 | 67ca78e439ab82da8839614b25b63d0561acc2ff | 9b6fc792a3aaa513dc984e7b20a043d9c8b0b659 | refs/heads/master | 2020-12-14T16:34:06.288247 | 2020-05-14T01:38:57 | 2020-05-14T01:38:57 | 234,808,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,361 | py | import pandas as pd
import random
import time
import math
#define move set
MOVES = [0,1,2]
#define area
BOUND_X_MIN = 0
BOUND_Y_MIN = 0
BOUND_X_MAX = 100
BOUND_Y_MAX = 100
def print_state(person):
    # One-line report of the person's id, grid position and infection status.
    status = "" if person.is_sick else "not "
    print("%d is at (%d,%d) and is %ssick" % (person.id, person.x, person.y, status))
#define class person with position
class Person:
    """A walker on the grid heading toward a random destination; may be sick."""
    def __init__(self, id, pos_x, pos_y):
        self.id = id
        self.x = pos_x
        self.y = pos_y
        self.is_sick = False
        # Random destination inside the bounded area.
        self.goto_x = random.randrange(BOUND_X_MAX)
        self.goto_y = random.randrange(BOUND_Y_MAX)
    def move(self):
        #random move set, if at edge, then do nothing
        if(self.x == self.goto_x and self.y == self.goto_y):
            #set new random coords if arrived at destination
            self.goto_x = random.randrange(BOUND_X_MAX)
            self.goto_y = random.randrange(BOUND_Y_MAX)
        elif(self.x == self.goto_x):
            # Aligned in x: NOTE(review) always steps +1 in y regardless of
            # where the goal lies — looks like it can overshoot; confirm intent.
            self.y += 1
        elif(self.y == self.goto_y):
            self.x += 1
        else:
            # Pick an axis at random; MOVES value 2 means "stand still".
            move = random.choice(MOVES)
            if(move == 0):
                if ((self.goto_x - self.x) > 0):
                    self.x += 1
                else:
                    self.x -= 1
            elif(move == 1):
                if ((self.goto_y - self.y) > 0):
                    self.y += 1
                else:
                    self.y -= 1
#calculate manhattan distance abs(posx - posx)
def within_area(person1, person2, prox):
    """True if the two people are within `prox` cells on both axes.

    Despite the comment in the original file mentioning Manhattan distance,
    this is a Chebyshev (square-box) proximity test.
    """
    # Idiom fix: return the boolean expression directly instead of
    # `if ...: return True else: return False`.
    return abs(person1.x - person2.x) <= prox and abs(person1.y - person2.y) <= prox
def main():
    """Run a 400-step infection-spread simulation with 30 random walkers."""
    arr = []
    for i in range(30):
        # Start everyone on a diagonal column at x=0.
        arr.append(Person(i, 0, i*2))
        print_state(arr[i])
    #create a bunch of classes
    #make one person sick (patient zero)
    arr[0].is_sick = True
    for time_step in range(400):
        for i in range(30):
            arr[i].move()
        #can optimize this part
        # O(n^2) pairwise contact check; infection spreads on proximity <= 1.
        for lol in range(30):
            for gey in range(30):
                if(lol != gey):
                    if(within_area(arr[lol], arr[gey], 1)):
                        if(arr[lol].is_sick or arr[gey].is_sick):
                            arr[lol].is_sick = True
                            arr[gey].is_sick = True
    #print out states
    for i in range(30):
        print_state(arr[i])
    """ days = 0
    count = 0
    while(count < 29):
        for i in range(30):
            arr[i].move()
        #can optimize this part
        for lol in range(30):
            for gey in range(30):
                if(lol != gey):
                    if(within_area(arr[lol], arr[gey], 1)):
                        if(arr[lol].is_sick or arr[gey].is_sick):
                            arr[lol].is_sick = True
                            arr[gey].is_sick = True
        count = 0
        for jk in arr:
            if jk.is_sick:
                count += 1
        days += 1
    print(days) """
main()
"tbjagodits@gmail.com"
] | tbjagodits@gmail.com |
6cfd92aa461d101a5e5fddc21a4af1ee7405e679 | 6516b1e0c064d532107874d61c068ca2a0153dd5 | /1018.py | a239ba9ea5c818d10268594d5d8f81fd95766a0d | [] | no_license | mukeshjnvk/URI-Online-Judge | a27f469aa49a832bea516d14fa390bab86663d8d | a7c5f2470f721a5717975d6db52df71ccdf2c4fc | refs/heads/master | 2016-09-06T06:57:22.396144 | 2015-03-06T11:44:18 | 2015-03-06T11:44:18 | 30,833,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py |
def change(mon):
    """Print the bank notes for the integer part of an amount (URI 1018, Python 2).

    `mon` is a [reais, centavos] pair of strings; the centavos part is
    converted and delegated to change2().
    """
    # Note denominations, largest first.
    notes = [100, 50, 20, 10, 5, 2]
    print 'NOTAS:'
    r = int(mon[0])
    for n in notes:
        rem = r / n
        if rem > 0:
            print '{0} nota(s) de R$ {1}.00'.format(rem, n)
            r = r - (rem * n)
        else:
            print '{0} nota(s) de R$ {1}.00'.format(rem, n)
    mon[1] = int(mon[1]) * 0.01
    if r == 1:
        # A leftover real becomes 100 centavos handled by the coin stage.
        mon[1] = 1 + mon[1]
    change2(mon[1])
def change2(r):
    """Print the coins for the fractional amount `r` (in reais, Python 2)."""
    # Coin denominations in centavos, largest first.
    notes = [100, 50, 25, 10, 5, 1]
    r = int(r * 100)
    print 'MOEDAS:'
    for n in notes:
        rem = r / n
        if rem > 0:
            # print rem
            print '{0} moeda(s) de R$ {1:.2f}'.format(int(rem), n*.01)
            r = r - (rem * n)
            # print 'r = ',r
        else:
            # print 'r = ',r
            print '{0} moeda(s) de R$ {1:.2f}'.format(int(rem), n*.01)
def main():
    """Read an amount like "576.73" and print its note/coin breakdown."""
    r = raw_input()
    s = r.split('.')
    change(s)
main()
"mukeshjnvk@gmail.com"
] | mukeshjnvk@gmail.com |
023a38ff50e4e24571543cd0cffe8cf9a480fa52 | e22780e6d16b108f2bc8d8b3adc04f39221e7e0c | /tests/test_Cartesian3d.py | 4818ad3296475b9ef667fb015485bef55fc63f10 | [
"MIT"
] | permissive | tdegeus/GMatElastoPlasticFiniteStrainSimo | 0228181b817a01707a7e1dc39ef0b1c39e2548f4 | dfaf83798d41b33c3d807a11774dc4d0ed195bfb | refs/heads/main | 2022-10-22T13:07:05.061360 | 2022-10-15T14:35:35 | 2022-10-15T14:35:35 | 157,088,081 | 2 | 0 | MIT | 2022-09-19T04:28:42 | 2018-11-11T14:34:38 | C++ | UTF-8 | Python | false | false | 1,432 | py | import unittest
import GMatElastoPlasticFiniteStrainSimo.Cartesian3d as GMat
import GMatTensor.Cartesian3d as tensor
import numpy as np
class Test_main(unittest.TestCase):
    """ """
    def test_Epseq_Sigeq(self):
        # Pure-shear tensor: equivalent strain 2/sqrt(3), equivalent stress sqrt(3).
        A = np.zeros((2, 3, 3, 3))
        A[..., 0, 1] = 1
        A[..., 1, 0] = 1
        self.assertTrue(np.allclose(GMat.Epseq(A), 2 / np.sqrt(3) * np.ones(A.shape[:-2])))
        self.assertTrue(np.allclose(GMat.Sigeq(A), np.sqrt(3.0) * np.ones(A.shape[:-2])))
    def test_Strain(self):
        # Volume-preserving stretch: logarithmic strain is +/- log(1 + gamma).
        shape = [2, 3]
        gamma = np.random.random(shape)
        F = tensor.Array2d(shape).I2
        F[..., 0, 0] = 1 + gamma
        F[..., 1, 1] = 1 / (1 + gamma)
        Eps = np.zeros_like(F)
        Eps[..., 0, 0] = np.log(1 + gamma)
        Eps[..., 1, 1] = -np.log(1 + gamma)
        self.assertTrue(np.allclose(GMat.Strain(F), Eps))
    def test_Elastic(self):
        # Elastic response to the same stretch: Sig = +/- 2 G log(1 + gamma).
        shape = [2, 3]
        mat = GMat.Elastic2d(
            K=np.random.random(shape),
            G=np.random.random(shape),
        )
        gamma = np.random.random(shape)
        mat.F[..., 0, 0] = 1 + gamma
        mat.F[..., 1, 1] = 1 / (1 + gamma)
        mat.refresh()
        Sig = np.zeros_like(mat.F)
        Sig[..., 0, 0] = 2 * mat.G * np.log(1 + gamma)
        Sig[..., 1, 1] = -2 * mat.G * np.log(1 + gamma)
        self.assertTrue(np.allclose(mat.Sig, Sig))
# Allow running this test module directly with `python test_Cartesian3d.py`.
if __name__ == "__main__":
    unittest.main()
| [
"tdegeus@users.noreply.github.com"
] | tdegeus@users.noreply.github.com |
e6ff19f50acd0acf2886ec23aec7887b9ca7134b | 28f759802af540793018684087505168edf6be38 | /Tkinter_GUI/Spraybuild_cell.py | d3140cb2b45935a7d6bd2dc290b465e72f042a45 | [] | no_license | Jkwnlee/python | 0608284bddce4bd63e67ff47a0521333761f95bc | b21e6eb92a35fb29b8c3cf7dafc34b50b954c8c8 | refs/heads/master | 2021-12-29T16:51:19.640537 | 2021-12-14T07:04:51 | 2021-12-14T07:04:51 | 139,732,731 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,612 | py | #!/bin/python
##/usr/bin/env python
##
# coding: utf-8
## Functions for Amorphous Generator
import os,sys,random,datetime
# import commands
import JH_lib as jh
import numpy as np
import pandas as pd
from subprocess import check_output
##########################################
# Define distance calculation function
##########################################
def point_in_box_vector(point, box):
    """True if `point` lies inside the (possibly skewed) box.

    In-plane containment is tested with dot products of the origin->point
    vector against the box edge vectors; the z extent is tested against the
    summed z components of all three vectors.
    """
    ini2point=[point[j]-box['BoxOrigin'][j] for j in range(3)]
    # NOTE(review): the dot products only bound the near faces (> 0); the far
    # in-plane faces are not checked here — confirm the intended geometry.
    if np.dot(ini2point, box['BoxVeca']) > 0 and np.dot(ini2point, box['BoxVecb']) > 0 and np.dot(ini2point, box['BoxVecc']) > 0 and point[2] > box['BoxOrigin'][2] and point[2] < box['BoxOrigin'][2] +box['BoxVeca'][2]+box['BoxVecb'][2]+box['BoxVecc'][2] :
        return True
    else:
        return False
def point_in_box_simple(point, box, unitcell):
    """True if the point lies strictly between the box origin and the cell
    diagonal on every Cartesian axis (orthorhombic cell assumed)."""
    origin = box['BoxOrigin']
    return all(origin[axis] < point[axis] < unitcell[axis][axis] for axis in range(3))
def distance(a, b):
    """Euclidean distance between two equal-length coordinate sequences."""
    return sum((p - q) ** 2.0 for p, q in zip(a, b)) ** 0.5
def add_atom_in_box(totatom, Box, InsertAtomDF,OutputPath, unitcell):
    """Randomly place the requested atoms in `unitcell` and write a POSCAR.

    Atoms are inserted one by one at random fractional coordinates, rejecting
    positions closer than `minDistance` (2.5x the mean Wigner-Seitz radius) to
    any already-placed atom; up to N_atoms*1000 attempts per atom.
    """
    exAtomPosition = []
    minDistance = InsertAtomDF['radious'].sum()/InsertAtomDF.shape[0] *2.5
    # Pre-fill the table with one row per atom (all at the origin for now).
    for j in range(InsertAtomDF.shape[0]):
        label = InsertAtomDF.element.iloc[j]
        N_atom = InsertAtomDF.N_atom.iloc[j]
        for natom in range(N_atom):
            exAtomPosition.append([label,0,0,0,'T','T','T'])
    exAtomPositionDF = pd.DataFrame(exAtomPosition, columns=['label','x', 'y', 'z', 'rx','ry','rz'])
    for newatom in range(InsertAtomDF.N_atom.sum()):
        tot_attempt = 0
        condition = True
        while condition and tot_attempt < InsertAtomDF.N_atom.sum()* 1000 :
            NewAtomPosition0 = [ random.random() for i in range(3)]
            # Re-draw until the Cartesian position clears half the min distance
            # from the cell boundary (avoids images too close across PBC).
            while min(np.dot (NewAtomPosition0, unitcell)) < minDistance/2 and tot_attempt < InsertAtomDF.N_atom.sum()* 1000:
                NewAtomPosition0 = [ random.random() for i in range(3)]
            NewAtomPosition = np.dot (NewAtomPosition0, unitcell)
            tot_attempt = tot_attempt + 1
            # Distance of the trial point to every atom placed so far.
            exAtomPositionDF['distance'] =( (exAtomPositionDF['x'] - NewAtomPosition[0])**2+
                                            (exAtomPositionDF['y'] - NewAtomPosition[1])**2+
                                            (exAtomPositionDF['z'] - NewAtomPosition[2])**2 )**0.5
            condition = exAtomPositionDF['distance'].min() < minDistance
        # print(newatom, NewAtomPosition, exAtomPositionDF['distance'].min() > minDistance, min(NewAtomPosition))
        exAtomPositionDF['x'].iloc[newatom] = NewAtomPosition[0]
        exAtomPositionDF['y'].iloc[newatom] = NewAtomPosition[1]
        exAtomPositionDF['z'].iloc[newatom] = NewAtomPosition[2]
    NewPositions = exAtomPositionDF.iloc[:,0:7].values.tolist()
    NewCompound = jh.component_from_positions(NewPositions)
    jh.w_poscar(NewPositions, compound = NewCompound, filename = OutputPath,
                unitcell = unitcell, Selective = True)
    return True
def build_SprayinBox( OutputPath='./outPOSCAR.vasp',
                      AtomDensity = 2.65, N_Atoms = [1 ,2] , AtomName = ['Si', 'O'] , MaxAtom = 200,
                      potdir = '/vasp/POTCAR/PAW_PBE', InitCell=[[],[],[]]):
    """Build one amorphous structure by spraying atoms into a density-matched box.

    The stoichiometry N_Atoms is scaled up to at most MaxAtom total atoms,
    the box volume is chosen so the mass density equals AtomDensity (g/cm^3),
    and the random placement + POSCAR write is delegated to add_atom_in_box().
    `potdir` is accepted but unused here.
    """
    ##########################################
    # Preallocate & grep MASS/Radious from POTCAR
    ##########################################
    atommass = []
    atomradious = []
    for atom in AtomName:
        #Atomic Mass
        atommass.append(atom_mass_radi_lib(atom, key='mass'))
        #Wigner-Seitz Radious
        atomradious.append(atom_mass_radi_lib(atom, key='radi'))
    # Scale the stoichiometric ratio to fill up to MaxAtom atoms.
    N_Atoms = np.array(N_Atoms) * (int(MaxAtom/sum(N_Atoms)))
    InsertAtomDF= pd.DataFrame(np.array([AtomName, N_Atoms,atommass,atomradious]).T,
                               columns=['element', 'N_atom', 'mass', 'radious'])
    InsertAtomDF['mass'] = InsertAtomDF['mass'].astype('float')
    InsertAtomDF['radious'] = InsertAtomDF['radious'].astype('float')
    InsertAtomDF['N_atom'] = InsertAtomDF['N_atom'].astype('int')#, 'N_atom':int, 'mass':float, 'radious': float)
    InsertAtomDF['sumMass'] = InsertAtomDF['N_atom'] * InsertAtomDF['mass']
    ##########################################
    # Preallocate & for atom positions
    ##########################################
    totatom = sum(N_Atoms)
    ##########################################
    # Define Box for Inserting atom
    ##########################################
    Box = box_generator(InsertAtomDF,TargetDensity=AtomDensity, a=InitCell[0], b=InitCell[1], c=InitCell[2])
    unitcell= [Box[ 'BoxVeca'], Box[ 'BoxVecb'] , Box[ 'BoxVecc']]
    # add_atom_in_box(totatom,cellpar, Box, InsertAtomDF,OutputPath, unitcell)
    add_atom_in_box(totatom, Box, InsertAtomDF,OutputPath, unitcell)
    return True
def box_generator(InsertAtomDF, TargetDensity, a=None, b=None, c=None):
    """Return a box dict reaching TargetDensity (g/cm^3) for the given atoms.

    Params
    ======
        InsertAtomDF: DataFrame with a 'sumMass' column (amu per species).
        TargetDensity (float): desired mass density in g/cm^3.
        a, b, c: optional lattice vectors; when none are given a cube is
            built, otherwise `a` and `b` are kept and the c-height is chosen
            so the volume matches the density (`c` itself only feeds the
            emptiness check, mirroring the original behavior).

    Fix: the mutable-list default arguments (a=[], ...) are replaced with the
    None-sentinel idiom; behavior is unchanged for all existing callers.
    """
    if a is None:
        a = []
    if b is None:
        b = []
    if c is None:
        c = []
    NumberofAvogadro = 6.022e23 # atom/mol
    Mass = InsertAtomDF['sumMass'].sum()/NumberofAvogadro #g.atom/mol / (atom/mol ) = g
    Volume = Mass / TargetDensity * 1e24 # g/(g/cm^3) = cm^3 * *1e8)^3 = A^3
    if len(a) + len(b) + len(c) == 0:
        # No lattice supplied: cubic box with the required volume.
        Height = Volume**(1/3)
        Box ={'BoxOrigin':[0,0,0], 'BoxVeca': [Height,0,0], 'BoxVecb': [0,Height,0], 'BoxVecc': [0,0,Height]}
    else:
        # Keep the in-plane vectors; stretch the box height to hit the volume.
        area = np.linalg.norm(np.cross(a,b))
        Height = Volume/area
        Box ={'BoxOrigin':[0,0,0], 'BoxVeca': a, 'BoxVecb': b, 'BoxVecc': [0,0,Height]}
    return Box
def atom_mass_radi_lib(atom, key='radi'):
    """Look up an element's atomic radius (Angstrom) or mass (amu) by symbol.

    Params
    ======
        atom (str): element symbol, e.g. 'Si'.
        key (str): 'radi' -> radius in Angstrom (table stores pm, divided by
            100), 'mass' -> atomic mass; any other key returns None implicitly.
    """
    #Lib: AtomicNumber,Label,Name,Radious,Mass
    lib= [[1,'H','Hydrogen',53,1.00], [2,'He','Helium',31,4.00], [3,'Li','Lithium',167,6.94], [4,'Be','Beryllium',112,9.01], [5,'B','Boron',87,10.81],
          [6,'C','Carbon',67,12.01], [7,'N','Nitrogen',56,14.00], [8,'O','Oxygen',48,15.99], [9,'F','Fluorine',42,18.99], [10,'Ne','Neon',38,20.17],
          [11,'Na','Sodium',190,22.98], [12,'Mg','Magnesium',145,24.30], [13,'Al','Aluminium',118,26.98], [14,'Si','Silicon',111,28.08], [15,'P','Phosphorus',98,30.97],
          [16,'S','Sulfur',88,32.06], [17,'Cl','Chlorine',79,35.45], [18,'Ar','Argon',71,39.09], [19,'K','Potassium',243,39.94], [20,'Ca','Calcium',194,40.08],
          [21,'Sc','Scandium',184,44.95], [22,'Ti','Titanium',176,47.90], [23,'V','Vanadium',171,50.94], [24,'Cr','Chromium',166,51.99], [25,'Mn','Manganese',161,54.93],
          [26,'Fe','Iron',156,55.84], [27,'Co','Cobalt',152,58.70], [28,'Ni','Nickel',149,58.93], [29,'Cu','Copper',145,63.54], [30,'Zn','Zinc',142,65.38],
          [31,'Ga','Gallium',136,69.72], [32,'Ge','Germanium',125,72.59], [33,'As','Arsenic',114,74.92], [34,'Se','Selenium',103,78.96], [35,'Br','Bromine',94,79.90],
          [36,'Kr','Krypton',88,83.80], [37,'Rb','Rubidium',265,85.46], [38,'Sr','Strontium',219,87.62], [39,'Y','Yttrium',212,88.90], [40,'Zr','Zirconium',206,91.22],
          [41,'Nb','Niobium',198,92.90], [42,'Mo','Molybdenum',190,95.94], [43,'Tc','Technetium',183,98.00], [44,'Ru','Ruthenium',178,101.07], [45,'Rh','Rhodium',173,102.90],
          [46,'Pd','Palladium',169,106.40], [47,'Ag','Silver',165,107.86], [48,'Cd','Cadmium',161,112.41], [49,'In','Indium',156,114.82], [50,'Sn','Tin',145,118.69],
          [51,'Sb','Antimony',133,121.75], [52,'Te','Tellurium',123,126.90], [53,'I','Iodine',115,127.60], [54,'Xe','Xenon',108,131.30], [55,'Cs','Cesium',298,132.90],
          [56,'Ba','Barium',253,137.33], [57,'La','Lanthanum',195,138.90], [58,'Ce','Cerium',185,140.12], [59,'Pr','Praseodymium',247,140.90], [60,'Nd','Neodymium',206,144.24],
          [61,'Pm','Promethium',205,145.00], [62,'Sm','Samarium',238,150.40], [63,'Eu','Europium',231,151.96], [64,'Gd','Gadolinium',233,157.25], [65,'Tb','Terbium',225,158.92],
          [66,'Dy','Dysprosium',228,162.50], [67,'Ho','Holmium',226,164.93], [68,'Er','Erbium',226,167.26], [69,'Tm','Thulium',222,168.93], [70,'Yb','Ytterbium',222,173.04],
          [71,'Lu','Lutetium',217,174.96], [72,'Hf','Hafnium',208,178.49], [73,'Ta','Tantalum',200,180.94], [74,'W','Tungsten',193,183.85], [75,'Re','Rhenium',188,186.20],
          [76,'Os','Osmium',185,190.20], [77,'Ir','Iridium',180,192.22], [78,'Pt','Platinum',177,195.09], [79,'Au','Gold',174,196.96], [80,'Hg','Mercury',171,200.59],
          [81,'Tl','Thallium',156,204.37], [82,'Pb','Lead',154,207.20], [83,'Bi','Bismuth',143,208.98], [84,'Po','Polonium',135,209.00], [85,'At','Astatine',127,210.00],
          [86,'Rn','Radon',120,222.00], [87,'Fr','Francium','None',223.00], [88,'Ra','Radium','None',226.02], [89,'Ac','Actinium',195,227.02], [90,'Th','Thorium',180,231.03],
          [91,'Pa','Protactinium',180,232.03], [92,'U','Uranium',175,237.04], [93,'Np','Neptunium',175,238.02], [94,'Pu','Plutonium',175,242.00], [95,'Am','Americium',175,243.00] ]
    dicList=[]
    for i in lib:
        # NOTE(review): 'radi' is mapped to the atomic number here while
        # 'radious' holds the pm radius — the lookup below uses 'radious',
        # so the mislabeled 'radi' field is effectively unused.
        dicList.append({'label':i[1], 'radi':i[0], 'fullname':i[2], 'radious':i[3], 'mass':i[4]})
    elementDF = pd.DataFrame(dicList)
    if key == 'radi':
        return elementDF[elementDF.label == atom].radious.iloc[0] /100 # pm to Angtrom
    elif key == 'mass':
        return elementDF[elementDF.label == atom].mass.iloc[0]
from tkinter import Button, Label, Tk, Grid, Entry, filedialog, END, StringVar,Text
from os import listdir
## Functions for GUI
def readAllEnrty():
    """Read every GUI entry, echo the run summary, and build NImage structures.

    Relies on the module-level Tkinter widgets (OutputEntry, ElementEntry,
    UnitCell*Entry, outputText, ...) created in the script section — the
    UnitCell*Entry widgets are presumably defined past this point; confirm.
    """
    workspace = OutputEntry.get()
    ElementList = [i.replace(' ','') for i in ElementEntry.get().split(',')]
    NumElementList = [int(i.replace(' ','')) for i in NumElementEntry.get().split(',')]
    Density = float(DensityEntry.get())
    MaxAtomNum = int(MaxAtomNumEntry.get())
    NImage = int(NImageEntry.get())
    InitCell = [[float(UnitCellAxEntry.get()),float(UnitCellAyEntry.get()),float(UnitCellAzEntry.get())],
                [float(UnitCellBxEntry.get()),float(UnitCellByEntry.get()),float(UnitCellBzEntry.get())],
                [float(UnitCellCxEntry.get()),float(UnitCellCyEntry.get()),float(UnitCellCzEntry.get())]]
    outputText.delete('1.0', END)
    outputText.insert(END, str('Output Path/File: %s/POSCAR_XX\n'%workspace))
    if len(ElementList) == len(NumElementList):
        compisition=''
        Ndivision =0
        for element, Nelement in zip(ElementList, NumElementList):
            compisition+='%s%i' %(element,Nelement)
            Ndivision += Nelement
        outputText.insert(END, str('Compisition: %s\n'%compisition))
        outputText.insert(END, str('Density: %3.2f\n'%Density))
        outputText.insert(END, str('Maximun number of Atom: %i\n'%(int(MaxAtomNum/Ndivision) *int(Ndivision))))
        outputText.insert(END, str('Input Cell : %s\n'%(InitCell)))
        outputText.insert(END, str('Number of amorphous structure to build: %i\n'%(NImage)))
        outputText.insert(END, str('Start to construct the structure...\n\n: '))
        outputText.insert(END, str('In the Outputpath we generated\n: '))
        # One POSCAR per requested image, numbered POSCAR00, POSCAR01, ...
        for i in range(NImage ):
            buiding = build_SprayinBox( OutputPath=workspace+'/POSCAR%2.2i.vasp' %i,
                                        N_Atoms = NumElementList , AtomName = ElementList,
                                        MaxAtom = int(MaxAtomNum/Ndivision) *int(Ndivision),
                                        AtomDensity = Density,
                                        InitCell = InitCell
                                        )
        # Echo the workspace contents, three names per line.
        for n, i in enumerate(listdir(workspace)):
            if n % 3 == 0 and n !=0 :
                outputText.insert(END, str('%s\n ' %i))
            else: outputText.insert(END, str('%s , ' %i))
    else:
        outputText.insert(END, str('Number of Elements: %s\n'%ElementList))
        outputText.insert(END, str('Wrong Input, Match the number of elements and component\n'))
def output(Inentry):
    """Ask the user for a directory, place it in the given entry, return it."""
    path = filedialog.askdirectory()
    # Bug fix: Entry indices start at 0; delete(1, END) left the first
    # character of the previous text in place.
    Inentry.delete(0, END)  # Remove current text in entry
    Inentry.insert(0, path)  # Insert the 'path'
    return path
class MainApplication():
    """Thin wrapper around the Tk root window with labeled-entry helpers."""
    def __init__(self, master):
        self.master = master
        self.master.title("Amorphous Builder")
        # label = Label(self.master, text="Test Callback", )
    def LabelEntry(self, StringValue, ColNum=0, RowNum=0, initialString=False):
        """Place a Label + 40-char Entry pair on the grid; return both widgets."""
        def _on_click(event):
            # Clear placeholder text the first time the entry is clicked.
            event.widget.delete(0, END)
        label = Label(self.master, text=StringValue).grid( column = ColNum, row = RowNum, pady=5, padx=5)
        entry = Entry(self.master, width=40)
        # if initialString:
        entry.grid( column = ColNum+1, row = RowNum, sticky='W', pady=5, padx=5)
        entry.bind("<Button-1>", _on_click)
        return label, entry
    def BrowsDirButton(self, entry, ColNum=0, RowNum=0):
        """Place a Browse button that fills `entry` via the directory dialog."""
        button = Button(self.master, text="Browse", command=lambda: output(entry))
        button.grid(column = ColNum, row = RowNum)
        return button
    def InitCellInforTable(self, StringValue, ColNum=0, RowNum=0, initialString=False):
        """Place one label plus three 10-char entries (x, y, z) side by side."""
        def _on_click(event):
            event.widget.delete(0, END)
        axlabel = Label(self.master, text=StringValue).grid( column = ColNum, row = RowNum, pady=5, padx=5)
        xentry = Entry(self.master, width=10)# if initialString:
        yentry = Entry(self.master, width=10)# if initialString:
        zentry = Entry(self.master, width=10)# if initialString:
        xentry.grid( column = ColNum+1, row = RowNum, sticky='W', pady=5, padx=5)
        yentry.grid( column = ColNum+1, row = RowNum, sticky='W', pady=5, padx=5+30*3)
        zentry.grid( column = ColNum+1, row = RowNum, sticky='W', pady=5, padx=5+30*6)
        return axlabel,xentry,yentry,zentry
    def close(self):
        # Stop the Tk main loop.
        self.master.quit()
        return
if __name__ == '__main__':
root = Tk()
gui = MainApplication(root)
rown = 0
IntroductionLabel=Label(gui.master, text='(POSCAR Format / based on Spraying)', font="Helvetica 12 bold").grid(column = 1, row = rown, columnspan =2 , pady=5, padx=5)
rown +=1
InLabel = Label(gui.master, text='Type Input values', font="Helvetica 12 bold").grid(column = 1, row = rown, columnspan =2 , pady=5, padx=5)
rown +=1
ElementLabel,ElementEntry = gui.LabelEntry('Element', 1, rown)
ElementExample=StringVar(gui.master, value='(example for Si3N4) Si, N')
ElementEntry.configure(textvariable=ElementExample); rown +=1
NumElementLabel,NumElementEntry = gui.LabelEntry('Number of Element', 1, rown)
NumElementExample=StringVar(gui.master, value='(example for Si3N4) 3, 4')
NumElementEntry.configure(textvariable=NumElementExample); rown +=1
DensityLabel,DensityEntry = gui.LabelEntry('Density (g/cm^3)', 1, rown); rown +=1
NImageLabel,NImageEntry = gui.LabelEntry('Number of Structure', 1, rown); rown +=1
MaxAtomNumLabel,MaxAtomNumEntry = gui.LabelEntry('Max Atom Number', 1, rown); rown +=1
OutputLabel,OutputEntry = gui.LabelEntry('Output Path', 1, rown)
OuyputBrowse2 = gui.BrowsDirButton(OutputEntry, 3, rown); rown +=1
defLabel = Label(gui.master, text='Initial Cell Condition', font="Helvetica 12 bold")
defLabel.grid(column = 1, row = rown, columnspan =2 , pady=5, padx=5); rown +=1
UnitCellALabel,UnitCellAxEntry,UnitCellAyEntry,UnitCellAzEntry = gui.InitCellInforTable('a-axis', 1, rown); rown +=1
UnitCellBLabel,UnitCellBxEntry,UnitCellByEntry,UnitCellBzEntry = gui.InitCellInforTable('b-axis', 1, rown); rown +=1
UnitCellCLabel,UnitCellCxEntry,UnitCellCyEntry,UnitCellCzEntry = gui.InitCellInforTable('c-axis', 1, rown); rown +=1
UnitCellCxEntryExp=StringVar(gui.master, value='0')
UnitCellCxEntry.configure(textvariable=UnitCellCxEntryExp)
UnitCellCyEntryExp=StringVar(gui.master, value='0')
UnitCellCyEntry.configure(textvariable=UnitCellCyEntryExp)
UnitCellCzEntryExp=StringVar(gui.master, value='0')
UnitCellCzEntry.configure(textvariable=UnitCellCzEntryExp)
defLabel2 = Label(gui.master, text='(put 0,0,0 in c-axis) for automatic vertical cell define')
defLabel2.grid(column = 1, row = rown, columnspan =2 , pady=5, padx=5); rown +=1
begin_button = Button(gui.master, text='Begin!', command=lambda: readAllEnrty())
begin_button.grid(column = 1, row= rown+1, columnspan=3); rown +=2
outputLabel = Label(gui.master, text='Progress Box', font="Helvetica 10 bold")
outputLabel.grid(column = 1, row = rown, columnspan =1 , pady=5, padx=5, sticky='w' ); rown +=1
outputText=Text(gui.master,height=10)#,warp='word')
outputText.grid(column = 1, row= rown, columnspan=3)
root.mainloop()
| [
"noreply@github.com"
] | Jkwnlee.noreply@github.com |
63c4bf0dfddc62edee6e0e06ceae87c3172669ef | 50c7d7f94263b250bba1ded2b247925ff4e28cf8 | /mars/Jetson.py | 08783934524b067e2f0d398903b10d85ad4ad3ae | [
"MIT"
] | permissive | jmaggio14/goddard | 8bce241ed19b27249b3707f98784bbf13c90d0ea | f34755c9fbd982b2965d9b23685df0f21ebaff08 | refs/heads/master | 2021-01-12T13:25:32.669157 | 2016-09-25T18:43:23 | 2016-09-25T18:43:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,497 | py | # Copyright (c) 2016, Jeffrey Maggio and Joseph Bartelmo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
ark9719
6/17/2016
'''
from GpioPin import GpioPin
from Watchdog import Watchdog
from Threads import TelemetryThread
from Valmar import Valmar
from GraphUtility import GraphUtility
import logging
import csv
import sys
import time
import json
import subprocess
import base64
logger = logging.getLogger('mars_logging')
telemetryLogger = logging.getLogger('telemetry_logging')
class Jetson(object):
"""
Jetson controls input from the controller, and manages the data sent back from the
arduino/mars
"""
def __init__(self, devices, config, timestamp, q = None, marsOnlineQueue = None):
self._devices = devices
self._pinHash = self._devices['pinHash']
self._devices['Watchdog'] = Watchdog(config, self._devices['Arduino'], self._devices['Mars'], self._pinHash)
self.initDevices()
self._statsT = TelemetryThread(self)
self.initCommands()
self._exit = False
self._timestamp = timestamp
self._config = config
self._header = False
self._q = q
self.graphUtil = GraphUtility(config)
self._pauseTelemetry = False
self._marsOnlineQueue = marsOnlineQueue
def initDevices(self):
"""
Make every device in the device hash accessible via Jetson
:return:
"""
self._arduino = self._devices['Arduino']
self._stream = self._devices['Stream']
self._mars = self._devices['Mars']
self._motor = self._devices['Motor']
self._motor._arduino = self._arduino
self._led = self._devices['LED']
self._led._arduino = self._arduino
self._watchdog = self._devices['Watchdog']
self._valmar = self._devices['Valmar']
def initCommands(self):
"""
Initialize a list of valid system commands
:return:
"""
self._sysCommands = {'system shutdown': self.systemShutdown,
'system restart': self.systemRestart,
'recall': self._watchdog.recall,
'stream open': self._stream.open,
'stream close': self._stream.close,
'motor off': self._pinHash['motorRelay'].toggleOff,
'motor on' : self._pinHash['motorRelay'].toggleOn,
'laser off': self._pinHash['laserRelay'].toggleOff,
'laser on' : self._pinHash['laserRelay'].toggleOn,
'led off': self._pinHash['ledRelay'].toggleOff,
'led on' : self._pinHash['ledRelay'].toggleOn,
'reset arduino': self._arduino.resetArduino,
'hibernate': self.hibernate,
'start': self.start,
'exit': self.exit,
'list logs': self.listLogs,
'watchdog off': self._watchdog.disable,
'watchdog on': self._watchdog.enable,
'valmar off': self._valmar.disable,
'valmar on': self._valmar.enable
}
def safeInput(self):
"""
Continuesly scans for controller input first identifying the type of command then checking validity
before writing to the arduino and storing the last command.
:return:
"""
#Prompt for input
if self._q is None:
try:
controlCode = raw_input("LED, motion, stream, or control code: \n")
except KeyboardInterrupt:
self.exit()
else:
controlCode = self._q.get()
myCodeInput = self.recieveInput(controlCode)
return controlCode
def recieveInput(self, controlCode):
"""
Decipher the type of input. Motor, LED, Stream or System
:param controlCode:
:return: Return specialized command object
"""
logger.info("Control code: " + controlCode)
if controlCode in self._motor._motorCodes:
return self._motor.issue(controlCode, self._arduino)
elif "forward" in controlCode or "backward" in controlCode or "brake" in controlCode:
print 'motor operand'
return self._motor.movement(controlCode)
elif "brightness" in controlCode:
return self._led.issue(self._arduino, controlCode)
elif controlCode in self._stream._streamCodes:
return self._stream.issue(controlCode)
elif controlCode in self._sysCommands:
self._sysCommands[controlCode]()
elif 'graph' in controlCode:
self.graph(controlCode)
else:
return logger.warning("Invalid control code. Check documentation for command syntax.")
def inputLoop(self):
"""
Runs a loop over the safeInput function, checks self._exit to determine
whether or not it should hop out of the loop
"""
while self._exit == False:
self.safeInput()
def telemetryController(self):
"""
The controller for generating(Reading) data, checking it for errors and saving it.
:return:
"""
telemetry = None
if self._pauseTelemetry == False:
logger.debug("Generating Telemetry...")
telemetry = self._mars.generateTelemetry()
if telemetry is not None:
#inject telemetry updates
telemetry.update(self._watchdog.watch(telemetry))
telemetry.update(self._valmar.updateTelemetry())
logger.debug("Displaying Telemetry...")
telemetryLogger.info(self.displayTelemetry(self._mars._telemetry))
logger.debug("Saving telemetry...")
self.saveStats(self._mars._telemetry)
#Set the integ time to the time of the last read for calculations
else:
self._arduino.flushBuffers()
pass
self._mars._integTime = time.time()
else:
i = 0
while self._arduino._init is False and i < 5:
time.sleep(5)
i += 1
def displayTelemetry(self, data):
"""
Transforms the data into a more readable output for logger
:param data:
:return:
"""
return json.dumps(data)
def saveStats(self, data):
"""
This is the method in control of saving data generated by Mars onto the harddisk.
:param data:
:return:
"""
self._filename = self._config.logging.output_path + '/output/' + self._config.user_input.log_name + '-' + self._timestamp + '/' + self._config.user_input.log_name + '_machine_log.csv'
#If the header to the file isn't written, write it.
try:
with open(self._filename, 'a') as rawFile:
#If the header to the file isn't written, write it.
if (not self._header):
rawWriter = csv.DictWriter(rawFile, data.keys())
rawWriter.writeheader()
self._header = True
rawWriter = csv.DictWriter(rawFile, data.keys())
rawWriter.writerow(data)
except Exception as e:
logger.warning("unable to log data because: \r\n {}".format(e))
def manageThreads(self, toggle):
"""
This method manages the two threads that will run for the duration of the program. One scanning for input,
the other generating, displaying, and saving data.
:return:
"""
if (toggle == 'start'):
logger.info("Attempting to start threads")
try:
self._statsT.start()
self.inputLoop()
except Exception as e:
logger.error("error starting threads ({})".format(e))
elif (toggle == 'stop'):
logger.info("Attempting to stop threads")
try:
self._statsT.stop()
except Exception as e:
logger.error("error stopping threads ({})".format(e))
def systemRestart(self):
"""
Restart the entire system, arduino included
:return:
"""
logger.warning("initiating safe restart")
logger.warning ("shutting down arduino")
self._arduino.powerOff()
### add functionality to cut power to motor controller
logger.warning("restarting this computer")
logger.warning("this connection will be lost")
time.sleep(1)
subprocess.call(['sudo reboot'], shell=True)
def systemShutdown(self):
"""
Shutdown the system
:return:
"""
logger.info("initiating safe shutdown")
### add functionality to cut power to motor controller
logger.info("shutting downn this computer")
logger.info("this connection will be lost")
subprocess.call(['sudo poweroff'], shell=True)
def start(self):
"""
Start command for the program. Start all the relays, the motor, stream, and threads
:return:
"""
self._pinHash['motorRelay'].toggleOff()
logger.info("Motor circuit closed")
self._pinHash['ledRelay'].toggleOff()
logger.info("LED circuit closed")
self._pinHash['laserRelay'].toggleOff()
logger.info("Laser circuit closed")
logger.info("Starting motor...")
self._motor.start()
logger.info("Starting stream...")
self._stream.open()
logger.info("Starting threads...")
self.manageThreads('start')
def exit(self):
"""
Exit command for stopping the program.
:return:
"""
logger.info("Stopping threads")
self.manageThreads('stop')
logger.info("Braking motor...")
self._motor.brake()
time.sleep(2) #necessary to make sure Mars moves to a stop
logger.info("Closing stream...")
self._sysCommands['stream close']()
logger.info(("Turning off LEDs..."))
self._led.issue(self._arduino, "brightness 0")
self._pinHash['motorRelay'].toggleOff()
logger.info("Motor Circuit turned off")
self._pinHash['ledRelay'].toggleOff()
logger.info("LED Circuit turned off")
self._pinHash['laserRelay'].toggleOff()
logger.info("Laser Circuit turned off")
self._exit = True
if self._marsOnlineQueue is not None:
self._marsOnlineQueue.put(0)
def hibernate(self):
"""
TODO: Hibernate function for Jetson
:return:
"""
self._pinHash['motorRelay'].toggleOff()
logger.warning("Motor circuit opened")
self._pinHash['ledRelay'].toggleOff()
logger.warning("Led circuit opened")
self._pinHash['laserRelay'].toggleOff()
logger.warning("Laser circuit opened")
self._stream.close()
logger.warning("Closing video stream")
self._valmar.issueCommand("enable",False)
logger.warning("Pausing VALMAR gap measurement system")
self._pauseTelemetry = True
logger.warning("Pausing telemetry")
logger.warning("System hibernating")
def resume(self):
"""
this method is meant to resume normal function after hibernation
:return:
"""
self._pinHash['motorRelay'].toggleOn()
logging.info("Motor circuit closed")
self._pinHash['ledRelay'].toggleOn()
logging.info("LED circuit closed")
self._pinHash['laserRelay'].toggleOn()
logging.info("Laser circuit closed")
logging.info("All circuits closed and ready for use")
logging.info("Resuming stream...")
self._stream.open()
def graph(self, graphCommand):
graphCommand = graphCommand.split(' ')
if len(graphCommand) > 1:
self.graphUtil.generate_pdf(graphCommand[1])
else:
self.graphUtil.generate_pdf()
def listLogs(self):
logger.info(self.graphUtil.get_all_outputs())
| [
"joebartelmo@gmail.com"
] | joebartelmo@gmail.com |
9e01ee06ccb0d0c3f6fcbb90b6af174e4d295b4a | 96086ae5e7bfa1e40159f919269a90c83e472326 | /opengever/usermigration/plone_tasks.py | 121756f0302306a726785ba83d2b3607d1afb842 | [] | no_license | lukasgraf/opengever.core | 6fc313717fbec3692354e56c2c3293789076a389 | a15c4ff8e0d5494906d7de46a43e3427c8d2d49f | refs/heads/master | 2020-12-01T11:38:46.721555 | 2018-06-18T10:13:09 | 2018-06-18T10:13:09 | 57,871,187 | 0 | 0 | null | 2016-05-02T06:59:58 | 2016-05-02T06:59:58 | null | UTF-8 | Python | false | false | 6,253 | py | """
Migrate user IDs in Plone tasks (issuers, responsibles, responses)
"""
from opengever.ogds.base.utils import ogds_service
from opengever.task.adapters import IResponseContainer
from opengever.task.task import ITask
from opengever.usermigration.exceptions import UserMigrationException
from plone import api
import logging
logger = logging.getLogger('opengever.usermigration')
FIELDS_TO_CHECK = ('responsible', 'issuer')
class PloneTasksMigrator(object):
"""This migrator changes the `issuer` and `responsible` fields on
Plone tasks, as well as updating responses on tasks as needed.
It does not however fix local roles assigned to Plone tasks - these can
be fixed using the "local roles" migration in ftw.usermigration.
"""
def __init__(self, portal, principal_mapping, mode='move', strict=True):
self.portal = portal
self.principal_mapping = principal_mapping
if mode != 'move':
raise NotImplementedError(
"PloneTasksMigrator only supports 'move' mode")
self.mode = mode
self.strict = strict
# Keep track of tasks that need reindexing
self.to_reindex = set()
self.task_moves = {
'responsible': [],
'issuer': [],
}
self.response_moves = {
'creator': [],
'responsible_before': [],
'responsible_after': [],
}
def _verify_user(self, userid):
ogds_user = ogds_service().fetch_user(userid)
if ogds_user is None:
msg = "User '{}' not found in OGDS!".format(userid)
raise UserMigrationException(msg)
def _fix_responses(self, obj):
container = IResponseContainer(obj)
path = '/'.join(obj.getPhysicalPath())
for response_no, response in enumerate(container):
response_identifier = '%s - Response #%s' % (path, response_no)
# Fix response creator
creator = getattr(response, 'creator', '')
if creator in self.principal_mapping:
logger.info("Fixing 'creator' for %s" % response_identifier)
new_userid = self.principal_mapping[creator]
response.creator = new_userid
self.response_moves['creator'].append((
response_identifier, creator, new_userid))
for change in response.changes:
# Fix responsible [before|after]
if change.get('id') == 'responsible':
before = change.get('before', '')
if before in self.principal_mapping:
new_userid = self.principal_mapping[before]
change['before'] = unicode(new_userid)
# Need to flag changes to track mutations - see #3419
response.changes._p_changed = True
logger.info(
"Fixed 'responsible:before' for change in %s "
"(%s -> %s)" % (
response_identifier, before, new_userid))
self.response_moves['responsible_before'].append((
response_identifier, before, new_userid))
after = change.get('after', '')
if after in self.principal_mapping:
new_userid = self.principal_mapping[after]
change['after'] = unicode(new_userid)
# Need to flag changes to track mutations - see #3419
response.changes._p_changed = True
logger.info(
"Fixed 'responsible:after' for change in %s "
"(%s -> %s)" % (
response_identifier, after, new_userid))
self.response_moves['responsible_after'].append((
response_identifier, after, new_userid))
def _migrate_plone_task(self, obj):
task = ITask(obj)
for field_name in FIELDS_TO_CHECK:
# Check 'responsible' and 'issuer' fields
old_userid = getattr(task, field_name, None)
if old_userid in self.principal_mapping:
path = '/'.join(obj.getPhysicalPath())
logger.info('Fixing %r for %s' % (field_name, path))
new_userid = self.principal_mapping[old_userid]
setattr(task, field_name, new_userid)
self.to_reindex.add(obj)
self.task_moves[field_name].append(
(path, old_userid, new_userid))
def migrate(self):
catalog = api.portal.get_tool('portal_catalog')
# Verify all new users exist before doing anything
for old_userid, new_userid in self.principal_mapping.items():
self._verify_user(new_userid)
all_tasks = [b.getObject() for b in catalog.unrestrictedSearchResults(
object_provides=ITask.__identifier__)]
for obj in all_tasks:
self._migrate_plone_task(obj)
self._fix_responses(obj)
for obj in self.to_reindex:
# Reindex 'responsible' and 'issuer' for changed objects.
logger.info('Reindexing %s' % '/'.join(obj.getPhysicalPath()))
obj.reindexObject(idxs=FIELDS_TO_CHECK)
results = {
'task_issuers': {
'moved': self.task_moves['issuer'],
'copied': [],
'deleted': []},
'task_responsibles': {
'moved': self.task_moves['responsible'],
'copied': [],
'deleted': []},
'response_creators': {
'moved': self.response_moves['creator'],
'copied': [],
'deleted': []},
'response_responsible_before': {
'moved': self.response_moves['responsible_before'],
'copied': [],
'deleted': []},
'response_responsible_after': {
'moved': self.response_moves['responsible_after'],
'copied': [],
'deleted': []},
}
return results
| [
"lukas.graf@4teamwork.ch"
] | lukas.graf@4teamwork.ch |
0bb2549289954d0cdd01d3c98940189639a7f025 | de6a1f394dbdc7584febcaa08f3ada33e56c065b | /test_010.py | 56da4f41a6571119ecf73df86ffff554661b12b3 | [] | no_license | SongJialiJiali/test | 41fdf59004940f4e5f5a85215cbf6c89a10f56ca | 2df5d3b361bc7d25cd3d2afd5ac1c64fbc303920 | refs/heads/master | 2022-06-09T02:10:13.949872 | 2020-05-02T01:25:17 | 2020-05-02T01:25:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | #!/usr/bin/env python3
# -*- coding : utf-8 -*-
from hashlib import sha256
from hmac import HMAC
import os
def encrypt_password(password,salt = None):
if salt is None:
salt = os.urandom(8)
if isinstance(salt,str):
salt = salt.encode('utf-8')
new_password = password.encode('utf-8')
encrypt_password = HMAC(salt,new_password,sha256).hexdigest()
print("Encrypt passwrod is %s."% encrypt_password )
if __name__ == '__main__':
raw_password = input("Please input your password:")
encrypt_password(raw_password)
| [
"tangqing@ruc.edu.cn"
] | tangqing@ruc.edu.cn |
45a755bd8b8cb6a153369221f028d183ec19b8d3 | 3f2d5b39b5abeb20a7042aa9baafa6088be1fb7a | /10-List/ex1.py | 79406977b6d15b28fc16c65488a650888b8ee90c | [] | no_license | lcantillo00/python-exercises | ee98e89cac271c6dbb56e94fb8b3e46efacfa58c | e1fb01d5c8e9aef4448acb62580d21662ce074a0 | refs/heads/master | 2020-12-03T00:08:40.578431 | 2017-08-13T13:50:14 | 2017-08-13T13:50:14 | 95,993,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | alist = [4, 2, 8, 6, 5]
alist[2] = True
print(alist)
| [
"test@examle.com"
] | test@examle.com |
703b872755bffebbd0241a908c313554e9a27581 | 68501c700ad51c66e265887ae2a695cbf9fdea4f | /face_recognition/dnn.py | e78f0f92a57fa0d0e918e142b109aee0ff449dec | [] | no_license | rick00young/machine_learn | ac525aad3e807e5419ae6f0cd1100da3ac94d116 | bd49d46f80c063857efc181afa533999ec6e958e | refs/heads/master | 2021-09-10T08:03:34.392915 | 2018-03-22T14:50:31 | 2018-03-22T14:50:31 | 115,474,882 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | import tflearn
import sys, os
import numpy as np
from sklearn.cross_validation import train_test_split
from termcolor import cprint
import load_face_feature
x, y, l = load_face_feature.load_feature()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=1)
# print(x)
# print(y)
# sys.exit(1)
# Build neural network
net = tflearn.input_data(shape=[None, 128])
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, len(l), activation='softmax')
net = tflearn.regression(net)
# Define model
model = tflearn.DNN(net)
# Start training (apply gradient descent algorithm)
model.fit(x_train, y_train, n_epoch=300, batch_size=10, show_metric=True)
model.save('model/dnn/face_model.rflearn')
pred = model.predict(x_test)
# print(pred)
# print(type(pred))
for _i, _p in enumerate(pred):
_max_sort = np.argsort(-_p)
# print(_max_sort)
_max = _max_sort[0]
# print(_i, _max)
real_sort = np.argsort(-np.array(y_test[_i]))
_real_max = real_sort[0]
print('predict: index: %s user_name: %s; real user_name: %s' %
(_i, l.get(_max, ''), l.get(_real_max, ''))) | [
"yyr168@gmail.com"
] | yyr168@gmail.com |
0188c7002df3f9b926221ab5ae6d97b4e6680527 | e8f86629459c7e8e0e23d493fd039e539072118a | /bin/procress.py | ba83784fbe4eac0636ddde333979c37a75d63f21 | [] | no_license | aripollak/random | 1d443b79e05c2c775ef3d8d140bdc3f6c27911f4 | 91b544bd29d982e73089bc8664feb19287da69b1 | refs/heads/master | 2023-08-18T05:02:55.884683 | 2023-08-08T20:23:16 | 2023-08-08T20:23:16 | 2,603,084 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,356 | py | #!/usr/bin/python3
# Requires: python 3; Mock & nose for testing
import unittest
import time
import re
import sys
import subprocess
from io import BytesIO
from unittest import mock
from argparse import ArgumentParser
DESCRIPTION = """Attaches to a running process with strace and shows you the
number of bytes being read/written.
"""
def prettysize(num, time=None):
"""Returns a "pretty" string of *num* bytes, converting to KB, MB, etc.
with units.
If *time* is given, treats it as number of seconds and returns
the number of bytes per second as a formatted string.
>>> procress.prettysize(5000)
'4.88KB'
>>> procress.prettysize(5000, time=2)
'2.44KB/s'
"""
suffix = ""
if time is not None:
if time != 0:
num /= time
suffix = '/s'
for x in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0:
return '%3.2f%s%s' % (num, x, suffix)
num /= 1024.0
return '%3.3f%s%s' % (num, 'TB', suffix)
class Procress:
def __init__(self, pid):
self._pid = pid
# only used in run(), but need to start timing when the proc starts:
self._times = {'start': time.time(), 'last_output': 0.0}
self._proc = subprocess.Popen(
('strace', '-q', '-e', 'trace=read,write', '-p', str(self._pid)),
bufsize=1, stderr=subprocess.PIPE, close_fds=True)
self._stream = self._proc.stderr
def analyze(self, interval=0.25):
# TODO: strace supports multiple -p options; this would be a nice feature
counters = {'read': 0, 'write': 0}
for line in self._stream:
if line.decode('ascii').startswith('--- '):
continue
matches = re.match(r'(\w*)\(.*\)\s+=\s+(\d+)',
line.decode('ascii').rstrip('\n'))
try:
counters[matches.group(1)] += int(matches.group(2))
except:
print("unrecognized line: " + str(line))
raise
current_time = time.time()
elapsed_time = current_time - self._times['start']
# we don't need to output progress for every input line,
# just every *interval* seconds.
if current_time - self._times['last_output'] >= interval:
self._times['last_output'] = current_time
yield 'read: {0} ({1}); write: {2} ({3})'.format(
prettysize(counters['read']),
prettysize(counters['read'], time=elapsed_time),
prettysize(counters['write']),
prettysize(counters['write'], time=elapsed_time))
def main(argv=sys.argv):
parser = ArgumentParser(description=DESCRIPTION)
# require a -p opt for forwards-compatibility if we want to support commands
parser.add_argument('-p', '--pid', dest='pid', type=int, required=True,
help='Attach to the given process ID (e.g. from the ps command)')
args = parser.parse_args(argv[1:])
try:
# use ljust to clear the line before overwriting it with \r
sys.stdout.writelines(line.ljust(80) + '\r'
for line in Procress(args.pid).analyze())
except KeyboardInterrupt:
print() # so the last line written doesn't get lost
except Exception:
print()
raise
class TestProcress(unittest.TestCase):
@mock.patch('subprocess.Popen')
@mock.patch('time.time', return_value=1.0)
def test_analyze(self, mock_time, mock_subprocess):
proc = Procress(1)
proc._stream = BytesIO('write(1, "foo", 3) = 3'.encode('ascii'))
self.assertEqual(next(proc.analyze(interval=0.0)),
'read: 0.00bytes (0.00bytes/s); write: 3.00bytes (3.00bytes/s)')
with self.assertRaises(StopIteration):
next(proc.analyze(interval=0.0))
@mock.patch('subprocess.Popen')
@mock.patch('time.time', return_value=1.0)
def test_analyze_invalid(self, mock_time, mock_subprocess):
proc = Procress(1)
proc._stream = BytesIO(
b'attach: ptrace(PTRACE_ATTACH, ...): No such process')
with self.assertRaises(AttributeError):
next(proc.analyze(interval=0.0))
next(proc.analyze(interval=0.0))
if __name__ == '__main__':
sys.exit(main())
| [
"ajp@aripollak.com"
] | ajp@aripollak.com |
85d65fee23ef9219bccd7539832408deff403c06 | 2639f4e77b66b0453472f4e8dd6b8748034d2bb6 | /items.py | e72dff1d223738ef881256f66c48de27e6f28753 | [] | no_license | loile1990/scrapy | 71be7f9646a8062a2be1d45e18f6a7cd302ade96 | 243e9edcd5b6e4cd31a9ca8691a7449f64e5eebe | refs/heads/master | 2022-12-09T21:07:51.742840 | 2020-09-08T11:27:51 | 2020-09-08T11:27:51 | 293,767,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy import spider
from scrapy.spider import BaseSpider
from scrapy.spiders import CrawlSpider
from scrapy.spiders import Spider
from scrapy.selector import HtmlXPathSelector
from jobs.items import JobsItem
class JobsItem(scrapy.Item):
# define the fields for your item here like:
title = scrapy.Field()
name = scrapy.Field()
pass
| [
"noreply@github.com"
] | loile1990.noreply@github.com |
dd2b38d1b15276435f35be709ffd16991ea3190a | efdc0fc89c2023fc8821caddb169a8f3defa15dd | /consumers/models/line.py | 3f644851964bef4134fd3811d527b736441173ee | [] | no_license | harishgobugari/Optimizing_Public_Transportation | f4d87bb6bc3f6425a4465ce7780ec7283c5b4dfd | f8232058ae446cb961f51e57b1b94e2c2ebe2a35 | refs/heads/main | 2023-04-09T13:30:43.118629 | 2021-04-17T21:25:38 | 2021-04-17T21:25:38 | 358,984,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,019 | py | """Contains functionality related to Lines"""
import json
import logging
from models import Station
logger = logging.getLogger(__name__)
class Line:
"""Defines the Line Model"""
def __init__(self, color):
"""Creates a line"""
self.color = color
self.color_code = "0xFFFFFF"
if self.color == "blue":
self.color_code = "#1E90FF"
elif self.color == "red":
self.color_code = "#DC143C"
elif self.color == "green":
self.color_code = "#32CD32"
self.stations = {}
def _handle_station(self, value):
"""Adds the station to this Line's data model"""
if value["line"] != self.color:
return
self.stations[value["station_id"]] = Station.from_message(value)
def _handle_arrival(self, message):
"""Updates train locations"""
value = message.value()
prev_station_id = value.get("prev_station_id")
prev_dir = value.get("prev_direction")
if prev_dir is not None and prev_station_id is not None:
prev_station = self.stations.get(prev_station_id)
if prev_station is not None:
prev_station.handle_departure(prev_dir)
else:
logger.debug("unable to handle previous station due to missing station")
else:
logger.debug(
"unable to handle previous station due to missing previous info"
)
station_id = value.get("station_id")
station = self.stations.get(station_id)
if station is None:
logger.debug("unable to handle message due to missing station")
return
station.handle_arrival(
value.get("direction"), value.get("train_id"), value.get("train_status")
)
def process_message(self, message):
"""Given a kafka message, extract data"""
# TODO: Based on the message topic, call the appropriate handler.
if message.topic() == "chicago.stations.stream": # Set the conditional correctly to the stations Faust Table
try:
value = json.loads(message.value())
self._handle_station(value)
except Exception as e:
logger.fatal("bad station? %s, %s", value, e)
elif "arrivals" in message.topic(): # Set the conditional to the arrival topic
self._handle_arrival(message)
elif "TURNTILE_SUMMARY" in message.topic(): # Set the conditional to the KSQL Turnstile Summary Topic
json_data = json.loads(message.value())
station_id = json_data.get("STATION_ID")
station = self.stations.get(station_id)
if station is None:
logger.debug("unable to handle message due to missing station")
return
station.process_message(json_data)
else:
logger.debug(
"unable to find handler for message from topic %s", message.topic
)
| [
"noreply@github.com"
] | harishgobugari.noreply@github.com |
677a2e7bd9a52dbc65cf97e180de728918cde4f0 | 4b6d5759563418de16c6793cd98620b9f49cfdda | /psistats/workers/mem.py | 9f6bcb7e823162e9d396802735e17cb5d9d98525 | [
"MIT"
] | permissive | psistats/linux-client | fe7881c01d35bc9861be314757cc71751a1bd8e3 | 92892baaee8b5cf2b41cf82d55ca69854431e3a4 | refs/heads/master | 2020-12-25T16:59:33.612529 | 2016-11-21T06:39:26 | 2016-11-21T06:39:26 | 22,295,114 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from psistats.workerThread import WorkerThread
from psistats import system
class MemWorker(WorkerThread):
def work(self):
return {'mem': system.get_mem_usage() }
| [
"adow@psikonc.om"
] | adow@psikonc.om |
class Point():
    """A point on a 2D plane that can build the line through another point."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    # Method wont work when other_point.x - self.x = 0 (ZeroDivisionError)
    def get_line_to(self, other_point):
        """Return (slope, intercept) of the line through self and other_point."""
        rise = other_point.y - self.y
        run = other_point.x - self.x
        slope = rise / run
        intercept = self.y - slope * self.x
        return (slope, intercept)
print(Point(4, 11).get_line_to(Point(6, 15))) | [
"--global"
] | --global |
8d6cca91d5489b3dabcf10d8c98523f7f3c593f8 | 9924e0dc6e0e8c8665508a218636f391451a153f | /Extras/use_flacco.py | 2e8dfe4b9cb62fa2b2d599de9da641448cd1f9e8 | [] | no_license | ai-se/ExploratoryLandscapeAnalysis | b531d374221397ed91f43eeff00217aa85797881 | c338fe93bb11881d25b6000853ca7ac0be69e212 | refs/heads/master | 2020-07-13T12:52:04.601453 | 2016-09-23T21:21:08 | 2016-09-23T21:21:08 | 66,961,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py | from __future__ import division
import pyRserve
from os import listdir
import pandas as pd
from random import shuffle
def df_to_list_str(df):
    """Flatten a DataFrame column by column into one comma-separated string."""
    values = []
    for column_name in df.columns.tolist():
        values.extend(df[column_name].tolist())
    # join() places the separators, so no trailing comma is produced
    return ",".join(str(value) for value in values)
def get_ela_features(independent, dependent):
    """Compute flacco 'ela_distr' ELA features for a sample via Rserve.

    Parameters
    ----------
    independent : pandas.DataFrame of decision-variable samples.
    dependent : pandas.DataFrame with a single objective column
        (normalized by the caller); must have the same number of rows.

    Returns a dict mapping ELA feature names to their values.
    Requires a local Rserve instance (port 6311) with 'flacco' installed.
    """
    features = {}
    i_ncols = len(independent.columns)
    # Build R matrix literals from the flattened (column-major) data
    str_indep = "matrix(c(" + df_to_list_str(independent) + "), ncol=" + str(i_ncols) + ")"
    str_dep = "matrix(c(" + df_to_list_str(dependent) + "), ncol=" + str(1) + ")"
    assert(len(independent) == len(dependent)), "sanity check failed"
    conn = pyRserve.connect(host='localhost', port=6311)
    try:
        conn.voidEval("library('flacco')")
        conn.voidEval("X <- " + str_indep)
        conn.voidEval("y<- " + str_dep)
        conn.voidEval("feat.object = createFeatureObject(X = X, y = y, blocks = 3)")
        fs1 = conn.r("calculateFeatureSet(feat.object, set = 'ela_distr')")
        for name, value in zip(fs1.keys, fs1.values):
            features[name] = value
        # Other feature sets ('ela_level', 'ela_meta', 'cm_grad') are
        # intentionally disabled; add more calculateFeatureSet calls to enable.
    finally:
        # Bug fix: always release the Rserve connection, even on R errors;
        # the original leaked one connection per call.
        conn.close()
    return features
# Script entry point (Python 2 syntax): for each feature-model CSV, normalize
# the objective column, draw a random sample of rows, and print ELA features.
if __name__ == "__main__":
    files = ["../FeatureModels/" + f for f in listdir("../FeatureModels") if ".csv" in f]
    # NOTE(review): `files` is built but unused — only BerkeleyDB.csv is
    # processed below; confirm whether iterating `files` was intended.
    for filename in ["../FeatureModels/BerkeleyDB.csv"]:
        contents = pd.read_csv(filename)
        # Objective (dependent) columns are tagged with "$<" in the header
        independent_columns = [c for c in contents.columns if "$<" not in c]
        dependent_column = [c for c in contents.columns if "$<" in c]
        independents = contents[independent_columns]
        raw_dependents = contents[dependent_column]
        # Mean-centered min-max scaling of the objective
        dependents = (raw_dependents - raw_dependents.mean()) / (raw_dependents.max() - raw_dependents.min())
        # Random sample of row indices (fixed size n)
        indexes = range(len(contents))
        shuffle(indexes)
        n = 100#min(n, int(len(contents) * 0.1))
        samples = indexes[:n]
        independent_values = independents[independents.index.isin(samples)]
        dependent_values = dependents[dependents.index.isin(samples)]
        print filename
        print get_ela_features(independent_values, dependent_values)
        exit()
"vivekaxl@gmail.com"
] | vivekaxl@gmail.com |
f5289579f0788d3468c2f4f17cc876c5dce19e11 | cd7046d2a38ec82cb792716124b9513750322eb0 | /djangoRest/blog/migrations/0001_initial.py | c14d6626e904182a2ab8e0be67833fb2449677b0 | [] | no_license | dylanpoll/DjangoBlog | f0ed457bb608c75a2b43aee3c973e8dc5cb5b7fc | 657465e23eec271530937942c8b3fef3f917addc | refs/heads/main | 2023-06-13T00:59:16.686438 | 2021-07-07T04:38:40 | 2021-07-07T04:38:40 | 383,670,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | # Generated by Django 3.2.4 on 2021-06-11 04:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the blog `Post` model (auto-generated by Django 3.2.4)."""

    initial = True

    dependencies = [
        # Post.author references the project's configured user model
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('content', models.TextField()),
                # Defaults to creation time (timezone-aware)
                ('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
                # Deleting the user cascades to their posts
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"noreply@github.com"
] | dylanpoll.noreply@github.com |
b1cf17110770b26cd45e43606dac6ca93a53dcd2 | f61e148b136f60d199a8e819150ee78a605c0fdc | /sum_powers_of_two.py | f3d29c962fbee7622f86d0ecc8361709648e78d1 | [] | no_license | vrieni/misc | 462af37e97158ffc66baef2dd6a1a33bd1b6d5d4 | c90b92624fdd03221816afc408ab84411bf65b84 | refs/heads/master | 2021-01-09T21:54:29.074948 | 2015-11-07T14:31:38 | 2015-11-07T14:31:38 | 45,040,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
def sum_powers_of_two(n):
    """Return the sum of squares 1**2 + 2**2 + ... + n**2.

    Despite the historical name, the exponent is fixed at 2 (squares).
    Raises ValueError for negative n (the original recursed forever);
    the iterative form also avoids RecursionError for large n.
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    return sum(k ** 2 for k in range(1, n + 1))
def power(base, exponent):
    """Return base raised to the given exponent."""
    result = base ** exponent
    return result
print sum_powers_of_two(4) | [
"vrieni.arguelles@gmail.com"
] | vrieni.arguelles@gmail.com |
4f41fa65828ca3db16df37f69904da4061ab6c1f | b9a754d09984634d2f88e91241c47583d8ce1b15 | /happi/_Diagnostics/TrackParticles.py | 4847eaed33c90695064653ceb27a5a0e4084a0cf | [] | no_license | iouatu/mySmilei | 9aa97d3fb1f9e5ddf477e4bc4eff22d7667b8f8f | 41c2496d21ac03d0dd9b9d8ec41d60cdbf13bf1b | refs/heads/main | 2023-07-23T01:42:48.705778 | 2021-08-18T18:13:01 | 2021-08-18T18:13:01 | 397,676,095 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,364 | py | from .Diagnostic import Diagnostic
from .._Utils import *
# Define a function that finds the next closing character in a string
# Locate the next occurrence of `character` in `string` that is not nested
# inside parentheses, brackets or braces, scanning from index `start`.
# Raises an Exception when brackets are unbalanced or `character` is absent.
def findClosingCharacter(string, character, start=0):
	openerFor = {")": "(", "]": "[", "}": "{"}
	depth = []
	position = start
	while position < len(string):
		current = string[position]
		if current == character and not depth:
			return position
		if current in ("(", "[", "{"):
			depth.append(current)
		elif current in (")", "]", "}"):
			if not depth:
				raise Exception("Error in selector syntax: missing `"+character+"`")
			if depth[-1] != openerFor[current]:
				raise Exception("Error in selector syntax: missing closing parentheses or brackets")
			depth.pop()
		position += 1
	raise Exception("Error in selector syntax: missing `"+character+"`")
class TrackParticles(Diagnostic):
"""Class for loading a TrackParticles diagnostic"""
def _init(self, species=None, select="", axes=[], timesteps=None, sort=True, sorted_as="", length=None, chunksize=20000000, **kwargs):
# If argument 'species' not provided, then print available species and leave
if species is None:
species = self.getTrackSpecies()
if len(species)>0:
self._error += ["Printing available tracked species:"]
self._error += ["-----------------------------------"]
self._error += ["\n".join(species)]
else:
self._error += ["No tracked particles files found"]
return
if type(sort) not in [bool, str]:
self._error += ["Argument `sort` must be `True` or `False` or a string"]
return
if not sort and select!="":
self._error += ["Cannot select particles if not sorted"]
return
self._sort = sort
# Get info from the hdf5 files + verifications
# -------------------------------------------------------------------
self.species = species
self._h5items = {}
disorderedfiles = self._findDisorderedFiles()
if not disorderedfiles: return
self._short_properties_from_raw = {
"id":"Id", "position/x":"x", "position/y":"y", "position/z":"z",
"momentum/x":"px", "momentum/y":"py", "momentum/z":"pz",
"charge":"q", "weight":"w", "chi":"chi",
"E/x":"Ex", "E/y":"Ey", "E/z":"Ez", "B/x":"Bx", "B/y":"By", "B/z":"Bz"
}
# If sorting allowed, find out if ordering needed
needsOrdering = False
if sort:
if type(sort) is str:
# The sorted file gets a name from `sorted_as`
if type(sorted_as) is not str or self._re.search(r"[^a-zA-Z0-9_]","_"+sorted_as):
self._error += ["Argument `sorted_as` must be a keyword composed of letters and numbers"]
return
if not sorted_as:
self._error += ["Argument `sorted_as` is required when `sort` is a selection"]
return
if sorted_as:
sorted_as = "_"+sorted_as
orderedfile = self._results_path[0]+self._os.sep+"TrackParticles_"+species+sorted_as+".h5"
needsOrdering = self._needsOrdering(orderedfile)
if sorted_as and not needsOrdering and type(sort) is str:
print("WARNING: ordered file `"+"TrackParticles_"+species+sorted_as+".h5"+"` already exists.")
print(" Skipping sorting operation.")
# Find times in disordered files
if not sort or needsOrdering:
self._locationForTime = {}
for file in disorderedfiles:
f = self._h5py.File(file, "r")
self._locationForTime.update( {int(t):[f,it] for it, t in enumerate(f["data"].keys())} )
self._lastfile = f
self._timesteps = self._np.array(sorted(self._locationForTime))
self._alltimesteps = self._np.copy(self._timesteps)
# List available properties
try: # python 2
self._raw_properties_from_short = {v:k for k,v in self._short_properties_from_raw.iteritems()}
T0 = next(self._lastfile["data"].itervalues())["particles/"+self.species]
except: # python 3
self._raw_properties_from_short = {v:k for k,v in self._short_properties_from_raw.items()}
T0 = next(iter(self._lastfile["data"].values()))["particles/"+self.species]
self.available_properties = [v for k,v in self._short_properties_from_raw.items() if k in T0]
# If sorting allowed, then do the sorting
if sort:
# If the first path does not contain the ordered file (or it is incomplete), we must create it
if needsOrdering:
self._orderFiles(orderedfile, chunksize, sort)
if self._needsOrdering(orderedfile):
return
# Create arrays to store h5 items
self._lastfile = self._h5py.File(orderedfile, "r")
for prop in ["Id", "x", "y", "z", "px", "py", "pz", "q", "w", "chi",
"Ex", "Ey", "Ez", "Bx", "By", "Bz"]:
if prop in self._lastfile:
self._h5items[prop] = self._lastfile[prop]
self.available_properties = list(self._h5items.keys())
# Memorize the locations of timesteps in the files
self._locationForTime = {t:it for it, t in enumerate(self._lastfile["Times"])}
self._timesteps = self._np.array(sorted(self._lastfile["Times"]))
self._alltimesteps = self._np.copy(self._timesteps)
self.nParticles = self._h5items["Id"].shape[1]
# Add moving_x in the list of properties
if "x" in self.available_properties:
file = disorderedfiles[0]
with self._h5py.File(file, "r") as f:
try: # python 2
D = next(f["data"].itervalues())
except: # python 3
D = next(iter(f["data"].values()))
if "x_moved" in D.attrs:
self.available_properties += ["moving_x"]
# Get available times in the hdf5 file
if self._timesteps.size == 0:
self._error += ["No tracked particles found"]
return
# If specific timesteps requested, narrow the selection
if timesteps is not None:
try:
ts = self._np.array(self._np.double(timesteps),ndmin=1)
if ts.size==2:
# get all times in between bounds
self._timesteps = self._timesteps[ self._np.nonzero((self._timesteps>=ts[0]) * (self._timesteps<=ts[1]))[0] ]
elif ts.size==1:
# get nearest time
self._timesteps = self._np.array(self._timesteps[ self._np.array([(self._np.abs(self._timesteps-ts)).argmin()]) ])
else:
raise
except:
self._error += ["Argument `timesteps` must be one or two non-negative integers"]
return
# Need at least one timestep
if self._timesteps.size < 1:
self._error += ["Timesteps not found"]
return
# Select particles
# -------------------------------------------------------------------
if sort:
self.selectedParticles = self._selectParticles( select, True, chunksize )
if self.selectedParticles is None:
self._error += ["Error: argument 'select' must be a string or a list of particle IDs"]
return
# Remove particles that are not actually tracked during the requested timesteps
if self._verbose: print("Removing dead particles ...")
if type(self.selectedParticles) is not slice and len(self.selectedParticles) > 0:
first_time = self._locationForTime[self._timesteps[ 0]]
last_time = self._locationForTime[self._timesteps[-1]]+1
IDs = self._readUnstructuredH5(self._h5items["Id"], self.selectedParticles, first_time, last_time)
dead_particles = self._np.flatnonzero(self._np.all( self._np.isnan(IDs) + (IDs==0), axis=0 ))
self.selectedParticles = self._np.delete( self.selectedParticles, dead_particles )
# Calculate the number of selected particles
if type(self.selectedParticles) is slice:
self.nselectedParticles = self.nParticles
else:
self.nselectedParticles = len(self.selectedParticles)
if self.nselectedParticles == 0:
self._error += ["No particles found"]
return
if self._verbose: print("Kept "+str(self.nselectedParticles)+" particles")
# Manage axes
# -------------------------------------------------------------------
if type(axes) is not list:
self._error += ["Error: Argument 'axes' must be a list"]
return
# if axes provided, verify them
if len(axes)>0:
self.axes = axes
for axis in axes:
if axis not in self.available_properties:
self._error += ["Error: Argument 'axes' has item '"+str(axis)+"' unknown."]
self._error += [" Available axes are: "+(", ".join(sorted(self.available_properties)))]
return
# otherwise use default
else:
self.axes = self.available_properties
# Get x_moved if necessary
if "moving_x" in self.axes:
self._XmovedForTime = {}
for file in disorderedfiles:
with self._h5py.File(file, "r") as f:
for t in f["data"].keys():
self._XmovedForTime[int(t)] = f["data"][t].attrs["x_moved"]
# Then figure out axis units
self._type = self.axes
self._factors = []
for axis in self.axes:
axisunits = ""
if axis == "Id":
self._centers.append( [0, 281474976710655] )
elif axis in ["x" , "y" , "z", "moving_x"]:
axisunits = "L_r"
self._centers.append( [0., self.namelist.Main.grid_length[{"x":0,"y":1,"z":-1}[axis[-1]]]] )
elif axis in ["px", "py", "pz"]:
axisunits = "P_r"
self._centers.append( [-1., 1.] )
elif axis == "w":
axisunits = "N_r * L_r^%i" % self._ndim_particles
self._centers.append( [0., 1.] )
elif axis == "q":
axisunits = "Q_r"
self._centers.append( [-10., 10.] )
elif axis == "chi":
axisunits = "1"
self._centers.append( [0., 2.] )
elif axis[0] == "E":
axisunits = "E_r"
self._centers.append( [-1., 1.] )
elif axis[0] == "B":
axisunits = "B_r"
self._centers.append( [-1., 1.] )
self._log += [False]
self._label += [axis]
self._units += [axisunits]
if axis == "Id":
self._factors += [1]
else:
factor, _ = self.units._convert(axisunits, None)
self._factors += [factor]
self._title = "Track particles '"+species+"'"
self._shape = [0]*len(self.axes)
self._centers = [self._np.array(c) for c in self._centers]
# Hack to work with 1 axis
if len(axes)==1: self._vunits = self._units[0]
else: self._vunits = ""
# Set the directory in case of exporting
self._exportPrefix = "TrackParticles_"+self.species+"_"+"".join(self.axes)
self._exportDir = self._setExportDir(self._exportPrefix)
self._rawData = None
# Finish constructor
self.length = length or self._timesteps[-1]
self.valid = True
return kwargs
def _needsOrdering(self, orderedfile):
if not self._os.path.isfile(orderedfile):
return True
else:
try:
f = self._h5py.File(orderedfile, "r")
if "finished_ordering" not in f.attrs.keys():
return True
except:
self._os.remove(orderedfile)
return True
finally:
f.close()
return False
def _selectParticles( self, select, already_sorted, chunksize ):
if type(select) is str:
# Parse the selector
i = 0
operation = ""
seltype = []
selstr = []
timeSelector = []
particleSelector = []
doubleProps = []
int16Props = []
while i < len(select):
if i+4<len(select) and select[i:i+4] in ["any(","all("]:
seltype += [select[i:i+4]]
if seltype[-1] not in ["any(","all("]:
raise Exception("Error in selector syntax: unknown argument "+seltype[-1][:-1])
comma = findClosingCharacter(select, ",", i+4)
parenthesis = findClosingCharacter(select, ")", comma+1)
timeSelector += [select[i+4:comma]]
selstr += [select[i:parenthesis]]
try:
timeSelector[-1] = "self._alltimesteps["+self._re.sub(r"\bt\b","self._alltimesteps",timeSelector[-1])+"]"
eval(timeSelector[-1])
except:
raise Exception("Error in selector syntax: time selector not understood in "+select[i:i+3]+"()")
try:
particleSelector += [select[comma+1:parenthesis]]
doubleProps += [[]]
int16Props += [[]]
for prop in self.available_properties:
(particleSelector[-1], nsubs) = self._re.subn(r"\b"+prop+r"\b", "properties['"+prop+"'][:actual_chunksize]", particleSelector[-1])
if nsubs > 0:
if prop == "q" : int16Props [-1] += [prop]
else : doubleProps[-1] += [prop]
except:
raise Exception("Error in selector syntax: not understood: "+select[i:parenthesis+1])
operation += "stack["+str(len(seltype)-1)+"]"
i = parenthesis+1
elif not already_sorted and not select[i].isspace():
raise Exception("Complex selection operations not allowed for unsorted files (bad character %s)"%select[i])
else:
operation += select[i]
i+=1
nOperations = len(seltype)
# Nothing to select if empty operation
if len(operation)==0.:
return self._np.s_[:]
# Execute the selector
if self._verbose: print("Selecting particles ... (this may take a while)")
def makeBuffers(size):
properties = {}
for k in range(nOperations):
for prop in int16Props[k]:
if prop not in properties:
properties[prop] = self._np.empty((size,), dtype=self._np.int16)
for prop in doubleProps[k]:
if prop not in properties:
properties[prop] = self._np.empty((size,), dtype=self._np.double)
properties["Id"] = self._np.empty((size,), dtype=self._np.uint64)
return properties
if already_sorted:
# Setup the chunks of particles (if too many particles)
chunks = ChunkedRange(self.nParticles, chunksize)
# Allocate buffers
selectedParticles = self._np.array([], dtype=self._np.uint64)
properties = makeBuffers(chunks.adjustedchunksize)
# Loop on chunks
for chunkstart, chunkstop, actual_chunksize in chunks:
# Execute each of the selector items
stack = []
for k in range(nOperations):
selection = self._np.empty((chunks.adjustedchunksize,), dtype=bool)
if seltype[k] == "any(": selection.fill(False)
elif seltype[k] == "all(": selection.fill(True )
requiredProps = doubleProps[k] + int16Props[k] + ["Id"]
# Loop times
for time in eval(timeSelector[k]):
if self._verbose: print(" Selecting block `"+selstr[k]+")`, at time "+str(time))
# Extract required properties from h5 files
it = self._locationForTime[time]
for prop in requiredProps:
self._h5items[prop].read_direct(properties[prop], source_sel=self._np.s_[it,chunkstart:chunkstop], dest_sel=self._np.s_[:actual_chunksize])
# Calculate the selector
selectionAtTimeT = eval(particleSelector[k]) # array of True or False
# Combine with selection of previous times
selectionAtTimeT[self._np.isnan(selectionAtTimeT)] = False
existing = properties["Id"][:actual_chunksize]>0 # existing particles at that timestep
if seltype[k] == "any(": selection[existing] += selectionAtTimeT[existing]
elif seltype[k] == "all(": selection *= selectionAtTimeT * existing
stack.append(selection)
# Merge all stack items according to the operations
selectedParticles = self._np.union1d( selectedParticles, eval(operation).nonzero()[0] )
else:
# Execute the selector item
selectedParticles = self._np.array([], dtype="uint64")
k = 0
requiredProps = doubleProps[k] + int16Props[k] + ["Id"]
# Loop times
for time in eval(timeSelector[k]):
if self._verbose: print(" Selecting block `"+selstr[k]+")`, at time "+str(time))
# Get group in file
[f, it] = self._locationForTime[time]
group = f["data/"+"%010i"%time+"/particles/"+self.species]
npart = group["id"].shape[0]
# Loop on chunks
selectionAtTimeT = []
for chunkstart, chunkstop, actual_chunksize in ChunkedRange(npart, chunksize):
# Allocate buffers
properties = makeBuffers(actual_chunksize)
# Extract required properties from h5 files
for prop in requiredProps:
group[self._raw_properties_from_short[prop]].read_direct(properties[prop], source_sel=self._np.s_[chunkstart:chunkstop], dest_sel=self._np.s_[:actual_chunksize])
# Calculate the selector
sel = eval(particleSelector[k]) # array of True or False
selectionAtTimeT.append(properties["Id"][sel])
selectionAtTimeT = self._np.concatenate(selectionAtTimeT)
# Combine with selection of previous times
if seltype[k] == "any(": selectedParticles = self._np.union1d(selectedParticles, selectionAtTimeT)
elif seltype[k] == "all(": selectedParticles = self._np.intersect1d(selectedParticles, selectionAtTimeT)
selectedParticles.sort()
return selectedParticles
# Otherwise, the selection can be a list of particle IDs
else:
try:
IDs = self._lastfile["unique_Ids"] # get all available IDs
return self._np.flatnonzero(self._np.in1d(IDs, select)) # find the requested IDs
except:
return
# Method to get info
def _info(self):
info = "Track particles: species '"+self.species+"'"
if self._sort:
info += " containing "+str(self.nParticles)+" particles"
if self.nselectedParticles != self.nParticles:
info += "\n with selection of "+str(self.nselectedParticles)+" particles"
return info
# Read hdf5 dataset faster with unstrusctured list of indices
def _readUnstructuredH5(self, dataset, indices, first_time, last_time=None):
if last_time is None:
last_time = first_time + 1
cs = 1000
if type(indices) is slice or len(indices) < cs:
return dataset[first_time:last_time, indices]
else:
n = len(indices)
result = self._np.empty(( last_time - first_time, n ), dtype=dataset.dtype)
chunksize = min(cs,n)
nchunks = int(n/chunksize)
chunksize = int(n / nchunks)
chunkstop = 0
for ichunk in range(nchunks):
chunkstart = chunkstop
chunkstop = min(chunkstart + chunksize, n)
result[:,chunkstart:chunkstop] = dataset[first_time:last_time, indices[chunkstart:chunkstop]]
return result
# get all available tracked species
def getTrackSpecies(self):
for path in self._results_path:
files = self._glob(path+self._os.sep+"TrackParticles*.h5")
species_here = [self._re.search("_(.+).h5",self._os.path.basename(file)).groups()[0] for file in files]
try : species = [ s for s in species if s in species_here ]
except: species = species_here
return species
# get all available timesteps
def getAvailableTimesteps(self):
return self._alltimesteps
# Get a list of disordered files
def _findDisorderedFiles(self):
disorderedfiles = []
for path in self._results_path:
file = path+self._os.sep+"TrackParticlesDisordered_"+self.species+".h5"
if not self._os.path.isfile(file):
self._error += ["Missing TrackParticles file in directory "+path]
return []
disorderedfiles += [file]
return disorderedfiles
# Make the particles ordered by Id in the file, in case they are not
def _orderFiles( self, fileOrdered, chunksize, sort ):
if self._verbose:
print("Ordering particles ... (this could take a while)")
if type(sort) is str:
print(" Selecting particles according to "+sort)
try:
# If ordered file already exists, find out which timestep was done last
latestOrdered = -1
if self._os.path.isfile(fileOrdered):
f0 = self._h5py.File(fileOrdered, "r+")
try: latestOrdered = f0.attrs["latestOrdered"]
except: pass
# otherwise, make new (ordered) file
else:
f0 = self._h5py.File(fileOrdered, "w")
# Open the last file and get the number of particles from each MPI
last_time = self._timesteps[-1]
last_file, _ = self._locationForTime[last_time]
number_of_particles = (last_file["data/"+"%010i/"%last_time+"latest_IDs"][()] % (2**32)).astype('uint32')
if self._verbose: print("Number of particles: "+str(number_of_particles.sum()))
# Calculate the offset that each MPI needs
offset = self._np.cumsum(number_of_particles, dtype='uint64')
total_number_of_particles = offset[-1]
offset = self._np.roll(offset, 1)
offset[0] = 0
# Do the particle selection if requested
selectedIds = None
selectedIndices = self._np.s_[:]
nparticles_to_write = total_number_of_particles
if type(sort) is str:
selectedIds = self._selectParticles( sort, False, chunksize )
nparticles_to_write = len(selectedIds)
# Make datasets if not existing already
size = (len(self._timesteps), nparticles_to_write)
group = last_file["data/"+"%010i/"%last_time+"particles/"+self.species]
for k, name in self._short_properties_from_raw.items():
try : f0.create_dataset(name, size, group[k].dtype, fillvalue=(0 if name=="Id" else self._np.nan))
except: pass
# Loop times and fill arrays
for it, t in enumerate(self._timesteps):
# Skip previously-ordered times
if it<=latestOrdered: continue
if self._verbose: print(" Ordering @ timestep = "+str(t))
f, _ = self._locationForTime[t]
group = f["data/"+"%010i/"%t+"particles/"+self.species]
nparticles = group["id"].size
if nparticles == 0: continue
# If not too many particles, sort all at once
if nparticles_to_write < chunksize and nparticles < chunksize:
# Get the Ids and find where they should be stored in the final file
if selectedIds is None:
locs = (
group["id"][()].astype("uint32") # takes the second hald of id (meaning particle number)
+ offset[ (group["id"][()]>>32).astype("uint32") & 0b111111111111111111111111 ]
-1
)
else:
_,selectedIndices,locs = self._np.intersect1d( group["id"][()], selectedIds, return_indices=True )
# Loop datasets and order them
if len(locs) > 0:
for k, name in self._short_properties_from_raw.items():
if k not in group: continue
ordered = self._np.empty((nparticles_to_write, ), dtype=group[k].dtype)
if k == "id": ordered.fill(0)
else : ordered.fill(self._np.nan)
ordered[locs] = group[k][()][selectedIndices]
f0[name].write_direct(ordered, dest_sel=self._np.s_[it,:])
# If too many particles, sort by chunks
else:
data = {}
for k, name in self._short_properties_from_raw.items():
data[k] = self._np.empty((chunksize,), dtype=self._np.int16 if k == "charge" else self._np.double)
# Loop chunks of the output
for first_o, last_o, npart_o in ChunkedRange(nparticles_to_write, chunksize):
for k, name in self._short_properties_from_raw.items():
if k not in group: continue
if k == "id": data[k].fill(0)
else : data[k].fill(self._np.nan)
# Loop chunks of the input
for first_i, last_i, npart_i in ChunkedRange(nparticles, chunksize):
# Obtain IDs
ID = group["id"][first_i:last_i]
# Extract useful IDs for this output chunk
if selectedIds is None:
loc_in_output = ID.astype("uint32") + offset[ (ID>>32).astype("uint32") & 0b111111111111111111111111 ] - 1
keep = self._np.flatnonzero((loc_in_output >= first_o) * (loc_in_output < last_o))
loc_in_output = loc_in_output[keep] - first_o
else:
_,keep,loc_in_output = self._np.intersect1d( ID, selectedIds[first_o:last_o], return_indices=True )
# Fill datasets with this chunk
for k, name in self._short_properties_from_raw.items():
if k not in group: continue
data[k][loc_in_output] = group[k][first_i:last_i][keep]
# Accumulated data is written out
for k, name in self._short_properties_from_raw.items():
if k not in group: continue
f0[name][it, first_o:last_o] = data[k][:npart_o]
# Indicate that this iteration was succesfully ordered
f0.attrs["latestOrdered"] = it
f0.flush()
if self._verbose: print(" Finalizing the ordering process")
# Create the "Times" dataset
f0.create_dataset("Times", data=self._timesteps)
# Create the "unique_Ids" dataset
if selectedIds is None:
unique_Ids = self._np.empty((nparticles_to_write,), dtype=f0["Id"].dtype)
for iMPI in range(number_of_particles.size):
for first, last, npart in ChunkedRange(number_of_particles[iMPI], chunksize):
o = int(offset[iMPI])
unique_Ids[o+first : o+last] = ((iMPI<<32) + 1) + self._np.arange(first,last,dtype='uint64')
f0.create_dataset("unique_Ids", data=unique_Ids)
else:
f0.create_dataset("unique_Ids", data=selectedIds)
# Indicate that the ordering is finished
f0.attrs["finished_ordering"] = True
# Close file
f0.close()
except Exception as e:
print("Error in the ordering of the tracked particles")
if self._verbose:
print(e)
raise
finally:
# Close disordered files
for t in self._locationForTime:
self._locationForTime[t][0].close()
if self._verbose: print("Ordering succeeded")
# Method to generate the raw data (only done once)
def _generateRawData(self, times=None):
if not self._validate(): return
self._prepare1() # prepare the vfactor
if self._sort:
if self._rawData is None:
self._rawData = {}
first_time = self._locationForTime[self._timesteps[0]]
last_time = self._locationForTime[self._timesteps[-1]] + 1
if self._verbose: print("Loading data ...")
# fill up the data
ID = self._readUnstructuredH5(self._h5items["Id"], self.selectedParticles, first_time, last_time)
deadParticles = (ID==0).nonzero()
for axis in self.axes:
if self._verbose: print(" axis: "+axis)
if axis == "Id":
self._rawData[axis] = ID
else:
if axis=="moving_x":
data = self._readUnstructuredH5(self._h5items["x"], self.selectedParticles, first_time, last_time)
for it, time in enumerate(self._timesteps):
data[it,:] -= self._XmovedForTime[time]
else:
data = self._readUnstructuredH5(self._h5items[axis], self.selectedParticles, first_time, last_time)
data[deadParticles] = self._np.nan
self._rawData[axis] = data
if self._verbose: print("Process broken lines ...")
# Add the lineBreaks array which indicates where lines are broken (e.g. loop around the box)
self._rawData['brokenLine'] = self._np.zeros((self.nselectedParticles,), dtype=bool)
self._rawData['lineBreaks'] = {}
if self._timesteps.size > 1:
dt = self._np.diff(self._timesteps)*self.timestep
for axis in ["x","y","z"]:
if axis in self.axes:
dudt = self._np.diff(self._rawData[axis],axis=0)
for i in range(dudt.shape[1]): dudt[:,i] /= dt
dudt[~self._np.isnan(dudt)] = 0. # NaNs already break lines
# Line is broken if velocity > c
self._rawData['brokenLine'] += self._np.abs(dudt).max(axis=0) > 1.
broken_particles = self._np.flatnonzero(self._rawData['brokenLine'])
for broken_particle in broken_particles:
broken_times = list(self._np.flatnonzero(self._np.abs(dudt[:,broken_particle]) > 1.)+1)
if broken_particle in self._rawData['lineBreaks'].keys():
self._rawData['lineBreaks'][broken_particle] += broken_times
else:
self._rawData['lineBreaks'][broken_particle] = broken_times
# Add the times array
self._rawData["times"] = self._timesteps
if self._verbose: print("... done")
# If not sorted, get different kind of data
else:
if self._rawData is None:
self._rawData = {}
if self._verbose: print("Loading data ...")
properties = dict(self._raw_properties_from_short, moving_x="position/x")
if times is None: times = self._timesteps
for time in times:
if time in self._rawData: continue
[f, timeIndex] = self._locationForTime[time]
group = f["data/"+"%010i"%time+"/particles/"+self.species]
self._rawData[time] = {}
for axis in self.axes:
self._rawData[time][axis] = group[properties[axis]][()]
if "moving_x" in self.axes:
self._rawData[time]["moving_x"] -= self._XmovedForTime[time]
if self._verbose: print("... done")
# We override the get and getData methods
def getData(self, timestep=None):
if not self._validate(): return
self._prepare1() # prepare the vfactor
if timestep is None:
ts = self._timesteps
elif timestep not in self._timesteps:
print("ERROR: timestep "+str(timestep)+" not available")
return {}
else:
ts = [timestep]
indexOfRequestedTime = self._np.where(self._timesteps==timestep)
if len(ts)==1 and not self._sort:
self._generateRawData(ts)
else:
self._generateRawData()
data = {}
data.update({ "times":ts })
if self._sort:
for axis, factor in zip(self.axes, self._factors):
if timestep is None:
data[axis] = self._rawData[axis]
else:
data[axis] = self._rawData[axis][indexOfRequestedTime]
data[axis] *= factor
else:
for t in ts:
data[t] = {}
for axis, factor in zip(self.axes, self._factors):
data[t][axis] = self._rawData[t][axis] * factor
return data
	# Convenience wrapper: same as getData() over all requested timesteps
	def get(self):
		return self.getData()
# Iterator on UNSORTED particles for a given timestep
def iterParticles(self, timestep, chunksize=1):
if not self._validate(): return
self._prepare1() # prepare the vfactor
if timestep not in self._timesteps:
print("ERROR: timestep "+str(timestep)+" not available")
return
properties = self._raw_properties_from_short + {"moving_x":"x"}
disorderedfiles = self._findDisorderedFiles()
for file in disorderedfiles:
f = self._h5py.File(file, "r")
# This is the timestep for which we want to produce an iterator
try:
group = f["data/"+("%010d"%timestep)+"/particles/"+self.species]
except:
f.close()
continue
npart = group["id"].size
ID = self._np.empty((chunksize,), dtype=self._np.uint64)
data_double = self._np.empty((chunksize,), dtype=self._np.double)
data_int16 = self._np.empty((chunksize,), dtype=self._np.int16 )
for chunkstart in range(0, npart, chunksize):
chunkend = chunkstart + chunksize
if chunkend > npart:
chunkend = npart
ID = self._np.empty((chunkend-chunkstart,), dtype=self._np.uint64)
data_double = self._np.empty((chunkend-chunkstart,), dtype=self._np.double)
data_int16 = self._np.empty((chunkend-chunkstart,), dtype=self._np.int16 )
data = {}
for axis in self.axes:
if axis == "Id":
group[properties[axis]].read_direct(ID, source_sel=self._np.s_[chunkstart:chunkend])
data[axis] = ID.copy()
elif axis == "q":
group[properties[axis]].read_direct(data_int16, source_sel=self._np.s_[chunkstart:chunkend])
data[axis] = data_int16.copy()
elif axis == "moving_x":
group[properties["x"]].read_direct(data_double, source_sel=self._np.s_[chunkstart:chunkend])
data[axis] = data_double.copy()
else:
group[properties[axis]].read_direct(data_double, source_sel=self._np.s_[chunkstart:chunkend])
data[axis] = data_double.copy()
yield data
f.close()
# We override _prepare3
def _prepare3(self):
if not self._sort:
print("Cannot plot non-sorted data")
return False
if self._tmpdata is None:
A = self.getData()
self._tmpdata = []
for axis in self.axes: self._tmpdata.append( A[axis] )
return True
# We override the plotting methods
	def _animateOnAxes_0D(self, ax, t, cax_id=0):
		# 0-D tracked data has nothing to draw: intentionally a no-op.
		pass
def _animateOnAxes_1D(self, ax, t, cax_id=0):
timeSelection = (self._timesteps<=t)*(self._timesteps>=t-self.length)
times = self._timesteps[timeSelection]
A = self._tmpdata[0][timeSelection,:]
if times.size == 1:
times = self._np.double([times, times]).squeeze()
A = self._np.double([A, A]).squeeze()
try : ax.set_prop_cycle (None)
except: ax.set_color_cycle(None)
self._plot = ax.plot(self._tfactor*times, self._vfactor*A, **self.options.plot)
ax.set_xlabel(self._tlabel)
ax.set_ylabel(self.axes[0]+" ("+self.units.vname+")")
self._setLimits(ax, xmax=self._tfactor*self._timesteps[-1], ymin=self.options.vmin, ymax=self.options.vmax)
ax.set_title(self._title) # override title
self._setAxesOptions(ax)
return self._plot
	def _animateOnAxes_2D(self, ax, t, cax_id=0):
		# Draw, on axes `ax`, the 2-D particle trajectories over the window
		# [t - self.length, t]. Trajectories recorded as "broken" (in
		# self._rawData["lineBreaks"]) are drawn segment by segment so no
		# line is drawn across a break.
		# First remove whatever this diagnostic drew on `ax` previously
		if hasattr(ax, "_lines"):
			if self in ax._lines:
				for line in ax._lines[self]:
					line.remove()
				del ax._lines[self]
		else:
			ax._lines = {}
		tmin = t-self.length
		tmax = t
		timeSelection = (self._timesteps<=tmax)*(self._timesteps>=tmin)
		selected_times = self._np.flatnonzero(timeSelection)
		itmin = selected_times[0]
		itmax = selected_times[-1]
		# Plot first the non-broken lines
		x = self._tmpdata[0][timeSelection,:][:,~self._rawData["brokenLine"]]
		y = self._tmpdata[1][timeSelection,:][:,~self._rawData["brokenLine"]]
		# reset the color cycle (method name depends on matplotlib version)
		try : ax.set_prop_cycle (None)
		except: ax.set_color_cycle(None)
		ax._lines[self] = ax.plot(self._xfactor*x, self._yfactor*y, **self.options.plot)
		# Then plot the broken lines
		try : ax.hold("on")
		except: pass
		for line, breaks in self._rawData['lineBreaks'].items():
			x = self._tmpdata[0][:, line]
			y = self._tmpdata[1][:, line]
			# `prevline` remembers the first drawn segment so every later
			# segment of the same particle reuses its color
			prevline = None
			for ibrk in range(len(breaks)):
				if breaks[ibrk] <= itmin: continue
				# segment spans from the previous break (or window start)
				# up to this break (or window end)
				iti = itmin
				if ibrk>0: iti = max(itmin, breaks[ibrk-1])
				itf = min( itmax, breaks[ibrk] )
				if prevline:
					ax._lines[self] += ax.plot(self._xfactor*x[iti:itf], self._yfactor*y[iti:itf], color=prevline.get_color(), **self.options.plot)
				else:
					prevline, = ax.plot(self._xfactor*x[iti:itf], self._yfactor*y[iti:itf], **self.options.plot)
					ax._lines[self] += [prevline]
				if breaks[ibrk] > itmax: break
		try : ax.hold("off")
		except: pass
		# Add labels and options
		ax.set_xlabel(self._xlabel)
		ax.set_ylabel(self._ylabel)
		self._setLimits(ax, xmin=self.options.xmin, xmax=self.options.xmax, ymin=self.options.ymin, ymax=self.options.ymax)
		self._setTitle(ax, t)
		self._setAxesOptions(ax)
		return 1
_plotOnAxes_0D = _animateOnAxes_0D
_plotOnAxes_1D = _animateOnAxes_1D
_plotOnAxes_2D = _animateOnAxes_2D
# Convert data to VTK format
	def toVTK(self, rendering="trajectory", data_format="xml"):
		"""
		Export the tracked-particle data to VTK files.

		rendering   : "trajectory" (one polyline per particle) or
		              "cloud" (one point-cloud file per timestep).
		data_format : "xml" (.vtp files) or "vtk" (legacy .vtk files).

		Requires sorted data, a 3D simulation, and the axes x (or moving_x),
		y and z. Files are written under self._exportDir.
		"""
		if not self._validate(): return
		if not self._sort:
			print("Cannot export non-sorted data")
			return
		if self._ndim_particles != 3:
			print ("Cannot export tracked particles of a "+str(self._ndim_particles)+"D simulation to VTK")
			return
		# The specified rendering option is checked
		if rendering not in ["trajectory","cloud"]:
			print ("Rendering of type {} is not valid. It should be `trajectory` or `cloud`.".format(rendering))
			return
		# The specified data format is checked
		if data_format not in ["xml","vtk"]:
			print ("Format of type {} is not valid. Should be `xml` or `vtk` ".format(data_format))
			return
		self._mkdir(self._exportDir)
		fileprefix = self._exportDir + self._exportPrefix + "_" + rendering
		ntimes = len(self._timesteps)
		# Determine the correct file extension according to the given data format
		if data_format == "xml":
			extension = "vtp"
		else:
			extension = "vtk"
		# Creation of a customed vtk object
		vtk = VTKfile()
		# Require x, y and z (x may be provided as moving_x)
		xaxis = "x"
		if "x" not in self.axes:
			xaxis = "moving_x"
		if xaxis not in self.axes or "y" not in self.axes or "z" not in self.axes:
			print("Error exporting tracked particles to VTK: axes 'x', 'y' and 'z' are required")
			return
		# Cloud mode: each time step is a separated cloud of particles
		# If there is only one timestep, the trajectory mode becomes a cloud
		if (ntimes == 1)or(rendering == "cloud"):
			data = self.getData()
			for istep,step in enumerate(self._timesteps):
				data_clean_step = {}
				# Clean data at istep: remove NaN (a particle is dropped if
				# ANY of its axes is NaN at this step)
				mask = self._np.ones(len(data[self.axes[0]][istep]), dtype=bool)
				for ax in self.axes:
					mask = self._np.logical_and(mask,self._np.logical_not(self._np.isnan(self._np.asarray(data[ax][istep]))))
				for ax in self.axes:
					#print(ax,data[ax][istep])
					data_clean_step[ax] = self._np.asarray(data[ax][istep])[mask]
				# Assemble the (npart, 3) coordinate array for this step
				pcoords_step = self._np.stack((data_clean_step[xaxis],data_clean_step["y"],data_clean_step["z"])).transpose()
				pcoords_step = self._np.ascontiguousarray(pcoords_step, dtype='float32')
				# Convert pcoords that is a numpy array into vtkFloatArray
				pcoords_step = vtk.Array(pcoords_step, "")
				# List of scalar arrays
				attributes = []
				for ax in self.axes:
					if ax not in ["x", "y", "z", "moving_x", "Id"]:
						attributes += [vtk.Array(self._np.ascontiguousarray(data_clean_step[ax].flatten(),'float32'),ax)]
					# Integer arrays
					elif ax == "Id":
						attributes += [vtk.Array(self._np.ascontiguousarray(data_clean_step[ax].flatten(),'int32'),ax)]
				vtk.WriteCloud(pcoords_step, attributes, data_format, fileprefix+"_{:06d}.{}".format(step,extension))
				print("Exportation of {}_{:06d}.{}".format(fileprefix,step,extension))
			print("Successfully exported tracked particles to VTK, folder='"+self._exportDir)
		# Trajectory mode
		elif (rendering == "trajectory"):
			data = self.getData()
			# Coordinates flattened to (npoints*nt, 3) for the polylines
			pcoords = self._np.stack((data[xaxis],data["y"],data["z"])).transpose()
			npoints, nt, nd = pcoords.shape
			pcoords = self._np.reshape(pcoords, (npoints*nt, nd))
			pcoords = self._np.ascontiguousarray(pcoords, dtype='float32')
			# Convert pcoords that is a numpy array into vtkFloatArray
			pcoords = vtk.Array(pcoords, "")
			# Segments between points to describe the trajectories
			connectivity = self._np.ascontiguousarray([[nt]+[nt*i+j for j in range(nt)] for i in range(npoints)])
			# List of scalar arrays
			attributes = []
			for ax in self.axes:
				if ax not in ["x", "y", "z", "moving_x", "Id"]:
					attributes += [vtk.Array(self._np.ascontiguousarray(data[ax].flatten(),'float32'),ax)]
				# Integer arrays
				elif ax == "Id":
					attributes += [vtk.Array(self._np.ascontiguousarray(data[ax].flatten(),'int32'),ax)]
			vtk.WriteLines(pcoords, connectivity, attributes, data_format, fileprefix+".{}".format(extension))
			print("Successfully exported tracked particles to VTK, folder='"+self._exportDir)
| [
"iustin.ouatu@physics.ox.ac.uk"
] | iustin.ouatu@physics.ox.ac.uk |
4073b08cca6ec10ec17ae2db1966b77ed9d5eda5 | 4865fa76e89edae1e4be27e92c33b079fa3a3c61 | /scripts/getPretrainedEmbedding.py | f8d38572972531d391d2974d9f3f73ec029e8e88 | [] | no_license | JoshBone/Classification | 1c804684d123d102f509fd6fc643322d72e40a73 | 2ff41fd3a5b0b60a53e91af633d69084d8e73dad | refs/heads/master | 2020-03-10T07:07:01.818737 | 2018-01-24T09:47:02 | 2018-01-24T09:47:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | import gensim.models.keyedvectors as w2v_model
import numpy as np
PATH = 'Word2Vec/GoogleNews-vectors-negative300.bin'
def getPretrainedEmbedding(vocab):
    """Build a (len(vocab), 300) embedding matrix for `vocab`.

    Rows for words found in the pretrained word2vec model get the pretrained
    vector; all other rows keep a uniform random init in [-0.25, 0.25).
    `vocab` maps word -> row index.
    """
    model = w2v_model.KeyedVectors.load_word2vec_format(PATH, binary=True)
    # keep only plain lowercase single tokens from the pretrained vocabulary
    pretrained = set()
    for key in model.vocab.iterkeys():
        if key.islower() and '_' not in key:
            pretrained.add(key)
    embeddings = np.random.uniform(-0.25, 0.25, (len(vocab), 300))
    for word in set(vocab.keys()) & pretrained:
        embeddings[vocab.get(word)] = model.word_vec(word)
    return embeddings
if __name__=='__main__':
    # NOTE(review): getPretrainedEmbedding takes a required `vocab` argument;
    # calling it with no arguments, as done here, raises TypeError at runtime.
    getPretrainedEmbedding()
| [
"NatalieWidmann@gmx.de"
] | NatalieWidmann@gmx.de |
ff2bef3529eb867a04d7c3be9cab113833f2803a | 663d5dc88b1b07599fd79665c45cf2831bdcf47f | /sol/solution (5)/main.py | d0862ba5cfafba0172a963a08b97b964a8d4a4a6 | [] | no_license | l-arkadiy-l/some-examples | c5392ba1cf5cf6afbf8f887a39c8be3801595edc | 616813129177724b12a910d8abe65119b085a8a1 | refs/heads/main | 2023-02-01T21:16:34.235364 | 2020-12-18T15:10:46 | 2020-12-18T15:10:46 | 322,626,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | import pygame
class GameOver(pygame.sprite.Sprite):
    """Sprite wrapping an image from the data/ folder; run() slides it
    rightwards by `speed` pixels per call."""
    def __init__(self, pos, image, *group):
        super(GameOver, self).__init__(*group)
        # `image` is a file name inside the data/ directory
        self.image = pygame.image.load('data/{}'.format(image))
        self.rect = self.image.get_rect()
        self.pos = pygame.Vector2(pos)
        self.speed = 1
    def run(self):
        # advance one step to the right
        self.pos.x += self.speed
pygame.init()
pygame.display.set_caption("Test")
screen = pygame.display.set_mode([600, 300])
screen.fill(pygame.Color('blue'))
# the sprite starts fully off-screen on the left and slides in until x == 0
gameover = GameOver((-600, 0), 'gameover.png')
running = True
Clock = pygame.time.Clock()
while running:
    for event in pygame.event.get():
        # on window close (translated from the original Russian comment)
        if event.type == pygame.QUIT:
            running = False
    if gameover.pos.x < 0:
        gameover.run()
    screen.fill(pygame.Color('blue'))
    screen.blit(gameover.image, (gameover.pos[0], gameover.pos[-1]))
    pygame.display.flip()
    Clock.tick(200)
| [
"noreply@github.com"
] | l-arkadiy-l.noreply@github.com |
3beb054b0e5a7e2a67d9567a58817ddd860701de | 32a81b88286597eaa1e90ca9e970955fa36bef87 | /src/npz2data.py | d786668e192afa0c4d0cbff77c1b6e7e3fd5f7ba | [] | no_license | DavidHux/ial | dfcc95673995f0428a264c406e83f106cd4890a4 | 912419fd2ba945bb42f358dd6ac282d020b34600 | refs/heads/master | 2022-07-31T21:29:15.695847 | 2020-05-23T09:22:55 | 2020-05-23T09:22:55 | 266,303,078 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,997 | py |
from u import Preprocess
import numpy as np
import os
dataset = 'citeseer'
outfile = 'out'
ff = '/Users/davidhu/Desktop/res_citeseer/ial_res_{}{}.npz'
of = '/Users/davidhu/Desktop/res_citeseer/'
ff = ''
of = ''
suffix = ['_initadj', '_finaladj']
def savedata(adj, feature, label, outfilename):
    """Write a graph dataset as three text files.

    adj        : scipy sparse adjacency matrix (CSR)
    feature    : sparse node-feature matrix
    label      : sequence of node labels, indexed by node id
    outfilename: path prefix; the module-level `of` directory is prepended
    """
    edgefile = of + '{}.edgelist'.format(outfilename)
    featurefile = of + '{}.feature'.format(outfilename)
    labelfile = of + '{}.label'.format(outfilename)
    with open(edgefile, 'w') as ef, open(featurefile, 'w') as ff, open(labelfile, 'w') as lf:
        farray = feature.toarray()
        print('len farray: {}'.format(len(farray)))
        for i in range(len(farray)):
            ff.write('{} {}\n'.format(i, ' '.join(map(str, farray[i]))))
            lf.write('{} {}\n'.format(i, label[i]))
        # BUG FIX: the original iterated range(len(adj.data)); when a sparse
        # matrix stores explicit zeros, len(adj.data) differs from the number
        # of entries returned by nonzero(), which could raise IndexError or
        # drop edges. Iterate the nonzero coordinate pairs directly instead.
        rows, cols = adj.nonzero()
        for r, c in zip(rows, cols):
            # keep each undirected edge once (upper triangle incl. diagonal)
            if r > c:
                continue
            ef.write('{} {}\n'.format(r, c))
dirprefix = '/Users/davidhu/Desktop/npz/cora-default-0.25-0.5.npz/'
# labeled-percentage tags searched for in the .npz file names ('1)' = 100%)
percents = ['0.25', '0.5', '0.75', '1)']
# input directory of .npz result files and output directory for text files
indir = '/root/hux/npz/'
# indir = dirprefix
outdir = '/root/hux/data/'
dataset = 'cora'
if __name__ == "__main__":
    # Convert every matching .npz result file under `indir` into the
    # edgelist/feature/label text format under `outdir`.
    dirs = os.listdir(indir)
    for p in percents:
        count = 0
        for d in dirs:
            print(d)
            # skip files whose name does not contain this percentage tag
            if d.find(p) == -1:
                continue
            # output name: <dataset>(init|final)-<percent>+<running index>
            outname = outdir + dataset
            if d.find('init') != -1:
                outname += 'init-{}+{}'.format(p, count)
            else:
                outname += 'final-{}+{}'.format(p, count)
            print(outname)
            count += 1
            adj, feature, label = Preprocess.loaddata(indir + d)
            adj = adj.tolil()
            # BUG FIX: `adj[j].nonzero()` returns a (rows, cols) tuple, which
            # is ALWAYS truthy, so the original `if not adj[j].nonzero():`
            # never fired. Check the number of stored entries instead, so
            # isolated nodes really do receive a self-loop.
            for j in range(feature.shape[0]):
                if adj[j].nonzero()[0].size == 0:
                    adj[j, j] = 1
            savedata(adj.tocsr(), feature, label, outname)
| [
"hux0713@gmail.com"
] | hux0713@gmail.com |
84fa33024fee869b44313aef66cd0027c98d725f | b67698b8540887845e1cb1f04ace8e15c5df05f2 | /mhap-1.6/data/generateRefGenome.py | b7b887a8809cae1e3eaf164dc2204f22d2cfd7e3 | [] | no_license | Pandaman-Ryan/solid_kmer_MinHash | 37b58b45ae4ac2415f185a9bb4975c179543d2d9 | f7522a4dc8708c56012da2e9d5eb0d6d5589eb66 | refs/heads/master | 2021-01-25T06:24:16.741562 | 2019-05-01T22:38:02 | 2019-05-01T22:38:02 | 93,563,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | '''
Generate Reference Genome
@ An Zheng
This python code can read the genome sequence and cut a given-length segment from it.
The segment contains no "N"
'''
# module
import sys
import time
import os
import random
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
def main():
    # configuration: source genome, chromosome to sample, segment size, output
    genomePath = "../../../data/chromFa/hg19.fa"
    selectedChromesome = "chr21"
    segmentLength = 1000000
    savePath = "test_sample.fa"
    # NOTE: the real-genome sampling path is disabled below (string literal
    # used as a block comment); a purely random sequence is generated instead.
    '''
    readGenome(genomePath, selectedChromesome, segmentLength, savePath)
    '''
    randomGenerator(segmentLength, savePath)
    print ("Sampling complete!")
def randomGenerator(segmentLength, savePath):
    """Generate a uniformly random DNA sequence of `segmentLength` bases
    and save it in FASTA format at `savePath`."""
    # str.join builds the sequence in O(n); the original repeated
    # `genome += nuc` concatenation, which is quadratic for long segments
    genome = ''.join(random.choice("ATCG") for _ in range(segmentLength))
    saveGenomeInFile(genome, savePath)
def readGenome(genomePath, selectedChromesome, segmentLength, savePath):
    """Cut a random N-free segment of length `segmentLength` from one
    chromosome of a FASTA genome and save it to `savePath`."""
    genomeDict = {}
    # index every record of the FASTA file by its sequence id
    # NOTE(review): the whole genome is parsed even though only one
    # chromosome is used; for hg19 this is memory-heavy — confirm intent.
    for seq in SeqIO.parse(genomePath, "fasta"):
        chromID = seq.id
        genomeDict[chromID] = seq
    seqToSave = str((genomeDict[selectedChromesome]).seq)
    seqToSave_upper = seqToSave.upper()
    # resample random windows until one contains no ambiguous base ("N");
    # WARNING: loops forever if every possible window contains an N
    while True:
        startPos = random.randint(0, len(seqToSave_upper)-segmentLength)
        genome = seqToSave_upper[startPos: startPos+segmentLength]
        if "N" not in genome:
            break
    saveGenomeInFile(genome, savePath)
def saveGenomeInFile(genome, savePath):
    """Write the sequence string `genome` to `savePath` in FASTA format."""
    recordToSave = SeqRecord(Seq(genome), id = "chr", name = "chr", description = "chr")
    # context manager guarantees the file is closed even if SeqIO.write
    # raises (the original leaked the handle in that case)
    with open(savePath, 'w') as ofile:
        SeqIO.write(recordToSave, ofile, "fasta")
main()
| [
"ryan0225@foxmail.com"
] | ryan0225@foxmail.com |
f8bb59e5c6321593c70546da67bd12de64d68797 | c8e361b109422cb890ea7741579484ae33a6c49a | /14 - bucles - continue, pass y else.py | a0cec66ad52f13f3ff94264f788df9aa20b623f5 | [] | no_license | FranLopezFreelance/sintaxis-python | d2a17fc108014273f89e73006dfea373ecec6a34 | 9e8934cded11f7e173e603842cb2b3f70243bd30 | refs/heads/master | 2020-09-28T05:29:38.831314 | 2020-01-11T15:20:48 | 2020-01-11T15:20:48 | 226,700,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | for letra in "Python":
    if letra =="h":
        continue #skips the remaining statements of this loop iteration
    print("Letra: " + letra)
# `pass` is usually used to declare a class now and implement it later, e.g.:
# Class MiClase:
    #pass
email = input("Ingrese un Email: ")
for i in email:
    if i =="@":
        arroba = True
        break;
    else: # NOTE(review): the original (Spanish) comment claimed this `else`
          # belongs to the `for` loop, but as indented it binds to the `if`;
          # the final value is the same thanks to the `break`. Confirm intent.
        arroba = False
print(arroba)
| [
"franlopez.freelance@gmail.com"
] | franlopez.freelance@gmail.com |
fe94393a2370f27c306b93aaf8e0dc2795519bb5 | 3cc6e1fa0015eb7b57f898299a397877336643bd | /Crawler/spiders/NetEaseCommentSpider.py | 7dfbc79b7ee7301b631a1d0b5f4555c7841c221e | [] | no_license | HeHeManuu/NewsSpider | 549c2e38fa71658f8303cf9412204d9f293f221c | 73843ee2589c36f3a3d852f64dc6a9fbe5115d1d | refs/heads/master | 2021-01-20T10:32:48.502123 | 2018-06-07T09:48:14 | 2018-06-07T09:48:14 | 71,707,845 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,012 | py | from scrapy.spiders import CrawlSpider
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from Crawler.items import CommentItem
from Crawler.settings import *
import re
import requests
import json
def get_163_allow_url():
    """
    Build the list of allowed-URL regex patterns, matching by date
    (translated from the original Chinese docstring).
    :return: list of regex strings covering the window [NOW - END_DAY, NOW]
    """
    start_time = NOW - datetime.timedelta(END_DAY)
    allow_url = list()
    if start_time.year == NOW.year:
        if start_time.month == NOW.month:
            # same month: one day-precise pattern per day, e.g. .../17/0715/...
            for x in range(start_time.day, NOW.day + 1):
                string = str(start_time.strftime('%m')) + (str(x) if x >= 10 else '0' + str(x))
                allow_url.append('.*?/%s/%s/.*?' % (str(start_time.year)[2:], string))
        else:
            # window crosses months: fall back to month-granularity patterns
            # NOTE(review): days outside the window but in the edge months
            # also match — confirm this widening is acceptable.
            for x in range(start_time.month, NOW.month + 1):
                allow_url.append(
                    ".*?/%s/%s\d+.*?" % (str(start_time.year)[2:], (str(x) if x >= 10 else '0' + str(x))))
    else:
        # window crosses years: one year-granularity pattern per year
        for x in range(start_time.year, NOW.year + 1):
            allow_url.append(".*?/%s/\d+/.*?" % str(x)[2:])
    return allow_url
class NetEaseCommentSpider(CrawlSpider):
    """Crawl NetEase (163.com) news channels and collect reader comments
    for every article published inside the configured date window."""
    name = "wyxw_comments"
    allowed_domains = ["news.163.com", "sports.163.com", "money.163.com", 'edu.163.com', "tech.163.com", "war.163.com"]
    # channel front pages used as crawl seeds
    start_urls = [
        'http://news.163.com',
        'http://news.163.com/special/0001386F/rank_news.html',
        "http://money.163.com/",
        "http://sports.163.com",
        "http://tech.163.com/",
        "http://edu.163.com/",
        "http://war.163.com/"
    ]
    # URL patterns to skip (photo/video galleries, review pages, old archive)
    deny_urls = [
        r'.*?news.163.com.*?/\d{2}/\d{4}/.*?',
        r'.*?.photo.*?',
        r'.*?.video.*?',
        r'.*?.picstory.*?',
        r'.*?reviews.*?'
    ]
    # subdomains that never hold article pages
    deny_domain = [
        'comment.news.163.com',
        'caozhi.news.163.com',
        'zajia.news.163.com',
        'v.news.163.com',
        'd.news.163.com'
    ]
    # first rule: follow navigation pages; second rule: parse dated articles
    rules = (
        Rule(LinkExtractor(allow=".*?163.com.*?", deny=deny_urls, deny_domains=deny_domain), follow=True),
        Rule(LinkExtractor(allow=get_163_allow_url(), deny_domains=deny_domain), callback="parse_item", follow=True)
    )
    def parse_item(self, response):
        """Yield a CommentItem for article pages (URL shape .../<n>/<n>/...)."""
        url = response.request.url
        if re.match(r'.*?163.com.*?/\d+/\d+/.*?', url):
            comment = self.get_comments(url)
            if comment:
                yield comment
    def get_comments(self, news_url):
        """Fetch all comments of one article from the NetEase comment API
        and wrap them into a CommentItem (or None if there are none)."""
        # pieces of the comment-API URL: product id + thread id + paging query
        s1 = 'http://comment.news.163.com/api/v1/products/a2869674571f77b5a0867c3d71db5856/threads/'
        s2 = '/comments/newList?offset='
        s3 = '&limit='
        # the thread id is the article id taken from the page URL
        news_id = news_url.split('/')[-1].split('.')[0]
        s = s1 + news_id + s2
        all_comments = []
        sess = requests.Session()
        sess.headers.update({'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'})
        # page through the API, 40 comments per request; `limit` is replaced
        # by the real total (newListSize) after the first page
        offset, limit = 0, 40
        while offset < limit:
            url = s + str(offset) + s3 + str(40)
            res = sess.get(url=url).text
            # NOTE(review): requests' .text is never None, so this guard
            # looks dead — confirm whether an empty-body check was intended.
            if res is None:
                break
            data = json.loads(res)
            if offset == 0:
                limit = data['newListSize']
            for k, v in data['comments'].items():
                per_comment = dict()
                per_comment['against'] = v['against']
                per_comment['vote'] = v['vote']
                per_comment['time'] = v['createTime']
                per_comment['content'] = v['content']
                per_comment['location'] = v['user']['location']
                # anonymous users have no nickname field
                per_comment['nick'] = '' if 'nickname' not in v['user'] else v['user']['nickname']
                all_comments.append(per_comment)
            offset += 40
        if len(all_comments) == 0 or limit == 0:
            return None
        comment_item = CommentItem(
            url=news_url,
            sitename='NetEase',
            num_comments=len(all_comments),
            comments=all_comments
        )
        return comment_item
| [
"hehemanuu@qq.com"
] | hehemanuu@qq.com |
374146a75aa92da31772ba6f74549016b3f0ca6d | 9730727495a39113d7f954532c2cfea73baf5327 | /app/src/server.py | b7e14b98bbd2292c8a4048444251f9dfbb273063 | [] | no_license | tszyrowski/l-docker-sim | b5ec5e94daee7549ff481f6f850d3110c0ea10d8 | 7f8b313d1924542e07121d696c9caf47947d364e | refs/heads/main | 2023-06-09T22:05:46.132322 | 2021-06-17T19:45:57 | 2021-06-17T19:45:57 | 377,598,935 | 0 | 0 | null | 2021-06-17T19:45:58 | 2021-06-16T18:58:39 | HTML | UTF-8 | Python | false | false | 1,739 | py | import json
import os
import flask
import mysql.connector
# for debugging from Visual Studio Code -- turn off flask debugger first
import ptvsd
ptvsd.enable_attach(address=('0.0.0.0', 3000))
class DBManager:
    """Minimal helper around a MySQL connection for the demo `blog` table."""

    def __init__(
        self, database="example", host="db", user="root", password_file=None
    ) -> None:
        # read the DB password from a (Docker secret) file; the context
        # manager closes it even if connect() raises
        with open(password_file, "r") as pf:
            password = pf.read()
        self.connection = mysql.connector.connect(
            user=user,
            password=password,
            host=host,
            database=database,
            auth_plugin="mysql_native_password"
        )
        self.cursor = self.connection.cursor()

    def populate_db(self):
        """(Re)create the `blog` table and seed it with sample posts."""
        # BUG FIX: "IS EXISTS" is not valid SQL -> "IF EXISTS"
        self.cursor.execute("DROP TABLE IF EXISTS blog")
        # BUG FIX: "ARCHAR" -> "VARCHAR"
        self.cursor.execute(
            "CREATE TABLE blog (id INT AUTO_INCREMENT PRIMARY KEY, title VARCHAR(255))"
        )
        # BUG FIX: the column is `title`; `table` is a reserved word and
        # does not exist in the schema created above
        self.cursor.executemany(
            "INSERT INTO blog (id, title) VALUES (%s, %s);",
            [(i, "Blog post #%d" % i) for i in range(1, 5)]
        )
        self.connection.commit()

    def query_tiles(self):
        """Return all blog titles as a list of strings."""
        self.cursor.execute("SELECT title FROM blog")
        return [row[0] for row in self.cursor]
server = flask.Flask(__name__)
conn = None
@server.route("/blogs")
def listBlog():
    """Return all blog titles as JSON, lazily creating and seeding the
    database connection on the first request."""
    global conn
    if not conn:
        conn = DBManager(password_file="/run/secrets/db-password")
        conn.populate_db()
    # query_tiles() already returns a plain list of strings; the original
    # element-by-element copy into a second list was redundant
    return flask.jsonify(conn.query_tiles())
@server.route("/")
def hello():
    """Landing endpoint, usable as a simple liveness check."""
    greeting = "Hello world from dock compose UPDATED deb"
    return flask.jsonify(greeting)
if __name__ == "__main__":
    # bind to all interfaces so the containerized app is reachable externally
    server.run(host="0.0.0.0", port=5000) | [
"tszyrowski@gmail.com"
] | tszyrowski@gmail.com |
9b21a3e8c546f43ea86c322cb275a44fcb90d47e | 914537617a8976d0d4988022f6392287443bd16f | /snailshell_cp/management/cluster_control/__init__.py | 6957b06c05ae6dda0c29542a6f116f5c615a619d | [] | no_license | Flid/SnailShell-master | 1e6fa9e137859aa464142674197efac5e64df0e5 | cd68fd5e75f984c67f9058901c7d67c613fd2869 | refs/heads/master | 2021-10-09T12:09:42.455320 | 2018-12-27T20:56:32 | 2018-12-27T20:56:32 | 44,016,863 | 0 | 0 | null | 2018-06-27T10:22:08 | 2015-10-10T15:48:47 | Python | UTF-8 | Python | false | false | 191 | py | from .provision_master import provision_master_node # noqa: F401
from .utils import generate_local_ssh_key # noqa: F401
from .provision_slave_node import provision_slave_node # noqa: F401
| [
"anton.kirilenko@babylonhealth.com"
] | anton.kirilenko@babylonhealth.com |
2b80441ecd67ef7bb3df342022d0707ac0116b06 | 0be6db54c4d864885ef0fdffa472f5a12fc8f25f | /notebooks/chinese.py | ddde9d19f8dc27a8790cfc403cd7b220c8976ea6 | [] | no_license | luohongliang/handian | 8d8e7293f2a1ace103a55f8e252fcd01ea946dc8 | 9bcdb529876d0b5167822c5dc79421708abd94a1 | refs/heads/master | 2020-04-05T14:39:09.961765 | 2017-07-19T12:33:11 | 2017-07-19T12:33:11 | 94,728,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,720 | py | # -*- coding: utf-8 -*-
import os
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import platform
#updated to use pymmseg function calls instead of plain mmseg
chinese_punctuation = [
u'\xb7',
u'\u203b',
u'\u25a1',
u'\u25c7',
u'\u25cb',
u'\u25ce',
u'\u25cf',
u'\u3016',
u'\u3017',
u'\u25a1',
u'\uff3b',
u'\u2013',
u'\u2014',
u'\u2018',
u'\u2019',
u'\u201C',
u'\u201D',
u'\u2026',
u'\u3000',
u'\u3001',
u'\u3002',
u'\u3008',
u'\u3009',
u'\u300A',
u'\u300B',
u'\u300C',
u'\u300D',
u'\u300E',
u'\u300F',
u'\u3010',
u'\u3011',
u'\u3014',
u'\u3015',
u'\uFE50',
u'\uFF01',
u'\uFF08',
u'\uFF09',
u'\uFF0C',
u'\uFF0D',
u'\uFF0E',
u'\uFF10',
u'\uFF11',
u'\uFF12',
u'\uFF13',
u'\uFF14',
u'\uFF15',
u'\uFF16',
u'\uFF17',
u'\uFF18',
u'\uFF19',
u'\uFF1A',
u'\uFF1B',
u'\uFF1F',
u'\uFF3B',
u'\uFF3C',
u'\uFF3D',
u'\u250B']
import string
# add plain ASCII letters and digits (as unicode) to the skip list
for a in string.lowercase[:],string.uppercase[:],range(0,10):
    for b in a:
        chinese_punctuation.append(str(b).decode('utf-8'))
# add the full-width variants U+FF20..U+FF59 (hex 20..59) as well
for n in range(32,90):
    chinese_punctuation.append(("\uff"+format(n,"x")).decode('unicode-escape').decode('utf-8'))
print chinese_punctuation
# mmseg is only available on non-Windows platforms
if platform.system() == 'Windows':
    raise NotImplementedError("mmseg Chinese language parser not implemented for Windows systems.")
else:
    import mmseg
import os.path
# name of the currently-loaded dictionary ('Ancient'/'Modern'), None at start
TOKENIZER = None
def reset_mmseg():
    # Re-import mmseg from scratch so a different dictionary can be loaded;
    # NOTE(review): presumably needed because mmseg keeps loaded dictionaries
    # in module-level state — confirm against the mmseg implementation.
    global TOKENIZER
    global mmseg
    TOKENIZER = None
    reload(mmseg)
    import mmseg
def ancient_chinese_tokenizer(raw_text):
    """Segment `raw_text` with mmseg using the ancient-Chinese dictionary,
    dropping tokens that contain any punctuation/ASCII character."""
    global TOKENIZER
    # BUG FIX: the original used "is not", which compares object identity;
    # string identity is an implementation detail, so compare by value.
    if TOKENIZER != 'Ancient':
        # reload mmseg to re-init
        reset_mmseg()
        # directory of ancient dictionary
        dirname = os.path.dirname(__file__)
        dictionary = os.path.join(dirname, 'ancient words.dic')
        # mmseg.dict_load_defaults()
        mmseg.Dictionary.load_words(dictionary)
        TOKENIZER = 'Ancient'
    # process text
    tokenizer = mmseg.Algorithm(raw_text.encode('utf-8-sig'))
    tokens = []
    for token in tokenizer:
        token = token.text.decode('utf-8-sig', errors='replace').replace(u'\x00', '')
        # keep non-empty tokens sharing no character with the skip list
        if token and set(token).isdisjoint(chinese_punctuation):
            tokens.append(token)
    return tokens
def modern_chinese_tokenizer(raw_text):
    """Segment `raw_text` with mmseg using the modern-Chinese dictionary,
    dropping tokens that contain any punctuation/ASCII character."""
    global TOKENIZER
    # BUG FIX: the original used "is not", which compares object identity;
    # string identity is an implementation detail, so compare by value.
    if TOKENIZER != 'Modern':
        # reload mmseg to re-init
        reset_mmseg()
        # directory of modern dictionary
        dirname = os.path.dirname(__file__)
        dictionary = os.path.join(dirname, 'modern words.dic')
        mmseg.dict_load_defaults()
        mmseg.Dictionary.load_words(dictionary)
        TOKENIZER = 'Modern'
    # process text
    #print raw_text.encode('utf-8')
    tokenizer = mmseg.Algorithm(raw_text.encode('utf-8-sig'))
    tokens = []
    for token in tokenizer:
        token = token.text.decode('utf-8-sig', errors='replace').replace(u'\x00', '')
        # keep non-empty tokens sharing no character with the skip list
        if token and set(token).isdisjoint(chinese_punctuation):
            tokens.append(token)
    return tokens
| [
"369774822@qq.com"
] | 369774822@qq.com |
95a35ab463caa9cccd718988e50c752c716a4f8a | 5797cce1f63aaecf76cf978930cd91eaf77c6eb9 | /handlers/http_errors.py | 0f2b8c9188fa20f501e8e31b8d24e8c3840aa840 | [] | no_license | elindell/cheeseandcheat | 450dbdb83e6d564b37b6fc43c1ce4bfbbfb22f21 | 949095b0204d6619c7e328b692242a110d20cdff | refs/heads/master | 2016-09-10T10:44:26.447626 | 2014-06-30T05:44:33 | 2014-06-30T05:44:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | import webapp2
import jinja2
import config
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(config.TEMPLATE_DIR))
def handle_404(request, response, exception):
    """Render the custom 404 page and set the HTTP status accordingly."""
    page = jinja_environment.get_template('404.html').render()
    response.out.write(page)
    response.set_status(404)
def handle_500(request, response, exception):
    """Render the custom 500 page and set the HTTP status accordingly."""
    page = jinja_environment.get_template('500.html').render()
    response.out.write(page)
    response.set_status(500)
def add_handlers(app):
    """Register the custom error pages on the WSGI application."""
    for status, handler in ((404, handle_404), (500, handle_500)):
        app.error_handlers[status] = handler
"evan.lindell@gmail.com"
] | evan.lindell@gmail.com |
4e1b2edf8e1b449f05062ca16e1fe4c68e92bb31 | 5520bddd95875aefb79b013858e5ec2f42c040d7 | /utilities/distance.py | 8d573ab5454fbe22fab854bcb4e720e9c6cbebf8 | [] | no_license | Kunal-Varma/Social-distance-analysis | 3b8c35b80b55ae1e64fd7c83f705330a9c809c6d | b154dda771dd0f162fc26835e51f23151d0c5899 | refs/heads/master | 2022-12-02T12:24:27.418413 | 2020-08-13T10:34:25 | 2020-08-13T10:34:25 | 287,249,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,630 | py |
from utilities.draw_bbox_cv2 import show_close_persons
from utilities.draw_bbox_cv2 import plot_close_lines
from utilities.draw_bbox_cv2 import draw_rectangle
def euclidean_distance(p1, p2):
    """Straight-line distance between two 2-D points (x, y)."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return (dx * dx + dy * dy) ** 0.5
def calculate_distance(image_np, bbox_cords):
    """Draw a green box around every detected person, re-draw in red those
    closer than 160 px to someone else, and return the centroid pairs of
    close persons (each unordered pair appears twice, once per direction)."""
    close_persons = []
    for i, person1 in enumerate(bbox_cords):
        left_1, right_1, top_1, bottom_1, scores_1 = person1
        p1_centroid = (left_1 + right_1) // 2, (top_1 + bottom_1) // 2
        box1_start = (int(left_1), int(top_1))
        box1_end = (int(right_1), int(bottom_1))
        # default: green box for every person
        draw_rectangle(image_np, box1_start, box1_end, (0, 230, 0), scores_1, 3)
        for j, person2 in enumerate(bbox_cords):
            if i == j:
                continue
            left_2, right_2, top_2, bottom_2, scores_2 = person2
            p2_centroid = (left_2 + right_2) // 2, (top_2 + bottom_2) // 2
            # pixel-wise distance between the two centroids
            if euclidean_distance(p1_centroid, p2_centroid) <= 160:
                close_persons.append((p1_centroid, p2_centroid))
                # flag the violation by re-drawing person 1 in red
                draw_rectangle(image_np, box1_start, box1_end, (255, 0, 0), scores_1, 3)
    return close_persons
def calc_dist_and_plot_close(image_np, bbox_cords, im_height):
    """Run the proximity analysis and draw its results on the frame."""
    # centroid pairs of people that are too close (each pair stored twice)
    close_persons = calculate_distance(image_np, bbox_cords)
    # number of close *pairs*: the list holds both (a, b) and (b, a)
    num_close = int(len(close_persons) // 2)
    # overlay the count of close persons on the frame
    show_close_persons(image_np, num_close, im_height)
    # connect each close pair with a line
    for p1, p2 in close_persons:
        plot_close_lines(p1, p2, image_np)
| [
"noreply@github.com"
] | Kunal-Varma.noreply@github.com |
3be93f7425ba85cd9b0e4b9aa2532c40bf0cf47a | 32b7af2809c88749817d0b1e9c7529b2ce9c9e70 | /plot_freq.py | 42bc8e79aad876d96f12f6212ae1588bb891fdfc | [] | no_license | geogradient/meris_ts | 59f419c3dafe4a6d8db1c20abc13edc282375ed2 | 2e8dfbb98abf5b172a050fb79e33c0aca61bd5a8 | refs/heads/master | 2021-01-20T01:03:02.917435 | 2014-07-31T16:01:51 | 2014-07-31T16:01:51 | 22,100,407 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,433 | py | #!/usr/bin/env python
__author__ = "Jose M. Beltran <gemtoolbox@gmail.com>"
__version__ = "0.1.0"
import numpy as np
from PyQt4 import QtCore, QtGui, Qt
import PyQt4.Qwt5 as Qwt
class Freq_Curve(Qwt.QwtPlotCurve):
    """Antialiased Qwt curve with a convenience styling method."""
    def __init__(self, *args):
        super(Freq_Curve, self).__init__(*args)
        self.setRenderHint(Qwt.QwtPlotItem.RenderAntialiased)
    def setAttributes(self, color, weight, isFilled=False):
        """Set the pen color/width; optionally fill under the curve."""
        c = Qt.QColor(color)
        # the pen was set identically in both branches of the original
        # if/else; only the brush depends on isFilled
        self.setPen(Qt.QPen(c, weight))
        if isFilled:
            self.setBrush(c)
class Freq_Plot(Qwt.QwtPlot):
    """Qwt plot widget preconfigured for frequency histograms:
    standard deviation on X, count on Y, dotted grid, no margins."""
    def __init__(self, parent = None, *args):
        super(Freq_Plot, self).__init__(parent, *args)
        # registries for curves/data keyed by caller-chosen identifiers
        self.curves = {}
        self.data ={}
        # let the widget grow with the window, min width 350 px
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())
        self.setSizePolicy(sizePolicy)
        self.setMinimumSize(QtCore.QSize(350, 0))
        #
        #self.setCanvasBackground(GemColor(217, 217, 217, 255))
        # set plot default layout
        self.plotLayout().setMargin(0)
        self.plotLayout().setCanvasMargin(0)
        self.plotLayout().setAlignCanvasToScales(True)
        # set Legend
        '''
        legend = Qwt.QwtLegend()
        legend.setItemMode(Qwt.QwtLegend.CheckableItem)
        self.insertLegend(legend, Qwt.QwtPlot.RightLegend)
        '''
        # set X Axis
        # NOTE(review): setAxisTitle returns None; `bottomAxis` is unused
        bottomAxis = self.setAxisTitle(Qwt.QwtPlot.xBottom, 'Standard deviation')
        #self.setAxisScale(Qwt.QwtPlot.xBottom, 0, 80)
        #self.setAxisMaxMajor(Qwt.QwtPlot.xBottom, 36) # set a maximum of 10 Major ticks
        #self.setAxisMaxMinor(Qwt.QwtPlot.xBottom, 0) # force zero minor ticks
        self.enableAxis(Qwt.QwtPlot.xBottom)
        # set Y Axis
        self.setAxisTitle(Qwt.QwtPlot.yLeft, 'Count')
        #self.setAxisScale(Qwt.QwtPlot.yLeft, 0, 45)
        # set Grid
        grid = Qwt.QwtPlotGrid()
        grid.attach(self)
        grid.setPen(Qt.QPen(Qt.Qt.black, 0, Qt.Qt.DotLine))
        #
        self.replot()
    def showCurve(self, item, on):
        # toggle one plot item's visibility and redraw the canvas
        item.setVisible(on)
        self.replot()
| [
"beltran.data@gmail.com"
] | beltran.data@gmail.com |
8e8598e1f3351cced28f92ec247c302f5c9454f6 | 67c21537a0bf3ad3a7375d2e815cc48df1cdf362 | /hungry.py | 4fc03ccb1876a450fc4402ace1e4d52cb690fd88 | [] | no_license | gpoerzgen/test | 0f0a1f69977f8fc6370dc1ad1cb946af97f1ecb8 | cea5262b82adea228352692d447b642d95533e85 | refs/heads/main | 2023-08-13T20:20:35.836332 | 2021-10-04T16:23:14 | 2021-10-04T16:23:14 | 413,343,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | hungry=input("are you hungry?")
# Branch on the user's answer captured above.
if hungry == "yes":
    # Hungry: suggest two meals, one per line.
    for suggestion in ("eat hamburger", "eat fish"):
        print(suggestion)
else:
    # Not hungry: back to work.
    print("do your homework")
"guido.poerzgen@gmx.de"
] | guido.poerzgen@gmx.de |
5dd748d3e226a468c90a215fc1bc11c4555b131c | 42f28a3e77c8e252bbc041ce3ecad25e67b85ba8 | /python/w3resource/python-execises/part-I/103.py | e1c5d5af654c8802b54d6b6fb17e77111b8dea5f | [] | no_license | linhnvfpt/homework | f7eae10670df0adc4038b2856be8215d89695010 | 31556dad588b77cafed577671cb56b46cc063406 | refs/heads/master | 2023-01-12T01:20:22.738126 | 2020-06-28T16:28:15 | 2020-06-28T16:28:15 | 60,790,466 | 0 | 0 | null | 2023-01-06T06:21:59 | 2016-06-09T16:41:20 | Python | UTF-8 | Python | false | false | 218 | py | # Write a Python program to extract the filename from a given path
import os
print()
# Absolute path of this script, resolved against the current working dir.
absolute_path = os.path.abspath("103.py")
print(absolute_path)
# Final component of a Windows-style path.
windows_path = r"D:\lesson\homework\python\w3resource\python-execises\part-I\103.py"
print(os.path.basename(windows_path))
| [
"linh_nv@ehicas.com.vn"
] | linh_nv@ehicas.com.vn |
236ca9a7fc63497201167500bd84c06b69ca3832 | 175075b2e11002808d5948c11620dca702db480b | /HackerRank-Security Key Spaces.py | ff3d6bee21ee7aec3dd5d7dd0401e8c358d2024f | [] | no_license | Jeffreyhung/hackerrank | df6790c2062cdca246e5dc1274e250d229b8186a | 1d0b572762466de36226a0341ffa56cd2aea2759 | refs/heads/master | 2020-04-16T16:18:20.408779 | 2019-04-01T19:18:18 | 2019-04-01T19:18:18 | 165,731,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | x = input()
n = input()
data = list(str(x))
ans =""
for i in data:
i = (int(i) +n)%10
ans += str(i)
print ans
| [
"noreply@github.com"
] | Jeffreyhung.noreply@github.com |
8b554aa06725ce452f1b06fce96b1528b4dbff40 | 9e4203d91a21d4d09a600c3791de2f85637d2438 | /aqiSpider.py | 871a224b982720e6bea337b7f2cd5e0485b97dc6 | [] | no_license | lsyiverson/weather-py | b954ff5061e0d9ef8a8ac3502b511da68b528718 | ba37ea53016c511365641940793531d951723a70 | refs/heads/master | 2021-01-11T19:49:26.347582 | 2017-03-16T03:49:15 | 2017-03-16T03:49:15 | 79,405,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,662 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# encoding=utf-8
import urllib2
from lxml import etree
from datetime import datetime, timedelta
import re
from utils.fileutils import mkdir, touch ,append, rmtree
from openpyxl import Workbook
# Start from a clean output directory so reruns don't mix results.
rmtree('./aqiresult')
# Matches runs of whitespace; used to strip whitespace from scraped strings.
pattern = re.compile('\s+')
site = 'http://tianqihoubao.com'
# Fetch the AQI index page. The site serves GBK, so re-encode to UTF-8 and
# drop <wbr> soft line-break tags before parsing.
homePage = urllib2.urlopen(site + '/aqi/').read().decode('gbk').encode('utf8').replace('<wbr>', '')
homePageHtml = etree.HTML(homePage, parser=etree.HTMLParser(encoding='utf8'))
# One <dl> element per province, each containing the province's city links.
provinceElementXPath = homePageHtml.xpath('//div[@id="content"]/div[@class="citychk"]/dl')
workbook = Workbook()
cnAQIWorksheet = workbook.active
cnAQIWorksheet.title = 'China City AQI'
# Header row: province, city, date, quality level, AQI, daily AQI rank,
# then the six pollutant readings.
cnAQIWorksheet.append(['省份', '城市', '日期', '质量等级', 'AQI指数', '当天AQI排名', 'PM2.5', 'PM10', 'NO2', 'SO2', 'CO', 'O3'])
for provinceIndex, provinceElmTree in enumerate(provinceElementXPath):
    # provinceIndex = 2
    # provinceElmTree = provinceElementXPath[provinceIndex]
    provinceName = provinceElmTree.find('dt/b').text
    provincePath = './aqiresult/' + provinceName
    mkdir(provincePath)
    for cityIndex, cityElm in enumerate(provinceElmTree.findall('dd/a')):
        # cityIndex = 0
        # cityElm = provinceElmTree.findall('dd/a')[cityIndex]
        # Presumably a sampling limit for the first province -- only its
        # first four cities are scraped. TODO confirm this is intentional.
        if (provinceIndex == 0 and cityIndex > 3):
            break
        cityName = cityElm.text
        filepath = provincePath + '/' + cityName.strip() + '.txt'
        touch(filepath)
        # City URL prefix: strip whitespace and the trailing '.html'.
        cityPre = re.sub(pattern, '', cityElm.attrib['href'])[:-5]
        # NOTE(review): this string value is dead -- it is overwritten by the
        # datetime below on the first loop iteration.
        startMonth = '201501'
        # 24 consecutive months starting at 2015-01.
        for i in range(24):
            # i=0
            startMonth = datetime.strptime('201501', '%Y%m')
            # Python 2 integer division: i/12 adds whole years, i%12 months.
            month = startMonth.replace(year=startMonth.year+i/12, month=startMonth.month+i%12)
            monthstr = month.strftime('%Y%m')
            cityMonthAqiUrl = site + cityPre + '-' + monthstr + '.html'
            cityMonthAqiPage = urllib2.urlopen(cityMonthAqiUrl).read().decode('gbk').encode('utf8')
            cityMonthAqiPageHtml = etree.HTML(cityMonthAqiPage, parser=etree.HTMLParser(encoding='utf8'))
            # One table row per day; row 0 is the header and is skipped below.
            cityMonthAqiContentXPath = cityMonthAqiPageHtml.xpath('//div[@class="api_month_list"]/table[@class="b"]/tr')
            monthlyData = ''
            for dailyAqiElmTree in cityMonthAqiContentXPath[1:]:
                # Columns: date, quality level, AQI, rank, PM2.5, PM10,
                # NO2, SO2, CO, O3 -- whitespace stripped from each cell.
                date = re.sub(pattern, '', dailyAqiElmTree.findall('td')[0].text)
                level = re.sub(pattern, '', dailyAqiElmTree.findall('td')[1].text)
                aqi = re.sub(pattern, '', dailyAqiElmTree.findall('td')[2].text)
                rank = re.sub(pattern, '', dailyAqiElmTree.findall('td')[3].text)
                pm25 = re.sub(pattern, '', dailyAqiElmTree.findall('td')[4].text)
                pm10 = re.sub(pattern, '', dailyAqiElmTree.findall('td')[5].text)
                no2 = re.sub(pattern, '', dailyAqiElmTree.findall('td')[6].text)
                so2 = re.sub(pattern, '', dailyAqiElmTree.findall('td')[7].text)
                co = re.sub(pattern, '', dailyAqiElmTree.findall('td')[8].text)
                o3 = re.sub(pattern, '', dailyAqiElmTree.findall('td')[9].text)
                cnAQIWorksheet.append([provinceName, cityName, date, level, aqi, rank, pm25, pm10, no2, so2, co, o3])
                # NOTE: 'format' shadows the builtin of the same name here.
                format = '%s %s %s %s %s %s %s %s %s %s'
                values = (date, level, aqi, rank, pm25, pm10, no2, so2, co, o3)
                dailyAqiData = format % values
                monthlyData += dailyAqiData + '\n'
            # Append this month's rows to the per-city text file.
            append(filepath, monthlyData)
            print provinceName + cityName + monthstr + u' 下载成功'
workbook.save('./aqiresult/aqi.xlsx')
print u'导出Excel成功'
| [
"lsyiverson@gmail.com"
] | lsyiverson@gmail.com |
26c2f5e55d19a42e4299bc3c03c1aa8d472539d8 | 38a42a205eaa5a0a46989c95f0b01f7e04b96a9e | /uoft/CSC148H1F Intro to Comp Sci/@week3_stacks/@@Exercise3/stack_ex.py | 25de6d1577c709a79973a271d6b1427ee3ffe857 | [
"MIT"
] | permissive | Reginald-Lee/biji-ben | d24cd1189ca3e9ed7b30e5b20a40137e8d6d4039 | 37009dfdbef9a15c2851bcca2a4e029267e6a02d | refs/heads/master | 2023-05-06T23:06:49.819088 | 2020-06-10T12:07:47 | 2020-06-10T12:07:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | # Exercise 3: More Stack Exercises
#
# CSC148 Fall 2014, University of Toronto
# Instructor: David Liu
# ---------------------------------------------
# STUDENT INFORMATION
#
# List your information below, in format
# <full name>, <utorid>
# <Rui Qiu>, <999292509>
# ---------------------------------------------
from stack import Stack, EmptyStackError
class SmallStackError(Exception):
print("The stack has fewer than two elements.")
def reverse_top_two(stack):
""" (Stack) -> NoneType
Reverse the top two elements on stack.
Raise a SmallStackError if stack has fewer than two elements.
>>> stack = Stack()
>>> stack.push(1)
>>> stack.push(2)
>>> reverse_top_two(stack)
>>> stack.pop()
1
>>> stack.pop()
2
"""
try:
stack.is_empty() == False
except:
raise EmptyStackError
else:
try:
t1 = stack.pop()
t2 = stack.pop()
stack.push(t1)
stack.push(t2)
except:
raise SmallStackError
return stack
def reverse(stack):
""" (Stack) -> NoneType
Reverse all the elements of stack.
>>> stack = Stack()
>>> stack.push(1)
>>> stack.push(2)
>>> reverse(stack)
>>> stack.pop()
1
>>> stack.pop()
2
"""
temp = Stack()
temp2 = Stack()
while not stack.is_empty():
stuff = stack.pop()
temp.push(stuff)
while not temp.is_empty():
stuff = temp.pop()
temp2.push(stuff)
while not temp2.is_empty():
stuff = temp2.pop()
stack.push(stuff)
return stack | [
"rexarski@gmail.com"
] | rexarski@gmail.com |
a8d4ea1ab28833bfd43a58cd9b108e03ae0b7c42 | 9d90b664ebbd11a57ee6156c528081551b98055b | /wsgi/local_data/brython_programs/tuple1.py | fb6bc882e1ee285aa89bedf32f13c2ec02f31f08 | [] | no_license | 2014cdag21/c21 | d4f85f91ba446feb6669a39903dda38c21e8b868 | faf4b354f7d1d4abec79c683d7d02055c6bab489 | refs/heads/master | 2020-06-03T17:54:16.144118 | 2014-06-20T09:29:02 | 2014-06-20T09:29:02 | 19,724,479 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | d = (11,12,13,'asdf',14,15.0)
# Note - tuples are immutable types
# Common operations:
# length of a tuple
print(len(d))
# indexation (in Python it starts from zero)
print(d[0], d[1])
# slicing
print(d[0:2]) # equals to (11, 12)
print(d[2:-1]) # equals to (13, 'asdf', 14)
print(d[:2]) # same as d[0:2], equals to (11, 12)
print(d[3:]) # equals to ('asdf', 14, 15.0)
# contains (membership test with the 'in' operator)
print((15 in d, 100 in d)) # returns (True, False)
"chiamingyen@gmail.com"
] | chiamingyen@gmail.com |
ced8b4b80daafb73b1e2876e1dbf19ed04ecd6c9 | 46d25667ceb388b8f247b3b87d96729aa31081a7 | /None_hint.py | c8b8a4cd3a4ebda5daa564f24f76cb4ae3ae91aa | [] | no_license | haroon-rasheed/code_practice | 9cbac426e362907d63fd91ec22c07b527053bd88 | fe9f59b08a1e921ecb2eb405590156ed472436f2 | refs/heads/master | 2021-01-23T13:44:09.128846 | 2014-03-03T06:33:49 | 2014-03-03T06:33:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | #!/usr/bin/env python
lt = []
print "list length", len(lt)
#if (lt is None):
if (len(lt) == 0 ):
print "in NONE"
else:
print "not in none"
| [
"haroon_77_job@yahoo.com"
] | haroon_77_job@yahoo.com |
0e8f422dbaf4ff83f83fc49dc9410897d3314dcd | 7e9daf6a2a3ebfb969e793f92afc0dc5f1c2fc35 | /cat_mouse.py | 940150c58ad59356e7f9220c3b08a3bfc16612a7 | [] | no_license | NARESHSWAMI199/5-Star-On-Hacker-Rank-Python | e43ce5cb3429d2a683c37e6f4ba6440d073d47c2 | 51f245d1d0966de21ddf861b22fe3379e7c8a0a7 | refs/heads/main | 2023-02-25T03:05:25.330205 | 2021-01-19T13:49:27 | 2021-01-19T13:49:27 | 325,296,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | quries_size = int(input())
# For each query, a line "x y z": cat A at x, cat B at y, mouse at z.
# The closer cat catches the mouse; if equidistant, the mouse escapes.
# (The original unused 'maxmum' accumulator has been removed.)
for _ in range(quries_size):
    query = list(map(int, input().split()))
    # Distance of each cat from the mouse, via abs() instead of the
    # original hand-rolled greater-than branches.
    dist_of_a = abs(query[0] - query[2])
    dist_of_b = abs(query[1] - query[2])
    if dist_of_a < dist_of_b:
        print("Cat A")
    elif dist_of_b < dist_of_a:
        print("Cat B")
    else:
        print("Mouse C")
| [
"swaminaresh993@gmail.com"
] | swaminaresh993@gmail.com |
9884a68d7623d4263a342f9a222eea20af7658f3 | bb87125c8b35cc9ec7de9941be26b48a1483ec93 | /frcstats/migrations/0003_drive.py | c2183b6fb5c95a120aa7b98e80e2afcefdfdd488 | [] | no_license | alicen6/first_robotics | d52fa9d3d3f519d85d599bc004c404d073081f7e | 1bbe29e42cda8aa8e892b734a6dafb41b78207d9 | refs/heads/dev | 2016-08-12T06:10:31.582872 | 2016-03-22T01:03:22 | 2016-03-22T01:03:22 | 51,491,136 | 2 | 2 | null | 2016-02-12T18:19:29 | 2016-02-11T03:12:22 | Python | UTF-8 | Python | false | false | 1,005 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-25 20:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Drive model (robot drivetrain info) linked to a Team.

    The model is stored in the custom table 'robot_info'.
    """

    dependencies = [
        ('frcstats', '0002_auto_20160225_1933'),
    ]

    operations = [
        migrations.CreateModel(
            name='Drive',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('drivetrain', models.CharField(max_length=20)),
                ('gear_reduc', models.CharField(max_length=20)),
                ('motors', models.CharField(max_length=20)),
                ('extra_notes', models.CharField(max_length=120)),
                # Cascade-deleting link back to the owning team.
                ('team_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='frcstats.Team')),
            ],
            options={
                'db_table': 'robot_info',
            },
        ),
    ]
| [
"alicen@cobalt5.com"
] | alicen@cobalt5.com |
ddf74b3c82971765112795435b8df5cfc14f9c4f | 3ee5be115fa2fe3fac70155f9c3e2754ec7972d0 | /batman/product/admin.py | c2d45f414d38b1a51928994a3ba86a7c54bbc536 | [] | no_license | ankitjhunjhunwala03/batman | 48341f979a79ebff9e47509565c2d7a76c344166 | b8646cd8c21e3ab18801217984f588f89662a8dd | refs/heads/master | 2016-08-12T04:43:34.612598 | 2016-03-16T15:30:30 | 2016-03-16T15:30:30 | 54,040,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | from django.contrib import admin
from models import Product, Attribute, Category, AttributeValue, Variation, ProductImage
# Register your models here.
# Register every catalogue model with the Django admin in one pass.
for model in (Attribute, AttributeValue, Category, Product, Variation,
              ProductImage):
    admin.site.register(model)
| [
"ankit.jhunjhunwala@teabox.com"
] | ankit.jhunjhunwala@teabox.com |
d4952e4625b9ebd20f0d0deb21cdd0ca66b480cf | faa0ce2a95da958be3bfb171cdff29eeb43c3eb6 | /py-exercises/JulieTestModule/characters/shadow.py | f71a4d7d759a9855c1f3ccbf67630318ea88332d | [] | no_license | julianapeace/digitalcrafts-exercises | 98fe4e20420c47cf9d92d16c45ac60dc35a49a6a | 98e6680138d55c5d093164a47da53e1ddb6d064c | refs/heads/master | 2021-08-30T04:17:09.997205 | 2017-12-16T00:22:22 | 2017-12-16T00:22:22 | 103,176,043 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from characters.base import Character
class Shadow(Character):
    """A 'Shadow' character: 1 health, 5 power, no armor/evade, 4 coins."""
    # All stats are forwarded unchanged to the Character base class.
    def __init__(self, name = 'Shadow', health = 1, power = 5, armor = 0, evade = 0, coincount = 4):
        super().__init__(name, health, power, armor, evade, coincount)
| [
"chancecordelia@gmail.com"
] | chancecordelia@gmail.com |
321dec02300a1bf263288bff2f4a3b9a909adf41 | 99bb97cefece86945c890d1c45b85be25bb648fc | /kids/migrations/0003_user_status.py | cb87421e1ce4e192de354e04f16781d1e59f716c | [
"MIT"
] | permissive | dimple1024/kidszone | 3d74d4f178c2d99b03503e3bc5490979db508090 | dfa8b7c6be2c5dce03803655a078fdf6e9ab7370 | refs/heads/master | 2022-01-15T09:24:22.675632 | 2019-06-15T20:15:12 | 2019-06-15T20:15:12 | 97,975,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-10-25 23:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a 'status' CharField (with a default tagline) to kids.User."""

    dependencies = [
        ('kids', '0002_auto_20170804_1334'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='status',
            field=models.CharField(default='Kids Zone is Cool', max_length=40),
        ),
    ]
| [
"dimple.edcellpup@gmail.com"
] | dimple.edcellpup@gmail.com |
ba595f61c755b754deabe1b2bf4e7e9f70e606a5 | ebd2239e01603fa92bb1eb3dd807aada5856b2fe | /stannon/plotting.py | 2186338b1dfae49a643792687e86283a1e13c9fb | [] | no_license | adrains/plumage | 349f3e2fac42e9d41e673d3bb4e268f70045246b | 9d687b8486c3fae5e6958c7eb68a0bdcddf486e6 | refs/heads/master | 2023-08-18T01:55:10.589791 | 2023-08-15T14:21:16 | 2023-08-15T14:21:16 | 198,985,502 | 3 | 3 | null | 2019-10-11T21:50:58 | 2019-07-26T09:11:03 | Python | UTF-8 | Python | false | false | 35,505 | py | """Plotting functions related to Stannon
"""
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import plumage.plotting as pplt
import matplotlib.ticker as plticker
from collections import OrderedDict
from stannon.vectorizer import PolynomialVectorizer
def plot_label_recovery(
    label_values,
    e_label_values,
    label_pred,
    e_label_pred,
    obs_join,
    abundance_labels=(),
    teff_lims=(2800,4500),
    logg_lims=(4.4,5.4),
    feh_lims=(-1.0,0.75),
    elinewidth=0.4,
    show_offset=True,
    fn_suffix="",
    title_text="",
    teff_ticks=(500,250,100,50),
    logg_ticks=(0.5,0.25,0.2,0.1),
    feh_ticks=(0.5,0.25,0.5,0.25),):
    """Plot 1x3 grid of Teff, logg, and [Fe/H] Cannon vs literature
    comparisons.

    Saves as paper/cannon_param_recovery<fn_suffix>.<pdf/png>.

    Parameters
    ----------
    label_values, e_label_values: 2D numpy array
        Literature label values and uncertainties, columns [teff, logg, feh].

    label_pred, e_label_pred: 2D numpy array
        Cannon-predicted label values and uncertainties, same columns.

    obs_join: pandas.DataFrame
        Observation/literature info table (currently unused here; kept for
        signature consistency with the per-source plotting functions).

    abundance_labels: tuple, default: ()
        Currently unused here; accepted for signature compatibility.
        (Changed from a mutable list default to an immutable tuple.)

    teff_lims, logg_lims, feh_lims: float tuple
        Axis limits for Teff, logg, and [Fe/H] panels respectively.

    elinewidth: float, default: 0.4
        Errorbar line width (currently unused in this function).

    show_offset: bool, default: True
        Whether to plot the median offset as text.

    fn_suffix: string, default: ''
        Suffix to append to saved figure filenames.

    title_text: string, default: ''
        Currently unused.

    teff_ticks, logg_ticks, feh_ticks: float tuple
        Tick spacings passed through to the generic comparison plotter.
    """
    plt.close("all")
    # Make plot
    fig, axes = plt.subplots(1, 3)
    fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.95, wspace=0.5)
    # Temperatures, coloured by literature [Fe/H]
    pplt.plot_std_comp_generic(
        fig=fig,
        axis=axes[0],
        lit=label_values[:,0],
        e_lit=e_label_values[:,0],
        fit=label_pred[:,0],
        e_fit=e_label_pred[:,0],
        colour=label_values[:,2],
        fit_label=r"$T_{\rm eff}$ (K, Cannon)",
        lit_label=r"$T_{\rm eff}$ (K, Literature)",
        cb_label="[Fe/H] (Literature)",
        x_lims=teff_lims,
        y_lims=teff_lims,
        cmap="viridis",
        show_offset=show_offset,
        ticks=teff_ticks,)
    # Ensure we only plot logg for stars we haven't given a default value to.
    # NOTE(review): this masks on column 2 ([Fe/H] uncertainty) rather than
    # column 1 (logg uncertainty) -- confirm this is intentional.
    logg_mask = e_label_values[:,2] < 0.2
    # Gravity
    pplt.plot_std_comp_generic(
        fig=fig,
        axis=axes[1],
        lit=label_values[:,1][logg_mask],
        e_lit=e_label_values[:,1][logg_mask],
        fit=label_pred[:,1][logg_mask],
        e_fit=e_label_pred[:,1][logg_mask],
        colour=label_values[:,2][logg_mask],
        fit_label=r"$\log g$ (Cannon)",
        lit_label=r"$\log g$ (Literature)",
        cb_label="[Fe/H] (Literature)",
        x_lims=logg_lims,
        y_lims=logg_lims,
        cmap="viridis",
        show_offset=show_offset,
        ticks=logg_ticks,)
    # Ensure we only plot [Fe/H] for stars we haven't given a default value to.
    feh_mask = e_label_values[:,2] < 0.2
    # [Fe/H], coloured by literature Teff
    pplt.plot_std_comp_generic(
        fig=fig,
        axis=axes[2],
        lit=label_values[:,2][feh_mask],
        e_lit=e_label_values[:,2][feh_mask],
        fit=label_pred[:,2][feh_mask],
        e_fit=e_label_pred[:,2][feh_mask],
        colour=label_values[:,0][feh_mask],
        fit_label=r"[Fe/H] (Cannon)",
        lit_label=r"[Fe/H] (Literature)",
        cb_label=r"$T_{\rm eff}\,$K (Literature)",
        x_lims=feh_lims,
        y_lims=feh_lims,
        cmap="magma",
        show_offset=show_offset,
        ticks=feh_ticks,)
    # Save plot
    fig.set_size_inches(12, 3)
    fig.tight_layout()
    fig.savefig("paper/cannon_param_recovery{}.pdf".format(fn_suffix))
    fig.savefig("paper/cannon_param_recovery{}.png".format(fn_suffix), dpi=300)
def plot_label_recovery_per_source(
    label_values,
    e_label_values,
    label_pred,
    e_label_pred,
    obs_join,
    teff_lims=(2800,4500),
    logg_lims=(4.4,5.4),
    feh_lims=(-1.0,0.75),
    elinewidth=0.4,
    show_offset=True,
    fn_suffix="",
    title_text="",
    teff_ticks=(500,250,100,50),
    logg_ticks=(0.5,0.25,0.2,0.1),
    feh_ticks=(0.5,0.25,0.5,0.25),):
    """Plot a 1x4 grid of Cannon vs literature comparisons split by the
    source of the literature value: interferometric Teff, then [Fe/H] from
    Mann+15, Rojas-Ayala+12, and binary-primary (CPM) samples.

    Saves as paper/cannon_param_recovery_ps<fn_suffix>.<pdf/png>.

    Parameters
    ----------
    label_values, e_label_values: 2D numpy array
        Literature label values and uncertainties, columns [teff, logg, feh].

    label_pred, e_label_pred: 2D numpy array
        Cannon-predicted label values and uncertainties, same columns.

    obs_join: pandas.DataFrame
        Observation/literature info table; the columns 'teff_int',
        'feh_m15', 'feh_ra12', and 'is_cpm' select each panel's sample.

    teff_lims, logg_lims, feh_lims: float tuple
        Axis limits for the Teff and [Fe/H] panels (logg_lims unused here).

    elinewidth: float, default: 0.4
        Errorbar line width (currently unused in this function).

    show_offset: bool, default: True
        Whether to plot the median offset as text.

    fn_suffix: string, default: ''
        Suffix to append to saved figure filenames.

    title_text: string, default: ''
        Currently unused.

    teff_ticks, logg_ticks, feh_ticks: float tuple
        Tick spacings passed through to the generic comparison plotter
        (logg_ticks unused here).
    """
    plt.close("all")
    # Make plot
    fig, (ax_teff_int, ax_feh_m15, ax_feh_ra12, ax_feh_cpm) = plt.subplots(1,4)
    fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.95, wspace=0.5)
    # Interferometric temperatures
    int_mask = ~np.isnan(obs_join["teff_int"])
    pplt.plot_std_comp_generic(
        fig=fig,
        axis=ax_teff_int,
        lit=label_values[:,0][int_mask],
        e_lit=e_label_values[:,0][int_mask],
        fit=label_pred[:,0][int_mask],
        e_fit=e_label_pred[:,0][int_mask],
        colour=label_values[:,2][int_mask],
        fit_label=r"$T_{\rm eff}$ (K, Cannon)",
        lit_label=r"$T_{\rm eff}$ (K, Interferometry)",
        cb_label="[Fe/H] (Literature)",
        x_lims=teff_lims,
        y_lims=teff_lims,
        cmap="viridis",
        show_offset=show_offset,
        ticks=teff_ticks,)
    # Mann+15 [Fe/H]
    feh_mask = ~np.isnan(obs_join["feh_m15"])
    pplt.plot_std_comp_generic(
        fig=fig,
        axis=ax_feh_m15,
        lit=label_values[:,2][feh_mask],
        e_lit=e_label_values[:,2][feh_mask],
        fit=label_pred[:,2][feh_mask],
        e_fit=e_label_pred[:,2][feh_mask],
        colour=label_values[:,0][feh_mask],
        fit_label=r"[Fe/H] (Cannon)",
        # Fixed axis-label typo: was "[Fe/H]]" with a doubled bracket.
        lit_label=r"[Fe/H] (Mann+15)",
        cb_label=r"$T_{\rm eff}\,$K (Literature)",
        x_lims=feh_lims,
        y_lims=feh_lims,
        cmap="magma",
        show_offset=show_offset,
        ticks=feh_ticks,)
    # Rojas-Ayala+12 [Fe/H]
    feh_mask = ~np.isnan(obs_join["feh_ra12"])
    pplt.plot_std_comp_generic(
        fig=fig,
        axis=ax_feh_ra12,
        lit=label_values[:,2][feh_mask],
        e_lit=e_label_values[:,2][feh_mask],
        fit=label_pred[:,2][feh_mask],
        e_fit=e_label_pred[:,2][feh_mask],
        colour=label_values[:,0][feh_mask],
        fit_label=r"[Fe/H] (Cannon)",
        lit_label=r"[Fe/H] (Rojas-Ayala+12)",
        cb_label=r"$T_{\rm eff}\,$K (Literature)",
        x_lims=feh_lims,
        y_lims=feh_lims,
        cmap="magma",
        show_offset=show_offset,
        ticks=feh_ticks,)
    # CPM [Fe/H]
    feh_mask = obs_join["is_cpm"].values
    pplt.plot_std_comp_generic(
        fig=fig,
        axis=ax_feh_cpm,
        lit=label_values[:,2][feh_mask],
        e_lit=e_label_values[:,2][feh_mask],
        fit=label_pred[:,2][feh_mask],
        e_fit=e_label_pred[:,2][feh_mask],
        colour=label_values[:,0][feh_mask],
        fit_label=r"[Fe/H] (Cannon)",
        lit_label=r"[Fe/H] (Binary Primary)",
        cb_label=r"$T_{\rm eff}\,$K (Literature)",
        x_lims=feh_lims,
        y_lims=feh_lims,
        cmap="magma",
        show_offset=show_offset,
        ticks=feh_ticks,)
    # Save plot
    fig.set_size_inches(16, 3)
    fig.tight_layout()
    fig.savefig("paper/cannon_param_recovery_ps{}.pdf".format(fn_suffix))
    fig.savefig("paper/cannon_param_recovery_ps{}.png".format(fn_suffix), dpi=200)
def plot_label_recovery_abundances(
    label_values,
    e_label_values,
    label_pred,
    e_label_pred,
    obs_join,
    abundance_labels,
    feh_lims=(-1.0,0.75),
    show_offset=True,
    fn_suffix="",
    feh_ticks=(0.5,0.25,0.5,0.25),):
    """Plot a 1xN grid of Cannon vs literature abundance comparisons, one
    panel per abundance label.

    Saves as paper/cannon_param_recovery_abundance<fn_suffix>.<pdf/png>.
    Returns early (with a message) if abundance_labels is empty.

    Parameters
    ----------
    label_values, e_label_values: 2D numpy array
        Literature label values and uncertainties; columns are
        [teff, logg, feh] followed by one column per abundance.

    label_pred, e_label_pred: 2D numpy array
        Cannon-predicted label values and uncertainties, same columns.

    obs_join: pandas.DataFrame
        Observation/literature info table; 'label_source_<abundance>'
        columns select the stars with a literature value for each panel.

    abundance_labels: list of str
        Abundance label names, e.g. ['Ti_H']; '[X/H]' axis labels are
        derived from the text before the underscore.

    feh_lims: float tuple
        Axis limits for each abundance panel.

    show_offset: bool, default: True
        Whether to plot the median offset as text.

    fn_suffix: string, default: ''
        Suffix to append to saved figure filenames.

    feh_ticks: float tuple
        Tick spacings passed through to the generic comparison plotter.
    """
    plt.close("all")
    n_abundances = len(abundance_labels)
    # Nothing to do for an empty label list
    if n_abundances == 0:
        print("No abundances to plot!")
        return
    # Make plot
    fig, axes = plt.subplots(1, n_abundances)
    fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.95, wspace=0.5)
    # plt.subplots returns a bare Axes (not an array) for a single panel
    if n_abundances == 1:
        axes = [axes]
    # Plot each abundance
    for abundance_i, abundance in enumerate(abundance_labels):
        # Abundance columns start after [teff, logg, feh]
        label_i = 3 + abundance_i
        abundance_label = "[{}/H]".format(abundance.split("_")[0])
        # Only plot stars with a recorded literature source for this label
        abund_sources = obs_join["label_source_{}".format(abundance)]
        abundance_mask = np.array([src != "" for src in abund_sources])
        pplt.plot_std_comp_generic(
            fig=fig,
            axis=axes[abundance_i],
            lit=label_values[:,label_i][abundance_mask],
            e_lit=e_label_values[:,label_i][abundance_mask],
            fit=label_pred[:,label_i][abundance_mask],
            e_fit=e_label_pred[:,label_i][abundance_mask],
            colour=label_values[:,0][abundance_mask],
            fit_label=r"{} (Cannon)".format(abundance_label),
            lit_label=r"{} (Literature)".format(abundance_label),
            cb_label=r"$T_{\rm eff}\,$K (Literature)",
            x_lims=feh_lims,
            y_lims=feh_lims,
            cmap="magma",
            show_offset=show_offset,
            ticks=feh_ticks,)
    # Save plot
    fig.set_size_inches(4*n_abundances, 3)
    fig.tight_layout()
    fig.savefig(
        "paper/cannon_param_recovery_abundance{}.pdf".format(fn_suffix))
    fig.savefig(
        "paper/cannon_param_recovery_abundance{}.png".format(fn_suffix),
        dpi=300)
def plot_cannon_cmd(
    benchmark_colour,
    benchmark_mag,
    benchmark_feh,
    science_colour=None,
    science_mag=None,
    x_label=r"$BP-RP$",
    y_label=r"$M_{K_S}$",
    highlight_mask=None,
    highlight_mask_label="",
    bp_rp_cutoff=0,):
    """Plot a colour magnitude diagram of the Cannon benchmark stars,
    coloured by [Fe/H], optionally overplotting science targets and
    highlighting a subset of benchmarks.

    Saves the result as paper/cannon_cmd.<pdf/png>.

    Parameters
    ----------
    benchmark_colour, benchmark_mag, benchmark_feh: float array
        Colour, absolute magnitude, and [Fe/H] of the benchmark sample.

    science_colour, science_mag: float array, default: None
        Colour and absolute magnitude of the science sample; only those
        with colour > bp_rp_cutoff are plotted. Skipped if None/empty.

    x_label, y_label: string, default: r'$BP-RP$', r'$M_{K_S}$'
        Axis labels for the X and Y axes respectively.

    highlight_mask: bool array, default: None
        Optional mask selecting benchmarks to outline for diagnostics.

    highlight_mask_label: string, default: ''
        Legend label for the highlighted subset.

    bp_rp_cutoff: float, default: 0
        Minimum colour for science targets to be plotted (avoids plotting
        science stars beyond the extent of the benchmarks).
    """
    plt.close("all")
    fig, axis = plt.subplots()
    # Plot benchmarks, coloured by [Fe/H]
    scatter = axis.scatter(
        benchmark_colour,
        benchmark_mag,
        zorder=1,
        c=benchmark_feh,
        label="Benchmark",
        alpha=0.9,
        cmap="viridis",
    )
    cb = fig.colorbar(scatter, ax=axis)
    cb.set_label("[Fe/H]")
    # Plot science targets, making sure to not plot any science targets beyond
    # the extent of our benchmarks
    if (science_colour is not None and science_mag is not None
        and len(science_colour) > 0 and len(science_mag) > 0):
        scatter = axis.scatter(
            science_colour[science_colour > bp_rp_cutoff],
            science_mag[science_colour > bp_rp_cutoff],
            marker="o",
            edgecolor="black",
            zorder=2,
            alpha=0.6,
            label="Science",)
    # If we've been given a highlight mask, plot for diagnostic reasons
    if highlight_mask is not None:
        scatter = axis.scatter(
            benchmark_colour[highlight_mask],
            benchmark_mag[highlight_mask],
            marker="o",
            c=benchmark_feh[highlight_mask],
            edgecolor="k",
            linewidths=1.2,
            zorder=1,
            label=highlight_mask_label,)
    plt.legend(loc="best", fontsize="large")
    # Flip magnitude axis (brighter stars towards the top)
    ymin, ymax = axis.get_ylim()
    axis.set_ylim((ymax, ymin))
    axis.set_xlabel(x_label, fontsize="large")
    axis.set_ylabel(y_label, fontsize="large")
    axis.tick_params(axis='both', which='major', labelsize="large")
    axis.xaxis.set_major_locator(plticker.MultipleLocator(base=0.5))
    axis.xaxis.set_minor_locator(plticker.MultipleLocator(base=0.25))
    axis.yaxis.set_major_locator(plticker.MultipleLocator(base=1.0))
    axis.yaxis.set_minor_locator(plticker.MultipleLocator(base=0.5))
    fig.tight_layout()
    plt.savefig("paper/cannon_cmd.png", dpi=200)
    plt.savefig("paper/cannon_cmd.pdf")
def plot_kiel_diagram(
    teffs,
    e_teffs,
    loggs,
    e_loggs,
    fehs,
    max_teff=4200,
    label="",):
    """Plot a Kiel diagram (logg vs Teff, both axes flipped) coloured by
    [Fe/H], with errorbars, for stars cooler than max_teff.

    Saves the result as paper/cannon_kiel_<label>.<png/pdf>.

    Parameters
    ----------
    teffs, e_teffs: float array
        Effective temperatures and their uncertainties.

    loggs, e_loggs: float array
        Surface gravities and their uncertainties.

    fehs: float array
        Metallicities used for the colour bar.

    max_teff: float, default: 4200
        Only stars with Teff below this are plotted (i.e. those within the
        bounds of the trained Cannon model).

    label: string, default: ''
        Suffix inserted into the saved figure filenames.
    """
    plt.close("all")
    fig, axis = plt.subplots()
    # Mask only those stars within the bounds of our trained Cannon model
    mask = teffs < max_teff
    # Scatter points coloured by [Fe/H]
    scatter = axis.scatter(
        teffs[mask],
        loggs[mask],
        zorder=1,
        c=fehs[mask],
        cmap="viridis",
    )
    cb = fig.colorbar(scatter, ax=axis)
    cb.set_label("[Fe/H]")
    # Underplot the errorbars (zorder below the coloured points)
    axis.errorbar(
        x=teffs[mask],
        y=loggs[mask],
        xerr=e_teffs[mask],
        yerr=e_loggs[mask],
        zorder=0,
        ecolor="black",
        elinewidth=0.4,
        fmt=".",
    )
    # Flip both axes, per Kiel diagram convention
    ymin, ymax = axis.get_ylim()
    axis.set_ylim((ymax, ymin))
    xmin, xmax = axis.get_xlim()
    axis.set_xlim((xmax, xmin))
    axis.set_xlabel(r"$T_{\rm eff}$ (K)", fontsize="large")
    axis.set_ylabel(r"$\log g$", fontsize="large")
    axis.tick_params(axis='both', which='major', labelsize="large")
    axis.xaxis.set_major_locator(plticker.MultipleLocator(base=200))
    axis.xaxis.set_minor_locator(plticker.MultipleLocator(base=100))
    axis.yaxis.set_major_locator(plticker.MultipleLocator(base=0.1))
    axis.yaxis.set_minor_locator(plticker.MultipleLocator(base=0.05))
    fig.tight_layout()
    plt.savefig("paper/cannon_kiel_{}.png".format(label), dpi=200)
    plt.savefig("paper/cannon_kiel_{}.pdf".format(label))
def plot_theta_coefficients(
    sm,
    teff_scale=0.3,
    x_lims=(5700,6400),
    y_spec_lims=(0,2.5),
    y_theta_lims=(-0.1,0.1),
    y_s2_lims=(-0.001,0.01),
    x_ticks=(500,100),
    linewidth=0.5,
    alpha=0.9,
    fn_label="",
    fn_suffix="",
    leg_loc="upper center",
    line_list=None,
    line_list_cm="cubehelix",
    species_to_plot=[],
    species_line_width=0.75,
    species_line_lims_spec=(1.6,2.0),
    species_line_lims_scatter=(0.003,0.004),
    only_plot_first_order_coeff=True,):
    """Plot fluxes, values of first order theta coefficients for Teff, logg,
    and [Fe/H], as well as model scatter - all against wavelength.

    Produces a 3-panel (or 5-panel, if only_plot_first_order_coeff is False)
    figure sharing the wavelength axis: training spectra coloured by Teff,
    first-order theta coefficients, model scatter, and optionally the
    quadratic and cross-term coefficients. Saves the figure as
    paper/theta_coefficients_<fn_label><fn_suffix>.<pdf/png>.

    Parameters
    ----------
    sm: trained Cannon/Stannon model
        Must provide wavelengths, masked_wl, adopted_wl_mask, theta, s2, P,
        N_COEFF, L, label_names, training_labels, training_data, and
        bad_px_mask attributes (as used below).

    teff_scale: float, default: 0.3
        Factor applied to the Teff theta coefficient before plotting so it
        fits on the same axis as the other coefficients.

    x_lims: float tuple, default: (5700, 6400)
        Wavelength axis limits in Angstroms.

    y_spec_lims, y_theta_lims, y_s2_lims: float tuple
        Y axis limits of the spectra, linear-theta, and scatter panels.

    x_ticks: float tuple, default: (500, 100)
        (major, minor) wavelength tick spacings.

    linewidth, alpha: float, default: 0.5, 0.9
        Line width and opacity for the theta and scatter traces.

    fn_label, fn_suffix: string, default: ''
        Strings inserted into the saved figure filenames.

    leg_loc: string, default: 'upper center'
        Legend location passed to matplotlib.

    line_list: pandas.DataFrame or None, default: None
        Optional atomic line list with 'wl' and 'ion' columns to annotate
        on the spectra and scatter panels.

    line_list_cm: string, default: 'cubehelix'
        Colour map used to distinguish line-list species.

    species_to_plot: list, default: []
        Which species from line_list to annotate. NOTE(review): mutable
        default argument -- it is only read here, but beware of mutating it.

    species_line_width: float, default: 0.75
        Width of the species marker lines.

    species_line_lims_spec, species_line_lims_scatter: float tuple
        (ymin, ymax) extents of the species markers on the spectra and
        scatter panels respectively.

    only_plot_first_order_coeff: bool, default: True
        If True, plot 3 panels; if False, also plot the quadratic and
        cross-term coefficient panels (5 panels total).
    """
    # -------------------------------------------------------------------------
    # Setup
    # -------------------------------------------------------------------------
    plt.close("all")
    # Three axes if plotting only spectra, first order coeff, and the scatter
    if only_plot_first_order_coeff:
        fig, axes = plt.subplots(3, 1, sharex=True, figsize=(16, 8))
    # Five axes if we're plotting all theta coefficients
    else:
        fig, axes = plt.subplots(5, 1, sharex=True, figsize=(16, 12))
    fig.subplots_adjust(hspace=0.001, wspace=0.001)
    axes = axes.flatten()
    # We want to avoid plotting lines across the gaps we've excluded, so we're
    # going to insert nans in the breaks so that matplotlib leaves a gap. This
    # is a bit clunky, but this involves looping over each gap and inserting a
    # fake wavelength value and corresponding nan for the theta and scatter
    # arrays.
    gap_px = np.argwhere(
        np.abs(sm.masked_wl[:-1] - sm.masked_wl[1:]) > 1.0)[:,0]
    gap_px = np.concatenate((gap_px+1, [sm.P]))
    px_min = 0
    wave = []
    theta = []
    scatter = []
    # Build per-segment arrays, appending one fake wavelength + nan at the
    # end of each segment so matplotlib breaks the line there.
    for px_max in gap_px:
        wave.append(np.concatenate(
            (sm.masked_wl[px_min:px_max], [sm.masked_wl[px_max-1]+1])))
        theta.append(np.concatenate(
            (sm.theta[px_min:px_max],
            np.atleast_2d(np.full(sm.N_COEFF, np.nan)))))
        scatter.append(np.concatenate((sm.s2[px_min:px_max], [np.nan])))
        px_min = px_max
    wave = np.concatenate(wave)
    theta = np.concatenate(theta)
    scatter = np.concatenate(scatter)
    # Adjust scale (Teff coefficient only, so it fits the shared y axis)
    theta[:,1] *= teff_scale
    # Grab each set of coefficients (linear, quadratic, cross-term) and format
    # as appropriate for plotting
    vectorizer = PolynomialVectorizer(sm.label_names, 2)
    theta_lvec = vectorizer.get_human_readable_label_vector()
    # Format for plotting (LaTeX-ify label names for the legend)
    theta_lvec = theta_lvec.replace("teff", r"$T_{eff}$")
    theta_lvec = theta_lvec.replace("logg", r"$\log g$")
    theta_lvec = theta_lvec.replace("feh", "[Fe/H]")
    # Any labels beyond the first three are abundances named like 'X_H';
    # render them as '[X/H]'.
    if sm.L > 3:
        for abundance_i, abundance in enumerate(sm.label_names[3:]):
            label_i = 4 + abundance_i
            abundance_label = "[{}/H]".format(abundance.split("_")[0])
            theta_lvec = \
                theta_lvec.replace(abundance, abundance_label)
    theta_lvec = theta_lvec.replace("*", r"$\times$")
    theta_lvec = theta_lvec.replace("^2", r"$^2$")
    theta_lvec = theta_lvec.split(" + ")
    # Column indices of each coefficient family in the theta array
    # (column 0 is the constant term).
    linear_term_ii = np.arange(sm.L) + 1
    quad_term_ii = np.array(
        [i for i in range(len(theta_lvec)) if "^" in theta_lvec[i]])
    cross_term_ii = np.array(
        [i for i in range(len(theta_lvec)) if "times" in theta_lvec[i]])
    # -------------------------------------------------------------------------
    # Panel 1: Spectra
    # -------------------------------------------------------------------------
    # Initialise teff colours
    cmap = cm.get_cmap("magma")
    teff_min = np.min(sm.training_labels[:,0])
    teff_max = np.max(sm.training_labels[:,0])
    # Do bad px masking
    masked_spectra = sm.training_data.copy()
    masked_spectra[sm.bad_px_mask] = np.nan
    # First plot spectra, each coloured by its normalised Teff
    for star_i, star in enumerate(masked_spectra):
        teff = sm.training_labels[star_i, 0]
        colour = cmap((teff-teff_min)/(teff_max-teff_min))
        axes[0].plot(sm.wavelengths, star, linewidth=0.2, c=colour)
    # Only show teff_scale if != 1.0
    if teff_scale == 1.0:
        teff_label = r"$T_{\rm eff}$"
    else:
        teff_label = r"$T_{\rm eff} \times$" + "{:0.1f}".format(teff_scale)
    # -------------------------------------------------------------------------
    # Panel 2: First Order Coefficients
    # -------------------------------------------------------------------------
    # NOTE(review): `labels` below appears unused -- legend labels are taken
    # from theta_lvec instead.
    labels = [teff_label, r"$\log g$", "[Fe/H]"]
    for label_i in linear_term_ii:
        axes[1].plot(
            wave,
            theta[:,label_i],
            linewidth=linewidth,
            alpha=alpha,
            label=theta_lvec[label_i],)
    # And first order abundance coefficients if we have it
    if sm.L > 3:
        for abundance_i, abundance in enumerate(sm.label_names[3:]):
            label_i = 4 + abundance_i
            abundance_label = "[{}/H]".format(abundance.split("_")[0])
            axes[1].plot(
                wave,
                theta[:,label_i],
                linewidth=linewidth,
                alpha=alpha,
                label=abundance_label,)
    # Zero reference line across the full wavelength range
    axes[1].hlines(0, 3400, 7100, linestyles="dashed", linewidth=0.1)
    # -------------------------------------------------------------------------
    # Panel 3: Scatter
    # -------------------------------------------------------------------------
    axes[2].plot(wave, scatter, linewidth=linewidth,)
    # -------------------------------------------------------------------------
    # [Optional] Atomic line plot
    # -------------------------------------------------------------------------
    # Overplot line list on spectrum and scatter subplots
    if line_list is not None and len(species_to_plot) > 0:
        # Remove any species not in our list
        species_mask = np.isin(line_list["ion"].values, species_to_plot)
        line_list_adopt = line_list[species_mask].copy()
        # Count how many unique species are in the line list
        unique_species = list(set(line_list_adopt["ion"].values))
        unique_species.sort()
        n_unique_species = len(unique_species)
        # Assign each species an evenly spaced position on the colour map
        colour_i = np.arange(len(unique_species))/n_unique_species
        species_mapping_dict = OrderedDict(zip(unique_species, colour_i))
        # Get the colour map for our lines
        cmap = cm.get_cmap(line_list_cm)
        # Only print those in our wavelength range
        line_mask = np.logical_and(
            line_list_adopt["wl"].values > x_lims[0],
            line_list_adopt["wl"].values < x_lims[1],)
        for line_i, line_data in line_list_adopt[line_mask].iterrows():
            # Label lines on spectral plot
            axes[0].vlines(
                x=line_data["wl"],
                ymin=species_line_lims_spec[0],
                ymax=species_line_lims_spec[1],
                linewidth=species_line_width,
                colors=cmap(species_mapping_dict[line_data["ion"]]),
                label=line_data["ion"],)
            # Label lines on scatter plot
            axes[2].vlines(
                x=line_data["wl"],
                ymin=species_line_lims_scatter[0],
                ymax=species_line_lims_scatter[1],
                linewidth=species_line_width,
                colors=cmap(species_mapping_dict[line_data["ion"]]),
                label=line_data["ion"],)
    else:
        n_unique_species = 0
    # -------------------------------------------------------------------------
    # [Optional] Panel 4 + 5: Quadratic + cross term coefficients
    # -------------------------------------------------------------------------
    if not only_plot_first_order_coeff:
        # Plot quadratic coefficents
        for quad_coeff_i in quad_term_ii:
            axes[3].plot(
                wave,
                theta[:,quad_coeff_i],
                linewidth=linewidth,
                alpha=alpha,
                label=theta_lvec[quad_coeff_i],)
        # Plos cross-term coefficients
        for cross_coeff_i in cross_term_ii:
            axes[4].plot(
                wave,
                theta[:,cross_coeff_i],
                linewidth=linewidth,
                alpha=alpha,
                label=theta_lvec[cross_coeff_i],)
        axes[3].set_ylabel(r"$\theta_{\rm Quadratic}$")
        axes[4].set_ylabel(r"$\theta_{\rm Cross}$")
    # -------------------------------------------------------------------------
    # Final Setup
    # -------------------------------------------------------------------------
    for axis in axes:
        # Mask emission and telluric regions for all panels
        pplt.shade_excluded_regions(
            wave=sm.wavelengths,
            bad_px_mask=~sm.adopted_wl_mask,
            axis=axis,
            res_ax=None,
            colour="red",
            alpha=0.25,
            hatch=None)
        # Legend (deduplicated via the ordered label->handle dict)
        handles, labels = axis.get_legend_handles_labels()
        by_label = OrderedDict(zip(labels, handles))
        leg = axis.legend(
            handles=by_label.values(),
            labels=by_label.keys(),
            loc=leg_loc,
            ncol=np.max([sm.L, n_unique_species]),
            fontsize="small",)
        for legobj in leg.legendHandles:
            legobj.set_linewidth(1.5)
    axes[0].set_xlim(x_lims)
    axes[0].set_ylim(y_spec_lims)
    axes[1].set_ylim(y_theta_lims)
    axes[2].set_ylim(y_s2_lims)
    axes[0].set_ylabel(r"Flux")
    axes[1].set_ylabel(r"$\theta_{\rm Linear}$")
    axes[2].set_ylabel(r"Scatter")
    axes[2].xaxis.set_major_locator(plticker.MultipleLocator(base=x_ticks[0]))
    axes[2].xaxis.set_minor_locator(plticker.MultipleLocator(base=x_ticks[1]))
    plt.xlabel("Wavelength (A)")
    plt.tight_layout()
    plt.savefig("paper/theta_coefficients_{}{}.pdf".format(fn_label, fn_suffix))
    plt.savefig("paper/theta_coefficients_{}{}.png".format(fn_label, fn_suffix),
        dpi=200)
def plot_spectra_comparison(
sm,
obs_join,
fluxes,
bad_px_masks,
labels_all,
source_ids,
y_offset=1.8,
x_lims=(5400,7000),
x_ticks=(200,100),
fn_label="",
data_label="",
star_name_col="simbad_name",
sort_col_name=None,
do_reverse_sort=True,
do_plot_eps=False,
fig_size=(12,8),
data_plot_label="Observed",
data_plot_colour="k",):
"""Plot a set of observed spectra against their Cannon generated spectra
equivalents.
"""
# Intialise
plt.close("all")
# If plotting diagnostic plot, use custom size
if fn_label == "d":
n_inches = len(source_ids) * 2# / 3
fig, ax = plt.subplots(1, 1, figsize=(12, n_inches))
else:
fig, ax = plt.subplots(1, 1, figsize=fig_size,)
fig.subplots_adjust(hspace=0.001, wspace=0.001)
# We want to sort based on a given column, but it's tricky since we have
# multiple data stuctures. So what we'll do is to use the sort order
# indices as the y axis offsets.
if sort_col_name is not None and sort_col_name in obs_join.columns.values:
# First reduce obs_join down to just the selected source_ids
selected_mask = np.isin(obs_join.index, source_ids)
sorted_indices = np.argsort(
obs_join[selected_mask][sort_col_name].values)
if do_reverse_sort:
sorted_indices = sorted_indices[::-1]
obs_join = obs_join[selected_mask].iloc[sorted_indices]
fluxes = fluxes[selected_mask][sorted_indices]
bad_px_masks = bad_px_masks[selected_mask][sorted_indices]
labels_all = labels_all[selected_mask][sorted_indices]
source_ids = obs_join.index.values
# Mask out emission and telluric regions
pplt.shade_excluded_regions(
wave=sm.wavelengths,
bad_px_mask=~sm.adopted_wl_mask,
axis=ax,
res_ax=None,
colour="red",
alpha=0.25,
hatch=None)
# Do bad px masking
masked_spectra = fluxes.copy()
masked_spectra[bad_px_masks] = np.nan
# For every star in source_ids, plot blue and red spectra
for star_i, source_id in enumerate(source_ids):
# Get the index of the particular benchmark
bm_i = int(np.argwhere(obs_join.index == source_id))
# Generate a model spectrum (with nans for our excluded regions)
labels = labels_all[bm_i]
spec_gen = np.full(fluxes.shape[1], np.nan)
spec_gen[sm.adopted_wl_mask] = sm.generate_spectra(labels)
# Plot observed spectrum
ax.plot(
sm.wavelengths,
masked_spectra[bm_i] + star_i*y_offset,
linewidth=0.2,
c=data_plot_colour,
label=data_plot_label,)
# Plot model spectrum
ax.plot(
sm.wavelengths,
spec_gen + star_i*y_offset,
linewidth=0.2,
c="r",
label="Cannon",)
# Label spectrum
star_txt = (
r"{}, $T_{{\rm eff}}={:0.0f}\,$K, "
r"[Fe/H]$ = ${:+.2f}, $(BP-RP)={:0.2f}$")
star_txt = star_txt.format(
obs_join.loc[source_id][star_name_col],
labels[0],
labels[2],
obs_join.loc[source_id]["BP_RP_dr3"],)
ax.text(
x=x_lims[0]+(x_lims[1]-x_lims[0])/2,
y=star_i*y_offset+1.6,
s=star_txt,
horizontalalignment="center",
)
# Only plot one set of legend items
if star_i == 0:
leg = ax.legend(loc="upper right", ncol=2,)
for legobj in leg.legendHandles:
legobj.set_linewidth(1.5)
ax.set_yticks([])
ax.set_xlim(x_lims)
ax.set_ylim((0, star_i*y_offset+2.4))
ax.xaxis.set_major_locator(plticker.MultipleLocator(base=x_ticks[0]))
ax.xaxis.set_minor_locator(plticker.MultipleLocator(base=x_ticks[1]))
ax.set_xlabel(r"Wavelength (${\rm \AA}$)")
plt.tight_layout()
fn = "paper/cannon_spectra_comp_{}_{}".format(
data_label, fn_label).replace("__", "_")
plt.savefig("{}.pdf".format(fn))
# Don't plot a PNG for the diagnostic plot of all spectra since the image
# dimensions will be excessively large
if fn_label != "d":
plt.savefig("{}.png".format(fn), dpi=300)
if do_plot_eps:
plt.savefig("{}.eps".format(fn))
def plot_label_uncertainty_adopted_vs_true_labels(
sm,
n_bins=20,
fn_label="",):
"""Function to plot histograms comparing the adopted and true label
distributions (+the difference between them) at the conclusion of training
a label uncertainties model. Currently works for three parameter and four
parameter models.
Parameters
----------
sm: Stannon Model
Trained *label uncertainties* Stannon model.
n_bins: float, default: 20
fn_label: string, default: ""
Label of the filename.
"""
if sm.model_type != "label_uncertainties":
raise ValueError("Stannon model must be label_uncertainties.")
labels_adopt = sm.masked_labels.copy()
labels_true = (sm.true_labels * sm.std_labels + sm.mean_labels).copy()
delta_labels = labels_adopt - labels_true
med_dl = np.median(delta_labels, axis=0)
std_dl = np.std(delta_labels, axis=0)
plt.close("all")
if sm.L == 3:
fig, ((ax_t, ax_g, ax_f), (ax_dt, ax_dg, ax_df)) = \
plt.subplots(2, 3, figsize=(9, 6))
else:
fig, ((ax_t, ax_g, ax_f, ax_ti), (ax_dt, ax_dg, ax_df, ax_dti)) = \
plt.subplots(2, 4, figsize=(12, 6))
# -------------------------------------------------------------------------
# Teff
# -------------------------------------------------------------------------
# Plot histogram for adopted and true labels
_ = ax_t.hist(
labels_adopt[:,0],
bins=n_bins,
alpha=0.5,
label=r"$T_{\rm eff, adopt}$")
_ = ax_t.hist(
labels_true[:,0],
bins=n_bins,
alpha=0.5,
label=r"$T_{\rm eff, true}$")
ax_t.legend(loc="best")
ax_t.set_xlabel(r"$T_{\rm eff}$")
# Plot histogram of the *difference* between these two sets of labels
_ = ax_dt.hist(delta_labels[:,0], bins=n_bins, alpha=0.5)
ax_dt.set_xlabel(r"${\Delta}T_{\rm eff}$")
# Plot text for median +/- std
x_lims = ax_dt.get_xlim()
y_lims = ax_dt.get_ylim()
text = r"${:0.0f}\pm{:0.0f}\,K$".format(med_dl[0], std_dl[0])
ax_dt.text(
x=((x_lims[1]-x_lims[0])/2 + x_lims[0]),
y=0.5*(y_lims[1]-y_lims[0])+y_lims[0],
s=text,
horizontalalignment="center",)
# -------------------------------------------------------------------------
# logg
# -------------------------------------------------------------------------
# Plot histogram for adopted and true labels
_ = ax_g.hist(
labels_adopt[:,1],
bins=n_bins,
alpha=0.5,
label=r"$\log g_{\rm adopt}$")
_ = ax_g.hist(
labels_true[:,1],
bins=n_bins,
alpha=0.5,
label=r"$\log g_{\rm true}$")
ax_g.legend(loc="best")
ax_g.set_xlabel(r"$\log g$")
# Plot histogram of the *difference* between these two sets of labels
_ = ax_dg.hist(delta_labels[:,1], bins=n_bins, alpha=0.5)
ax_dg.set_xlabel(r"$\Delta\log g$")
# Plot text for median +/- std
x_lims = ax_dg.get_xlim()
y_lims = ax_dg.get_ylim()
text = r"${:0.3f}\pm{:0.3f}\,$dex".format(med_dl[1], std_dl[1])
ax_dg.text(
x=((x_lims[1]-x_lims[0])/2 + x_lims[0]),
y=0.5*(y_lims[1]-y_lims[0])+y_lims[0],
s=text,
horizontalalignment="center",)
# -------------------------------------------------------------------------
# [Fe/H]
# -------------------------------------------------------------------------
# Plot histogram for adopted and true labels
_ = ax_f.hist(
labels_adopt[:,2],
bins=n_bins,
alpha=0.5,
label=r"[Fe/H]$_{adopt}$")
_ = ax_f.hist(
labels_true[:,2],
bins=n_bins,
alpha=0.5,
label=r"[Fe/H]$_{true}$")
ax_f.legend(loc="best")
ax_f.set_xlabel(r"[Fe/H]]")
# Plot histogram of the *difference* between these two sets of labels
_ = ax_df.hist(delta_labels[:,2], bins=n_bins, alpha=0.5)
ax_df.set_xlabel(r"$\Delta$[Fe/H]")
# Plot text for median +/- std
x_lims = ax_df.get_xlim()
y_lims = ax_df.get_ylim()
text = r"${:0.3f}\pm{:0.3f}\,$dex".format(med_dl[2], std_dl[2])
ax_df.text(
x=((x_lims[1]-x_lims[0])/2 + x_lims[0]),
y=0.5*(y_lims[1]-y_lims[0])+y_lims[0],
s=text,
horizontalalignment="center",)
# -------------------------------------------------------------------------
# [Ti/H]
# -------------------------------------------------------------------------
# Plot histogram for adopted and true labels
if sm.L == 4:
_ = ax_ti.hist(
labels_adopt[:,3],
bins=n_bins,
alpha=0.5,
label=r"[Ti/H]$_{adopt}$")
_ = ax_ti.hist(
labels_true[:,3],
bins=n_bins,
alpha=0.5,
label=r"[Ti/H]$_{true}$")
ax_ti.legend(loc="best")
ax_ti.set_xlabel(r"[Ti/H]]")
# Plot histogram of the *difference* between these two sets of labels
_ = ax_dti.hist(delta_labels[:,3], bins=n_bins, alpha=0.5)
ax_dti.set_xlabel(r"$\Delta$[Ti/H]")
# Plot text for median +/- std
x_lims = ax_dti.get_xlim()
y_lims = ax_dti.get_ylim()
text = r"${:0.3f}\pm{:0.3f}\,$dex".format(med_dl[3], std_dl[3])
ax_dti.text(
x=((x_lims[1]-x_lims[0])/2 + x_lims[0]),
y=0.5*(y_lims[1]-y_lims[0])+y_lims[0],
s=text,
horizontalalignment="center",)
# -------------------------------------------------------------------------
# Tidy up and save
# -------------------------------------------------------------------------
plt.tight_layout()
plt.savefig("paper/adopted_vs_true_label_hists{}.pdf".format(fn_label))
plt.savefig("paper/adopted_vs_true_label_hists{}.png".format(fn_label),
dpi=200) | [
"adam.d.rains@gmail.com"
] | adam.d.rains@gmail.com |
bf0ca61705ecaf897f2be3f6cb36e053ed3f9633 | 4e5c638b491fe8b34cb2decc4e0284efc9863a14 | /init_model.py | e44b511705073dc51a260fe7075548c800167358 | [] | no_license | zyzisyz/flow_sre | 0f7986c28ba8d76d7bcd6ec4a2a2940780fede11 | 0dad65ec8ffa6d9ef6b59f325b141cac4df227f0 | refs/heads/master | 2022-04-04T19:17:09.894725 | 2020-02-24T06:01:54 | 2020-02-24T06:01:54 | 235,137,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,510 | py | #!/usr/bin/env python
# coding=utf-8
# *************************************************************************
# > File Name: _init_model.py
# > Author: Yang Zhang
# > Mail: zyziszy@foxmail.com
# > Created Time: Mon 20 Jan 2020 11:19:38 PM CST
# ************************************************************************/
import flows as fnn
import math
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def init_model(args, num_inputs=72):
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
device = torch.device("cuda:" + args.device)
else:
device = torch.device("cpu")
# network structure
num_hidden = args.num_hidden
num_cond_inputs = None
act = 'relu'
assert act in ['relu', 'sigmoid', 'tanh']
modules = []
# normalization flow
assert args.flow in ['maf', 'realnvp', 'glow']
if args.flow == 'glow':
mask = torch.arange(0, num_inputs) % 2
mask = mask.to(device).float()
print("Warning: Results for GLOW are not as good as for MAF yet.")
for _ in range(args.num_blocks):
modules += [
fnn.BatchNormFlow(num_inputs),
fnn.LUInvertibleMM(num_inputs),
fnn.CouplingLayer(
num_inputs, num_hidden, mask, num_cond_inputs,
s_act='tanh', t_act='relu')
]
mask = 1 - mask
elif args.flow == 'realnvp':
mask = torch.arange(0, num_inputs) % 2
mask = mask.to(device).float()
for _ in range(args.num_blocks):
modules += [
fnn.CouplingLayer(
num_inputs, num_hidden, mask, num_cond_inputs,
s_act='tanh', t_act='relu'),
fnn.BatchNormFlow(num_inputs)
]
mask = 1 - mask
elif args.flow == 'maf':
for _ in range(args.num_blocks):
modules += [
fnn.MADE(num_inputs, num_hidden, num_cond_inputs, act=act),
fnn.BatchNormFlow(num_inputs),
fnn.Reverse(num_inputs)
]
model = fnn.FlowSequential(*modules)
for module in model.modules():
if isinstance(module, nn.Linear):
nn.init.orthogonal_(module.weight)
if hasattr(module, 'bias') and module.bias is not None:
module.bias.data.fill_(0)
return model
| [
"zyziszy@foxmail.com"
] | zyziszy@foxmail.com |
f08aadcd28f65a78ae69e885e77760356c24e338 | 690ce90cfd4d0b21487c2dfe3e3bd00c03f6eae7 | /10K_folding.py | 4d0b6032ec71cd356131c3b2cd49c4c64a019987 | [] | no_license | youngyoung1021/modeling-of-hypertension-prediction | 8d8a7db0d6740aa2620311d58177dd38e689f67d | 85dadc76319f06dec52594611ccc653596d9eeea | refs/heads/master | 2020-05-01T14:15:14.749101 | 2019-11-06T05:47:51 | 2019-11-06T05:47:51 | 177,514,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | from keras.models import Sequential, load_model
from sklearn.preprocessing import LabelEncoder
from keras.layers import Dense
import numpy
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold
import sklearn.metrics
seed = 0
numpy.random.seed(seed)
tf.compat.v1.random.set_random_seed(seed)
dataset=numpy.loadtxt('C:/Users/YOUNG/PycharmProjects/HyperTension/INTEGRATED_I109_2_6YEAR_2years.csv',delimiter=",")
X=dataset[:,0:26]
Y_obj=dataset[:,26]
e = LabelEncoder()
e.fit(Y_obj)
Y=e.transform(Y_obj)
n_fold=10
skf=StratifiedKFold(n_splits=n_fold,shuffle=True,random_state=seed)
accuracy=[]
for train,test in skf.split(X,Y):
model=Sequential()
model.add(Dense(30,input_dim=26,activation='relu'))
model.add(Dense(15,activation='relu'))
model.add(Dense(8,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.fit(X[train],Y[train],epochs=20,batch_size=100)
k_accuracy="%.4f" % (model.evaluate(X[test],Y[test])[1])
accuracy.append(k_accuracy)
print("\n %.f fold accuracy:" % n_fold,accuracy)
| [
"noreply@github.com"
] | youngyoung1021.noreply@github.com |
16303e0043f9a61d8ee0dd7e4700dcbf71b98815 | 71b75560e6cc23b687c65c8be045817fadee6ac7 | /PRINT.py | 3cf6fc78d98cb206afb31889766f4df278a63f51 | [] | no_license | SHASHANK992/Python_Hackerrank_Challenges | 03b5b8411b60a8af6af0c0eaa119ccf8a76f92cd | adf01047b205c92bddb9cbdbb8fb9bc0dcb994e0 | refs/heads/master | 2021-01-10T15:32:33.231266 | 2017-02-15T08:16:37 | 2017-02-15T08:16:37 | 50,940,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | import sys;number = int(input());
for i in range(0,number): sys.stdout.write(str(i+1))
#PRINTING WITHOUT LEAVING LINES
| [
"SHASHANKRAINA@LIVE.IN"
] | SHASHANKRAINA@LIVE.IN |
f9fceeb22c9dd95e9c78285e0971b82781e9d17e | 750b5dbc15f9641bd890e17761fd3bc6a97c8893 | /stack/next-smaller-left.py | 5ffcdfaecd217a4f8fb35f227b0c2a29d07e4e4f | [] | no_license | dhruv-rajput/data-structures-and-algo | b2b8f7956788ab5ae755d43504fd4c8d7639560c | fa8225052dc0a91dbd2c99d1547905be2cb1a99e | refs/heads/master | 2023-06-14T09:54:04.964419 | 2021-05-21T12:20:49 | 2021-05-21T12:20:49 | 369,523,028 | 0 | 0 | null | 2021-07-04T11:51:46 | 2021-05-21T12:08:32 | Python | UTF-8 | Python | false | false | 616 | py | def ngr(arr,n):
ans=[]
stack=[]
i=0
while i<n:
if len(stack)==0:
ans.append(-1)
elif len(stack)>0 and stack[0]<arr[i]:
ans.append(stack[0])
elif len(stack)>0 and stack[0]>=arr[i]:
while len(stack)>0 and stack[0]>=arr[i]:
stack.pop(0)
if len(stack)==0:
ans.append(-1)
else:
ans.append(stack[0])
stack.insert(0,arr[i])
i+=1
print(*ans)
arr=[1,2,7,6,8,10,7]
n=len(arr)
ngr(arr,n)
| [
"dhruvguzi@gmail.com"
] | dhruvguzi@gmail.com |
478ae85747ae381b9bfd8ad559f504b43010caa5 | ca42966b33798739d94f13995b15e73df985a1e7 | /budget/urls_budgetlines.py | 20978d07c6eff762d92823cea88db0353dfeb652 | [] | no_license | sebriois/bcg_lab | acad7a7957f94f8fbb13488d6d3964dd73f8737f | c81f3754a22da7de6f0280ec6f349af1821ab511 | refs/heads/master | 2020-05-27T22:34:19.333012 | 2018-04-22T19:32:51 | 2018-04-22T19:32:51 | 990,080 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from django.urls import path
from budget.views_budgetlines import index, item, delete, export_to_xls
app_name = 'budget_line'
urlpatterns = [
path('<int:bl_id>/delete/', delete, name = "delete"),
path('<int:bl_id>/', item, name = "item"),
path('export-to-xls/', export_to_xls, name = "export"),
path('', index, name = "list")
]
| [
"sebriois@gmail.com"
] | sebriois@gmail.com |
929c4ad9a9243c92ced362f33c93efe04ccdf8d6 | 48bec0057f9946b240111cde7903792076fe408b | /chapter_12/urljpeg.py | ca3dda24839639d217784a33cee83bf7e43a9f47 | [] | no_license | oscar-dev19/Py4E | 9cdcb42457ec683dac0ebabb1bb5b5d2c00d2fad | 6e91caea4c362c42b733fa3131f35a07b54fb34e | refs/heads/master | 2022-07-07T05:26:20.673670 | 2022-06-21T15:13:51 | 2022-06-21T15:13:51 | 203,110,624 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | import socket
import time
HOST = 'data.pr4e.org'
PORT = 80
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect((HOST,PORT))
mysock.sendall(b'GET http://data.pr4e.org/cover3.jpg HTTP/1.0\r\n\r\n')
count = 0
picture = b""
while True:
data = mysock.recv(5120)
if len(data) < 1: break
# time.sleep(0.25)
count = count + len(data)
print(len(data), count)
picture += data
mysock.close()
# Look for the end of the header (2 CRLF)
pos = picture.find(b"\r\n\r\n")
print('Header length', pos)
print(picture[:pos].decode())
# Skip past the header and save the picture data.
picture = picture[pos+4:]
fhand = open("stuff.jpg", "wb")
fhand.write(picture)
fhand.close()
| [
"oscarl.developer@gmail.com"
] | oscarl.developer@gmail.com |
0dd5474f1ff9afa3aa926d01de7544a29100f831 | 411b49dbb217944d8e1a3f9446bdb8b8bc5a3475 | /TimeRanges.py | 48c86c5a350ba43c173172486e72ed8a25372078 | [] | no_license | Niyakiy/ec2-scheduler | d8e8ed6a5c459c73eedf0b0b9b9fb2b2bb537faa | a05cb8ae4c6eb3e863709dbd93b3675abda3f799 | refs/heads/master | 2021-01-17T08:12:47.040040 | 2016-06-10T07:40:56 | 2016-06-10T07:40:56 | 60,264,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
RANGES_LIST_REGEX = "^(([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]-([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9],)*" \
"(([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]-([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9])$"
class TimeRanges:
def __init__(self, raw_range_data):
self.ranges = []
self.merged_ranges = []
self.raw_data = raw_range_data
self.is_valid = self.validate_and_parse()
def __repr__(self):
return "TimeRanges: {}, \nMergedTimeRanges: {}".format(self.ranges, self.merged_ranges)
def __hhmm2minutes(self, hhmm):
return int(hhmm.split(':')[0]) * 60 + int(hhmm.split(':')[1])
def validate_and_parse(self):
"""
Function to parse, validate and megre time ranges
:return:
True in case of valid and merged ranges
"""
def overlaps(a, b):
return a[0] <= b[0] <= a[1]
def contains(a, b):
return min(a[0], b[0]) == a[0] and max(a[1], b[1]) == a[1]
def merge(a, b):
return [min(a[0], b[0]), max(a[1], b[1])]
if re.match(RANGES_LIST_REGEX, self.raw_data) is None:
return False
self.ranges = sorted([[self.__hhmm2minutes(i) for i in rng.split('-')] for rng in self.raw_data.split(',')],
key=lambda x: x[0])
# Validating and merging
for rng in self.ranges:
# check if stop hour is less than start
if rng[1] <= rng[0]:
return False
merged = False
for ind, mrng in enumerate(self.merged_ranges):
if contains(mrng, rng):
merged = True
break
if overlaps(mrng, rng):
self.merged_ranges[ind] = merge(mrng, rng)
merged = True
break
if rng not in self.merged_ranges and not merged:
self.merged_ranges.append(rng)
return True
| [
"eugene@zoomdata.com"
] | eugene@zoomdata.com |
69b9af48aec2b4c81aada800a7cc694fdb1cdb55 | adeaad20bb935aa05d605b0d4a3c12c80fdac5db | /015/015.py | 452f61182c6378f8029f124d12ba27e3848580c0 | [] | no_license | OKIDultO/Python-Practice-Projects | 9999271e2239adee15381602e0e49feab9cb6972 | 06e020898d878d0785e40bd175651ab87fb97c12 | refs/heads/ZL | 2021-01-21T08:15:13.515098 | 2017-07-02T09:58:16 | 2017-07-02T09:58:16 | 91,621,189 | 1 | 2 | null | 2017-07-02T09:54:08 | 2017-05-17T21:21:58 | Python | UTF-8 | Python | false | false | 314 | py | '''
题目:利用条件运算符的嵌套来完成此题:学习成绩>=90分的同学用A表示,60-89分之间的用B表示,60分以下的用C表示。
程序分析:(a>b)?a:b这是条件运算符的基本例子。
'''
x = int(input("请输入成绩:"))
print("A" if x>=90 else ("B" if x>=60 else "C"))
| [
"noreply@github.com"
] | OKIDultO.noreply@github.com |
69f2547fc6f6fa3c6d06c4987399a31f588ca942 | b53d3de52162f547cd7000ce597e3a25d0918d0e | /mysite/mysite/views.py | 4e358663a57c9c2768dd3b5e1f36ef60d9be749d | [] | no_license | gs3bopar/AnyMovie | 886f018540e35e5550d680ca19ef23321e17d2c3 | f14c0dcc13150189162111999ac2366ec65542e6 | refs/heads/main | 2023-03-04T04:09:31.666990 | 2021-02-14T18:56:28 | 2021-02-14T18:56:28 | 338,855,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
| [
"gurkaranboparai1120@gmail.com"
] | gurkaranboparai1120@gmail.com |
863f9513376a066d086285938ce7c5d94c37e3f3 | 8e0d5ead4fcfa5def33bef70def598e34feb58b5 | /orders/migrations/0006_auto_20200601_1844.py | d2d0f16b98e42faa26a5b3b60647f6861b5bd07d | [] | no_license | danytsfm/django-restaurant | bdcd2d2a125a4f06d1886de8046f9badb3dea605 | eccb09015a36f02c3ab6e382635cfeed83abf62e | refs/heads/django-restaurant | 2023-08-16T17:20:03.861083 | 2020-07-07T18:47:48 | 2020-07-07T18:47:48 | 269,191,103 | 2 | 1 | null | 2021-09-22T19:09:17 | 2020-06-03T20:46:29 | HTML | UTF-8 | Python | false | false | 404 | py | # Generated by Django 3.0.6 on 2020-06-01 22:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0005_auto_20200601_1838'),
]
operations = [
migrations.AlterField(
model_name='incart',
name='product_id',
field=models.IntegerField(verbose_name='product_id'),
),
]
| [
"danytsfm@hotmail.com"
] | danytsfm@hotmail.com |
206e069762fbda9f59cee91ad6cc3dbaa1d7dd5c | 9ea00270d634980441285609877ac6133719b173 | /fastestimator/dataset/cub200.py | 6d66fa1dcc6706148ef4634ec3a13f449e17ef0b | [
"Apache-2.0"
] | permissive | templeblock/fastestimator | aca2d3feddd99a1fe3286666818eff72a5597780 | a894b432dea3279c20f3a265a8ceea2882f295c3 | refs/heads/master | 2020-06-25T16:36:19.475709 | 2019-07-26T21:17:10 | 2019-07-26T21:17:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,607 | py | """Download Caltech-UCSD Birds 200 dataset.
http://www.vision.caltech.edu/visipedia/CUB-200.html
"""
import os
import tarfile
import tempfile
from glob import glob
import pandas as pd
import wget
def load_data(path=None):
"""Download the CUB200 data set to local storage. This will generate a cub200.csv file, which contains all the path information.
Args:
path (str, optional): The path to store the CUB200 data. Defaults to None, will save at `tempfile.gettempdir()`.
Raises:
FileNotFoundError: When the gernerated CSV file does not match with the extracted dataset.
"""
if path:
os.makedirs(path, exist_ok=True)
else:
path = os.path.join(tempfile.gettempdir(), 'FE_CUB200')
csv_path = os.path.join(path, 'cub200.csv')
if os.path.isfile(csv_path):
print('Found existing {}.'.format(csv_path))
df = pd.read_csv(csv_path)
found_images = df['image'].apply(lambda x: os.path.join(path, x)).apply(os.path.isfile).all()
found_annoation = df['annotation'].apply(lambda x: os.path.join(path, x)).apply(os.path.isfile).all()
if not (found_images and found_annoation):
print('There are missing files. Will download dataset again.')
else:
print('All files exist, using existing {}.'.format(csv_path))
return csv_path, path
url = {'image': 'http://www.vision.caltech.edu/visipedia-data/CUB-200/images.tgz',
'annotation': 'http://www.vision.caltech.edu/visipedia-data/CUB-200/annotations.tgz'}
img_path = os.path.join(path, 'images.tgz')
anno_path = os.path.join(path, 'annotations.tgz')
print("Downloading data to {} ...".format(path))
wget.download(url['image'], path)
wget.download(url['annotation'], path)
print('\nExtracting files ...')
with tarfile.open(img_path) as img_tar:
img_tar.extractall(path)
with tarfile.open(anno_path) as anno_tar:
anno_tar.extractall(path)
img_list = glob(os.path.join(path, 'images', '**', '*.jpg'))
df = pd.DataFrame(data={'image': img_list})
df['image'] = df['image'].apply(lambda x: os.path.relpath(x, path))
df['image'] = df['image'].apply(os.path.normpath)
df['annotation'] = df['image'].str.replace('images', 'annotations-mat').str.replace('jpg', 'mat')
if not (df['annotation'].apply(lambda x: os.path.join(path, x))).apply(os.path.exists).all():
raise FileNotFoundError
df.to_csv(os.path.join(path, 'cub200.csv'), index=False)
print('Data summary is saved at {}'.format(csv_path))
return csv_path, path
| [
"hsi-ming.chang@ge.com"
] | hsi-ming.chang@ge.com |
34ba0cec93b06f069771feb7d23bf30ecfde0cfa | 48fd347ecb41c73cc9f806913ebdc09007af82a1 | /1st_way.py | 8206c87c0ed426b23c6c0dff700e824667d40436 | [] | no_license | panosdimitrellos/Private-Keys-Gathering | 4ee69b0f186c9c4ef68f39c3788f3f6f99ca9e1a | 0e725cc3db28009232732c280dace52f162ff92b | refs/heads/main | 2023-05-22T22:43:52.679472 | 2021-06-08T17:26:05 | 2021-06-08T17:26:05 | 375,090,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | import subprocess
# Δημιουργούμε μια κενή λίστα
reposList = []
# Αριθμός repositories ως input
n = int(input("Enter number of repositories : "))
# Παίρνει τα repositories ως input και τα βάζει στην λίστα reposList
print("Enter the repositories :")
for i in range(0, n):
repo = input()
reposList.append(repo)
# Εκτυπώνει την λίστα reposList με τα στοιχεία της
print(reposList)
# Αυτή η συνάρητηση ελέγχει τα repositories με την βοήθεια του truffleHog και εκτυπώνει τα κλειδία
# Για πιο στοχευμένα αποτελέσματα πάνω στα API keys χρησιμοποιήσαμε --rules C:\rulesAPIkeys.json
def scan():
for x in reposList:
print("~~~~~~~~~~~~~~~~~~~~~~~ HERE ARE THE KEYS OF {name} REPOSITORY ~~~~~~~~~~~~~~~~~~~~~~~".format(name=x))
subprocess.run('truffleHog --rules C:\APIkeysrules.json --regex --entropy=False {name}'.format(name=x))
return
exec("scan()") | [
"noreply@github.com"
] | panosdimitrellos.noreply@github.com |
a1dd5fb6b4c7e23b824648c48bfdea56cd1a87d0 | 56e0eebb5f15cfd774795e0cd200d6f710447f3d | /Week 1 Homework/count_from_file.py | eeede3c11e52b9d960dc21fe039227fe3d84752b | [] | no_license | brandeddavid/Python-For-Research-Harvard | 7f44b7e11f47ba1117493318b6ce528d1d623a09 | 932b7e0cfc28d00b7354d99351b10dabbcfc56d1 | refs/heads/master | 2021-06-15T01:00:14.510630 | 2017-03-13T10:42:34 | 2017-03-13T10:42:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | import count_letters
file = open("sample.txt", "r")
for line in file:
print(line)
address_counter = count_letters.counter(line)
print(address_counter) | [
"david.mathenge98@gmail.com"
] | david.mathenge98@gmail.com |
8da4f2a72dbe102c8e230e0e81f9816bbd8d7319 | ddb15b80fd7823d16acb5aeb770089d82d7a15be | /confluence-maven-plugin/scripts/release.sh | 8d548c1347b56cf9b3c25b0c52ab2157f13c38bd | [] | no_license | lorenzo-deepcode/buildit-all | 2bdd24efd4aff9b993098f7d153cd27690648ed3 | 30a02c0bc1c8253706964de6141a0cc806568916 | refs/heads/master | 2020-04-09T18:46:03.818625 | 2018-12-05T13:33:09 | 2018-12-05T13:33:09 | 160,523,196 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | sh | #!/usr/bin/env python
import xml.etree.ElementTree as ET;
import os;
os.system('git remote set-url origin ' + os.environ['GITHUB_AUTH_REPO_URL'] + ' &> /dev/null')
os.system('git config --global push.default simple')
os.system('git config --global user.name travis')
os.system('git config --global user.email travis')
os.system('git checkout master &> /dev/null')
currentVersion = ET.parse(open('pom.xml')).getroot().find('{http://maven.apache.org/POM/4.0.0}version').text
# tag
print('Tagging current version: ' + currentVersion)
os.system('git tag -a ' + currentVersion + ' -m "[skip ci] Built version: ' + currentVersion + '" &> /dev/null')
os.system('git push --tags &> /dev/null')
# bintray upload
print('Uploading maven artifact to bintray')
bintrayVersionUrl = 'https://api.bintray.com/maven/buildit/maven/confluence-maven-plugin/;publish=1/com/wiprodigital/confluence-maven-plugin/' + currentVersion + '/confluence-maven-plugin-' + currentVersion
bintrayJarUrl = bintrayVersionUrl + '.jar'
bintrayPomUrl = bintrayVersionUrl + '.pom'
os.system('curl -u ' + os.environ['BINTRAY_USERNAME'] + ':' + os.environ['BINTRAY_PASSWORD'] + ' -T target/*.jar "' + bintrayJarUrl + '"')
os.system('curl -u ' + os.environ['BINTRAY_USERNAME'] + ':' + os.environ['BINTRAY_PASSWORD'] + ' -T pom.xml "' + bintrayPomUrl + '"')
# bump version
decomposedVersion = currentVersion.split('.')
majorVersion = decomposedVersion[0]
minorVersion = decomposedVersion[1]
patchVersion = decomposedVersion[2].split('-')[0]
nextVersion = majorVersion + '.' + minorVersion + '.' + str(int(patchVersion) + 1)
print('Bumping version to: ' + nextVersion)
os.system('mvn -DnewVersion=' + nextVersion + ' versions:set versions:commit')
os.system('git add pom.xml')
os.system('git commit -m "[skip ci] Bumping version to: ' + nextVersion + '"')
os.system('git push -q')
| [
"lorenzo@deepcode.ai"
] | lorenzo@deepcode.ai |
41ecff411015234eb4e7f0678953bdef3b42e9b8 | dc5a638b523b8abcc14341bdb8d36d2a1174548c | /info/views.py | 3e395f8ea26480b77bc6d91fe5a4a33fcaf39fde | [] | no_license | sboh1214/music-player | d088e82e0416b79a4f496a15133213417c5f6a21 | 7257b6563d93d7033ef29292c890074af45598ce | refs/heads/master | 2020-09-22T09:30:21.746883 | 2019-12-04T03:37:59 | 2019-12-04T03:37:59 | 225,138,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | from django.views.generic.base import TemplateView
from django.urls import reverse_lazy
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
class ArtistView(TemplateView):
template_name = 'info/artist.html'
context_object_name = 'artist_list'
model = artist
class ArtistListView(ListView):
model = artist
class AlbumView(TemplateView):
template_name = 'info/album.html'
context_object_name = 'album_list'
model = album
class AlbumListView(ListView):
model = album
class SongView(TemplateView):
template_name = 'info/song.html'
context_object_name = 'song_list'
model = song
class SongListView(ListView):
model = song
class SongCreateView(CreateView):
model = song
fields = ['song_name']
success_url = reverse_lazy('list')
template_name_suffix = '_create'
class SongUpdateView(UpdateView):
model = song
fields = ['song_name']
success_url = reverse_lazy('list')
template_name_suffix = '_update'
class SongDeleteView(DeleteView):
model = song
success_url = reverse_lazy('list')
template_name_suffix = '_delete'
| [
"sboh1214@gmail.com"
] | sboh1214@gmail.com |
7a7320b5992b782c5ed8ab4ab8c7f8acd0e0d62b | 9dfbd485b7353adc7f8ad604dd20b27082bd3d6f | /django/solocalc/dap/admin.py | 3e7f607faac6f71bb91d0e85bf5d9faa6c12f20e | [] | no_license | WalterGoedecke/ceres | 63bdfaf0fe8fd54a7d3011b62af7d72c2dd2fc61 | 5249da3a7f52cd3bbebcd29d1323dfcd0d40140b | refs/heads/master | 2021-01-10T14:36:06.059528 | 2016-01-21T19:57:13 | 2016-01-21T19:57:13 | 43,573,546 | 0 | 0 | null | 2015-10-06T20:17:36 | 2015-10-02T20:00:00 | Python | UTF-8 | Python | false | false | 259 | py | from django.contrib import admin
# Register your models here.
from mezzanine.core.admin import TabularDynamicInlineAdmin, SingletonAdmin
from mezzanine.pages.admin import PageAdmin
from dap.models import *
admin.site.register(Calculation, CalculationAdmin) | [
"goedecke@txcorp.com"
] | goedecke@txcorp.com |
f66c598f24bf258557c6b380eb6f1b14b1fa4d9a | 67a7c314fc99d9cd7a677fcb6bc2b6dfa20a9cff | /spambayes-1.0.4/utilities/dump_cdb.py | 49728d0958b67c26cdc52128cfdcf1d6f116874e | [
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | Xodarap/Eipi | 7ebbb9fd861fdb411c1e273ea5d2a088aa579930 | d30997a737912e38316c198531f7cb9c5693c313 | refs/heads/master | 2016-09-11T06:28:01.333832 | 2011-05-03T15:35:20 | 2011-05-03T15:35:20 | 1,367,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | #! /usr/bin/env python
RC_DIR = "~/.spambayes"
DB_FILE = RC_DIR + "/wordprobs.cdb"
import sys
import os
DB_FILE = os.path.expanduser(DB_FILE)
from spambayes.cdb import Cdb
def main():
if len(sys.argv) == 2:
db_file = sys.argv[1]
else:
db_file = os.path.expanduser(DB_FILE)
db = Cdb(open(db_file, 'rb'))
items = []
for k, v in db.iteritems():
items.append((float(v), k))
items.sort()
for v, k in items:
print k, v
if __name__ == "__main__":
main()
| [
"eipi@mybox.(none)"
] | eipi@mybox.(none) |
4cf0f265880518fe33637b3e56d940727ba2b525 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-ief/huaweicloudsdkief/v1/model/delete_app_version_response.py | 8ee4920d3d770b7d585ed8241983204f95e97477 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 2,447 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteAppVersionResponse(SdkResponse):
    """Response model for the DeleteAppVersion API call.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attributes whose values must be masked in to_dict() output.
    sensitive_list = []

    # Empty: this response declares no payload attributes of its own.
    openapi_types = {
    }

    attribute_map = {
    }

    def __init__(self):
        """DeleteAppVersionResponse

        The model defined in huaweicloud sdk
        """
        super(DeleteAppVersionResponse, self).__init__()
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared OpenAPI attributes (none for this model) and
        # serialize nested models recursively via their to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive; pass others through.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force a UTF-8 default encoding before dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteAppVersionResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
161b4d83f8b91bc3f5ff4f23e641076fa05b408f | a62dac56e6d1cddf748dbe3494fbfd09d4028bc0 | /cs_project.py | 76f98d99d861815a155d6706c09c3a20e765388c | [] | no_license | alogoc/ansible-cloudstack | 55a370f59e7e0854f5e03f9d0cab5f9dbd20d362 | 01929eccd1688584a17ef468b8624bbdfb571654 | refs/heads/master | 2020-06-20T14:57:09.584728 | 2016-11-20T22:06:15 | 2016-11-20T22:06:15 | 74,859,126 | 0 | 0 | null | 2016-11-27T00:49:37 | 2016-11-27T00:49:37 | null | UTF-8 | Python | false | false | 28,925 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_project
short_description: Manages projects on Apache CloudStack based clouds.
description:
- Create, update, suspend, activate and remove projects.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the project.
required: true
display_text:
description:
- Display text of the project.
- If not specified, C(name) will be used as C(display_text).
required: false
default: null
state:
description:
- State of the project.
required: false
default: 'present'
choices: [ 'present', 'absent', 'active', 'suspended' ]
domain:
description:
- Domain the project is related to.
required: false
default: null
account:
description:
- Account the project is related to.
required: false
default: null
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "If you want to delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
version_added: "2.2"
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a project
- local_action:
module: cs_project
name: web
tags:
- { key: admin, value: john }
- { key: foo, value: bar }
# Rename a project
- local_action:
module: cs_project
name: web
display_text: my web project
# Suspend an existing project
- local_action:
module: cs_project
name: web
state: suspended
# Activate an existing project
- local_action:
module: cs_project
name: web
state: active
# Remove a project
- local_action:
module: cs_project
name: web
state: absent
'''
RETURN = '''
---
id:
description: UUID of the project.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the project.
returned: success
type: string
sample: web project
display_text:
description: Display text of the project.
returned: success
type: string
sample: web project
state:
description: State of the project.
returned: success
type: string
sample: Active
domain:
description: Domain the project is related to.
returned: success
type: string
sample: example domain
account:
description: Account the project is related to.
returned: success
type: string
sample: example account
tags:
description: List of resource tags associated with the project.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
'''
# import cloudstack common
import os
import time
from ansible.module_utils.six import iteritems
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
CS_HYPERVISORS = [
"KVM", "kvm",
"VMware", "vmware",
"BareMetal", "baremetal",
"XenServer", "xenserver",
"LXC", "lxc",
"HyperV", "hyperv",
"UCS", "ucs",
"OVM", "ovm",
"Simulator", "simulator",
]
def cs_argument_spec():
    """Return the argument spec shared by all CloudStack modules.

    Keys are the common ``api_*`` module parameters; values are the
    AnsibleModule option definitions for each of them.
    """
    return {
        'api_key': {'default': None},
        'api_secret': {'default': None, 'no_log': True},
        'api_url': {'default': None},
        'api_http_method': {'default': 'get', 'choices': ['get', 'post']},
        'api_timeout': {'default': 10, 'type': 'int'},
        'api_region': {'default': 'cloudstack'},
    }
def cs_required_together():
    """Return the ``required_together`` rule: the three API credential
    parameters must be supplied as a group or not at all."""
    api_credentials = ['api_key', 'api_secret', 'api_url']
    return [api_credentials]
class AnsibleCloudStack(object):
def __init__(self, module):
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
self.result = {
'changed': False,
'diff' : {
'before': dict(),
'after': dict()
}
}
# Common returns, will be merged with self.returns
# search_for_key: replace_with_key
self.common_returns = {
'id': 'id',
'name': 'name',
'created': 'created',
'zonename': 'zone',
'state': 'state',
'project': 'project',
'account': 'account',
'domain': 'domain',
'displaytext': 'display_text',
'displayname': 'display_name',
'description': 'description',
}
# Init returns dict for use in subclasses
self.returns = {}
# these values will be casted to int
self.returns_to_int = {}
# these keys will be compared case sensitive in self.has_changed()
self.case_sensitive_keys = [
'id',
'displaytext',
'displayname',
'description',
]
self.module = module
self._connect()
# Helper for VPCs
self._vpc_networks_ids = None
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.network = None
self.vpc = None
self.zone = None
self.vm = None
self.vm_default_nic = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
def _connect(self):
api_key = self.module.params.get('api_key')
api_secret = self.module.params.get('api_secret')
api_url = self.module.params.get('api_url')
api_http_method = self.module.params.get('api_http_method')
api_timeout = self.module.params.get('api_timeout')
if api_key and api_secret and api_url:
self.cs = CloudStack(
endpoint=api_url,
key=api_key,
secret=api_secret,
timeout=api_timeout,
method=api_http_method
)
else:
api_region = self.module.params.get('api_region', 'cloudstack')
self.cs = CloudStack(**read_config(api_region))
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
# TODO: for backward compatibility only, remove if not used anymore
def _has_changed(self, want_dict, current_dict, only_keys=None):
return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
    def has_changed(self, want_dict, current_dict, only_keys=None):
        """Return True when any non-None value in want_dict differs from
        current_dict, recording each difference in self.result['diff'].

        Mutates current_dict in place: numeric values are cast to the type
        of the wanted value before comparison. Keys present in want_dict but
        absent from current_dict always count as changed.

        NOTE(review): `.iteritems()`, `long(...)` and the `.encode('utf-8')`
        string comparisons are Python-2-only; the file imports `iteritems`
        from ansible's six but does not use it here — confirm the target
        Python version before porting.
        """
        result = False
        for key, value in want_dict.iteritems():

            # Optionally limit by a list of keys
            if only_keys and key not in only_keys:
                continue

            # Skip None values
            if value is None:
                continue

            if key in current_dict:
                if isinstance(value, (int, float, long, complex)):
                    # ensure we compare the same type
                    if isinstance(value, int):
                        current_dict[key] = int(current_dict[key])
                    elif isinstance(value, float):
                        current_dict[key] = float(current_dict[key])
                    elif isinstance(value, long):
                        current_dict[key] = long(current_dict[key])
                    elif isinstance(value, complex):
                        current_dict[key] = complex(current_dict[key])

                    if value != current_dict[key]:
                        self.result['diff']['before'][key] = current_dict[key]
                        self.result['diff']['after'][key] = value
                        result = True
                else:
                    # Keys listed in case_sensitive_keys compare exactly.
                    if self.case_sensitive_keys and key in self.case_sensitive_keys:
                        if value != current_dict[key].encode('utf-8'):
                            self.result['diff']['before'][key] = current_dict[key].encode('utf-8')
                            self.result['diff']['after'][key] = value
                            result = True

                    # Test for diff in case insensitive way
                    elif value.lower() != current_dict[key].encode('utf-8').lower():
                        self.result['diff']['before'][key] = current_dict[key].encode('utf-8')
                        self.result['diff']['after'][key] = value
                        result = True
            else:
                # Wanted key not present at all: record as new.
                self.result['diff']['before'][key] = None
                self.result['diff']['after'][key] = value
                result = True
        return result
def _get_by_key(self, key=None, my_dict=None):
if my_dict is None:
my_dict = {}
if key:
if key in my_dict:
return my_dict[key]
self.module.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def get_vpc(self, key=None):
"""Return a VPC dictionary or the value of given key of."""
if self.vpc:
return self._get_by_key(key, self.vpc)
vpc = self.module.params.get('vpc')
if not vpc:
vpc = os.environ.get('CLOUDSTACK_VPC')
if not vpc:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
}
vpcs = self.cs.listVPCs(**args)
if not vpcs:
self.module.fail_json(msg="No VPCs available.")
for v in vpcs['vpc']:
if vpc in [v['displaytext'], v['name'], v['id']]:
self.vpc = v
return self._get_by_key(key, self.vpc)
self.module.fail_json(msg="VPC '%s' not found" % vpc)
def is_vm_in_vpc(self, vm):
for n in vm.get('nic'):
if n.get('isdefault', False):
return self.is_vpc_network(network_id=n['networkid'])
self.module.fail_json(msg="VM has no default nic")
def is_vpc_network(self, network_id):
"""Returns True if network is in VPC."""
# This is an efficient way to query a lot of networks at a time
if self._vpc_networks_ids is None:
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
}
vpcs = self.cs.listVPCs(**args)
self._vpc_networks_ids = []
if vpcs:
for vpc in vpcs['vpc']:
for n in vpc.get('network',[]):
self._vpc_networks_ids.append(n['id'])
return network_id in self._vpc_networks_ids
def get_network(self, key=None):
"""Return a network dictionary or the value of given key of."""
if self.network:
return self._get_by_key(key, self.network)
network = self.module.params.get('network')
if not network:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'vpcid': self.get_vpc(key='id')
}
networks = self.cs.listNetworks(**args)
if not networks:
self.module.fail_json(msg="No networks available.")
for n in networks['network']:
# ignore any VPC network if vpc param is not given
if 'vpcid' in n and not self.get_vpc(key='id'):
continue
if network in [n['displaytext'], n['name'], n['id']]:
self.network = n
return self._get_by_key(key, self.network)
self.module.fail_json(msg="Network '%s' not found" % network)
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
project = os.environ.get('CLOUDSTACK_PROJECT')
if not project:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id'] ]:
self.project = p
return self._get_by_key(key, self.project)
self.module.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.module.fail_json(msg="IP address param 'ip_address' is required")
args = {
'ipaddress': ip_address,
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'vpcid': self.get_vpc(key='id'),
}
ip_addresses = self.cs.listPublicIpAddresses(**args)
if not ip_addresses:
self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm_guest_ip(self):
vm_guest_ip = self.module.params.get('vm_guest_ip')
default_nic = self.get_vm_default_nic()
if not vm_guest_ip:
return default_nic['ipaddress']
for secondary_ip in default_nic['secondaryip']:
if vm_guest_ip == secondary_ip['ipaddress']:
return vm_guest_ip
self.module.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
def get_vm_default_nic(self):
if self.vm_default_nic:
return self.vm_default_nic
nics = self.cs.listNics(virtualmachineid=self.get_vm(key='id'))
if nics:
for n in nics['nic']:
if n['isdefault']:
self.vm_default_nic = n
return self.vm_default_nic
self.module.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
def get_vm(self, key=None):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.module.fail_json(msg="Virtual machine param 'vm' is required")
vpc_id = self.get_vpc(key='id')
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'vpcid': vpc_id,
}
vms = self.cs.listVirtualMachines(**args)
if vms:
for v in vms['virtualmachine']:
# Due the limitation of the API, there is no easy way (yet) to get only those VMs
# not belonging to a VPC.
if not vpc_id and self.is_vm_in_vpc(vm=v):
continue
if vm.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]:
self.vm = v
return self._get_by_key(key, self.vm)
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
if not zone:
zone = os.environ.get('CLOUDSTACK_ZONE')
zones = self.cs.listZones()
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone.lower() in [ z['name'].lower(), z['id'] ]:
self.zone = z
return self._get_by_key(key, self.zone)
self.module.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
return self._get_by_key(key, self.zone)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.cs.listOsTypes()
if os_types:
for o in os_types['ostype']:
if os_type in [ o['description'], o['id'] ]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.module.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.cs.listHypervisors()
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
account = os.environ.get('CLOUDSTACK_ACCOUNT')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.module.fail_json(msg="Account must be specified with Domain")
args = {}
args['name'] = account
args['domainid'] = self.get_domain(key='id')
args['listall'] = True
accounts = self.cs.listAccounts(**args)
if accounts:
self.account = accounts['account'][0]
return self._get_by_key(key, self.account)
self.module.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
domain = os.environ.get('CLOUDSTACK_DOMAIN')
if not domain:
return None
args = {}
args['listall'] = True
domains = self.cs.listDomains(**args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
self.domain = d
return self._get_by_key(key, self.domain)
self.module.fail_json(msg="Domain '%s' not found" % domain)
def get_tags(self, resource=None):
existing_tags = []
for tag in resource.get('tags',[]):
existing_tags.append({'key': tag['key'], 'value': tag['value']})
return existing_tags
def _process_tags(self, resource, resource_type, tags, operation="create"):
if tags:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags
if operation == "create":
response = self.cs.createTags(**args)
else:
response = self.cs.deleteTags(**args)
self.poll_job(response)
def _tags_that_should_exist_or_be_updated(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in tags if tag not in existing_tags]
def _tags_that_should_not_exist(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in existing_tags if tag not in tags]
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.module.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._process_tags(resource, resource_type, self._tags_that_should_not_exist(resource, tags), operation="delete")
self._process_tags(resource, resource_type, self._tags_that_should_exist_or_be_updated(resource, tags))
resource['tags'] = tags
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.cs.listCapabilities()
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
# TODO: for backward compatibility only, remove if not used anymore
def _poll_job(self, job=None, key=None):
return self.poll_job(job=job, key=key)
def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
    def get_result(self, resource):
        """Copy API *resource* fields into self.result using the
        common_returns/returns key mappings and return self.result.

        Keys in returns_to_int are additionally cast to int, and tags are
        normalized to plain {'key', 'value'} dicts.

        NOTE(review): ``.iteritems()`` is Python-2-only even though the file
        imports ``iteritems`` from six — confirm target Python before porting.
        """
        if resource:
            # Subclass-specific mappings override the common ones.
            returns = self.common_returns.copy()
            returns.update(self.returns)
            for search_key, return_key in returns.iteritems():
                if search_key in resource:
                    self.result[return_key] = resource[search_key]

            # Bad bad API does not always return int when it should.
            for search_key, return_key in self.returns_to_int.iteritems():
                if search_key in resource:
                    self.result[return_key] = int(resource[search_key])

            # Special handling for tags
            if 'tags' in resource:
                self.result['tags'] = []
                for tag in resource['tags']:
                    result_tag = {}
                    result_tag['key'] = tag['key']
                    result_tag['value'] = tag['value']
                    self.result['tags'].append(result_tag)
        return self.result
class AnsibleCloudStackProject(AnsibleCloudStack):
def get_project(self):
if not self.project:
project = self.module.params.get('name')
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id']]:
self.project = p
break
return self.project
def present_project(self):
project = self.get_project()
if not project:
project = self.create_project(project)
else:
project = self.update_project(project)
if project:
project = self.ensure_tags(resource=project, resource_type='project')
# refresh resource
self.project = project
return project
def update_project(self, project):
args = {}
args['id'] = project['id']
args['displaytext'] = self.get_or_fallback('display_text', 'name')
if self.has_changed(args, project):
self.result['changed'] = True
if not self.module.check_mode:
project = self.cs.updateProject(**args)
if 'errortext' in project:
self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
poll_async = self.module.params.get('poll_async')
if project and poll_async:
project = self.poll_job(project, 'project')
return project
def create_project(self, project):
self.result['changed'] = True
args = {}
args['name'] = self.module.params.get('name')
args['displaytext'] = self.get_or_fallback('display_text', 'name')
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
if not self.module.check_mode:
project = self.cs.createProject(**args)
if 'errortext' in project:
self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
poll_async = self.module.params.get('poll_async')
if project and poll_async:
project = self.poll_job(project, 'project')
return project
def state_project(self, state='active'):
project = self.present_project()
if project['state'].lower() != state:
self.result['changed'] = True
args = {}
args['id'] = project['id']
if not self.module.check_mode:
if state == 'suspended':
project = self.cs.suspendProject(**args)
else:
project = self.cs.activateProject(**args)
if 'errortext' in project:
self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
poll_async = self.module.params.get('poll_async')
if project and poll_async:
project = self.poll_job(project, 'project')
return project
def absent_project(self):
project = self.get_project()
if project:
self.result['changed'] = True
args = {}
args['id'] = project['id']
if not self.module.check_mode:
res = self.cs.deleteProject(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self.poll_job(res, 'project')
return project
def main():
    """Module entry point: build the AnsibleModule, dispatch on the
    requested state, and exit with the resulting project facts.

    Note that fail_json()/exit_json() terminate the process, so `result`
    is only read when no CloudStackException was raised.
    """
    argument_spec = cs_argument_spec()
    # Project-specific parameters on top of the shared api_* spec.
    argument_spec.update(dict(
        name = dict(required=True),
        display_text = dict(default=None),
        state = dict(choices=['present', 'absent', 'active', 'suspended' ], default='present'),
        domain = dict(default=None),
        account = dict(default=None),
        poll_async = dict(type='bool', default=True),
        tags=dict(type='list', aliases=['tag'], default=None),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    try:
        acs_project = AnsibleCloudStackProject(module)

        state = module.params.get('state')
        if state in ['absent']:
            project = acs_project.absent_project()

        elif state in ['active', 'suspended']:
            project = acs_project.state_project(state=state)

        else:
            # 'present': create or update the project.
            project = acs_project.present_project()

        result = acs_project.get_result(project)

    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| [
"mail@renemoser.net"
] | mail@renemoser.net |
95568c1dbe6674dfbb0b301bd32d464e7ca8e62a | 2b4cd124ad8e7e886be97b3ffc86bb077b084ecf | /dnadroid/dnadroid/analysis/MainConfiguration.py | ea86563c9a8e1d2f377c06e0c550c61737522185 | [] | no_license | GitDeng/dnadroid | 5fd3774c22b54ffabdf7758b04e6ad117a200f58 | 1da0ca098a0f332f7b2279f718a84cf9dd7575c1 | refs/heads/master | 2020-03-22T07:37:42.847329 | 2018-01-10T18:54:01 | 2018-01-10T18:54:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,671 | py | import os
import platform
#+---------------------------------------------------------------------------+
#| Local imports
#+---------------------------------------------------------------------------+
class MainConfiguration(object):
"""A container that stores all the parameters required to start an analysis
"""
def __init__(self, referenceAVD, androidSDKPath, androidTemporaryPath, androguardPath, typeOfDevice, deviceId, name):
self.androidSDKPath = androidSDKPath
self.androidTemporaryPath = androidTemporaryPath
if not os.path.exists(os.path.join(androidSDKPath, "tools/emulator")):
raise Exception("File {0} doesn't exist".format(os.path.join(androidSDKPath, "tools/emulator")))
self.emulatorPath = os.path.join(androidSDKPath, "tools/emulator")
self.adbPath = os.path.join(androidSDKPath, "platform-tools/adb")
self.androguardPath = androguardPath
self.typeOfDevice = typeOfDevice
self.name = name
# Differentiate real and emulated configurations
if self.typeOfDevice=='real':
self.deviceId=deviceId
self.referenceAVD = None
else:
self.referenceAVD = referenceAVD
self.virtualDevicePath = os.path.dirname(referenceAVD)
@staticmethod
def build(commandLineParser):
"""Builds and returns a MainConfiguration based on values
contained in the specified CommandLineParser"""
if commandLineParser is None:
raise Exception("Cannot build the main configuration if no commandLineParser is provided")
mainOptions = commandLineParser.mainOptions
if not 'device' in mainOptions.keys():
raise Exception("The device configuration entry is missing.")
typeOfDevice = mainOptions['device']
if not (typeOfDevice=='real' or typeOfDevice=='emulated'):
raise Exception("Type of device must be \"real\" or \"emulated\"")
deviceId=None
if typeOfDevice=='real':
if 'deviceid' in mainOptions.keys():
deviceId = mainOptions['deviceid']
else:
raise Exception("You must specify deviceid if you are using a real device")
refAVD = None
else:
if not 'referenceavd' in mainOptions.keys():
raise Exception("The referenceAVD configuration entry is missing.")
refAvdDirectory = mainOptions['referenceavd'] + '.avd/'
if not os.path.isdir(refAvdDirectory):
raise Exception("'{0}' is not a directory.".format(refAvdDirectory))
if not os.access(refAvdDirectory, os.R_OK):
raise Exception("You don't have read access to directory {0}.".format(refAvdDirectory))
refAVD = mainOptions['referenceavd']
if not 'name' in mainOptions.keys():
#print mainOptions.keys()
raise Exception("The name configuration entry is missing.")
xpName = mainOptions['name']
if not 'androidsdkpath' in mainOptions.keys():
raise Exception("The androidSDKPath configuration entry is missing.")
androidSDKPath = mainOptions['androidsdkpath']
if not os.path.isdir(androidSDKPath):
raise Exception("'{0}' is not an existing directory.".format(androidSDKPath))
if not os.access(androidSDKPath, os.R_OK):
raise Exception("You don't have read access to directory {0}.".format(androidSDKPath))
if not 'androidtemporarypath' in mainOptions.keys():
raise Exception("The androidTemporaryPath configuration entry is missing.")
androidTemporaryPath = mainOptions['androidtemporarypath']
if not os.path.isdir(androidTemporaryPath):
raise Exception("'{0}' is not an existing directory.".format(androidTemporaryPath))
if not os.access(androidTemporaryPath, os.W_OK):
raise Exception("You don't have write access to directory {0}.".format(androidTemporaryPath))
if not 'androguardpath' in mainOptions.keys():
raise Exception("The androguardPath configuration entry is missing.")
androguardPath = mainOptions['androguardpath']
if not os.path.isdir(androguardPath):
raise Exception("'{0}' is not an existing directory.".format(androguardPath))
if not os.access(androguardPath, os.R_OK):
raise Exception("You don't have read access to directory {0}.".format(androguardPath))
return MainConfiguration(refAVD, androidSDKPath, androidTemporaryPath, androguardPath, typeOfDevice, deviceId, xpName)
def __str__(self):
"""toString method"""
lines = [
"Main Conf of experiment \"{}\":".format(self.name),
"\t- SDK\t\t\t{0}".format(self.androidSDKPath),
"\t- Ref. AVD\t\t{0}".format(self.referenceAVD),
"\t- Androguard\t\t{0}".format(self.androguardPath),
"\t- Type of device\t{0}".format(self.typeOfDevice)
]
return '\n'.join(lines)
@property
def referenceAVD(self):
"""Path to the reference AVD we will use to clone
"""
return self.__referenceAVD
@referenceAVD.setter
def referenceAVD(self, referenceAVD):
if referenceAVD is None and self.__typeOfDevice=='emulated':
raise Exception("The reference AVD cannot be null.")
self.__referenceAVD = referenceAVD
@property
def name(self):
return self.__name
@name.setter
def name(self, name):
if name is None:
raise Exception("The name of XP cannot be null.")
self.__name = name
@property
def androidSDKPath(self):
"""Path to the android SDK
"""
return self.__androidSDKPath
@androidSDKPath.setter
def androidSDKPath(self, androidSDKPath):
if androidSDKPath is None:
raise Exception("The android SDK path cannot be null.")
self.__androidSDKPath = androidSDKPath
@property
def androidTemporaryPath(self):
"""Path to the android tempory directory
"""
return self.__androidTemporaryPath
@androidTemporaryPath.setter
def androidTemporaryPath(self, androidTemporaryPath):
if androidTemporaryPath is None:
raise Exception("The android temporary path cannot be null.")
self.__androidTemporaryPath = androidTemporaryPath
@property
def androidVirtualDevicePath(self):
"""Path to the android virtual device directory
"""
return self.__androidVirtualDevicePath
@androidVirtualDevicePath.setter
def androidVirtualDevicePath(self, androidVirtualDevicePath):
if androidVirtualDevicePath is None:
raise Exception("The android virtual device path cannot be null.")
self.__androidVirtualDevicePath = androidVirtualDevicePath
@property
def emulatorPath(self):
"""Path to the emulator binary in android sdk
"""
return self.__emulatorPath
@emulatorPath.setter
def emulatorPath(self, emulatorPath):
if emulatorPath is None:
raise Exception("The android emulator path cannot be null.")
self.__emulatorPath = emulatorPath
@property
def adbPath(self):
"""Path to the adb binary
"""
return self.__adbPath
@adbPath.setter
def adbPath(self, adbPath):
if adbPath is None:
raise Exception("The adb binary path cannot be null.")
self.__adbPath = adbPath
@property
def androguardPath(self):
"""Path to androguard framework
"""
return self.__androguardPath
@androguardPath.setter
def androguardPath(self, androguardPath):
if androguardPath is None:
raise Exception("The androguard path cannot be null.")
self.__androguardPath = androguardPath
@property
def typeOfDevice(self):
return self.__typeOfDevice
@typeOfDevice.setter
def typeOfDevice(self, typeOfDevice):
if typeOfDevice is None:
raise Exception("Type of device cannot be null.")
self.__typeOfDevice = typeOfDevice
@property
def deviceId(self):
return self.__deviceId
@deviceId.setter
def deviceId(self, deviceId):
if deviceId is None and self.__typeOfDevice=='real':
raise Exception("DeviceId cannot be null.")
self.__deviceId = deviceId
| [
"hamidreza.hanafi@gmail.com"
] | hamidreza.hanafi@gmail.com |
55895c6336c94136f15387270f97f6cfad42ce6f | 73a88b88c5ad7bc17ab833cbd7e3e271db7e4852 | /app/getres.py | a88da83fc3f8f9ed864d9819dd57c3479ee1c0d9 | [] | no_license | ask4ua/glDevOpsTask1 | 3adc51a76a38b0d1ec2c5f65a55331d43ff571de | 806aa193451eb61a76f6c4b82a4fd96dbb5ff36d | refs/heads/master | 2020-05-18T23:42:22.460641 | 2019-05-03T16:21:24 | 2019-05-03T16:21:24 | 184,718,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,863 | py | import psutil
import sys
class getRes():
    """Static helpers that print host resource metrics gathered via psutil."""

    @staticmethod
    def print_cpu():
        """Emit each CPU time counter as 'system.cpu.<name> <value>'."""
        counters = psutil.cpu_times()._asdict()
        for name in counters:
            print("system.cpu.{0} {1}".format(name, counters[name]))

    @staticmethod
    def print_mem():
        """Emit virtual-memory counters, then swap counters, one per line."""
        for prefix, metrics in (("virtual", psutil.virtual_memory()),
                                ("swap", psutil.swap_memory())):
            for name, value in metrics._asdict().items():
                print("{0} {1} {2}".format(prefix, name, value))
class help():
    """Usage/help text printers for the command-line interface.

    NOTE(review): the class name shadows the builtin ``help``; renaming
    would change the module's public interface, so it is left as-is.
    """
    @staticmethod
    def print_full_help():
        # Full usage text; sys.argv[0] is the invoked script path.
        HELP="The script " + str(sys.argv[0]) + " provides in response cpu or memory resource utilization.\n\
    \n\
    Usage: " + str(sys.argv[0]) + " [cpu|mem]\n\
    \n\
    Where:\n\
    - cpu - prints CPU metrics\n\
    - mem - prints RAM metrics\n"
        print(HELP)
    @staticmethod
    def print_param_limit_exc(parameters,param_limit=1):
        # Report that too many parameters were given, echoing what was received
        # and how many parameters are accepted (default: 1).
        print("To the script were provided such parameters: " + str(parameters))
        print("The script should accept only "+str(param_limit)+" parameter(s) to specify which metrics set to print:\n\
    - cpu - prints CPU metrics\n\
    - mem - prints RAM metrics\n")
if __name__=='__main__':
    # At most one positional parameter ('cpu' or 'mem') is accepted.
    PARAMETERS_LIMIT = 1
    if len(sys.argv) > PARAMETERS_LIMIT + 1:
        help.print_param_limit_exc(sys.argv[1:], PARAMETERS_LIMIT)
    elif len(sys.argv) == 1:
        # argv always contains the script name, so "no parameters" means
        # exactly one element; the original `== 0` test was unreachable.
        print("No input parameters identified")
        help.print_full_help()
    else:
        for param in sys.argv[1:]:
            if param.lower() == "cpu":
                getRes.print_cpu()
            elif param.lower() == "mem":
                getRes.print_mem()
            else:
                # Unknown option: explain and exit non-zero.
                print("Sorry, " + str(param) + " option not recognized.")
                help.print_full_help()
                sys.exit(1)
| [
"vovo@ask4ua.com"
] | vovo@ask4ua.com |
7a5de2df998be6981b02a44e33573307cd07a0f2 | 2342744291c27d4501f53accf48678e22dfcfec7 | /scripts/download_and_regrid/rwc_lai.py | a004fde480447d38f2c2287c5b527c8e85dbaab4 | [] | no_license | l5d1l5/tree_mortality_from_vod | dc77858ac5bb690bc2dbae9d0eaa45793c8b0a99 | 0111df5ad1b61db121470a76b1f1a403f453bd89 | refs/heads/master | 2022-12-30T23:59:41.616552 | 2020-10-20T01:30:52 | 2020-10-20T01:30:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,754 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 14 20:01:12 2019
@author: kkrao
"""
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta
from dirs import Dir_CA, remove_vod_affected, select_high_mort_grids, RWC
import numpy as np
import seaborn as sns
# np.nan is not a legal threshold on modern NumPy (set_printoptions raises
# "threshold must be numeric and non-NAN"); np.inf gives the intended
# "always print full arrays" behavior.
np.set_printoptions(threshold=np.inf)

# Gridded California data subset (HDF5 store of 0.25-degree grid tables).
store = pd.HDFStore(Dir_CA + '/data_subset_GC.h5')
df = store["rwc_lai"]
df2 = store["RWC_matched"]
####################plots
############ scatter plot rwc and rwc_lai
#fig, ax = plt.subplots(figsize = (3,3))
#ax.scatter(df2, df, s = 5, alpha = 0.5)
#ax.set_xlabel("RWC")
#ax.set_ylabel(r"$\frac{RWC}{LAI}$")
#R2 = df.stack().corr(df2.stack())
#ax.annotate("$R$ = %0.2f"%R2, xy=(0.1, 0.8), color = "darkred",\
#            xycoords='axes fraction',ha = "left")
######lai time series
#gridcell =333
#lai = store['LAI_025_grid_sum']
#lai.index+= timedelta(days=227)
#fig, ax = plt.subplots(figsize = (6,2))
#store["LAI_025_grid"].loc[:,gridcell].plot(legend = False, ax = ax)
#lai.loc[:,gridcell].plot(legend = False, ax = ax, marker = 'o',\
#         markersize = 6, linestyle = "", color = 'b')
#ax2 = ax.twin
#store["vod_pm_matched"].loc[:,gridcell].plot(legend = False, ax = ax2)
#ax.set_ylabel('LAI')
#ax.set_xlabel("")
#plt.show()
############# time series VOD/LAI
#vod = store["vod_pm_matched"]
#lai = store["LAI_025_grid"]
#lai = lai.resample('1d').asfreq().interpolate()
#vod = vod.resample('1d').asfreq().interpolate()
#df = vod/lai
#df.index.name = 'vod_lai'
#store[df.index.name] = df
#df = df.loc[(df.index.year>=2009)&(df.index.year<=2015),:]
#
#fig, ax = plt.subplots(figsize = (6,2))
#df.loc[:,333].plot(legend = False, ax = ax)
#ax.set_ylabel(r'$\frac{VOD}{LAI}$')
#ax.set_xlabel("")
#plt.show()
##rwc = vod/lai scatter plot with mort
####### mort and ndwi sum win scatter plot
# Scatter of fractional area of mortality (FAM) vs. RWC for 2009-2015.
mort = store['mortality_025_grid']
mort = mort.loc[(mort.index.year>=2009)&(mort.index.year<=2015),:]
rwc = store['rwc_vod_lai']
rwc = rwc.loc[(rwc.index.year>=2009)&(rwc.index.year<=2015),:]
fig, ax = plt.subplots(figsize = (3,3))
ax.scatter(rwc,mort, color = 'k', s = 6, alpha = 0.5)
ax.set_xlabel(r"RWC = $\frac{f(VOD)}{LAI}$")
ax.set_ylabel("FAM")
# Clear index names so stack() alignment is not disturbed by mismatched labels.
rwc.index.name = ""
mort.index.name = ""
R2 = rwc.stack().corr(mort.stack())
ax.annotate("$R$ = %0.2f"%R2, xy=(0.1, 0.8), color = "darkred",\
            xycoords='axes fraction',ha = "left")
###############
#df = store['vod_pm_matched']
#df = remove_vod_affected(df)
#df = select_high_mort_grids(df)
#start_month=7
#months_window=3
#df = df.loc[(df.index.year>=2009)]
#df=df.loc[(df.index.month>=start_month) & (df.index.month<start_month+months_window)]
#df = df.groupby(df.index.year).mean()
#print((df.std()/df.mean()).mean())
#
#df2 = store[ '/LAI_025_grid_sum']
#df2 = df2.loc[(df2.index.year>=2009)]
#df2 = select_high_mort_grids(df2)
#print((df2.std()/df2.mean()).mean())
######VOD/LAI composite time series with time shifting
vod = store['vod_pm_matched']
lai = store['/LAI_025_grid']
df = vod/lai
grid_cell = 333
alpha1 = 0.2
alpha2 = 0.5
color = '#BD2031'
sns.set_style('ticks')
fig, ax = plt.subplots(figsize = (6,2))
# Smoothed (rolling-mean) traces for one grid cell of VOD/LAI, VOD and LAI.
df.loc[:,grid_cell].rolling(60,min_periods=1).mean().plot(ax = ax, label = 'vod/lai')
vod.loc[:,grid_cell].rolling(30,min_periods=1).mean().plot(ax = ax, label = 'vod', color = 'k', alpha = alpha2)
lai.loc[:,grid_cell].rolling(30,min_periods=1).mean().plot(ax = ax, label = 'lai', color = 'g', alpha = alpha2)
# Shade Jan-Mar of every year (the reference window used by RWC()).
for year in np.unique(df.index.year):
    ax.axvspan(*pd.to_datetime(['%d-01-01'%year,'%d-03-30'%year]), alpha=alpha1, facecolor=color)
plt.legend()
##store the new df with RWC of synthetic time series
df.index.name = "rwc_vod_lai"
df = RWC(df, start_year = 2005, start_month=1)
store[df.index.name] = df ### rwc = anomaly(VOD/LAI)
# Close the store so the new table is flushed to disk.
store.close()
| [
"kkraoj@gmail.com"
] | kkraoj@gmail.com |
0f5b526ae48d78b8406f0db682c36822b46482bf | 5cb8f3b4db461de2e84084a6f0992955c2ee10ce | /txtrader/rtx.py | 3fb95d5dffab7f0b5662f99bf368b1c63a40fdfa | [
"MIT"
] | permissive | dadaxiaoxiaobaba/txTrader | 5bf134210da839adf7b6c15f1de365e4cd80facd | 9ad2afd37c81c2408632b3b5f7dfa4749586e6a6 | refs/heads/master | 2020-12-30T10:12:24.467623 | 2017-07-11T19:44:07 | 2017-07-11T19:44:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,031 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
rtx.py
------
RealTick TWS API interface module
Copyright (c) 2015 Reliance Systems Inc. <mkrueger@rstms.net>
Licensed under the MIT license. See LICENSE for details.
"""
import sys
import mx.DateTime
import types
import datetime
from uuid import uuid1
import json
import time
from config import Config
# Seconds an API_Callback may stay pending before expiring, used when the
# config supplies no CALLBACK_TIMEOUT.
DEFAULT_CALLBACK_TIMEOUT = 5
# allow disable of tick requests for testing
ENABLE_TICK_REQUESTS = True
# Seconds the gateway may remain disconnected before a forced shutdown.
DISCONNECT_SECONDS = 15
# Whether to stop the reactor after DISCONNECT_SECONDS of disconnection.
SHUTDOWN_ON_DISCONNECT = True
# Timeout (seconds) for the initial LIVEQUOTE lookup when adding a symbol.
ADD_SYMBOL_TIMEOUT = 5
from twisted.python import log
from twisted.internet.protocol import Factory, Protocol
from twisted.internet import reactor, defer
from twisted.internet.task import LoopingCall
from twisted.web import server
from socket import gethostname
class API_Symbol():
    """A watched market-data symbol: holds the latest quote/trade fields,
    tracks which clients subscribed to it, and subscribes to LIVEQUOTE
    updates through the API's connection pool."""

    def __init__(self, api, symbol, client_id, init_callback):
        self.api = api
        self.id = str(uuid1())
        self.output = api.output
        self.clients = set([client_id])
        self.callback = init_callback
        self.symbol = symbol
        self.fullname = ''
        self.bid = 0.0
        self.bid_size = 0
        self.ask = 0.0
        self.ask_size = 0
        self.last = 0.0
        self.size = 0
        self.volume = 0
        self.close = 0.0
        self.rawdata = ''
        self.api.symbols[symbol] = self
        self.last_quote = ''
        self.output('API_Symbol %s %s created for client %s' %
                    (self, symbol, client_id))
        self.output('Adding %s to watchlist' % self.symbol)
        # Initial one-shot LIVEQUOTE request; updates are subscribed in
        # init_handler once the symbol is confirmed valid.
        self.cxn = api.cxn_get('TA_SRV', 'LIVEQUOTE')
        cb = API_Callback(self.api, self.cxn.id, 'init_symbol', RTX_LocalCallback(
            self.api, self.init_handler), ADD_SYMBOL_TIMEOUT)
        self.cxn.request('LIVEQUOTE', '*', "DISP_NAME='%s'" % symbol, cb)

    def __str__(self):
        return 'API_Symbol(%s bid=%s bidsize=%d ask=%s asksize=%d last=%s size=%d volume=%d close=%s clients=%s' % (self.symbol, self.bid, self.bid_size, self.ask, self.ask_size, self.last, self.size, self.volume, self.close, self.clients)

    def __repr__(self):
        return str(self)

    def export(self):
        """Return the current quote/trade state as a plain dict."""
        return {
            'symbol': self.symbol,
            'bid': self.bid,
            'bidsize': self.bid_size,
            'ask': self.ask,
            'asksize': self.ask_size,
            'last': self.last,
            'size': self.size,
            'volume': self.volume,
            'close': self.close,
            'fullname': self.fullname
        }

    def add_client(self, client):
        """Register another client interested in this symbol."""
        self.output('API_Symbol %s %s adding client %s' %
                    (self, self.symbol, client))
        self.clients.add(client)

    def del_client(self, client):
        """Drop a client; logs when the last subscriber goes away."""
        self.output('API_Symbol %s %s deleting client %s' %
                    (self, self.symbol, client))
        self.clients.discard(client)
        if not self.clients:
            self.output('Removing %s from watchlist' % self.symbol)
            # TODO: stop live updates of market data from RTX

    def update_quote(self):
        """Broadcast the current quote to all clients if it changed."""
        quote = 'quote.%s:%s %d %s %d' % (
            self.symbol, self.bid, self.bid_size, self.ask, self.ask_size)
        if quote != self.last_quote:
            self.last_quote = quote
            self.api.WriteAllClients(quote)

    def update_trade(self):
        """Broadcast the latest trade (last price, size, cumulative volume)."""
        self.api.WriteAllClients('trade.%s:%s %d %d' % (
            self.symbol, self.last, self.size, self.volume))

    def init_handler(self, data):
        """Handle the initial LIVEQUOTE snapshot; on success, subscribe to
        live field updates for this symbol."""
        self.output('API_Symbol init: %s' % data)
        self.rawdata = data
        self.parse_fields(None, data[0])
        if self.api.symbol_init(self):
            self.cxn = self.api.cxn_get('TA_SRV', 'LIVEQUOTE')
            self.cxn.advise('LIVEQUOTE', 'TRDPRC_1,TRDVOL_1,BID,BIDSIZE,ASK,ASKSIZE,ACVOL_1',
                            "DISP_NAME='%s'" % self.symbol, self.parse_fields)

    def parse_fields(self, cxn, data):
        """Apply a LIVEQUOTE field dict to local state and broadcast any
        quote/trade changes."""
        trade_flag = False
        quote_flag = False
        if 'TRDPRC_1' in data.keys():
            self.last = float(data['TRDPRC_1'])
            trade_flag = True
        if 'TRDVOL_1' in data.keys():
            self.size = int(data['TRDVOL_1'])
            trade_flag = True
        if 'ACVOL_1' in data.keys():
            self.volume = int(data['ACVOL_1'])
            trade_flag = True
        if 'BID' in data.keys():
            self.bid = float(data['BID'])
            quote_flag = True
        if 'BIDSIZE' in data.keys():
            # was self.bidsize, which update_quote()/export() never read
            self.bid_size = int(data['BIDSIZE'])
            quote_flag = True
        if 'ASK' in data.keys():
            self.ask = float(data['ASK'])
            quote_flag = True
        if 'ASKSIZE' in data.keys():
            # was self.asksize, which update_quote()/export() never read
            self.ask_size = int(data['ASKSIZE'])
            quote_flag = True
        if 'COMPANY_NAME' in data.keys():
            self.fullname = data['COMPANY_NAME']
        if 'HST_CLOSE' in data.keys():
            self.close = float(data['HST_CLOSE'])
        if quote_flag:
            self.update_quote()
        if trade_flag:
            self.update_trade()

    def update_handler(self, data):
        """Store raw async update data (diagnostic path)."""
        self.output('API_Symbol update: %s' % data)
        self.rawdata = data
class API_Callback():
    """Pending-result holder: wraps a callable that will receive the result
    of an async gateway request, with an expiration deadline."""

    def __init__(self, api, id, label, callable, timeout=0):
        """callable is stored and used to return results later"""
        api.output('API_Callback.__init__() %s' % self)
        self.api = api
        self.id = id
        self.label = label
        if not timeout:
            timeout = api.callback_timeout
        # expire is an absolute epoch-second deadline
        self.expire = int(mx.DateTime.now()) + timeout
        self.callable = callable
        self.done = False
        self.data = None

    def complete(self, results):
        """complete callback by calling callable function with value of results"""
        self.api.output('API_Callback.complete() %s' % self)
        if not self.done:
            # A 'write' callable is a client transport: send a labeled JSON line.
            if self.callable.callback.__name__ == 'write':
                results = '%s.%s: %s\n' % (
                    self.api.channel, self.label, json.dumps(results))
            self.callable.callback(results)
            self.done = True
        else:
            self.api.output('error: callback: %s was already done!' % self)

    def check_expire(self):
        """Expire the callback if its deadline has passed, notifying clients."""
        self.api.output('API_Callback.check_expire() %s' % self)
        if not self.done:
            if int(mx.DateTime.now()) > self.expire:
                self.api.WriteAllClients(
                    'error: callback expired: %s' % repr((self.id, self.label)))
                if self.callable.callback.__name__ == 'write':
                    # was passing the tuple as a second positional argument,
                    # which raised TypeError instead of formatting the message
                    self.callable.callback(
                        '%s.error: %s callback expired\n' % (self.api.channel, self.label))
                else:
                    self.callable.callback(None)
                self.done = True
# set an update_handler to handle async updates
# set response pending,
class RTX_Connection():
    """One logical service/topic channel to the RTX gateway.

    Tracks the expected response/status handshake for the outstanding
    command and either queues an action until the connection is ready
    (on_connect_action) or sends it immediately."""

    def __init__(self, api, service, topic):
        self.api = api
        self.id = str(uuid1())
        self.service = service
        self.topic = topic
        self.key = '%s;%s' % (service, topic)
        self.api.cxn_register(self)
        self.api.gateway_send('connect %s %s' % (self.id, self.key))
        self.response_pending = 'CONNECTION PENDING'
        self.response_callback = None
        self.status_pending = 'OnInitAck'
        self.status_callback = None
        self.update_callback = None
        self.update_handler = None
        self.connected = False
        self.on_connect_action = None
        self.update_ready()

    def update_ready(self):
        """Recompute readiness (nothing pending) and, if ready, return this
        connection to the API's idle pool."""
        self.ready = not(
            self.response_pending or self.response_callback or self.status_pending or self.status_callback or self.update_callback or self.update_handler)
        self.api.output('update_ready() %s %s' % (self, self.ready))
        if self.ready:
            self.api.cxn_activate(self)

    def receive(self, type, data):
        """Dispatch an incoming gateway message to the matching handler."""
        if type == 'response':
            self.handle_response(data)
        elif type == 'status':
            self.handle_status(data)
        elif type == 'update':
            self.handle_update(data)
        else:
            self.api.error_handler(
                self.id, 'Message Type Unexpected: %s' % data)
        self.update_ready()

    def handle_response(self, data):
        """Match a command response against the expected one."""
        self.api.output('Connection Response: %s %s' % (self, data))
        if self.response_pending:
            if data == self.response_pending:
                self.response_pending = None
            else:
                # was error_handler(id, ...): builtin id instead of self.id
                self.api.error_handler(self.id, 'Response Error: %s' % data)
            if self.response_callback:
                self.response_callback.complete(data)
                self.response_callback = None
        else:
            self.api.error_handler(self.id, 'Response Unexpected: %s' % data)

    def handle_status(self, s):
        """Match a status message; on OnInitAck, mark connected and flush any
        queued on_connect_action."""
        self.api.output('Connection Status: %s %s' % (self, s))
        if self.status_pending and s['msg'] == self.status_pending:
            self.status_pending = None
            if s['status'] == '1':
                if s['msg'] == 'OnInitAck':
                    self.connected = True
                    if self.on_connect_action:
                        self.ready = True
                        cmd, arg, exr, cbr, exs, cbs, cbu, uhr = self.on_connect_action
                        self.api.output('Sending on_connect_action: %s' %
                                        repr(self.on_connect_action))
                        self.send(cmd, arg, exr, cbr, exs, cbs, cbu, uhr)
                        self.on_connect_action = None
            else:
                # was '%s' % data with data undefined here (NameError)
                self.api.error_handler(self.id, 'Status Error: %s' % s)
        else:
            self.api.error_handler(self.id, 'Status Unexpected: %s' % s)

    def handle_update(self, d):
        """Deliver an async update to the one-shot callback or the standing
        update handler."""
        self.api.output('Connection Update: %s %s' % (self, repr(d)))
        if self.update_callback:
            self.update_callback.complete(d)
            self.update_callback = None
        else:
            if self.update_handler:
                self.update_handler(self, d)
            else:
                self.api.error_handler(
                    self.id, 'Update Unexpected: %s' % repr(d))

    def query(self, cmd, table, what, where, ex_response, cb_response, ex_status, cb_status, cb_update, update_handler):
        """Format and send a table query; returns send()'s result (the
        original dropped it)."""
        return self.send(cmd, '%s;%s;%s' % (table, what, where), ex_response,
                         cb_response, ex_status, cb_status, cb_update, update_handler)

    def request(self, table, what, where, callback):
        """One-shot data request; callback receives the update."""
        return self.query('request', table, what, where, 'REQUEST_OK', None, None, None, callback, None)

    def advise(self, table, what, where, handler):
        """Subscribe to updates; handler is called for each one."""
        return self.query('advise', table, what, where, 'ADVISE_OK', None, 'OnOtherAck', None, None, handler)

    def adviserequest(self, table, what, where, callback, handler):
        """Combined request + subscription."""
        return self.query('adviserequest', table, what, where, 'ADVISEREQUEST_OK', None, 'OnOtherAck', None, callback, handler)

    def unadvise(self, table, what, where, callback):
        """Cancel a subscription."""
        return self.query('unadvise', table, what, where, 'UNADVISE_OK', None, 'OnOtherAck', callback, None, None)

    def poke(self, table, what, where, data, callback):
        """Write data into a table row."""
        return self.send('poke', '%s;%s;%s!%s' % (table, what, where, data), "POKE_OK", callback)

    def execute(self, command, callback):
        """Run a gateway command."""
        return self.send('execute', command, "EXECUTE_OK", callback)

    def terminate(self, code, callback):
        """Terminate the connection with the given code."""
        return self.send('terminate', str(code), "TERMINATE_OK", callback)

    def send(self, cmd, args, ex_response=None, cb_response=None, ex_status=None, cb_status=None, cb_update=None, update_handler=None):
        """Send a command if ready; otherwise queue it as the single
        on_connect_action. Returns False if an action is already queued."""
        if self.ready:
            ret = self.api.gateway_send('%s %s %s' % (cmd, self.id, args))
            self.response_pending = ex_response
            self.response_callback = cb_response
            self.status_pending = ex_status
            self.status_callback = cb_status
            self.update_callback = cb_update
            self.update_handler = update_handler
        else:
            if self.on_connect_action:
                self.api.error_handler(
                    self.id, 'Failure: on_connect_action already exists: %s' % repr(self.on_connect_action))
                ret = False
            else:
                self.api.output('storing on_connect_action...%s' % self)
                self.on_connect_action = (
                    cmd, args, ex_response, cb_response, ex_status, cb_status, cb_update, update_handler)
                ret = True
        return ret
class RTX_LocalCallback:
    """Adapter giving a plain handler function the same 'callable.callback'
    interface API_Callback expects."""

    def __init__(self, api, handler):
        self.api = api
        self.callback_handler = handler

    def callback(self, data):
        """Forward data to the wrapped handler, or report the missing handler.

        The original error branch referenced self.id, which this class never
        defines, so reporting the failure itself raised AttributeError; use
        the API's id instead."""
        if self.callback_handler:
            self.callback_handler(data)
        else:
            self.api.error_handler(
                self.api.id, 'Failure: undefined callback_handler for Connection: %s' % repr(self))
class RTX():
    """Top-level RealTick gateway API object.

    Manages the line-protocol link to the rtgw process, a pool of
    RTX_Connection channels, client TCP subscribers, watched symbols, and
    pending API_Callback results. Several order-related methods still
    contain TWS (Interactive Brokers) leftovers and reference names
    (Contract, Order, TWS_Callback, self.tws_conn) that are not defined in
    this module; they are flagged inline rather than silently changed."""

    def __init__(self):
        self.label = 'RTX Gateway'
        self.channel = 'rtx'
        self.id = 'RTX'
        self.output('RTX init')
        self.config = Config(self.channel)
        self.api_hostname = self.config.get('API_HOST')
        self.api_port = int(self.config.get('API_PORT'))
        self.username = self.config.get('USERNAME')
        self.password = self.config.get('PASSWORD')
        self.xmlrpc_port = int(self.config.get('XMLRPC_PORT'))
        self.tcp_port = int(self.config.get('TCP_PORT'))
        self.callback_timeout = int(self.config.get('CALLBACK_TIMEOUT'))
        if not self.callback_timeout:
            self.callback_timeout = DEFAULT_CALLBACK_TIMEOUT
        self.output('callback_timeout=%d' % self.callback_timeout)
        self.enable_ticker = bool(int(self.config.get('ENABLE_TICKER')))
        self.current_account = ''
        self.clients = set([])
        self.orders = {}
        self.pending_orders = {}
        self.openorder_callbacks = []
        self.accounts = None
        self.account_data = {}
        self.pending_account_data_requests = set([])
        self.positions = {}
        self.position_callbacks = []
        self.executions = {}
        self.execution_callbacks = []
        self.bardata_callbacks = []
        self.cancel_callbacks = []
        self.order_callbacks = []
        self.add_symbol_callbacks = []
        self.accountdata_callbacks = []
        self.set_account_callbacks = []
        self.account_request_callbacks = []
        self.account_request_pending = True
        self.timer_callbacks = []
        self.connected = False
        self.last_connection_status = ''
        self.connection_status = 'Initializing'
        self.LastError = -1
        self.next_order_id = -1
        self.last_minute = -1
        self.symbols = {}
        self.primary_exchange_map = {}
        self.gateway_sender = None
        self.active_cxn = {}
        self.idle_cxn = {}
        self.cx_time = None
        self.seconds_disconnected = 0
        # Drive EverySecond() once per second via the twisted reactor.
        self.repeater = LoopingCall(self.EverySecond)
        self.repeater.start(1)

    def cxn_register(self, cxn):
        """Track a connection by id so incoming messages can be routed."""
        self.output('cxn_register: %s' % repr(cxn))
        self.active_cxn[cxn.id] = cxn

    def cxn_activate(self, cxn):
        """Return an idle connection to the per-key reuse pool."""
        self.output('cxn_activate: %s' % repr(cxn))
        if not cxn.key in self.idle_cxn.keys():
            self.idle_cxn[cxn.key] = []
        self.idle_cxn[cxn.key].append(cxn)

    def cxn_get(self, service, topic):
        """Fetch an idle connection for service;topic, or open a new one."""
        key = '%s;%s' % (service, topic)
        if key in self.idle_cxn.keys() and len(self.idle_cxn[key]):
            cxn = self.idle_cxn[key].pop()
        else:
            cxn = RTX_Connection(self, service, topic)
        self.output('cxn_get() returning: %s' % repr(cxn))
        return cxn

    def gateway_connect(self, protocol):
        """Bind (or on None, unbind) the rtgw transport; returns the receive
        handler for the protocol to call."""
        if protocol:
            self.gateway_sender = protocol.sendLine
            self.gateway_transport = protocol.transport
        else:
            self.gateway_sender = None
            self.connected = False
            self.seconds_disconnected = 0
            self.account_request_pending = False
            self.accounts = None
            self.update_connection_status('Disconnected')
            self.WriteAllClients('error: API Disconnected')
        return self.gateway_receive

    def gateway_send(self, msg):
        """Send one protocol line to rtgw, if connected."""
        self.output('<-- %s' % repr(msg))
        if self.gateway_sender:
            self.gateway_sender('%s\n' % msg)

    def gateway_receive(self, msg):
        """handle input from rtgw """
        o = json.loads(msg)
        msg_type = o['type']
        msg_id = o['id']
        msg_data = o['data']
        self.output('--> %s %s %s' % (msg_type, msg_id, msg_data))
        if msg_type == 'system':
            self.handle_system_message(msg_id, msg_data)
        else:
            if msg_id in self.active_cxn.keys():
                c = self.active_cxn[msg_id].receive(msg_type, msg_data)
            else:
                self.error_handler(
                    self.id, 'Message Received on Unknown connection: %s' % repr(msg))
        return True

    def handle_system_message(self, id, data):
        """Process gateway-level messages (currently only 'startup')."""
        if data['msg'] == 'startup':
            self.connected = True
            self.accounts = None
            self.update_connection_status('Connected')
            self.output('Connected to %s' % data['item'])
            self.setup_local_queries()
        else:
            self.error_handler(
                self.id, 'Unknown system message: %s' % repr(data))

    def setup_local_queries(self):
        """Upon connection to rtgw, start automatic queries"""
        self.rtx_request('ACCOUNT_GATEWAY', 'ORDER', 'ACCOUNT', '*', '',
                         'accounts', self.handle_accounts, self.accountdata_callbacks, 5)

    def output(self, msg):
        """Write a diagnostic line to stderr (unbuffered)."""
        sys.stderr.write('%s\n' % msg)
        sys.stderr.flush()

    def open_client(self, client):
        """Register a client TCP connection."""
        self.clients.add(client)

    def close_client(self, client):
        """Unregister a client and detach it from all watched symbols,
        dropping symbols nobody watches anymore."""
        self.clients.discard(client)
        symbols = self.symbols.values()
        for ts in symbols:
            if client in ts.clients:
                ts.del_client(client)
                if not ts.clients:
                    del(self.symbols[ts.symbol])

    def set_primary_exchange(self, symbol, exchange):
        """Set (or, with a falsy exchange, clear) a symbol's primary exchange
        override; returns the full map."""
        if exchange:
            self.primary_exchange_map[symbol] = exchange
        else:
            del(self.primary_exchange_map[symbol])
        return self.primary_exchange_map

    def CheckPendingResults(self):
        """Expire and prune finished/timed-out callbacks in every list."""
        # check each callback list for timeouts
        for cblist in [self.timer_callbacks, self.position_callbacks, self.openorder_callbacks, self.execution_callbacks, self.bardata_callbacks, self.order_callbacks, self.cancel_callbacks, self.add_symbol_callbacks, self.accountdata_callbacks, self.set_account_callbacks, self.account_request_callbacks]:
            dlist = []
            for cb in cblist:
                cb.check_expire()
                if cb.done:
                    dlist.append(cb)
            # delete any callbacks that are done
            for cb in dlist:
                cblist.remove(cb)

    def handle_order_status(self, msg):
        """Record an order-status message and complete matching cancel/order
        callbacks. NOTE(review): TWS leftover — expects an IB-style msg
        object; likely unused on the RTX path."""
        mid = str(msg.orderId)
        pid = str(msg.permId)
        if not pid in self.orders.keys():
            self.orders[pid] = {}
        m = self.orders[pid]
        if 'status' in m.keys():
            oldstatus = json.dumps(m)
        else:
            oldstatus = ''
        m['permid'] = msg.permId
        m['id'] = msg.orderId
        m['status'] = msg.status
        m['filled'] = msg.filled
        m['remaining'] = msg.remaining
        m['avgfillprice'] = msg.avgFillPrice
        m['parentid'] = msg.parentId
        m['lastfillprice'] = msg.lastFillPrice
        m['clientid'] = msg.clientId
        m['whyheld'] = msg.whyHeld
        # callbacks are keyed by message-id, not permid
        for cb in self.cancel_callbacks:
            if cb.id == mid:
                self.output('cancel_callback[%s] completed' % mid)
                cb.complete(m)
        for cb in self.order_callbacks:
            if cb.id == mid:
                self.output('order_callback[%s] completed' % mid)
                cb.complete(m)
        if json.dumps(m) != oldstatus:
            self.send_order_status(m)

    def send_order_status(self, order):
        """Broadcast one order's state to all clients."""
        self.WriteAllClients('order.%s: %s' %
                             (order['permid'], json.dumps(order)))

    def handle_open_order(self, msg):
        """Record an open-order message and broadcast it if changed.
        NOTE(review): TWS leftover — expects an IB-style msg object."""
        mid = str(msg.orderId)
        pid = str(msg.order.m_permId)
        if not pid in self.orders.keys():
            self.orders[pid] = {}
        m = self.orders[pid]
        if 'status' in m.keys():
            oldstatus = json.dumps(m)
        else:
            oldstatus = ''
        m['id'] = msg.orderId
        m['symbol'] = msg.contract.m_symbol
        m['action'] = msg.order.m_action
        m['quantity'] = msg.order.m_totalQuantity
        m['account'] = msg.order.m_account
        m['clientid'] = msg.order.m_clientId
        m['permid'] = msg.order.m_permId
        m['price'] = msg.order.m_lmtPrice
        m['aux_price'] = msg.order.m_auxPrice
        m['type'] = msg.order.m_orderType
        m['status'] = msg.orderState.m_status
        m['warning'] = msg.orderState.m_warningText
        if oldstatus != json.dumps(m):
            self.WriteAllClients('open-order.%s: %s' %
                                 (m['permid'], json.dumps(m)))

    def handle_accounts(self, msg):
        """Build the sorted account list from an ACCOUNT query result and
        release any callbacks waiting on it."""
        if msg:
            self.accounts = []
            for row in msg:
                account = '%s.%s.%s.%s.%s' % (
                    row['BANK'], row['BRANCH'], row['CUSTOMER'], row['DEPOSIT'], row['ACCT_TYPE'])
                self.accounts.append(account)
            self.accounts.sort()
            self.account_request_pending = False
            self.WriteAllClients('accounts: %s' % json.dumps(self.accounts))
            for cb in self.account_request_callbacks:
                cb.complete(self.accounts)
            for cb in self.set_account_callbacks:
                # was self.outptut(...) and a bare process_set_account(...)
                # call, both of which raised NameError/AttributeError here
                self.output('set_account: processing deferred response.')
                self.process_set_account(cb.id, cb)
        else:
            self.error_handler(
                self.id, 'handle_accounts: unexpected null input')

    def set_account(self, account_name, callback):
        """Select the current account, deferring until the account list has
        been received if necessary."""
        cb = API_Callback(self, account_name, 'set-account', callback)
        if self.accounts:
            self.process_set_account(account_name, cb)
        elif self.account_request_pending:
            # was self.account_set_callbacks, an attribute that doesn't exist
            self.set_account_callbacks.append(cb)
        else:
            self.output(
                'Error: set_account; no data, but no account_request_pending')
            cb.complete(None)

    def process_set_account(self, account_name, callback):
        """Apply an account selection; completes the callback (or returns the
        result when no callback is given)."""
        if account_name in self.accounts:
            self.current_account = account_name
            msg = 'current account set to %s' % account_name
            self.output(msg)
            ret = True
        else:
            msg = 'account %s not found' % account_name
            self.output('Error: set_account(): %s' % msg)
            ret = False
        self.WriteAllClients('current-account: %s' % self.current_account)
        if callback:
            callback.complete(ret)
        else:
            return ret

    def rtx_request(self, service, topic, table, what, where, label, handler, cb_list, timeout=0):
        """Issue a one-shot request whose result is delivered to a local
        handler; the callback is tracked in cb_list for expiry."""
        cxn = self.cxn_get(service, topic)
        cb = API_Callback(self, cxn.id, label,
                          RTX_LocalCallback(self, handler), timeout)
        cxn.request(table, what, where, cb)
        cb_list.append(cb)

    def EverySecond(self):
        """Periodic housekeeping: heartbeat time query while connected,
        disconnect watchdog otherwise, and callback expiry."""
        if self.connected:
            if ENABLE_TICK_REQUESTS:
                self.rtx_request('TA_SRV', 'LIVEQUOTE', 'LIVEQUOTE', 'DISP_NAME,TRDTIM_1,TRD_DATE',
                                 "DISP_NAME='$TIME'", 'tick', self.handle_time, self.timer_callbacks, 5)
        else:
            self.seconds_disconnected += 1
            if self.seconds_disconnected > DISCONNECT_SECONDS:
                self.output(
                    'Realtick Gateway is disconnected; forcing shutdown')
                if SHUTDOWN_ON_DISCONNECT:
                    reactor.stop()
        self.CheckPendingResults()

    def WriteAllClients(self, msg):
        """Broadcast a channel-prefixed line to every connected client."""
        self.output('WriteAllClients: %s.%s' % (self.channel, msg))
        msg = str('%s.%s\n' % (self.channel, msg))
        for c in self.clients:
            c.transport.write(msg)

    def error_handler(self, id, msg):
        """report error messages"""
        self.output('ERROR: %s %s' % (id, msg))
        self.WriteAllClients('error: %s %s' % (id, msg))

    def handle_time(self, rows):
        """Broadcast the market time once per minute from the $TIME query."""
        self.output('handle_time: %s' % json.dumps(rows))
        if rows:
            hour, minute = [int(i)
                            for i in rows[0]['TRDTIM_1'].split(':')[0:2]]
            if minute != self.last_minute:
                self.last_minute = minute
                self.WriteAllClients('time: %s %02d:%02d:00' %
                                     (rows[0]['TRD_DATE'], hour, minute))
        else:
            # was called with a single argument, but error_handler takes (id, msg)
            self.error_handler(self.id, 'handle_time: unexpected null input')

    def create_contract(self, symbol, sec_type, exch, prim_exch, curr):
        """Create a Contract object defining what will
        be purchased, at which exchange and in which currency.

        symbol - The ticker symbol for the contract
        sec_type - The security type for the contract ('STK' is 'stock')
        exch - The exchange to carry out the contract on
        prim_exch - The primary exchange to carry out the contract on
        curr - The currency in which to purchase the contract

        In cases where SMART exchange results in ambiguity SYMBOL:PRIMARY_EXCHANGE can be passed.

        NOTE(review): TWS leftover — Contract is not defined in this module."""
        contract = Contract()
        contract.m_symbol = symbol
        contract.m_secType = sec_type
        contract.m_exchange = exch
        if symbol in self.primary_exchange_map.keys():
            contract.m_primaryExch = self.primary_exchange_map[symbol]
        else:
            contract.m_primaryExch = prim_exch
        contract.m_currency = curr
        return contract

    def create_order(self, order_type, quantity, action):
        """Create an Order object (Market/Limit) to go long/short.

        order_type - 'MKT', 'LMT' for Market or Limit orders
        quantity - Integral number of assets to order
        action - 'BUY' or 'SELL'

        NOTE(review): TWS leftover — Order is not defined in this module."""
        order = Order()
        order.m_orderType = order_type
        order.m_totalQuantity = quantity
        order.m_action = action
        order.m_account = self.current_account
        return order

    def connect(self):
        """Mark the connection attempt; the actual link is established by the
        rtgw protocol calling gateway_connect()."""
        self.update_connection_status('Connecting')
        self.output('Awaiting startup response from RTX gateway at %s:%d...' % (
            self.api_hostname, self.api_port))

    def market_order(self, symbol, quantity, callback):
        """Submit a market order."""
        return self.submit_order('market', 0, 0, symbol, int(quantity), callback)

    def limit_order(self, symbol, limit_price, quantity, callback):
        """Submit a limit order."""
        return self.submit_order('limit', float(limit_price), 0, symbol, int(quantity), callback)

    def stop_order(self, symbol, stop_price, quantity, callback):
        """Submit a stop order."""
        return self.submit_order('stop', 0, float(stop_price), symbol, int(quantity), callback)

    def stoplimit_order(self, symbol, stop_price, limit_price, quantity, callback):
        """Submit a stop-limit order."""
        return self.submit_order('stoplimit', float(limit_price), float(stop_price), symbol, int(quantity), callback)

    def submit_order(self, order_type, price, stop_price, symbol, quantity, callback):
        """Order submission is not yet implemented for the RTX gateway."""
        self.output('ERROR: submit_order unimplemented')

    def cancel_order(self, id, callback):
        """Cancel an order by id. NOTE(review): TWS leftover — TWS_Callback
        and self.tws_conn are not defined in this module."""
        self.output('ERROR: cancel_order unimplemented')
        self.output('cancel_order%s' % repr((id)))
        mid = str(id)
        tcb = TWS_Callback(self, mid, 'cancel_order', callback)
        order = self.find_order_with_id(mid)
        if order:
            if order['status'] == 'Cancelled':
                tcb.complete(
                    {'status': 'Error', 'errorMsg': 'Already cancelled.', 'id': id})
            else:
                resp = self.tws_conn.cancelOrder(mid)
                self.output('cancelOrder(%s) returned %s' %
                            (repr(mid), repr(resp)))
                self.cancel_callbacks.append(tcb)
        else:
            tcb.complete(
                {'status': 'Error', 'errorMsg': 'Order not found', 'id': mid})

    def symbol_enable(self, symbol, client, callback):
        """Start (or join) a symbol watch for a client; callback completes
        once the symbol is confirmed."""
        self.output('symbol_enable(%s,%s,%s)' % (symbol, client, callback))
        if not symbol in self.symbols.keys():
            cb = API_Callback(self, symbol, 'add-symbol', callback)
            symbol = API_Symbol(self, symbol, client, cb)
            self.add_symbol_callbacks.append(cb)
        else:
            self.symbols[symbol].add_client(client)
            API_Callback(self, 0, 'add-symbol', callback).complete(True)
        self.output('symbol_enable: symbols=%s' % repr(self.symbols))

    def symbol_init(self, symbol):
        """Validate an API_Symbol's initial snapshot; tear it down on error.
        Returns True when the symbol is valid."""
        ret = not 'SYMBOL_ERROR' in symbol.rawdata[0].keys()
        if not ret:
            self.symbol_disable(symbol.symbol, list(symbol.clients)[0])
        symbol.callback.complete(ret)
        return ret

    def symbol_disable(self, symbol, client):
        """Detach a client from a symbol, dropping the watch when the last
        client leaves; returns whether the symbol was known."""
        self.output('symbol_disable(%s,%s)' % (symbol, client))
        self.output('self.symbols=%s' % repr(self.symbols))
        if symbol in self.symbols.keys():
            ts = self.symbols[symbol]
            ts.del_client(client)
            if not ts.clients:
                del(self.symbols[symbol])
            self.output('ret True: self.symbols=%s' % repr(self.symbols))
            return True
        self.output('ret False: self.symbols=%s' % repr(self.symbols))
        # explicit False (the original fell off the end, returning None)
        return False

    def update_connection_status(self, status):
        """Record the connection status and broadcast changes."""
        self.connection_status = status
        if status != self.last_connection_status:
            self.last_connection_status = status
            self.WriteAllClients('connection-status-changed: %s' % status)

    def request_accounts(self, callback):
        """Deliver the account list, deferring until it has been received."""
        cb = API_Callback(self, 0, 'request-accounts', callback)
        if self.accounts:
            cb.complete(self.accounts)
        elif self.account_request_pending:
            self.account_request_callbacks.append(cb)
        else:
            self.output(
                'Error: request_accounts; no data, but no account_request_pending')
            cb.complete(None)

    def request_positions(self, callback):
        """Query open positions; returns the connection id."""
        cxn = self.cxn_get('ACCOUNT_GATEWAY', 'ORDER')
        cb = API_Callback(self, 0, 'positions', callback)
        cxn.request('POSITION', '*', '', cb)
        self.position_callbacks.append(cb)
        return cxn.id

    def request_orders(self, callback):
        """Query all orders; returns the connection id."""
        cxn = self.cxn_get('ACCOUNT_GATEWAY', 'ORDER')
        cb = API_Callback(self, 0, 'orders', callback)
        cxn.request('ORDERS', '*', '', cb)
        self.openorder_callbacks.append(cb)
        return cxn.id

    def request_executions(self, callback):
        """Query completed exchange trade orders; returns the connection id."""
        cxn = self.cxn_get('ACCOUNT_GATEWAY', 'ORDER')
        cb = API_Callback(self, 0, 'executions', callback)
        cxn.request('ORDERS', '*',
                    "CURRENT_STATUS='COMPLETED',TYPE='ExchangeTradeOrder'", cb)
        self.execution_callbacks.append(cb)
        return cxn.id

    def request_account_data(self, account, fields, callback):
        """Query account (DEPOSIT) data; returns the connection id.
        NOTE(review): the account/fields arguments are not yet applied to
        the query."""
        cxn = self.cxn_get('ACCOUNT_GATEWAY', 'ORDER')
        cb = API_Callback(self, 0, 'account_data', callback)
        cxn.request('DEPOSIT', '*', '', cb)
        self.accountdata_callbacks.append(cb)
        return cxn.id

    def request_global_cancel(self):
        """NOTE(review): TWS leftover — self.tws_conn is not defined here."""
        self.tws_conn.reqGlobalCancel()

    def query_bars(self, symbol, period, bar_start, bar_end, callback):
        """Request historical bar data. NOTE(review): TWS leftover — relies
        on self.next_id, TWS_Callback and self.tws_conn, none of which are
        defined in this module; the `if 1 == 1:` / `if 1 == 2:` scaffolding
        is preserved as found."""
        id = self.next_id()
        self.output('bardata request id=%s' % id)
        # 30 second timeout for bar data
        cb = TWS_Callback(self, id, 'bardata', callback, 30)
        contract = self.create_contract(symbol, 'STK', 'SMART', 'SMART', 'USD')
        if type(bar_start) != types.IntType:
            mxd = mx.DateTime.ISO.ParseDateTime(bar_start)
            bar_start = datetime.datetime(
                mxd.year, mxd.month, mxd.day, mxd.hour, mxd.minute, int(mxd.second))
        if type(bar_end) != types.IntType:
            mxd = mx.DateTime.ISO.ParseDateTime(bar_end)
            bar_end = datetime.datetime(
                mxd.year, mxd.month, mxd.day, mxd.hour, mxd.minute, int(mxd.second))
        # try:
        if 1 == 1:
            endDateTime = bar_end.strftime('%Y%m%d %H:%M:%S')
            durationStr = '%s S' % (bar_end - bar_start).seconds
            barSizeSetting = {'1': '1 min', '5': '5 mins'}[
                str(period)]  # legal period values are '1' and '5'
            whatToShow = 'TRADES'
            useRTH = 0
            formatDate = 1
            self.bardata_callbacks.append(cb)
            self.output('edt:%s ds:%s bss:%s' %
                        (endDateTime, durationStr, barSizeSetting))
            self.tws_conn.reqHistoricalData(
                id, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate)
        # except:
        if 1 == 2:
            cb.complete(['Error', 'query_bars(%s) failed!' % repr(
                (bar_symbol, bar_period, bar_start, bar_end)), 'Count: 0'])

    def handle_historical_data(self, msg):
        """Accumulate historical bar rows into the matching bardata callback,
        completing it on the 'finished' sentinel."""
        for cb in self.bardata_callbacks:
            if cb.id == msg.reqId:
                if not cb.data:
                    cb.data = []
                if msg.date.startswith('finished'):
                    cb.complete(['OK', cb.data])
                else:
                    cb.data.append(dict(msg.items()))
        # self.output('historical_data: %s' % msg) #repr((id, start_date, bar_open, bar_high, bar_low, bar_close, bar_volume, count, WAP, hasGaps)))

    def query_connection_status(self):
        """Return the current connection status string."""
        return self.connection_status
| [
"mkrueger@rstms.net"
] | mkrueger@rstms.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.