blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ffb17e90ec91c824a02892c3dda985a8848bee6b | 07034c6fb49f7c8fc26ff9752bc4672551985023 | /PumpData/table_to_excel.py | be522bb1e6b4274a77dfcf9cb314673c36583172 | [] | no_license | wolfhawkld/Python-Learning-Code | 1ce78e3c0b0ffbf56ef5e7b0c665cf6aa1bbdcb9 | 3cf769a6c5b74158bcad66f93fef0c400fca5136 | refs/heads/master | 2023-08-10T14:40:08.440840 | 2023-08-01T06:54:54 | 2023-08-01T06:54:54 | 203,334,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | '''目前仅支持SQL Server脚本语法'''
import pump_to_excel
# Global Params Start.
SCRIPT_FILE = 'CDM_DEV_20210222.sql'  # SQL script to scan for table definitions
# PUMP_RE = '.*?PostAPI.*?webApi.*?' #SFDC_API_Post
PUMP_RE = 'CREATE TABLE'  # pattern used to locate table definitions in the script
# PUMP_RE = '.*?GetAPI.*?webApi.*?' #SFDC_API_Get
ROOT_PATH = 'C:\\Users\\M293906\\Documents\\Project\\HK_CDM'  # folder containing the script
EXCEL_FILE_NAME = u'CDM_ER_Schema.xls'  # name of the generated workbook
# Global Params End.
# Read the tables from the local SQL script file
def read_script(file):
    """Scan a local SQL script line by line, collecting regex matches.

    Returns 0 on completion.
    NOTE(review): several names used here (`codecs`, `EXCEL_SHEET_LIST`,
    `find_re`, `pump_re`, `ret_list`) are not defined in this chunk —
    presumably imported/defined elsewhere in the project; confirm before reuse.
    """
    file_obj = codecs.open(file, 'r', 'utf-8')
    EXCEL_SHEET_LIST.append(cache_object())
    i = 1
    while True:
        line = file_obj.readline() # read a single line of content
        if not line: # stop once nothing more can be read (EOF)
            break
        line_match_list = find_re(line, i, file, pump_re)
        i += 1
        if line_match_list is not None:
            ret_list.extend(line_match_list)
    # print(ret_list)
    file_obj.close()
    return 0
# Parse each table's columns and cache them in an object
def cache_object():
    """Create and return a fresh, empty list used to cache one table's columns."""
    return []
# Write the cached tables and their columns out as Excel sheets and columns
def save_excel():
    """Placeholder: write the cached tables/columns out as Excel sheets.

    Not implemented yet; always returns 0.
    """
    return 0
def main():
    """Extract every table definition from the SQL script and write one
    Excel sheet per table via the pump_to_excel helper module."""
    table_list, table_cols_list = pump_to_excel.get_all_tables(ROOT_PATH + '\\'+ SCRIPT_FILE, PUMP_RE)
    print(len(table_list))
    print(len(table_cols_list))
    dict_1 = {}
    # zip walks both lists in lockstep: table name -> its column list
    """ zip打包用法,同时遍历两个list """
    for tab, cols in zip(table_list, table_cols_list):
        dict_1[tab] = cols
    for (key,value) in dict_1.items():
        #print(key+':'+value)
        pump_to_excel.create_excel_sheet(key, value)
        # pump_to_excel.append_excel_data(ret_list, cols, sheet)
    pump_to_excel.save_excel_file(EXCEL_FILE_NAME)
# for file in file_list:
# ret_list = pump_to_excel.pump_file(file, PUMP_RE)
# if ret_list is not None:
# pump_to_excel.append_excel_data(ret_list, EXCEL_COLUMNS, sheet)
# pump_to_excel.save_excel_file(EXCEL_FILE_NAME)
if __name__ == '__main__':
main() | [
"m293906@one.merckgroup.com"
] | m293906@one.merckgroup.com |
07e77c32a21bb14eb73d60a292be401093309297 | 3a693a9e4766d63a2af07339aeb94e4823857505 | /APL/A6B/Report Files/code6.py | d734735e81c2681159e49f17d45715fce1f5b7df | [] | no_license | DevaO7/Applied-Programming-Assignments | 0f53320243cda45dacc58cac0be79a6407ee36e9 | 653bfea5a43d021ba187925b44a86467538b9748 | refs/heads/master | 2023-07-10T11:14:36.893381 | 2021-09-06T10:32:29 | 2021-09-06T10:32:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | def outputPlotter(H,u,t,title):
t,y,svec = sp.lsim(H,u,t)
plt.plot(t,y)
plt.title(title)
plt.xlabel('Time')
plt.ylabel('Output')
plt.show()
#Q6
# Build the two-tone input signal over a long time window.
# NOTE(review): `arange` and `cos` are not imported in this chunk — presumably
# brought in by a star import (numpy/pylab) elsewhere in the file; confirm.
t = arange(0,3e-2,1e-7) #Large Time Scale
u = cos((1e3)*t)-cos((1e6)*t)
outputPlotter(H,u,t,'Output plot (Large time scale)') | [
"ee19b018@smail.iitm.ac.in"
] | ee19b018@smail.iitm.ac.in |
814cd83774c4d087f409c51956e170a690fceb02 | 646e2932ee4da82434f0a7c4c3baba85bcd329a2 | /MestaDB/mestadb/content/management/commands/fetch_mail.py | a79d3712f61ddec15a83626fb8e1c941433a8aa4 | [] | no_license | sizzlelab/Arki | 182e71d662198dc34e03f896f8fdc116e1a9bcea | 329998985f8c15663afe47719c385e7b57bc7171 | refs/heads/master | 2021-01-19T05:33:39.484302 | 2012-05-15T06:20:05 | 2012-05-15T06:20:05 | 1,278,500 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,235 | py | # -*- coding: utf-8 -*-
import imaplib
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand
from content.models import Mail
settings.DEBUG = False
import logging
log = logging.getLogger('fetch_mail')
def imap_connect(host):
    """Open an SSL IMAP connection to *host*.

    Returns an imaplib.IMAP4_SSL instance, or None when the connection
    cannot be established (the failure is logged instead of raised).
    """
    try:
        return imaplib.IMAP4_SSL(host = host)
    except Exception:
        # Fix: a bare `except:` would also swallow SystemExit and
        # KeyboardInterrupt; catching Exception keeps the best-effort
        # behaviour without hiding those.
        log.error("Failed to open IMAP4_SSL-connection to %s" % (host))
        return None
        # raise
def imap_disconnect(M):
    # Close the currently selected mailbox, then end the IMAP session.
    M.close()
    M.logout()
def fetch_message(M, msgnum, delete_msg = True):
    """Download message *msgnum* over IMAP connection *M*, store it as a Mail
    object, and (by default) flag it as Deleted on the server.

    Returns the created Mail instance.
    NOTE(review): the Mail object is created and saved before the fetch status
    is checked, so a non-OK fetch still produces a Mail — confirm intended.
    """
    log.info("Starting to fetch mail %s" % msgnum)
    typ, data = M.fetch(msgnum, '(RFC822)')
    log.info("Fetch done: %s. Saving message as a Mail object." % typ)
    mailfiledata = data[0][1]
    mail = Mail()
    mail.set_file(mailfiledata, M.host)
    log.info("Save done")
    if typ == 'OK':
        log.info("Status ok. Setting Deleted FLAGS: %s" % delete_msg)
        if delete_msg:
            M.store(msgnum, '+FLAGS', '\\Deleted')
    else:
        log.info("Status %s" % typ)
    return mail
class Command(BaseCommand):
    """Django management command: fetch Content mails from an IMAP INBOX.

    Connection settings come from settings.MAILCONF; written for Python 2
    (note the `except Exception, err` syntax) and old-style Django options.
    """
    # Limit max number of mails to process
    option_list = BaseCommand.option_list + (
        make_option('--limit',
            action='store',
            dest='limit',
            type='int',
            default=0,
            help='Limit the number of mails to handle'),
    )
    # Don't delete mail from INBOX after retrieving it
    option_list = option_list + (
        make_option('--nodelete',
            action='store_true',
            dest='nodelete',
            default=False,
            help=u'Do not delete mail from INBOX after retrieving it'),
    )
    args = ''
    help = 'Fetch Content mails from IMAP mailbox'
    def handle(self, *args, **options):
        # Parse command-line options.
        limit = options.get('limit')
        verbosity = options.get('verbosity')  # NOTE(review): read but never used
        nodelete = options.get('nodelete')
        delete = not nodelete
        imapconf = settings.MAILCONF
        login, passwd = imapconf['login'], imapconf['passwd']
        host = imapconf['host']
        M = imap_connect(imapconf['host'])
        if M is None:
            log.error("Failed to imap_connect(%s)" % imapconf['host'])
            return False
        try:
            typ, data = M.login(login, passwd)
        except Exception, err:
            log.error("Failed to login(%s) to host %s: %s" % (login, host, str(err)))
            return False
        try:
            M.select()
            typ, data = M.search(None, 'ALL')
        except Exception, err:
            log.error("Failed to select or search %s's mailbox on host %s: %s" % (login, host, str(err)))
            imap_disconnect(M)
            # NOTE(review): execution falls through after disconnecting, so
            # data[0].split() below still runs on stale/undefined data —
            # confirm whether a `return` was intended here.
        msg_ids = data[0].split()
        # Fetch 'limit' messages from INBOX
        cnt = 0
        try:
            for msgnum in msg_ids:
                fetch_message(M, msgnum, delete_msg = delete)
                cnt += 1
                if limit > 0 and cnt >= limit:
                    break
        except Exception, err:
            log.error("IMAP command failed: %s" % str(err))
            imap_disconnect(M)
            # NOTE(review): M was just disconnected, yet expunge/disconnect
            # below still run on the same connection — confirm intended.
        M.expunge()
        imap_disconnect(M)
| [
"aapo.rista@aalto.fi"
] | aapo.rista@aalto.fi |
1b9d522a68c779c6fbab61ff905ce1cec0ad5f22 | 4dd18f00564dada51d30d7ea7477ec4e28447c1a | /myInceptionRestNet_FeatExt.py | eedc32294c1aedfba7441353bd3fdbaded4601c2 | [] | no_license | zxecho/FedFER4robot | 61a328889165389a8fae3aee33bd87b03f4c6009 | 6bfde7eb8f6e077dcbf7201bf908bb9dae6e1649 | refs/heads/main | 2023-07-03T08:57:15.077466 | 2021-08-06T08:33:22 | 2021-08-06T08:33:22 | 393,309,915 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,620 | py | import torch
from torch import nn
import torch.nn.functional as F
class Inception_ResNetv2(nn.Module):
    """Inception-ResNet-style classifier: Stem -> residual units ->
    1x1 conv to 1024 channels -> global average pool -> linear head.

    NOTE(review): Stem outputs 224 channels (see Stem below), but the first
    unit here is built with in_channels=256 and the head conv expects 128 —
    these sizes look stale relative to My_IncepRestNet; confirm before use.
    """
    def __init__(self, in_channels=3, classes=1000):
        super(Inception_ResNetv2, self).__init__()
        blocks = []
        blocks.append(Stem(in_channels))
        blocks.append(my_IncepResNet_unit(256, 1))
        for i in range(3):
            blocks.append(my_IncepResNet_unit(128, 1))
        blocks.append(nn.MaxPool2d(2, stride=1, padding=0))
        # blocks.append(Inception_ResNet_B(256, 1))
        # for i in range(2):
        #     blocks.append(Inception_ResNet_C(256, 1))
        # blocks.append(Inception_ResNet_C(256, activation=False))
        self.features = nn.Sequential(*blocks)
        self.conv = Conv2d(128, 1024, 1, stride=1, padding=0, bias=False)
        self.global_average_pooling = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(1024, classes)
    def forward(self, x):
        # features -> 1x1 conv -> GAP -> flatten -> logits
        x = self.features(x)
        x = self.conv(x)
        x = self.global_average_pooling(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        return x
class My_IncepRestNet(nn.Module):
    """Classifier: Stem (224 channels) -> three residual units (each halves
    the spatial size via its internal max-pool) -> 1x1 conv to 512 ->
    global average pool -> linear head with *classes* outputs."""
    def __init__(self, in_channels=3, classes=6):
        super(My_IncepRestNet, self).__init__()
        self.stem = Stem(in_channels)
        self.incepres_1 = my_IncepResNet_unit(224, 64, 1)
        self.incepres_2 = my_IncepResNet_unit(64, 64, 1)
        self.incepres_3 = my_IncepResNet_unit(64, 128, 1)
        self.conv = Conv2d(128, 512, 1, stride=1, padding=0, bias=False)
        self.global_average_pooling = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512, classes)
    def forward(self, x):
        x = self.stem(x)
        x = self.incepres_1(x)
        x = self.incepres_2(x)
        x = self.incepres_3(x)
        x = self.conv(x)
        x = self.global_average_pooling(x)
        # flatten (N, C, 1, 1) -> (N, C) before the linear layer
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        return x
class MySimpleNet(nn.Module):
    """Smaller variant of My_IncepRestNet: SimpleStem (160 channels) ->
    three residual units -> 1x1 conv to 256 -> GAP -> linear head."""
    def __init__(self, in_channels=3, classes=6):
        super(MySimpleNet, self).__init__()
        self.stem = SimpleStem(in_channels)
        self.incepres_1 = my_IncepResNet_unit(160, 64, 1)
        self.incepres_2 = my_IncepResNet_unit(64, 64, 1)
        self.incepres_3 = my_IncepResNet_unit(64, 64, 1)
        self.conv = Conv2d(64, 256, 1, stride=1, padding=0, bias=False)
        self.global_average_pooling = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(256, classes)
    def forward(self, x):
        x = self.stem(x)
        x = self.incepres_1(x)
        x = self.incepres_2(x)
        x = self.incepres_3(x)
        x = self.conv(x)
        x = self.global_average_pooling(x)
        # flatten (N, C, 1, 1) -> (N, C) before the linear layer
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        return x
class Stem(nn.Module):
    """Input stem: a 3x3 conv to 32 channels (no padding, so spatial size
    shrinks by 2), then four parallel branches concatenated along channels:
      0) 1x1 -> 32    1) 1x1 -> 5x5 -> 64    2) 1x1 -> 3x3 -> 3x3 -> 64
      3) 3x3 avg-pool -> 1x1 -> 64
    Output: 32 + 64 + 64 + 64 = 224 channels.
    """
    def __init__(self, in_channels):
        super(Stem, self).__init__()
        self.features = nn.Sequential(
            Conv2d(in_channels, 32, 3, stride=1, padding=0, bias=False),  # 149 x 149 x 32
            # Conv2d(32, 32, 3, stride=1, padding=0, bias=False),  # 147 x 147 x 32
            # Conv2d(32, 32, 3, stride=1, padding=1, bias=False),  # 147 x 147 x 64
            # nn.MaxPool2d(2, stride=1, padding=0),  # 73 x 73 x 64
        )
        self.branch_0 = Conv2d(32, 32, 1, stride=1, padding=0, bias=False)
        self.branch_1 = nn.Sequential(
            Conv2d(32, 48, 1, stride=1, padding=0, bias=False),
            Conv2d(48, 64, 5, stride=1, padding=2, bias=False),
        )
        self.branch_2 = nn.Sequential(
            Conv2d(32, 48, 1, stride=1, padding=0, bias=False),
            Conv2d(48, 64, 3, stride=1, padding=1, bias=False),
            Conv2d(64, 64, 3, stride=1, padding=1, bias=False),
        )
        self.branch_3 = nn.Sequential(
            nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
            Conv2d(32, 64, 1, stride=1, padding=0, bias=False)
        )
    def forward(self, x):
        x = self.features(x)
        x0 = self.branch_0(x)
        x1 = self.branch_1(x)
        x2 = self.branch_2(x)
        x3 = self.branch_3(x)
        # concatenate the four branch outputs along the channel dimension
        return torch.cat((x0, x1, x2, x3), dim=1)
class SimpleStem(nn.Module):
    """Lighter stem used by MySimpleNet: a 3x3 conv to 32 channels (no
    padding), then four parallel branches concatenated along channels:
      0) 1x1 -> 32    1) 1x1 -> 5x5 -> 32    2) 1x1 -> 3x3 -> 64
      3) 3x3 avg-pool -> 1x1 -> 32
    Output: 32 + 32 + 64 + 32 = 160 channels.
    """
    def __init__(self, in_channels):
        super(SimpleStem, self).__init__()
        self.features = nn.Sequential(
            Conv2d(in_channels, 32, 3, stride=1, padding=0, bias=False),  # 149 x 149 x 32
            # Conv2d(32, 32, 3, stride=1, padding=0, bias=False),  # 147 x 147 x 32
            # Conv2d(32, 32, 3, stride=1, padding=1, bias=False),  # 147 x 147 x 64
            # nn.MaxPool2d(2, stride=1, padding=0),  # 73 x 73 x 64
        )
        self.branch_0 = Conv2d(32, 32, 1, stride=1, padding=0, bias=False)
        self.branch_1 = nn.Sequential(
            Conv2d(32, 32, 1, stride=1, padding=0, bias=False),
            Conv2d(32, 32, 5, stride=1, padding=2, bias=False),
        )
        self.branch_2 = nn.Sequential(
            Conv2d(32, 64, 1, stride=1, padding=0, bias=False),
            Conv2d(64, 64, 3, stride=1, padding=1, bias=False),
        )
        self.branch_3 = nn.Sequential(
            nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
            Conv2d(32, 32, 1, stride=1, padding=0, bias=False)
        )
    def forward(self, x):
        x = self.features(x)
        x0 = self.branch_0(x)
        x1 = self.branch_1(x)
        x2 = self.branch_2(x)
        x3 = self.branch_3(x)
        # concatenate the four branch outputs along the channel dimension
        return torch.cat((x0, x1, x2, x3), dim=1)
class my_IncepResNet_unit(nn.Module):
    """Downsampling residual unit.

    Branch:   3x3 conv -> 3x3 conv -> 2x2 max-pool (spatial size halved).
    Shortcut: plain 3x3 conv -> 2x2 max-pool.
    Output:   PReLU(shortcut + scale * branch), br_channel output channels.
    """
    def __init__(self, in_channels, br_channel=32, scale=1.0):
        super(my_IncepResNet_unit, self).__init__()
        self.scale = scale
        # self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        # self.branch_0 = Conv2d(in_channels, 32, 1, stride=1, padding=0, bias=False)
        # self.branch_1 = nn.Sequential(
        #     Conv2d(in_channels, br_channel, 1, stride=1, padding=0, bias=False),
        #     Conv2d(br_channel, br_channel*2, 3, stride=1, padding=1, bias=False)
        # )
        self.branch_2 = nn.Sequential(
            # Conv2d(in_channels, br_channel, 1, stride=1, padding=0, bias=False),
            Conv2d(in_channels, br_channel, 3, stride=1, padding=1, bias=False),
            Conv2d(br_channel, br_channel, 3, stride=1, padding=1, bias=False),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Shortcut path: plain (un-normalized) conv + pool to match the
        # branch's shape so the two can be summed.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, br_channel, 3, stride=1, padding=1, bias=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # self.relu = nn.ReLU(inplace=True)
        self.prelu = nn.PReLU()
    def forward(self, x):
        # x = self.maxpool(x)
        # x0 = self.branch_0(x)
        # x1 = self.branch_1(x)
        x2 = self.branch_2(x)
        # x_res = torch.cat((x1, x2), dim=1)
        x = self.conv(x)
        return self.prelu(x + self.scale * x2)
class Reduciton_B(nn.Module):
    """Reduction block: three conv branches ending in stride-2 convs plus a
    stride-2 max-pool branch, concatenated along the channel dimension.

    NOTE: the class name keeps the original (misspelled) 'Reduciton_B'
    because external code may reference it.
    """
    def __init__(self, in_channels):
        super(Reduciton_B, self).__init__()
        self.branch_0 = nn.Sequential(
            Conv2d(in_channels, 32, 1, stride=1, padding=0, bias=False),
            Conv2d(32, 64, 3, stride=2, padding=0, bias=False)
        )
        self.branch_1 = nn.Sequential(
            Conv2d(in_channels, 32, 1, stride=1, padding=0, bias=False),
            Conv2d(32, 64, 3, stride=2, padding=0, bias=False),
        )
        self.branch_2 = nn.Sequential(
            Conv2d(in_channels, 32, 1, stride=1, padding=0, bias=False),
            Conv2d(32, 64, 3, stride=1, padding=1, bias=False),
            Conv2d(64, 128, 3, stride=2, padding=0, bias=False)
        )
        self.branch_3 = nn.MaxPool2d(3, stride=2, padding=0)
    def forward(self, x):
        x0 = self.branch_0(x)
        x1 = self.branch_1(x)
        x2 = self.branch_2(x)
        x3 = self.branch_3(x)
        return torch.cat((x0, x1, x2, x3), dim=1)
class Inception_ResNet_A(nn.Module):
    """Inception-ResNet-A residual block: three conv branches are
    concatenated (32 + 32 + 64 = 128 channels), projected back to
    in_channels by a 1x1 conv, scaled, added to the input, then ReLU."""
    def __init__(self, in_channels, scale=1.0):
        super(Inception_ResNet_A, self).__init__()
        self.scale = scale
        self.branch_0 = Conv2d(in_channels, 32, 1, stride=1, padding=0, bias=False)
        self.branch_1 = nn.Sequential(
            Conv2d(in_channels, 32, 1, stride=1, padding=0, bias=False),
            Conv2d(32, 32, 3, stride=1, padding=1, bias=False)
        )
        self.branch_2 = nn.Sequential(
            Conv2d(in_channels, 32, 1, stride=1, padding=0, bias=False),
            Conv2d(32, 48, 3, stride=1, padding=1, bias=False),
            Conv2d(48, 64, 3, stride=1, padding=1, bias=False)
        )
        # 1x1 projection back to in_channels so the residual sum is valid.
        self.conv = nn.Conv2d(128, in_channels, 1, stride=1, padding=0, bias=True)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        x0 = self.branch_0(x)
        x1 = self.branch_1(x)
        x2 = self.branch_2(x)
        x_res = torch.cat((x0, x1, x2), dim=1)
        x_res = self.conv(x_res)
        return self.relu(x + self.scale * x_res)
class Inception_ResNet_B(nn.Module):
    """Inception-ResNet-B residual block: a 1x1 branch plus a factorized-7x7
    branch (1x7 then 7x1), concatenated (256 channels), projected back to
    in_channels, scaled, added to the input, then PReLU (stored as
    self.relu — the attribute name is kept for checkpoint compatibility)."""
    def __init__(self, in_channels, scale=1.0):
        super(Inception_ResNet_B, self).__init__()
        self.scale = scale
        self.branch_0 = Conv2d(in_channels, 128, 1, stride=1, padding=0, bias=False)
        self.branch_1 = nn.Sequential(
            Conv2d(in_channels, 64, 1, stride=1, padding=0, bias=False),
            Conv2d(64, 128, (1, 7), stride=1, padding=(0, 3), bias=False),
            Conv2d(128, 128, (7, 1), stride=1, padding=(3, 0), bias=False)
        )
        self.conv = nn.Conv2d(256, in_channels, 1, stride=1, padding=0, bias=True)
        self.relu = nn.PReLU()
    def forward(self, x):
        x0 = self.branch_0(x)
        x1 = self.branch_1(x)
        x_res = torch.cat((x0, x1), dim=1)
        x_res = self.conv(x_res)
        return self.relu(x + self.scale * x_res)
class Inception_ResNet_C(nn.Module):
    """Inception-ResNet-C residual block: a 1x1 branch plus a factorized-3x3
    branch (1x3 then 3x1), concatenated (160 channels), projected back to
    in_channels, scaled and added to the input. When *activation* is False
    the final ReLU is skipped (used for the last block before the head)."""
    def __init__(self, in_channels, scale=1.0, activation=True):
        super(Inception_ResNet_C, self).__init__()
        self.scale = scale
        self.activation = activation
        self.branch_0 = Conv2d(in_channels, 32, 1, stride=1, padding=0, bias=False)
        self.branch_1 = nn.Sequential(
            Conv2d(in_channels, 32, 1, stride=1, padding=0, bias=False),
            Conv2d(32, 64, (1, 3), stride=1, padding=(0, 1), bias=False),
            Conv2d(64, 128, (3, 1), stride=1, padding=(1, 0), bias=False)
        )
        self.conv = nn.Conv2d(160, in_channels, 1, stride=1, padding=0, bias=True)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        x0 = self.branch_0(x)
        x1 = self.branch_1(x)
        x_res = torch.cat((x0, x1), dim=1)
        x_res = self.conv(x_res)
        if self.activation:
            return self.relu(x + self.scale * x_res)
        return x + self.scale * x_res
class Conv2d(nn.Module):
    """Conv -> GroupNorm(32 groups) -> ReLU building block.

    Submodules are created (and named) in the same order as before —
    conv, gn, relu — so state_dict keys and parameter init stay identical.
    """
    def __init__(self, in_channels, out_channels, kernel_size, padding, stride=1, bias=True):
        super(Conv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride=stride, padding=padding, bias=bias)
        self.gn = nn.GroupNorm(32, out_channels)
        self.relu = nn.ReLU()
    def forward(self, inp):
        # convolution, then group normalization, then the non-linearity
        return self.relu(self.gn(self.conv(inp)))
| [
"noreply@github.com"
] | noreply@github.com |
11ed9e9b15a39888808ba8c5239f94214ee5a921 | 1159ef2febfd0a36cd51ac43dfe23a087913e05f | /Day_3/listdemo.py | a6b9f7d3ef697024dad7aa698099bba928f347d2 | [] | no_license | Satish980/JNTUK_Python_Workshop | 64903addf14439ef961db00b79d5d434da4464a9 | ba2ede71b7b1145402efce9566471d6d9bc3d6a6 | refs/heads/master | 2022-12-05T12:27:36.810481 | 2020-08-24T14:59:02 | 2020-08-24T14:59:02 | 288,081,719 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py |
# Demo of Python list basics: mixed-type lists, item assignment and slicing.
alist = [10,20,30,40,50,60,70,56,54,54,5454]
blist = ["oracle","microsoft","unix"]
clist = [10,20,"oracle"]  # lists may mix element types
# assigning new element to the list
alist[0] = 1000
print("after modifying :", alist)
print(alist[0])
print("elements are :", alist)
# slicing
print(alist[0:1])
print(alist[0:3])
print(alist[4:7])
print(alist[-1])  # negative index counts from the end
print(alist[::]) # all the values
print(alist[::-1]) # reverse the elements | [
"isatishkumar4008@gmail.com"
] | isatishkumar4008@gmail.com |
50a0c577419e9816c0044d6ecb525ba9e83b893c | f4ef876264bb4335257336723776e00fd6f0a77a | /day1a.py | 95eaa2548670f0a8d2f8ff8585549a63ee193d6f | [] | no_license | matt-j-e/advent_of_code_2020 | cd8d79f4509ad992b1cd2594aace4f6a78de8ebe | 8f767bf0b6690d69f274a1f41abdc7918d9dde4e | refs/heads/master | 2023-01-24T05:40:30.805350 | 2020-12-15T08:35:00 | 2020-12-15T08:35:00 | 320,642,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | arr = []
# Advent of Code 2020, day 1 (part 1): read one integer per line from
# day1.txt and report a pair that sums to 2020 together with its product.
# NOTE(review): `arr` is initialised (arr = []) on a line outside this chunk.
with open ("day1.txt") as f:
    for line in f:
        arr.append(int(line))
for i in range(len(arr)):
    for j in range(i+1, len(arr)):
        if arr[i] + arr[j] == 2020:
            print(arr[i], arr[j], arr[i] * arr[j])
            # only exits the inner loop, so other matches for later i values
            # could still be printed
            break
| [
"the.matt.edwards@googlemail.com"
] | the.matt.edwards@googlemail.com |
c61878608715a968411a0145bb77acbe05ffcc3a | 5c69c86387caf5c5b02ff71e2c65bad4568185bb | /Run/3.py | f4728c1d4041332a5870c511c0fa42ab47c9a6f9 | [] | no_license | PozAnta/SystemOrig | 991ca7c8d84062154d1cb658dc8f5a85e15ffebf | 357ed79cca28c21c188f0d578b8366b6d009545d | refs/heads/master | 2020-04-17T21:44:19.867531 | 2019-01-23T07:17:19 | 2019-01-23T07:17:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py |
recset_file = ""
f = ""
new_value = "4"
try:
recset_file = open('C:\\MC Projects\\UCI2\\StartPRG.PRG', "r")
list_file = recset_file.read()
recset_file.close()
print(list_file.find("dim setup_con as long = 1"))
if list_file.find("dim setup_con as long = 1") != -1:
cut_end = list_file[list_file.find("Load AX_SETUP.PRG"):]
cut_start = list_file[:list_file.find("dim setup_con as long = 1")]
print(cut_start)
print(cut_end)
f = open('C:\\MC Projects\\UCI2\\Start.PRG', "w")
f.write(cut_start)
f.write("dim setup_con as long = " + new_value)
f.write("\nselect case setup_con")
f.write("\n case 1")
f.write("\n Load EC_SETUP.PRG")
f.write("\n Stas EC_SETUP.PRG")
f.write("\n while EC_SETUP.PRG.state <> 10")
f.write("\n sleep 10")
f.write("\n end while")
f.write("\n case 2")
f.write("\n Load EC_SET_2.PRG")
f.write("\n Stas EC_SE_2.PRG")
f.write("\n while EC_SE_2.PRG.state <> 10")
f.write("\n sleep 10")
f.write("\n end while")
f.write("\nend select\n")
f.write("\n " + cut_end)
finally:
recset_file.close()
f.close()
| [
"33724307+PozAnta@users.noreply.github.com"
] | 33724307+PozAnta@users.noreply.github.com |
880d06ad03f6cf569657d3039340803cf78404fe | df8b0b089e2bf863ce2d1e471db8d34f12ada531 | /model/sg_perception/opt_classifier/opt_classifier.py | 9f68de78a461dcb0b161e54979001f68695e180e | [] | no_license | ICRA-2019/TCG | aba1af1c607626819c6900eb488b711d4e4890be | e917dada693d912c7dc6d46d379ad696c7e6a632 | refs/heads/master | 2022-02-13T07:30:28.827489 | 2018-09-04T15:49:44 | 2018-09-04T15:49:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,535 | py | import tensorflow as tf
# contains information relating to input data size
from model.sg_perception.constants import *
# network layer information for P_CNN
layer_elements = [-1, 16, 32, 128, OPT_CLASSES]  # channel counts per layer (-1 = input placeholder)
output_sizes = [32, 16, 4]  # expected spatial size after each conv stage
filter_sizes = [4, 4, 8]    # square kernel size for each conv stage
stride_sizes = [2, 2, 4]    # stride for each conv stage
padding_size = [1, 1, 2]    # manual zero-padding applied before each conv
'''
ClassifierModel generates q-values for a given input observation
'''
class ClassifierModel:
    # Constructor
    """
    CNN+LSTM classifier over optical-flow sequences (TensorFlow 1.x graph API:
    tf.placeholder / tf.contrib — will not run on TF 2.x without compat shims).
    batch_size - int (1 by default)
    filename - string, location of file with saved model parameters (no model listed by default)
    learning_rate - float, speed at which the model trains (1e-5 by default)
    """
    def __init__(self, batch_size=1, filename="", learning_rate=1e-5):
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.__batch_size = batch_size
            self.__alpha = learning_rate
            # Model variables
            def weight_variable(name, shape):
                # truncated-normal initialisation for conv/FC weights
                initial = tf.truncated_normal(shape, stddev=0.1)
                return tf.Variable(initial, name=name)
            def bias_variable(name, shape):
                # small positive constant so ReLU units start active
                initial = tf.constant(0.1, shape=shape)
                return tf.Variable(initial, name=name)
            # Q variables
            self.variables_pnt = {
                "W1": weight_variable("W_conv1_pnt", [filter_sizes[0], filter_sizes[0],
                                                      pnt_dtype["num_c"], layer_elements[1]]),
                "b1": bias_variable("b_conv1_pnt", [layer_elements[1]]),
                "W2": weight_variable("W_conv2_pnt", [filter_sizes[1], filter_sizes[1],
                                                      layer_elements[1], layer_elements[2]]),
                "b2": bias_variable("b_conv2_pnt", [layer_elements[2]]),
                "W3": weight_variable("W_conv3_pnt", [filter_sizes[2], filter_sizes[2],
                                                      layer_elements[2], layer_elements[-2]]),
                "b3": bias_variable("b_conv3_pnt", [layer_elements[-2]]),
                "W_lstm": weight_variable("W_lstm", [layer_elements[-2], layer_elements[-1]]),
                "b_lstm": bias_variable("b_lstm", [layer_elements[-1]]),
                "W_fc": weight_variable("W_fc", [layer_elements[-1] + 1, layer_elements[-1]]),
                "b_fc": bias_variable("b_fc", [layer_elements[-1]])
            }
            # Placeholder variables
            # placeholder for the Optical Flow data
            self.pnt_ph = tf.placeholder("float",
                                         [self.__batch_size, None,
                                          pnt_dtype["cmp_h"] * pnt_dtype["cmp_w"] * pnt_dtype["num_c"]],
                                         name="pnt_placeholder")
            # placeholder for the sequence length
            self.seq_length_ph = tf.placeholder("int32", [self.__batch_size],
                                                name="seq_len_placeholder")
            # placeholder for the reward values to classify with
            self.pnt_y_ph = tf.placeholder("float", [None, OPT_CLASSES], name="pnt_y_placeholder")
            # Build Model Structure
            # initialize all variables in the network
            self.pred_wave_set = self.execute_wave_var_set() # used to initialize variables
            # Q-value Generation Functions
            # return the action with the highest q-value
            self.wave_observed = tf.argmax(self.execute_wave(), 1)
            # NOTE(review): `observe` duplicates `wave_observed` — confirm
            # whether one of the two can be removed.
            self.observe = tf.argmax(self.execute_wave(), 1)
            # Optimization Functions
            # get the difference between the q-values and the true output
            self.cross_entropy_wave = tf.nn.softmax_cross_entropy_with_logits(
                labels=self.pnt_y_ph, logits=self.execute_wave())
            # optimize the network
            self.optimizer_wave = tf.train.AdamOptimizer(learning_rate=self.__alpha).minimize(
                self.cross_entropy_wave)
            # Evaluation Functions
            # return a boolean indicating whether the system correctly predicted the output
            # NOTE(review): wave_observed is already an argmax (rank-1 tensor);
            # taking argmax over axis 1 again looks wrong — likely intended
            # tf.equal(self.wave_observed, tf.argmax(self.pnt_y_ph, 1)). Confirm.
            self.correct_pred_wave = tf.equal(tf.argmax(self.wave_observed, 1),
                                              tf.argmax(self.pnt_y_ph, 1))
            # the accuracy of the current batch
            self.accuracy_wave = tf.reduce_mean(tf.cast(self.correct_pred_wave, tf.float32))
            # Initialization
            # Generate Session
            self.sess = tf.InteractiveSession(graph=self.graph)
            # Variable for generating a save checkpoint
            self.saver = tf.train.Saver()
            if len(filename) == 0:
                # initialize all model variables
                init_op = tf.global_variables_initializer()
                self.sess.run(init_op)
                print("VARIABLE VALUES INITIALIZED")
            else:
                # restore variables from a checkpoint
                self.saver.restore(self.sess, filename)
                print("VARIABLE VALUES RESTORED FROM: " + filename)
    # Helper Functions
    def save_model(self, name="model.ckpt", save_dir=""):
        """
        save the model to a checkpoint file
        -name: (string) name of the checkpoint file
        -save_dir: (string) directory to save the file into
        """
        self.saver.save(self.sess, save_dir + '/' + name)
    # Executor Functions
    def execute_wave_var_set(self):
        # Initialize the model's structure
        return self.wave_model(
            self.seq_length_ph,
            self.pnt_ph,
            tf.variable_scope("wave"),
            tf.variable_scope("wave"),
            self.variables_pnt
        )
    def execute_wave(self):
        # Generate the q-values of Q for the given input (reuses "wave" scope)
        return self.wave_model(
            self.seq_length_ph,
            self.pnt_ph,
            tf.variable_scope("wave"),
            tf.variable_scope("wave", reuse=True),
            self.variables_pnt
        )
    def gen_prediction(self, num_frames, opt_data, verbose=False):
        """
        Generate q-values for an input passed in as seperate data points. Used when
        by external systems (ROS) to run the model without having to import tensorflow
        -num_frames: (int) the number of frames in the video
        -opt_data: (numpy array) an array that contains the optical data
        -verbose: (bool) print additional information
        """
        opt_pred = self.sess.run(self.observe, feed_dict={
            self.seq_length_ph: [num_frames],
            self.pnt_ph: opt_data
        })
        if verbose:
            available_actions = ["PMT", "REW", "ABT"]
            print("Best action: " + available_actions[int(opt_pred[0])])
        return int(opt_pred[0])
    # The Model
    def process_vars(self, seq, data_type):
        # cast inputs to the correct data type and restore the (B, T, H, W, C) layout
        seq_inp = tf.cast(seq, tf.float32)
        return tf.reshape(seq_inp, (self.__batch_size, -1, data_type["cmp_h"],
                                    data_type["cmp_w"], data_type["num_c"]))
    def check_legal_inputs(self, tensor, name):
        # ensure that the current tensor is finite (doesn't have any NaN values)
        return tf.verify_tensor_all_finite(tensor, "ERR: Tensor not finite - " + name, name=name)
    def wave_model(self, seq_length, pnt_ph, variable_scope, variable_scope2, var_pnt):
        """
        -seq_length: (placeholder) the number of frames in the video
        -pnt_ph: (placeholder) an array that contains the optical flow data
        -train_ph: (placeholder) a bool indicating whether the variables are being trained
        -variable_scope: (variable_scope) scope for the CNN stacks
        -variable_scope2: (variable_scope) scope for the temporal data
        -var_pnt: (dict) the variables for the optical flow input
        """
        # Convolution Functions
        def convolve_data_3layer_pnt(input_data, variables, n, dtype):
            # pass data into through P_CNN
            def pad_tf(x, p):
                # symmetric zero-padding of p pixels on both spatial axes
                return tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], "CONSTANT")
            def gen_convolved_output(sequence, W, b, stride, num_hidden, new_size, padding='SAME'):
                conv = tf.nn.conv2d(sequence, W, strides=[1, stride, stride, 1],
                                    padding=padding) + b
                return tf.nn.relu(conv)
            input_data = tf.reshape(input_data,
                                    [-1, dtype["cmp_h"], dtype["cmp_w"], dtype["num_c"]],
                                    name=n + "_inp_reshape")
            # three conv stages configured by the module-level constant lists
            for i in range(3):
                si = str(i + 1)
                input_data = pad_tf(input_data, padding_size[i])
                padding = "VALID"
                input_data = gen_convolved_output(input_data, variables["W" + si],
                                                  variables["b" + si], stride_sizes[i],
                                                  layer_elements[i + 1], output_sizes[i], padding)
                input_data = self.check_legal_inputs(input_data, "conv" + si + "_" + n)
            return input_data
        # =======================================
        # Model Execution Begins Here
        # =======================================
        # CNN Stacks
        # Inception Network (INRV2)
        with variable_scope as scope:
            # P_CNN
            inp_data = self.process_vars(pnt_ph, pnt_dtype)
            conv_inp = convolve_data_3layer_pnt(inp_data, var_pnt, "pnt", pnt_dtype)
            # flatten each frame's feature map into one vector per time step
            conv_inp = tf.reshape(conv_inp, [self.__batch_size, -1,
                                             output_sizes[-1] * output_sizes[-1] *
                                             layer_elements[-2]], name="combine_reshape")
            # capture variables before changing scope
            W_lstm = var_pnt["W_lstm"]
            b_lstm = var_pnt["b_lstm"]
        with variable_scope2 as scope:
            # Internal Temporal Information (LSTM)
            lstm_cell = tf.contrib.rnn.LSTMCell(layer_elements[-2],
                                                use_peepholes=False,
                                                cell_clip=None,
                                                initializer=None,
                                                num_proj=None,
                                                proj_clip=None,
                                                forget_bias=1.0,
                                                state_is_tuple=True,
                                                activation=None,
                                                reuse=None
                                                )
            lstm_mat, _ = tf.nn.dynamic_rnn(
                cell=lstm_cell,
                inputs=conv_inp,
                dtype=tf.float32,
                sequence_length=seq_length,
                time_major=False
            )
            # if lstm_out is NaN replace with 0 to prevent model breakage
            lstm_mat = tf.where(tf.is_nan(lstm_mat), tf.zeros_like(lstm_mat), lstm_mat)
            lstm_mat = self.check_legal_inputs(lstm_mat, "lstm_mat")
            # extract relevant information from LSTM output using partitions
            # (takes the last time step of the first — and only — batch entry)
            lstm_out = tf.expand_dims(lstm_mat[0, -1], 0)
            # FC1
            fc1_out = tf.matmul(lstm_out, W_lstm) + b_lstm
            fc1_out = self.check_legal_inputs(fc1_out, "fc1")
            return fc1_out
if __name__ == '__main__':
    # Smoke test: constructing the model builds the TF graph and
    # initializes its variables.
    dqn = ClassifierModel()
| [
"eccarpio@hotmail.com"
] | eccarpio@hotmail.com |
2cfbcc49b59402a8068228ec76bd75bc3e6f4437 | 06cb5b7ec7388f131d11ab5eb49154d097ebefb2 | /menu.py | fdd38356047f1cfdc1f76879f4c5541386b0fbc3 | [] | no_license | conner-jowitt/miniproject | 85a624617cc2ddbacbb583bfc30da7ce1a5e5dd9 | 48fb4f1e1e547885570241189fdfbc6d600f0bfe | refs/heads/master | 2020-07-25T10:00:14.033871 | 2019-10-09T16:18:01 | 2019-10-09T16:18:01 | 208,251,601 | 0 | 1 | null | 2019-09-19T14:21:39 | 2019-09-13T11:31:04 | Python | UTF-8 | Python | false | false | 1,804 | py | class Menu(): # TODO add starting index
    def __init__(self, initial_options, header="", index=0):
        # initial_options: list of option labels shown to the user.
        # header: title text for the menu.
        # index: number the first option is displayed (and selected) as.
        self.options = initial_options
        self.header = header
        self.index = index
    def add_option(self, new_option):
        # Append a new option label to the end of the menu.
        self.options.append(new_option)
def remove_option(self, old_option):
try:
self.options.remove(old_option)
except:
pass
    def print_options(self):
        # Print each option label on its own line, without numbering.
        for option in self.options:
            print(option)
    def print_ioptions(self):
        # Print options prefixed with their selectable number (offset by
        # self.index), e.g. "[0] Quit".
        for i in range(len(self.options)):
            print(f"[{i + self.index}] " + self.options[i])
def select_option(self, acceptable_values=[]):
self.print_ioptions()
choice = "\n"
while (choice not in range(index, len(self.options) + index)) and (choice not in acceptable_values):
choice = input("Please select an option: ")
if choice in acceptable_values:
return choice
elif not choice.isnumeric():
print("Please enter a number! ")
else:
choice = int(choice)
if choice not in range(index, len(self.options + index)):
print("That option is not in the list! Please try again")
return choice
    def get_option(self, item):
        """Return the option at 0-based position *item*; raise if out of range."""
        if item in range(len(self.options)):
            return self.options[item]
        else:
            raise Exception("Item not in list")
    def get_ioption(self, item):
        # Translate a displayed number (offset by self.index) back to the option.
        # NOTE(review): unlike get_option, this silently returns None when the
        # index is out of range — confirm callers expect that asymmetry.
        if item - self.index in range(len(self.options)):
            return self.options[item - self.index]
    def get_length(self):
        # Number of options currently in the menu.
        return len(self.options)
    def get_header(self):
        # Return the menu's title text.
        return self.header
    def set_header(self, new_header):
        # Replace the menu's title text.
        self.header = new_header
| [
"conner.jowitt@infinityworks.com"
] | conner.jowitt@infinityworks.com |
eb9ea2feaa474643ea39f42d1aa1f5a6de783699 | 15f2eda8d5c81922bdfb84edeffcaacefe0949b6 | /ex3.py | 216273549d9c83244d640adcda56ebb329ff9767 | [] | no_license | linus1211/thehardway | afe75924f01762dba09291cad65e74815fbb8a24 | f6c6ff9356c08b3b56e4ba04c8a1f37bca3e2a9c | refs/heads/master | 2016-08-09T23:10:39.700053 | 2016-03-11T09:25:17 | 2016-03-11T09:25:17 | 53,643,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python3
'''
+ plus
- minus
/ slash
* asterisk
% percent
< less-than
> greater-than
<= less-than-equal
>= greater-than-equal
'''
# "Learn Python the Hard Way" exercise 3: arithmetic operators, comparisons,
# and how true division (/) produces floats in the printed results.
print ('I will now count my chickens:')
print ("Hens",25+30/6)
print ("Roosters",100 - 25 * 3 % 4)
print ("Now I will count the eggs:")
print (3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6)
print ("Is it true that 3 + 2 < 5 - 7?")
print (3 + 2 < 5 -7)
print ("What is 3 + 2?", 3 + 2)
print ("What is 5 - 7?", 5 - 7)
print ("Oh, that's why it's False.")
print ("How about some more.")
print ("Is it greater?", 5 > -2)
print ("Is it greater or equal?", 5 >= -2)
print ("Is it less or equal?", 5 <= -2)
| [
"linus1211@163.com"
] | linus1211@163.com |
9ded1f8eb5f608062618445b5a27278af9cc3f6b | 16600afc8c99b78e0ed522574a8908d594ed26d5 | /IdentifyReplicates.py | 0b039ac1ae77e9e197aa5941b2e1c457a8d4f529 | [
"MIT"
] | permissive | srp33/TCGA_RNASeq_Benchmark | cc3d86c29c14c8dc49e9665a02bef58ca27b215f | 317ceac16ce58f9521b4a5ea488be494a83de400 | refs/heads/master | 2021-01-20T08:19:14.831654 | 2015-11-28T15:25:11 | 2015-11-28T15:25:11 | 41,886,603 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import os, sys, glob
# Python 2 script: group files in FeatureCounts/ by the first 20 characters
# of their name (presumably a TCGA sample/aliquot barcode prefix — confirm)
# and report which IDs have more than one file (replicates).
countDict = {}
for filePath in glob.glob("FeatureCounts/*"):
    fileName = os.path.basename(filePath)
    sampleID = fileName[:20]
    #sampleID = fileName[:12]
    if sampleID in countDict:
        countDict[sampleID].append(fileName)
    else:
        countDict[sampleID] = [fileName]
numCommon = 0
# Print every replicate group and count how many sample IDs have replicates.
for sampleID in countDict:
    if len(countDict[sampleID]) > 1:
        print countDict[sampleID]
        numCommon += 1
print numCommon
| [
"stephen_piccolo@byu.edu"
] | stephen_piccolo@byu.edu |
2aef3812fb1788abf8827b8fc2b00f1387b7ffc1 | 2dbc4c21280483d380b7c3ad7c574cfe27fc62c2 | /studies/curso_em_video/ex046-contagem-regressiva.py | a360789cc2aa22149631f0ce39079d76b28bf077 | [
"MIT"
] | permissive | alexander-colaneri/python | 78c2c5b5119dc5fdd886d8e161427ef944b21985 | fbc84a2818d8324b712014e1a5abfc01474e49ea | refs/heads/main | 2023-01-29T22:07:48.891902 | 2020-12-11T18:24:20 | 2020-12-11T18:24:20 | 306,061,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # Faça um programa que mostre na tela uma contagem regressiva para o estouro de fogos de artifício, indo de 10 até 0,
# com uma pausa de 1 segundo entre eles.
from time import sleep
class ContagemRegressiva():
    """New Year countdown: counts 10..1, one tick per second, then fireworks."""
    # Message printed after the countdown finishes.
    fogos = '*** FELIZ ANO NOVO!!!! ***'
    def iniciar(self):
        '''Main entry: print the banner, run the countdown, print the message.'''
        print(f'{" CONTAGEM REGRESSIVA ":=^40}')
        self.contar_tempo()
        # BUG FIX: was `print(contagem.fogos)`, reading the module-level
        # instance instead of this one; use self so any instance works.
        print(self.fogos)
    def contar_tempo(self):
        '''10-second countdown, printing one number per second.'''
        for i in range(10, 0, -1):
            print(i)
            sleep(1)
# Script entry: build one instance and start the countdown immediately
# (note: this also runs on plain import, taking ~10 seconds).
contagem = ContagemRegressiva()
contagem.iniciar()
| [
"alexcol@gmail.com"
] | alexcol@gmail.com |
aea9f74fbe8035b182ce5bec757a8d90c175a52d | 4677108deeb961b59aa8ab6ba0e386c0ab35f061 | /main.py | 1b95c01a06fd65dd5508e7dee444e12035dad7bc | [] | no_license | ferenc4/abn_lookup | 9ce4a15dec9abb2296cc141c8b37ff0e6aaf7653 | b3b417e91b8355d3cca19f4e51eae77d9329584d | refs/heads/master | 2020-09-16T06:22:21.666722 | 2019-11-24T03:25:51 | 2019-11-24T03:25:51 | 223,681,742 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,210 | py | from builtins import Exception, input, str
import requests
# https://3583bytesready.net/2016/08/17/scraping-data-python-xpath/
from lxml import html
import re
# //*[@id="content-matching"]/div/div/table/tbody/tr[1]
# //*[@id="content-matching"]/div/div/table/tbody/tr[1]/th[1]
# //*[@id="content-matching"]/div/div/table/tbody/tr[2]/td[1]/a
# Matching names
def main():
    """Interactive ABN lookup loop.

    Prompts for a company name, scrapes the ABR active-ABN search results
    table, and prints one "ABN | name, type, location" line per match.
    Repeats until the user enters 'e'.
    """
    should_continue = True
    while should_continue:
        company_name = input("Enter the company name: ")
        response = requests.get("https://abr.business.gov.au/Search/ResultsActive?SearchText=" + company_name +
                                "&AllNames=False&EntityName=False&BusinessName=False&TradingName=False&NarrowSearch=False&SearchType=ActiveAbns&AllStates=True&ACT=False&NSW=False&NT=False&QLD=False&TAS=False&SA=False&VIC=False&WA=False&PostcodeDisplayName=Postcode%20(required%20for%20DGR%20%26%20Charity%20search%20options%20if%20Name%20is%20blank)%3A&HideBreadcrumbs=False&HideSearchBox=False&HideLeftSideBar=False&ShowHelp=False&IsHomePage=False&NoIndex=False&ShowVersionNumber=False")
        status = response.status_code
        expected_status = 200
        if status != expected_status:
            # BUG FIX: the original concatenated str + int here, which raises
            # TypeError instead of the intended Exception message.
            raise Exception("Expected status code to be %d, but it was %d"
                            % (expected_status, status))
        tree = html.fromstring(response.content)
        # Result table layout: 4 cells per row -> ABN link, name, type, location.
        col = 0
        bsb = ""  # holds the ABN text of the current row (historic misnomer)
        company_ary = []
        exit_char = 'e'
        for cell in tree.xpath('//*[@id="content-matching"]/div/div/table/tbody/tr/td'):
            if col == 0:
                bsb = cell.getchildren()[0].text_content().strip()
            elif col in (1, 2, 3):
                # collapse runs of internal whitespace in the text cells
                company_ary.append(re.sub("\\s+", " ", cell.text_content().strip()))
            if col == 3:
                # end of a row: emit it and reset the accumulators
                print(bsb + " | " + ", ".join(str(x) for x in company_ary))
                bsb = ""
                company_ary = []
            col = (col + 1) % 4
        exit_input = input("Enter '" + exit_char + "' to exit, or any other key to continue: ")
        should_continue = exit_input != exit_char
if __name__ == "__main__":
    # Run the interactive lookup loop only when executed as a script.
    main()
| [
"ferenc4.fazekas@gmail.com"
] | ferenc4.fazekas@gmail.com |
04bd461e80569d6354730bef59803b1014bc988b | 3b7825faf7d28b6fe6957feed2dafb352e64e86e | /poll/views.py | 468a15e5a1e8902e4dfe507bbe40da7ede48dd1f | [] | no_license | omolojakazeem/pollApp | b34ca84326748341a303289d553c4ef0f8bcf0c6 | dd4fd2737ff574251948422aedecc36101f7f7e6 | refs/heads/main | 2023-02-26T06:29:42.176995 | 2021-02-07T16:57:52 | 2021-02-07T16:57:52 | 336,839,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | from django.shortcuts import render, redirect, HttpResponse
from .forms import PollForm
from .models import Poll
def home(request):
    """Render the home page listing every poll."""
    context = {'polls': Poll.objects.all()}
    return render(request, template_name='poll/home.html', context=context)
def create(request):
    """Show the poll-creation form; persist it and go home on a valid POST."""
    if request.method != 'POST':
        form = PollForm()
    else:
        form = PollForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('home')
    # GET, or POST with validation errors: re-render the (possibly bound) form.
    return render(request, template_name='poll/create.html',
                  context={'form': form})
def result(request, poll_id):
    """Display the vote totals for a single poll."""
    poll = Poll.objects.get(id=poll_id)
    return render(request, template_name='poll/results.html',
                  context={'poll': poll})
def vote(request, poll_id):
    """Record a vote for one of a poll's three options.

    POST: increments the counter matching the submitted option and redirects
    to the results page; unknown options are rejected with HTTP 400.
    GET: renders the voting form.
    """
    poll = Poll.objects.get(id=poll_id)
    if request.method == 'POST':
        selected_option = request.POST['poll']
        if selected_option == 'option1':
            poll.option_one_count += 1
        elif selected_option == 'option2':
            poll.option_two_count += 1
        elif selected_option == 'option3':
            poll.option_three_count += 1
        else:
            # BUG FIX: HttpResponse(400, "Invalid Form") sent "400" as the body
            # and "Invalid Form" as the content type; set the status instead.
            return HttpResponse("Invalid Form", status=400)
        poll.save()
        return redirect('results', poll.id)
    context = {
        'poll': poll,
    }
    return render(request, template_name='poll/vote.html', context=context)
| [
"omolojakazeem@gmail.com"
] | omolojakazeem@gmail.com |
48d5dad900ecdf584f2ec639d5b62ce8f06d2c2c | 82074ba616918ede605dec64b038546a7b07bd7d | /empowerb/middleware.py | 85764001431cfbf3cedb9002fb6e1ccf8f38b859 | [] | no_license | chetankhopade/EmpowerRM | b7ab639eafdfa57c054a0cf9da15c3d4b90bbd66 | 8d968592f5e0d160c56f31a4870e79c30240b514 | refs/heads/main | 2023-07-05T03:20:13.820049 | 2021-08-26T11:56:28 | 2021-08-26T11:56:28 | 399,354,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | from _contextvars import ContextVar
from django.utils.deprecation import MiddlewareMixin
from empowerb.settings import DATABASES
db_ctx = ContextVar('var')
class WhichDatabaseIsTOUseMIddleware(MiddlewareMixin):
    """
    Middleware to update the context var with the db alias.

    URL layout: /<dbname>[_suffix]/... -> the first path segment selects
    the tenant database; anything unknown maps to the 'NoOP' sentinel.
    """
    @staticmethod
    def process_request(request):
        try:
            db_name_path = request.path.split('/')[1]
            db_name = db_name_path.split('_')[0] if '_' in db_name_path else db_name_path
            # set contextvar with the database name if dbname exist in DATABASES dict
            db_ctx.set(db_name) if db_name in DATABASES.keys() else db_ctx.set('NoOP')
        except Exception as ex:
            print(ex.__str__())
            # BUG FIX: ContextVar.reset() expects a Token (not a value) and
            # raised TypeError here; fall back to the 'NoOP' sentinel via set().
            db_ctx.set('NoOP')
| [
"noreply@github.com"
] | noreply@github.com |
41149f6477c2d1571dcea511c175f10051270178 | 1519eed36a133f988cbfe922f748c3941a58ed00 | /utils/utils.py | b005b1ea22bb8261ac135a0c5dddb223689750a0 | [] | no_license | MASILab/MontageQA | fc5e99c40707c58a90790e2efebf44dc1a8820f9 | fe3e410881a0fca4a43efda77a124af89529a5b0 | refs/heads/master | 2020-12-19T05:02:36.363705 | 2020-04-09T22:09:08 | 2020-04-09T22:09:08 | 235,628,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,552 | py | from tkinter import *
from datetime import datetime
class Window:
    # TODO: Add a prompt window asking for starter label file path
    # TODO: Add a label for displaying idx
    """
    The master tkinter window
    """
    def __init__(self, root, img_display):
        """
        Configure the master window.
        Enable full screen. The screen size is set to the maximum resolution of my machine's screen.
        Press Escape key to exit the program
        :param root: Tk()
        :param img_display: the ImageDisplay instance hosted by this window
        """
        self.root = root
        self.img_display = img_display
        self.root.title("Montage Labeler")
        self.root.geometry("1920x1080")
        self.root.attributes('-fullscreen', True)
        # BUG FIX: previously bound the *builtin* exit (which only raises
        # SystemExit inside the Tk callback) instead of this class's handler.
        self.root.bind("<Escape>", self.exit)

    def exit(self, event):
        """
        Handler for exiting the program
        :param event: stub parameter required by tkinter keypress callback functions
        """
        print(event)
        self.root.destroy()
class ImageDisplay:
    """
    The core GUI: the canvas + all buttons.

    Wraps a pandas label table; every handler mutates the table in memory,
    and an autosave timer flushes it to disk periodically.
    """
    def __init__(self, root, img_file_name, table):
        """
        Set up the GUI layout.
        :param root: The master window.
        :param img_file_name: File path of the first image to be displayed
        :param table: Label table. A pandas data frame read from a csv file.
        """
        self.idx = 0
        self.table = table
        self.IMG_DIR = './images/'
        self.NUM_IMGS = len(self.table.index)
        self.OUT_FILE = './labels/labels_' + datetime.now().strftime("%m-%d-%Y %H_%M_%S") + '.csv'
        self.AUTO_SAVE_TIME = 10000  # Autosave every 10 seconds
        # Master frame - image + buttons for navigating directory
        self.master = Frame(root)
        # Master frame for all buttons + textbox
        self.right_margin = Frame(self.master)
        # Labels for displaying idx
        self.idx_display = Label(self.right_margin, height=2, width=20, text=f"Montage #{self.idx + 1}")
        self.idx_display.pack(side=TOP)
        self.idx_display.config(font=("Courier", 15))
        # Textbox for displaying labels of current image
        self.label_display = Label(self.right_margin, height=2, width=20,
                                   text=f"[T:{table['taken out'][0]} G:{table['grainy'][0]} B:{table['broken'][0]}]")
        self.label_display.pack(side=TOP)
        self.label_display.config(font=("Courier", 20))
        # Buttons for navigating image directory
        self.image_buttons = Frame(self.right_margin, width=50, height=4)
        self.prev_image_button = Button(self.image_buttons, text='Previous Image', width=12,
                                        command=self.prev_image)
        self.next_image_button = Button(self.image_buttons, text='Next Image', width=12,
                                        command=self.next_image)
        # Arrange buttons
        self.prev_image_button.pack(side=LEFT)
        self.next_image_button.pack(padx=20, side=LEFT)
        self.image_buttons.pack(side=TOP)
        # Buttons for giving image labels
        self.label_buttons = Frame(self.right_margin)
        self.label_taken_button = Button(self.label_buttons, text='[T] Skull Taken out',
                                         command=self.label_skull_taken_out, width=14)
        self.label_grainy_button = Button(self.label_buttons, text='[G] Image Grainy',
                                          command=self.label_grainy, width=14)
        self.label_broken_button = Button(self.label_buttons, text='[B] Skull Broken',
                                          command=self.label_skull_broken, width=14)
        # Arrange buttons
        self.label_taken_button.pack(padx=5, side=LEFT)
        self.label_grainy_button.pack(padx=5, side=LEFT)
        self.label_broken_button.pack(padx=5, side=LEFT)
        self.label_buttons.pack(side=TOP, pady=20)
        # Button for saving labels
        self.label_save_button = Button(self.right_margin, text="S A V E", command=self.save)
        self.label_save_button.pack(side=TOP, pady=20, fill=BOTH)
        self.label_save_button.config(font=("Courier", 14))
        # Buttons for marking bad montage
        self.bad_clear_buttons = Frame(self.right_margin)
        self.bad_montage_button = Button(self.bad_clear_buttons, text="Bad Montage",
                                         width=12, command=lambda: self.update_all_labels(-1))
        self.clear_labels_button = Button(self.bad_clear_buttons, text="Clear Labels",
                                          width=12, command=lambda: self.update_all_labels(0))
        # Arrange buttons
        self.bad_montage_button.pack(side=LEFT, fill=BOTH)
        self.clear_labels_button.pack(side=LEFT, padx=20, fill=BOTH)
        self.bad_clear_buttons.pack(side=TOP, pady=20)
        # Input box and button for jumping to another image
        self.jump_to_image = Frame(self.right_margin)
        self.go_to_button = Button(self.jump_to_image, text="Go to: ", command=self.go_to_image)
        self.input_box = Entry(self.jump_to_image)
        # Arrange widgets
        self.go_to_button.pack(side=LEFT, padx=10)
        self.input_box.pack(side=LEFT, padx=10)
        self.jump_to_image.pack(side=TOP, pady=20)
        # Display for warning messages
        self.warning_message = Label(self.right_margin)
        self.warning_message.pack(side=TOP)
        self.right_margin.pack(side=RIGHT)
        # Canvas for displaying the image
        self.canvas = Canvas(self.master, width=1080, height=1080, bg='black')
        self.canvas.pack(side=LEFT)
        self.cur_img = PhotoImage(file=self.IMG_DIR + img_file_name)
        self.cur_img_id = self.canvas.create_image((0, 0), image=self.cur_img, anchor=NW)
        # Stub button for autosave
        self.autosave_button = Button(root)
        self.autosave_button.after(self.AUTO_SAVE_TIME, self.save)
        self.master.pack()

    def next_image(self):
        """
        Handler for when "Next Image" button is pressed.
        Advances to the next montage, wrapping to the first after the last.
        """
        self.idx = (self.idx + 1) % self.NUM_IMGS
        self._update_image()
        self._update_label_display()

    def prev_image(self):
        """
        Handler for when "Previous Image" button is pressed.
        Goes to the previous montage, wrapping to the last before the first.
        """
        self.idx = (self.idx - 1) % self.NUM_IMGS
        self._update_image()
        self._update_label_display()

    def _toggle_label(self, column):
        """
        Toggle the given label column between 0 and 1 for the current image,
        then refresh the label display. Rows marked bad montage (-1) are left
        unchanged. (Resolves the old TODO about the three duplicated methods.)
        """
        cur_label = self.table[column][self.idx]
        if cur_label >= 0:
            # toggle between 0 and 1
            cur_label = 1 - cur_label
        self.table.iloc[self.idx, self.table.columns.get_loc(column)] = cur_label
        self._update_label_display()

    def label_skull_taken_out(self):
        """Handler for when "[T]Skull Taken Out" button is pressed."""
        self._toggle_label('taken out')

    def label_grainy(self):
        """Handler for when "[G]Image Grainy" button is pressed."""
        self._toggle_label('grainy')

    def label_skull_broken(self):
        """Handler for when "[B]Skull Broken" button is pressed."""
        self._toggle_label('broken')

    def save(self):
        """
        Write the content of self.table to disk, then reschedule this method
        so it runs again every self.AUTO_SAVE_TIME milliseconds.
        """
        self.table.to_csv(self.OUT_FILE)
        # reschedule the autosave event
        self.autosave_button.after(self.AUTO_SAVE_TIME, self.save)

    def update_all_labels(self, val):
        """
        Set all three label columns of the current image to val and refresh
        the display (used by "Bad Montage" (-1) and "Clear Labels" (0)).
        """
        for column in ('taken out', 'grainy', 'broken'):
            self.table.iloc[self.idx, self.table.columns.get_loc(column)] = val
        self._update_label_display()

    def go_to_image(self):
        """
        Handler for when "Go to:" button is pressed.
        Verify the entry in the input box is a valid 1-based index, then jump
        to that image; otherwise show a warning message.
        """
        msg = f"Invalid input. Please enter an integer between 1 and {self.NUM_IMGS}."
        user_input = self.input_box.get()
        if not user_input.isnumeric():
            self.warning_message.config(text=msg)
            return
        user_input = int(user_input)
        if user_input < 1 or user_input > self.NUM_IMGS:
            self.warning_message.config(text=msg)
            return
        self.warning_message.config(text="")
        self.idx = user_input - 1
        self._update_image()
        self._update_label_display()

    def _update_image(self):
        """
        Update the image displayed on the canvas to the current index.
        """
        self.canvas.delete(self.cur_img_id)
        # image files are named after the table's "image path" with '/'->'+'
        img_file_name = self.table["image path"][self.idx].replace('/', '+')
        img_file_name = self.IMG_DIR + img_file_name + ".png"
        self.cur_img = PhotoImage(file=img_file_name)
        self.cur_img_id = self.canvas.create_image((0, 0), image=self.cur_img, anchor=NW)

    def _update_label_display(self):
        """
        Update the label text to the content of the self.idx-th row of self.table.
        """
        self.label_display.config(text=f"[T:{self.table['taken out'][self.idx]} " +
                                       f"G:{self.table['grainy'][self.idx]} " +
                                       f"B:{self.table['broken'][self.idx]}]")
        self.idx_display.config(text=f"Montage #{self.idx + 1}")
| [
"marcus.yu.56@gmail.com"
] | marcus.yu.56@gmail.com |
880c458d3f3529d6d643d216ff4c6061674fcd20 | 18aee5d93a63eab684fe69e3aa0abd1372dd5d08 | /test/legacy_test/test_jit_save_load.py | 7f58638e7ac7ad178372817cd669397568a0e4c8 | [
"Apache-2.0"
] | permissive | Shixiaowei02/Paddle | 8d049f4f29e281de2fb1ffcd143997c88078eadb | 3d4d995f26c48f7792b325806ec3d110fc59f6fc | refs/heads/develop | 2023-06-26T06:25:48.074273 | 2023-06-14T06:40:21 | 2023-06-14T06:40:21 | 174,320,213 | 2 | 1 | Apache-2.0 | 2022-12-28T05:14:30 | 2019-03-07T10:09:34 | C++ | UTF-8 | Python | false | false | 57,829 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import shutil
import tempfile
import unittest
import numpy as np
import paddle
from paddle import fluid
from paddle.fluid import unique_name
from paddle.jit.api import to_static
from paddle.jit.translated_layer import INFER_PARAMS_INFO_SUFFIX
from paddle.nn import Linear
from paddle.static import InputSpec
# Shared synthetic-data configuration for every test below.
BATCH_SIZE = 32
BATCH_NUM = 10
SEED = 10
def random_batch_reader(input_size, label_size):
    """Build a generator yielding BATCH_NUM (input, label) numpy batches.

    The RNG is reseeded with SEED before each draw, so every batch is
    identical; inputs are float32, labels int64.
    """

    def _make_batch(in_shape, lb_shape):
        np.random.seed(SEED)
        batch_x = np.random.random(size=in_shape).astype('float32')
        batch_y = np.random.random(size=lb_shape).astype('int64')
        return batch_x, batch_y

    def __reader__():
        for _ in range(BATCH_NUM):
            yield _make_batch(
                [BATCH_SIZE, input_size], [BATCH_SIZE, label_size]
            )

    return __reader__
class LinearNet(paddle.nn.Layer):
    """Single linear layer; forward is declarative via @to_static."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear = Linear(in_size, out_size)

    @to_static
    def forward(self, x):
        out = self._linear(x)
        return out
class LinearNetWithInputSpec(paddle.nn.Layer):
    """Like LinearNet, but the static input signature is fixed up front."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear = Linear(in_size, out_size)

    @to_static(input_spec=[InputSpec(shape=[None, 784], dtype='float32')])
    def forward(self, x):
        out = self._linear(x)
        return out
class LinearNetNotDeclarative(paddle.nn.Layer):
    """Plain dygraph linear net; deliberately *not* decorated with @to_static."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear = Linear(in_size, out_size)

    def forward(self, x):
        out = self._linear(x)
        return out
class LinerNetWithLabel(paddle.nn.Layer):
    """Linear net whose forward also consumes a label and returns the loss."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear = Linear(in_size, out_size)

    @to_static(
        input_spec=[
            InputSpec(shape=[None, 784], dtype='float32', name="image"),
            InputSpec(shape=[None, 1], dtype='int64', name="label"),
        ]
    )
    def forward(self, x, label):
        prediction = self._linear(x)
        ce = paddle.nn.functional.cross_entropy(
            prediction, label, reduction='none', use_softmax=False
        )
        return prediction, paddle.mean(ce)
class LinerNetWithPruneInput(paddle.nn.Layer):
    """Computes a loss internally but returns only the prediction, so the
    loss sub-graph can be pruned when saving."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear = Linear(in_size, out_size)

    @to_static(
        input_spec=[
            InputSpec(shape=[None, 784], dtype='float32', name="image"),
            InputSpec(shape=[None, 1], dtype='int64', name="label"),
        ]
    )
    def forward(self, x, label):
        prediction = self._linear(x)
        # loss is built but intentionally not returned
        ce = paddle.nn.functional.cross_entropy(
            prediction, label, reduction='none', use_softmax=False
        )
        unused_avg_loss = paddle.mean(ce)
        return prediction
class LinerNetWithUselessInput(paddle.nn.Layer):
    """Accepts a label input that forward never touches (useless-input case)."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear = Linear(in_size, out_size)

    @to_static(
        input_spec=[
            InputSpec(shape=[None, 784], dtype='float32', name="image"),
            InputSpec(shape=[None, 1], dtype='int64', name="label"),
        ]
    )
    def forward(self, x, label):
        # `label` is deliberately unused
        return self._linear(x)
class LinearNetReturnLoss(paddle.nn.Layer):
    """Applies the same linear layer twice and returns (output, mean-loss)."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear = Linear(in_size, out_size)

    @to_static
    def forward(self, x):
        hidden = self._linear(x)
        out = self._linear(hidden)
        return out, paddle.mean(out)
class LinearNetMultiInput(paddle.nn.Layer):
    """Two parallel linear branches over two inputs (list-style input_spec)."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear1 = Linear(in_size, out_size)
        self._linear2 = Linear(in_size, out_size)

    @to_static(
        input_spec=[
            InputSpec([None, 8], dtype='float32'),
            InputSpec([None, 8], dtype='float32'),
        ]
    )
    def forward(self, x, y):
        out_a = self._linear1(x)
        out_b = self._linear2(y)
        return out_a, out_b, paddle.mean(out_a + out_b)
class LinearNetMultiInput1(paddle.nn.Layer):
    """Same as LinearNetMultiInput but with a tuple-style input_spec."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear1 = Linear(in_size, out_size)
        self._linear2 = Linear(in_size, out_size)

    @to_static(
        input_spec=(
            InputSpec([None, 8], dtype='float32'),
            InputSpec([None, 8], dtype='float32'),
        )
    )
    def forward(self, x, y):
        out_a = self._linear1(x)
        out_b = self._linear2(y)
        return out_a, out_b, paddle.mean(out_a + out_b)
class MultiLoadingLinearNet(paddle.nn.Layer):
    """Composes one trainable linear layer with two copies of a saved model."""

    def __init__(self, size, model_path):
        super().__init__()
        self._linear = Linear(size, size)
        self._load_linear1 = paddle.jit.load(model_path)
        self._load_linear2 = paddle.jit.load(model_path)

    @to_static
    def forward(self, x):
        hidden = self._linear(x)
        hidden = self._load_linear1(hidden)
        hidden = self._load_linear2(hidden)
        return self._linear(hidden)
class LinearNetReturnHidden(paddle.nn.Layer):
    """Two stacked linear layers; returns the hidden state plus the mean
    of the final projection as a loss."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear_1 = Linear(in_size, out_size)
        self._linear_2 = Linear(in_size, out_size)

    @to_static
    def forward(self, x):
        hidden = self._linear_1(x)
        projected = self._linear_2(hidden)
        return hidden, paddle.mean(projected)
class LinearNetWithNestOut(paddle.nn.Layer):
    """Returns a deliberately nested output structure to exercise flattening."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear_1 = Linear(in_size, out_size)
        self._linear_2 = Linear(in_size, out_size)

    @to_static
    def forward(self, x):
        first = self._linear_1(x)
        second = self._linear_2(first)
        combined = first + second
        # nested list/tuple on purpose
        return first, [(second, paddle.mean(combined)), combined]
class LinearNetWithDictInput(paddle.nn.Layer):
    """Forward takes dict-wrapped inputs; used for dict input_spec tests."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear = Linear(in_size, out_size)

    @paddle.jit.to_static(
        input_spec=[
            {'img': InputSpec(shape=[None, 8], dtype='float32', name='img')},
            {'label': InputSpec(shape=[None, 1], dtype='int64', name='label')},
        ]
    )
    def forward(self, img, label):
        logits = self._linear(img['img'])
        # the loss is computed but deliberately not returned, so the output
        # is not pruned away on save
        unused_loss = paddle.nn.functional.cross_entropy(logits, label['label'])
        return logits
class LinearNetWithDictInputNoPrune(paddle.nn.Layer):
    """Dict input where both keys are consumed, so nothing can be pruned."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear = Linear(in_size, out_size)

    def forward(self, img):
        summed = img['img'] + img['img2']
        return self._linear(summed)
class EmptyLayer(paddle.nn.Layer):
    """Parameter-free identity layer; forward simply echoes its input."""

    def __init__(self):
        super().__init__()

    @paddle.jit.to_static
    def forward(self, x):
        return x
class NoParamLayer(paddle.nn.Layer):
    """Parameter-free layer whose forward just adds its two inputs."""

    def __init__(self):
        super().__init__()

    @paddle.jit.to_static
    def forward(self, x, y):
        total = x + y
        return total
class LinearNetWithMultiStaticFunc(paddle.nn.Layer):
    """Layer exposing several @to_static entry points for save/load tests."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self._linear_0 = Linear(in_size, out_size)
        self._linear_1 = Linear(in_size, out_size)
        self._scale = paddle.to_tensor([9.9])

    @paddle.jit.to_static
    def forward(self, x):
        return self._linear_0(x)

    @paddle.jit.to_static
    def forward_no_param(self, x):
        return x

    @paddle.jit.to_static
    def forward_general(self, x):
        base = self._linear_0(x)
        scaled = self._linear_1(x) * self._scale
        return base + scaled
def train(layer, input_size=784, label_size=1):
    """Run one SGD pass over random data and return ([last_input], layer,
    last_avg_loss) so callers can reuse the final batch as an example input."""
    # create optimizer
    optimizer = fluid.optimizer.SGDOptimizer(
        learning_rate=0.01, parameter_list=layer.parameters()
    )
    # create data loader
    loader = fluid.io.DataLoader.from_generator(capacity=5)
    loader.set_batch_generator(random_batch_reader(input_size, label_size))
    # train
    for img, label in loader():
        label.stop_gradient = True
        prediction = layer(img)
        ce = paddle.nn.functional.cross_entropy(
            prediction, label, reduction='none', use_softmax=False
        )
        avg_loss = paddle.mean(ce)
        avg_loss.backward()
        optimizer.minimize(avg_loss)
        layer.clear_gradients()
    return [img], layer, avg_loss
def train_with_label(layer, input_size=784, label_size=1):
    """Train a layer whose forward computes its own loss; return last output."""
    # create optimizer
    optimizer = fluid.optimizer.SGDOptimizer(
        learning_rate=0.01, parameter_list=layer.parameters()
    )
    # create data loader
    loader = fluid.io.DataLoader.from_generator(capacity=5)
    loader.set_batch_generator(random_batch_reader(input_size, label_size))
    # train
    for img, label in loader():
        label.stop_gradient = True
        out, avg_loss = layer(img, label)
        avg_loss.backward()
        optimizer.minimize(avg_loss)
        layer.clear_gradients()
    return out
class TestJitSaveLoad(unittest.TestCase):
    """End-to-end paddle.jit.save/load round-trip tests for LinearNet:
    inference parity, state-dict loading, and fine-tuning after load."""
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_load/model"
        )
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
    def tearDown(self):
        self.temp_dir.cleanup()
    def train_and_save_model(self, model_path=None):
        layer = LinearNet(784, 1)
        example_inputs, layer, _ = train(layer)
        final_model_path = model_path if model_path else self.model_path
        orig_input_types = [type(x) for x in example_inputs]
        paddle.jit.save(
            layer=layer, path=final_model_path, input_spec=example_inputs
        )
        # saving must not mutate the caller-provided example inputs
        new_input_types = [type(x) for x in example_inputs]
        self.assertEqual(orig_input_types, new_input_types)
        return layer
    def test_save_load(self):
        # train and save model
        train_layer = self.train_and_save_model()
        # load model
        loaded_layer = paddle.jit.load(self.model_path)
        self.load_and_inference(train_layer, loaded_layer)
        self.load_dygraph_state_dict(train_layer)
        self.load_and_finetune(train_layer, loaded_layer)
    def load_and_inference(self, train_layer, infer_layer):
        train_layer.eval()
        infer_layer.eval()
        # inference & compare
        x = fluid.dygraph.to_variable(
            np.random.random((1, 784)).astype('float32')
        )
        np.testing.assert_array_equal(
            train_layer(x).numpy(), infer_layer(x).numpy()
        )
    def load_and_finetune(self, train_layer, load_train_layer):
        train_layer.train()
        load_train_layer.train()
        # train & compare
        img0, _, train_loss = train(train_layer)
        img1, _, load_train_loss = train(load_train_layer)
        np.testing.assert_array_equal(
            train_loss.numpy(), load_train_loss.numpy()
        )
    def load_dygraph_state_dict(self, train_layer):
        train_layer.eval()
        # construct new model
        new_layer = LinearNet(784, 1)
        orig_state_dict = new_layer.state_dict()
        load_state_dict = paddle.load(self.model_path)
        for structured_name in orig_state_dict:
            self.assertTrue(structured_name in load_state_dict)
        new_layer.set_state_dict(load_state_dict)
        new_layer.eval()
        # inference & compare
        x = fluid.dygraph.to_variable(
            np.random.random((1, 784)).astype('float32')
        )
        np.testing.assert_array_equal(
            train_layer(x).numpy(), new_layer(x).numpy()
        )
    def test_load_dygraph_no_path(self):
        # loading from a non-existent path must raise ValueError
        model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_load.no_path/model_path"
        )
        with self.assertRaises(ValueError):
            model_dict = paddle.load(model_path)
    def test_jit_load_no_path(self):
        # jit.load from a non-existent path must raise ValueError
        path = os.path.join(
            self.temp_dir.name, "test_jit_save_load.no_path/model_path"
        )
        with self.assertRaises(ValueError):
            loaded_layer = paddle.jit.load(path)
class TestSaveLoadWithNestOut(unittest.TestCase):
    """Checks that nested (list/tuple) forward outputs survive save/load."""
    def setUp(self):
        # enable dygraph mode
        fluid.enable_dygraph()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_nest_output(self):
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        net = LinearNetWithNestOut(8, 8)
        # flatten the nested dygraph outputs for element-wise comparison
        dy_outs = paddle.utils.flatten(net(x))
        net = to_static(net, input_spec=[InputSpec([None, 8], name='x')])
        model_path = os.path.join(self.temp_dir.name, "net_with_nest_out/model")
        paddle.jit.save(net, model_path)
        load_net = paddle.jit.load(model_path)
        load_outs = paddle.utils.flatten(load_net(x))
        # y, z, loss, out -> 4 flattened tensors expected
        self.assertTrue(len(dy_outs) == 4)
        for dy_out, load_out in zip(dy_outs, load_outs):
            np.testing.assert_allclose(
                dy_out.numpy(), load_out.numpy(), rtol=1e-05
            )
class TestSaveLoadWithDictInput(unittest.TestCase):
    """Saving a net with dict inputs while pruning the unused label input."""
    def test_dict_input(self):
        # NOTE: This net cannot be executed, it is just
        # a special case for exporting models in model validation
        # We DO NOT recommend this writing way of Layer
        net = LinearNetWithDictInput(8, 8)
        # net.forward.concrete_program.inputs:
        # (<__main__.LinearNetWithDictInput object at 0x7f2655298a98>,
        # {'img': var img : fluid.VarType.LOD_TENSOR.shape(-1, 8).astype(VarType.FP32)},
        # {'label': var label : fluid.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)})
        self.assertEqual(len(net.forward.concrete_program.inputs), 3)
        temp_dir = tempfile.TemporaryDirectory()
        path = os.path.join(
            temp_dir.name, "test_jit_save_load_with_dict_input/model"
        )
        # prune inputs
        paddle.jit.save(
            layer=net,
            path=path,
            input_spec=[
                {'img': InputSpec(shape=[None, 8], dtype='float32', name='img')}
            ],
        )
        img = paddle.randn(shape=[4, 8], dtype='float32')
        loaded_net = paddle.jit.load(path)
        loaded_out = loaded_net(img)
        # loaded_net._input_spec():
        # [InputSpec(shape=(-1, 8), dtype=VarType.FP32, name=img)]
        self.assertEqual(len(loaded_net._input_spec()), 1)
        temp_dir.cleanup()
class TestSaveLoadWithDictInputNoPrune(unittest.TestCase):
    """Saving with a dict input where both keys are consumed (no pruning)."""
    def test_dict_input(self):
        net = LinearNetWithDictInputNoPrune(8, 8)
        temp_dir = tempfile.TemporaryDirectory()
        path = os.path.join(
            temp_dir.name, "test_jit_save_load_with_dict_input_no_prune/model"
        )
        # prune inputs
        paddle.jit.save(
            layer=net,
            path=path,
            input_spec=[
                {
                    'img': InputSpec(
                        shape=[None, 8], dtype='float32', name='img'
                    ),
                    'img2': InputSpec(
                        shape=[None, 8], dtype='float32', name='img2'
                    ),
                }
            ],
        )
        img = paddle.randn(shape=[4, 8], dtype='float32')
        img2 = paddle.randn(shape=[4, 8], dtype='float32')
        loaded_net = paddle.jit.load(path)
        loaded_out = loaded_net(img, img2)
        # both dict keys must survive as inputs of the loaded program
        self.assertEqual(len(loaded_net._input_spec()), 2)
        temp_dir.cleanup()
class TestSaveLoadWithInputSpec(unittest.TestCase):
def setUp(self):
# enable dygraph mode
fluid.enable_dygraph()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_with_input_spec(self):
net = LinearNetReturnLoss(8, 8)
# set x.shape = [None, 8]
net.forward = to_static(
net.forward, input_spec=[InputSpec([None, 8], name='x')]
)
model_path = os.path.join(
self.temp_dir.name, "input_spec.output_spec/model"
)
# check inputs and outputs
self.assertTrue(len(net.forward.inputs) == 1)
input_x = net.forward.inputs[0]
self.assertTrue(input_x.shape == (-1, 8))
self.assertTrue(input_x.name == 'x')
# 1. prune loss
output_spec = net.forward.outputs[:1]
paddle.jit.save(net, model_path, output_spec=output_spec)
# 2. load to infer
infer_layer = paddle.jit.load(model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
pred = infer_layer(x)
def test_multi_in_out(self):
net = LinearNetMultiInput(8, 8)
model_path = os.path.join(
self.temp_dir.name, "multi_inout.output_spec1/model"
)
# 1. check inputs and outputs
self.assertTrue(len(net.forward.inputs) == 2)
input_x = net.forward.inputs[0]
input_y = net.forward.inputs[1]
self.assertTrue(input_x.shape == (-1, 8))
self.assertTrue(input_y.shape == (-1, 8))
# 2. prune loss
output_spec = net.forward.outputs[:2]
paddle.jit.save(net, model_path, output_spec=output_spec)
# 3. load to infer
infer_layer = paddle.jit.load(model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
y = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
# 4. predict
pred_x, pred_y = infer_layer(x, y)
# 1. prune y and loss
model_path = os.path.join(
self.temp_dir.name, "multi_inout.output_spec2/model"
)
output_spec = net.forward.outputs[:1]
paddle.jit.save(net, model_path, [input_x], output_spec=output_spec)
# 2. load again
infer_layer2 = paddle.jit.load(model_path)
# 3. predict
pred_xx = infer_layer2(x)
# 4. assert pred_x == pred_xx
np.testing.assert_allclose(pred_x.numpy(), pred_xx.numpy(), rtol=1e-05)
    def test_multi_in_out1(self):
        """Same scenario as test_multi_in_out, but the pruned save passes
        input_spec as a tuple instead of a list."""
        net = LinearNetMultiInput1(8, 8)
        model_path = os.path.join(
            self.temp_dir.name, "multi_inout1.output_spec1/model"
        )
        # 1. check inputs and outputs
        self.assertTrue(len(net.forward.inputs) == 2)
        input_x = net.forward.inputs[0]
        input_y = net.forward.inputs[1]
        self.assertTrue(input_x.shape == (-1, 8))
        self.assertTrue(input_y.shape == (-1, 8))
        # 2. prune loss
        output_spec = net.forward.outputs[:2]
        paddle.jit.save(net, model_path, output_spec=output_spec)
        # 3. load to infer
        infer_layer = paddle.jit.load(model_path)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        y = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        # 4. predict
        pred_x, pred_y = infer_layer(x, y)
        # 1. prune y and loss
        model_path = os.path.join(
            self.temp_dir.name, "multi_inout1.output_spec2/model"
        )
        output_spec = net.forward.outputs[:1]
        paddle.jit.save(net, model_path, (input_x,), output_spec=output_spec)
        # 2. load again
        infer_layer2 = paddle.jit.load(model_path)
        # 3. predict
        pred_xx = infer_layer2(x)
        # 4. assert pred_x == pred_xx
        np.testing.assert_allclose(pred_x.numpy(), pred_xx.numpy(), rtol=1e-05)
class TestJitSaveLoadConfig(unittest.TestCase):
    """Covers paddle.jit.save/load configuration arguments: output_spec
    pruning plus rejection of empty/unsupported filename options."""
    def setUp(self):
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_output_spec(self):
        """Train briefly, save keeping only the first output, and check the
        reloaded layer reproduces the original layer's first output."""
        train_layer = LinearNetReturnLoss(8, 8)
        adam = fluid.optimizer.AdamOptimizer(
            learning_rate=0.1, parameter_list=train_layer.parameters()
        )
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        for i in range(10):
            out, loss = train_layer(x)
            loss.backward()
            adam.minimize(loss)
            train_layer.clear_gradients()
        model_path = os.path.join(
            self.temp_dir.name, "save_load_config.output_spec"
        )
        # Only the prediction survives the save; the loss output is pruned.
        output_spec = [out]
        paddle.jit.save(
            layer=train_layer,
            path=model_path,
            input_spec=[x],
            output_spec=output_spec,
        )
        train_layer.eval()
        infer_layer = paddle.jit.load(model_path)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        np.testing.assert_array_equal(
            train_layer(x)[0].numpy(), infer_layer(x).numpy()
        )
    def test_save_no_support_config_error(self):
        """save must reject an empty model_filename."""
        layer = LinearNet(784, 1)
        path = os.path.join(self.temp_dir.name, "no_support_config_test")
        with self.assertRaises(ValueError):
            paddle.jit.save(layer=layer, path=path, model_filename="")
    def test_load_empty_model_filename_error(self):
        """load must reject an empty model_filename."""
        path = os.path.join(self.temp_dir.name, "error_model_filename_test")
        with self.assertRaises(ValueError):
            paddle.jit.load(path, model_filename="")
    def test_load_empty_params_filename_error(self):
        """load must reject an empty params_filename."""
        path = os.path.join(self.temp_dir.name, "error_params_filename_test")
        with self.assertRaises(ValueError):
            paddle.jit.load(path, params_filename="")
    def test_load_with_no_support_config(self):
        """load must reject unsupported keyword arguments (separate_params)."""
        path = os.path.join(self.temp_dir.name, "no_support_config_test")
        with self.assertRaises(ValueError):
            paddle.jit.load(path, separate_params=True)
class TestJitMultipleLoading(unittest.TestCase):
    """Loading the same saved model into one layer several times must
    produce uniquely named parameters (no name collisions)."""
    def setUp(self):
        self.linear_size = 4
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "jit_multi_load/model"
        )
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
        # train and save base model
        self.train_and_save_orig_model()
    def tearDown(self):
        self.temp_dir.cleanup()
    def train_and_save_orig_model(self):
        """Train a small LinearNet for one epoch and save it as the base model."""
        layer = LinearNet(self.linear_size, self.linear_size)
        example_inputs, layer, _ = train(layer, self.linear_size, 1)
        paddle.jit.save(
            layer=layer, path=self.model_path, input_spec=example_inputs
        )
    def test_load_model_retransform_inference(self):
        """All parameters across the twice-loaded sublayers must be distinct."""
        multi_loaded_layer = MultiLoadingLinearNet(
            self.linear_size, self.model_path
        )
        state_dict = multi_loaded_layer.state_dict()
        name_set = set()
        for _, var in state_dict.items():
            self.assertTrue(var.name not in name_set)
            name_set.add(var.name)
class TestJitPruneModelAndLoad(unittest.TestCase):
    """Save a model with a pruned output, verify it reloads correctly, and
    verify loading fails when the extra-var-info sidecar file is corrupted."""
    def setUp(self):
        self.linear_size = 4
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "jit_prune_model_and_load/model"
        )
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
    def tearDown(self):
        self.temp_dir.cleanup()
    def train_and_save(self):
        """Train LinearNetReturnHidden briefly, save it keeping only the
        hidden output, and return the trained layer for later comparison."""
        train_layer = LinearNetReturnHidden(8, 8)
        adam = fluid.optimizer.AdamOptimizer(
            learning_rate=0.1, parameter_list=train_layer.parameters()
        )
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        for i in range(10):
            hidden, loss = train_layer(x)
            loss.backward()
            adam.minimize(loss)
            train_layer.clear_gradients()
        output_spec = [hidden]
        paddle.jit.save(
            layer=train_layer,
            path=self.model_path,
            input_spec=[x],
            output_spec=output_spec,
        )
        return train_layer
    def test_load_pruned_model(self):
        """The reloaded pruned model must reproduce the hidden output."""
        train_layer = self.train_and_save()
        train_layer.eval()
        infer_layer = paddle.jit.load(self.model_path)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32')
        )
        np.testing.assert_array_equal(
            train_layer(x)[0].numpy(), infer_layer(x).numpy()
        )
    def test_load_var_not_in_extra_var_info(self):
        """Loading must raise RuntimeError when the persisted extra var info
        no longer describes the model's variables."""
        self.train_and_save()
        # chage extra var info
        var_info_path = self.model_path + INFER_PARAMS_INFO_SUFFIX
        with open(var_info_path, 'rb') as f:
            extra_var_info = pickle.load(f)
            extra_var_info.clear()
        with open(var_info_path, 'wb') as f:
            pickle.dump(extra_var_info, f, protocol=2)
        with self.assertRaises(RuntimeError):
            paddle.jit.load(self.model_path)
class TestJitSaveMultiCases(unittest.TestCase):
    """Matrix of paddle.jit.save scenarios: with/without to_static, trained
    or untrained, with/without input/output pruning, plus the InputSpec
    naming rules (warnings and errors)."""
    def setUp(self):
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def verify_inference_correctness(
        self, layer, model_path, with_label_and_loss=False, with_label=False
    ):
        """Reload model_path and assert its prediction matches layer's.

        with_label_and_loss: layer(x, y) returns (pred, loss).
        with_label: layer(x, y) returns pred only.
        Otherwise layer(x) returns pred directly.
        """
        layer.eval()
        loaded_layer = paddle.jit.load(model_path)
        loaded_layer.eval()
        # inference & compare
        x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
        if with_label_and_loss:
            y = paddle.to_tensor(np.random.random((1, 1)).astype('int64'))
            pred, _ = layer(x, y)
            pred = pred.numpy()
        elif with_label:
            y = paddle.to_tensor(np.random.random((1, 1)).astype('int64'))
            pred = layer(x, y)
            pred = pred.numpy()
        else:
            pred = layer(x).numpy()
        loaded_pred = loaded_layer(x).numpy()
        np.testing.assert_array_equal(
            pred,
            loaded_pred,
            err_msg='Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'.format(
                pred, loaded_pred
            ),
        )
    def test_no_prune_to_static_after_train(self):
        """to_static layer, trained, saved without pruning."""
        layer = LinearNet(784, 1)
        train(layer)
        model_path = os.path.join(
            self.temp_dir.name, "test_no_prune_to_static_after_train/model"
        )
        paddle.jit.save(layer, model_path)
        self.verify_inference_correctness(layer, model_path)
    def test_no_prune_to_static_no_train(self):
        """to_static layer with declared InputSpec, saved untrained."""
        layer = LinearNetWithInputSpec(784, 1)
        model_path = os.path.join(
            self.temp_dir.name, "test_no_prune_to_static_no_train/model"
        )
        paddle.jit.save(layer, model_path)
        self.verify_inference_correctness(layer, model_path)
    def test_no_prune_no_to_static_after_train(self):
        """Plain (non-declarative) layer: input_spec must be given to save."""
        layer = LinearNetNotDeclarative(784, 1)
        train(layer)
        model_path = os.path.join(
            self.temp_dir.name, "test_no_prune_no_to_static_after_train/model"
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
        )
        self.verify_inference_correctness(layer, model_path)
    def test_no_prune_no_to_static_after_train_with_examples(self):
        """Plain layer saved with concrete example tensors as input_spec."""
        layer = LinearNetNotDeclarative(784, 1)
        example_inputs, _, _ = train(layer)
        model_path = os.path.join(
            self.temp_dir.name,
            "test_no_prune_no_to_static_after_train_with_examples/model",
        )
        paddle.jit.save(layer=layer, path=model_path, input_spec=example_inputs)
        self.verify_inference_correctness(layer, model_path)
    def test_no_prune_no_to_static_no_train(self):
        """Plain layer saved untrained with an explicit InputSpec."""
        layer = LinearNetNotDeclarative(784, 1)
        model_path = os.path.join(
            self.temp_dir.name, "test_no_prune_no_to_static_no_train/model"
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
        )
        self.verify_inference_correctness(layer, model_path)
    def test_prune_to_static_after_train(self):
        """Trained layer taking (image, label): save with the label input and
        loss output pruned via named InputSpec + output_spec."""
        layer = LinerNetWithLabel(784, 1)
        out = train_with_label(layer)
        model_path = os.path.join(
            self.temp_dir.name, "test_prune_to_static_after_train/model"
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[
                InputSpec(shape=[None, 784], dtype='float32', name="image")
            ],
            output_spec=[out],
        )
        self.verify_inference_correctness(
            layer, model_path, with_label_and_loss=True
        )
    def test_prune_to_static_no_train(self):
        """Untrained pruning: output_spec must come from forward.outputs."""
        layer = LinerNetWithLabel(784, 1)
        model_path = os.path.join(
            self.temp_dir.name, "test_prune_to_static_no_train/model"
        )
        # TODO: no train, cannot get output_spec var here
        # now only can use index
        output_spec = layer.forward.outputs[:1]
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[
                InputSpec(shape=[None, 784], dtype='float32', name="image")
            ],
            output_spec=output_spec,
        )
        self.verify_inference_correctness(
            layer, model_path, with_label_and_loss=True
        )
    def test_prune_input_to_static_no_train(self):
        """Prune an input the forward doesn't need, untrained."""
        layer = LinerNetWithPruneInput(784, 1)
        model_path = os.path.join(
            self.temp_dir.name, "test_prune_input_to_static_no_train/model"
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[
                InputSpec(shape=[None, 784], dtype='float32', name="image")
            ],
        )
        self.verify_inference_correctness(layer, model_path, with_label=True)
    def test_prune_useless_input_to_static_no_train(self):
        """Prune an input that is entirely unused by the forward."""
        layer = LinerNetWithUselessInput(784, 1)
        model_path = os.path.join(
            self.temp_dir.name,
            "test_prune_useless_input_to_static_no_train/model",
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[
                InputSpec(shape=[None, 784], dtype='float32', name="image")
            ],
        )
        self.verify_inference_correctness(layer, model_path, with_label=True)
    def test_no_prune_input_spec_name_warning(self):
        """Renaming inputs without pruning is allowed (warning only)."""
        layer = LinearNetWithInputSpec(784, 1)
        train(layer)
        model_path = os.path.join(
            self.temp_dir.name, "test_no_prune_input_spec_name_warning/model"
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
        )
        paddle.jit.save(
            layer,
            model_path,
            input_spec=[
                InputSpec(shape=[None, 784], dtype='float32', name='feed_input')
            ],
        )
        self.verify_inference_correctness(layer, model_path)
    def test_not_prune_output_spec_name_warning(self):
        """An output_spec tensor not produced by forward only warns when
        nothing is actually pruned."""
        layer = LinearNet(784, 1)
        train(layer)
        model_path = os.path.join(
            self.temp_dir.name, "test_not_prune_output_spec_name_warning/model"
        )
        out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
        paddle.jit.save(layer, model_path, output_spec=[out])
        self.verify_inference_correctness(layer, model_path)
    def test_prune_input_spec_name_error(self):
        """When pruning inputs, unnamed or wrongly named InputSpecs must fail."""
        layer = LinerNetWithLabel(784, 1)
        model_path = os.path.join(
            self.temp_dir.name, "test_prune_input_spec_name_error/model"
        )
        with self.assertRaises(ValueError):
            paddle.jit.save(
                layer,
                model_path,
                input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
            )
        with self.assertRaises(ValueError):
            paddle.jit.save(
                layer,
                model_path,
                input_spec=[
                    InputSpec(
                        shape=[None, 784], dtype='float32', name='feed_input'
                    )
                ],
            )
    def test_prune_output_spec_name_error(self):
        """When pruning outputs, an output_spec tensor that forward never
        produced must fail."""
        layer = LinerNetWithLabel(784, 1)
        train_with_label(layer)
        model_path = os.path.join(
            self.temp_dir.name, "test_prune_to_static_after_train/model"
        )
        out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
        with self.assertRaises(ValueError):
            paddle.jit.save(
                layer,
                model_path,
                input_spec=[
                    InputSpec(shape=[None, 784], dtype='float32', name="image")
                ],
                output_spec=[out],
            )
class TestJitSaveLoadEmptyLayer(unittest.TestCase):
    """Round-trip an EmptyLayer through paddle.jit.save/load and verify the
    reloaded layer produces the identical output."""

    def setUp(self):
        # Saved files go into a throw-away directory; run in dygraph mode.
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "jit_save_load_empty_layer/model"
        )
        paddle.disable_static()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_save_load_empty_layer(self):
        net = EmptyLayer()
        inp = paddle.to_tensor(np.random.random(10).astype('float32'))
        expected = net(inp)
        # Save the layer, reload it, and compare outputs element-wise.
        paddle.jit.save(net, self.model_path)
        reloaded = paddle.jit.load(self.model_path)
        actual = reloaded(inp)
        np.testing.assert_array_equal(expected, actual)
class TestJitSaveLoadNoParamLayer(unittest.TestCase):
    """Round-trip a parameter-free two-input layer through
    paddle.jit.save/load and verify the outputs match."""

    def setUp(self):
        # Saved files go into a throw-away directory; run in dygraph mode.
        self.temp_dir = tempfile.TemporaryDirectory()
        self.model_path = os.path.join(
            self.temp_dir.name, "jit_save_load_no_param_layer/model"
        )
        paddle.disable_static()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_save_load_no_param_layer(self):
        net = NoParamLayer()
        lhs = paddle.to_tensor(np.random.random(5).astype('float32'))
        rhs = paddle.to_tensor(np.random.random(5).astype('float32'))
        expected = net(lhs, rhs)
        # Save, reload, and verify the reloaded layer computes the same value.
        paddle.jit.save(net, self.model_path)
        reloaded = paddle.jit.load(self.model_path)
        actual = reloaded(lhs, rhs)
        np.testing.assert_array_equal(expected, actual)
class TestJitSaveLoadMultiMethods(unittest.TestCase):
    """Saving/loading layers that expose several to_static methods, and the
    related InputSpec restriction and name parsing behaviour."""
    def setUp(self):
        # enable dygraph mode
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_jit_save_load_inference(self):
        """Every forward* method must survive the save/load round trip."""
        model_path_inference = os.path.join(
            self.temp_dir.name, "jit_save_load_multi_methods/model"
        )
        IMAGE_SIZE = 224
        layer = LinearNetWithMultiStaticFunc(IMAGE_SIZE, 10)
        inps = paddle.randn([1, IMAGE_SIZE])
        result_origin = {}
        for func in dir(layer):
            if func.startswith('forward'):
                result_origin[func] = getattr(layer, func, None)(inps)
        paddle.jit.save(layer, model_path_inference)
        load_net = paddle.jit.load(model_path_inference)
        for func, result in result_origin.items():
            self.assertTrue(
                float(
                    (result - getattr(load_net, func, None)(inps)).abs().max()
                )
                < 1e-5
            )
    def test_jit_save_load_multi_methods_inputspec(self):
        """Passing input_spec when the layer has multiple static methods
        must be rejected."""
        model_path = os.path.join(
            self.temp_dir.name, 'jit_save_load_multi_methods/model'
        )
        layer = LinearNetWithMultiStaticFunc(784, 1)
        with self.assertRaises(ValueError):
            paddle.jit.save(
                layer, model_path, input_spec=[InputSpec(shape=[None, 784])]
            )
    def test_parse_name(self):
        """Loading "model" must not pick up attributes from the sibling
        "model_v2" save in the same directory."""
        model_path_inference = os.path.join(
            self.temp_dir.name, "jit_save_load_parse_name/model"
        )
        IMAGE_SIZE = 224
        layer = LinearNet(IMAGE_SIZE, 1)
        inps = paddle.randn([1, IMAGE_SIZE])
        layer(inps)
        paddle.jit.save(layer, model_path_inference)
        paddle.jit.save(layer, model_path_inference + '_v2')
        load_net = paddle.jit.load(model_path_inference)
        self.assertFalse(hasattr(load_net, 'v2'))
class LayerSaved(paddle.nn.Layer):
    """Layer with data-dependent control flow (so the saved program has
    multiple blocks); used as the base model in the finetune-load tests."""
    def __init__(self, in_size, out_size):
        super().__init__()
        self.hidden = 100
        self._linear_0 = Linear(in_size, self.hidden)
        self._linear_1_0 = Linear(self.hidden, self.hidden)
        self._linear_1_1 = Linear(self.hidden, self.hidden)
        self._linear_2 = Linear(self.hidden, out_size)
        self._scale = paddle.to_tensor([9.9])
    @paddle.jit.to_static
    def forward(self, x):
        y = self._linear_0(x)
        # Multiple blocks
        # Branch on the runtime batch size so tracing captures both paths.
        if paddle.shape(x)[0] == 1:
            y = self._linear_1_0(y)
        else:
            y += self._linear_1_1(y + self._scale)
        return self._linear_2(y)
class Net(paddle.nn.Layer):
    """Layer exposing several to_static methods plus to_static(property=True)
    members (float/int/str/list values saved alongside the model)."""
    def __init__(self):
        super().__init__()
        self.fc1 = paddle.nn.Linear(4, 4)
        self.fc2 = paddle.nn.Linear(4, 4)
        self.bias = 0.4
        self.flag = paddle.ones([2], dtype="int32")
    @paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
    def log_softmax(self, input):
        return paddle.nn.functional.log_softmax(input, axis=-1)
    @paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
    def forward(self, x):
        out = self.fc1(x)
        out = paddle.nn.functional.relu(out)
        out = paddle.mean(out)
        return out
    @paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
    def infer(self, input):
        out = self.fc2(input)
        out = out + self.bias
        out = paddle.mean(out)
        return out
    # For extra Python float
    @paddle.jit.to_static(property=True)
    def fbias(self):
        return self.bias + 1
    @paddle.jit.to_static(property=True)
    def down_sampling(self):
        return 4
    @paddle.jit.to_static(property=True)
    def fstr(self):
        return "save str property"
    @paddle.jit.to_static(property=True)
    def ints(self):
        return [10, 20]
    @paddle.jit.to_static(property=True)
    def floats(self):
        return [1.1, 2.2]
    @paddle.jit.to_static(property=True)
    def strs(self):
        return ["hello", "world"]
class NetTensor(paddle.nn.Layer):
    """Like Net, but with a boolean to_static(property=True) member; used by
    the combine-params property save test."""
    def __init__(self):
        super().__init__()
        self.fc1 = paddle.nn.Linear(4, 4)
        self.fc2 = paddle.nn.Linear(4, 4)
        self.bias = 0.4
        self.flag = paddle.ones([2], dtype="int32")
    @paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
    def forward(self, x):
        out = self.fc1(x)
        out = paddle.nn.functional.relu(out)
        out = paddle.mean(out)
        return out
    @paddle.jit.to_static(property=True)
    def fflag(self):
        return True
class TestJitSaveCombineProperty(unittest.TestCase):
    """Smoke tests: saving layers that declare to_static(property=True)
    values with combine_params=True must succeed."""
    def setUp(self):
        # enable dygraph mode
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_jit_save_combine_property(self):
        """Save Net (scalar/list/str properties) with combined params."""
        model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_combine/model"
        )
        # Use new namespace
        with unique_name.guard():
            net = Net()
            # save
            paddle.jit.save(net, model_path, combine_params=True)
    def test_jit_save_tensor_property(self):
        """Save NetTensor (boolean property) with combined params."""
        model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_combine/model"
        )
        # Use new namespace
        with unique_name.guard():
            net = NetTensor()
            paddle.jit.save(net, model_path, combine_params=True)
class LayerLoadFinetune(paddle.nn.Layer):
    """Layer that embeds the same saved model loaded twice (plus duplicate
    sublayer names) to stress name handling during finetune + re-save."""
    def __init__(self, in_size, out_size, load_path):
        super().__init__()
        # Test duplicate name
        self._linear_0 = Linear(in_size, in_size)
        self._linear_1_0 = Linear(out_size, in_size)
        self._linear_1_1 = Linear(out_size, in_size)
        self._linear_2 = Linear(out_size, out_size)
        self._scale = paddle.to_tensor([9.9])
        # Load multiple times
        self._load_l1 = paddle.jit.load(load_path)
        self._load_l2 = paddle.jit.load(load_path)
    @paddle.jit.to_static
    def forward(self, x):
        y = self._linear_0(x)
        y = self._load_l1(y)
        # Multiple blocks
        if paddle.shape(x)[0] == 1:
            y = self._linear_1_0(y)
            y = self._load_l1(y)
        else:
            y += self._linear_1_1(x + self._scale)
            y = self._load_l2(y)
            y = self._linear_1_0(y)
            y = self._load_l1(y)
        y = self._linear_1_0(y)
        # Use the same layer multiple times.
        y = self._load_l1(y)
        return y
class TestJitSaveLoadSaveWithoutRunning(unittest.TestCase):
    """A loaded TranslatedLayer must be re-savable without ever having been
    run, and the twice-saved model must still match the original."""
    def setUp(self):
        # enable dygraph mode
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_save_load_finetune_load(self):
        """save -> load -> save (without running) -> load -> compare."""
        model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_load_save_without_running/model"
        )
        IMAGE_SIZE = 224
        inps0 = paddle.randn([1, IMAGE_SIZE])
        inps1 = paddle.randn([2, IMAGE_SIZE])
        # Use new namespace
        with unique_name.guard():
            layer_save = LayerSaved(IMAGE_SIZE, IMAGE_SIZE)
        # save
        paddle.jit.save(
            layer_save,
            model_path,
            input_spec=[
                paddle.static.InputSpec(
                    shape=[None, IMAGE_SIZE], dtype='float32'
                )
            ],
        )
        result_00 = layer_save(inps0)
        result_01 = layer_save(inps1)
        # load and save without running
        with unique_name.guard():
            layer_load = paddle.jit.load(model_path)
            paddle.jit.save(
                layer_load,
                model_path,
                input_spec=[
                    paddle.static.InputSpec(
                        shape=[None, IMAGE_SIZE], dtype='float32'
                    )
                ],
            )
        # reload
        layer_reload = paddle.jit.load(model_path)
        result_10 = layer_reload(inps0)
        result_11 = layer_reload(inps1)
        self.assertTrue(float((result_00 - result_10).abs().max()) < 1e-5)
        self.assertTrue(float((result_01 - result_11).abs().max()) < 1e-5)
class TestJitSaveLoadFinetuneLoad(unittest.TestCase):
    """Load a saved model into a finetune layer, train, re-save, reload,
    and check the reloaded model matches the finetuned one."""
    def setUp(self):
        # enable dygraph mode
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_save_load_finetune_load(self):
        """save base -> embed via jit.load -> train -> save -> load -> compare."""
        model_path = os.path.join(
            self.temp_dir.name, "test_jit_save_load_finetune_load/model"
        )
        IMAGE_SIZE = 224
        inps0 = paddle.randn([1, IMAGE_SIZE])
        inps1 = paddle.randn([2, IMAGE_SIZE])
        # Use new namespace
        with unique_name.guard():
            layer_save = LayerSaved(IMAGE_SIZE, IMAGE_SIZE)
            layer_save(inps0)
            # save
            paddle.jit.save(layer_save, model_path)
        # load
        with unique_name.guard():
            layer_load = LayerLoadFinetune(IMAGE_SIZE, IMAGE_SIZE, model_path)
            # train
            train(layer_load, input_size=IMAGE_SIZE)
            result_00 = layer_load(inps0)
            result_01 = layer_load(inps1)
            # save
            paddle.jit.save(layer_load, model_path)
            # load
            layer_finetune = paddle.jit.load(model_path)
            result_10 = layer_finetune(inps0)
            result_11 = layer_finetune(inps1)
        self.assertTrue(float((result_00 - result_10).abs().max()) < 1e-5)
        self.assertTrue(float((result_01 - result_11).abs().max()) < 1e-5)
# NOTE(weixin): When there are multiple test functions in an
# `unittest.TestCase`, functions will affect each other,
# and there is a risk of random failure.
# So divided into three TestCase: TestJitSaveLoadFunctionCase1,
# TestJitSaveLoadFunctionCase2, TestJitSaveLoadFunctionCase3.
class TestJitSaveLoadFunctionCase1(unittest.TestCase):
    """Save/load a bare @to_static-decorated function (no layer)."""
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_jit_save_load_static_function(self):
        """The reloaded function must reproduce the original's output."""
        @paddle.jit.to_static
        def fun(inputs):
            return paddle.tanh(inputs)
        path = os.path.join(
            self.temp_dir.name, 'test_jit_save_load_function_1/func'
        )
        inps = paddle.rand([3, 6])
        origin = fun(inps)
        paddle.jit.save(fun, path)
        load_func = paddle.jit.load(path)
        load_result = load_func(inps)
        self.assertTrue((load_result - origin).abs().max() < 1e-10)
class TestJitSaveLoadFunctionCase2(unittest.TestCase):
    """Save/load a bare function whose to_static decorator carries an
    InputSpec."""
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_jit_save_load_function_input_spec(self):
        """The reloaded function must reproduce the original's output."""
        @paddle.jit.to_static(
            input_spec=[
                InputSpec(shape=[None, 6], dtype='float32', name='x'),
            ]
        )
        def fun(inputs):
            return paddle.nn.functional.relu(inputs)
        path = os.path.join(
            self.temp_dir.name, 'test_jit_save_load_function_2/func'
        )
        inps = paddle.rand([3, 6])
        origin = fun(inps)
        paddle.jit.save(fun, path)
        load_func = paddle.jit.load(path)
        load_result = load_func(inps)
        self.assertTrue((load_result - origin).abs().max() < 1e-10)
class TestJitSaveLoadFunctionCase3(unittest.TestCase):
    """Save/load an undecorated function; input_spec is supplied to
    paddle.jit.save instead of the decorator."""
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_jit_save_load_function_function(self):
        """The reloaded function must reproduce the original's output."""
        def fun(inputs):
            return paddle.tanh(inputs)
        path = os.path.join(
            self.temp_dir.name, 'test_jit_save_load_function_3/func'
        )
        inps = paddle.rand([3, 6])
        origin = fun(inps)
        paddle.jit.save(
            fun,
            path,
            input_spec=[
                InputSpec(shape=[None, 6], dtype='float32', name='x'),
            ],
        )
        load_func = paddle.jit.load(path)
        load_result = load_func(inps)
        self.assertTrue((load_result - origin).abs().max() < 1e-10)
class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase):
    """Save/load a bound method (carrying layer parameters) converted via
    an explicit paddle.jit.to_static(...) call."""
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_jit_save_load_function(self):
        class LinearNet(paddle.nn.Layer):
            def __init__(self):
                super().__init__()
                self._linear = paddle.nn.Linear(5, 6)
            def forward(self, x):
                return paddle.tanh(x)
            def anothor_forward(self, x):
                return self._linear(x)
        layer = LinearNet()
        inps = paddle.rand([3, 5])
        origin = layer.anothor_forward(inps)
        # Convert the non-forward method explicitly, then save just it.
        func = paddle.jit.to_static(
            layer.anothor_forward, [paddle.static.InputSpec(shape=[-1, 5])]
        )
        path = os.path.join(
            self.temp_dir.name,
            'test_jit_save_load_function_with_params_case1/func',
        )
        paddle.jit.save(func, path)
        load_func = paddle.jit.load(path)
        load_result = load_func(inps)
        np.testing.assert_array_equal(load_result.numpy(), origin.numpy())
class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase):
    """Save/load a parameterized method decorated with
    @to_static(input_spec=...); saving happens before the first call."""
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_jit_save_load_function(self):
        class LinearNet(paddle.nn.Layer):
            def __init__(self):
                super().__init__()
                self._linear = paddle.nn.Linear(5, 6)
            def forward(self, x):
                return paddle.tanh(x)
            @paddle.jit.to_static(input_spec=[InputSpec(shape=[-1, 5])])
            def anothor_forward(self, x):
                return self._linear(x)
        layer = LinearNet()
        inps = paddle.rand([3, 5])
        path = os.path.join(
            self.temp_dir.name,
            'test_jit_save_load_function_with_params_case2/func',
        )
        paddle.jit.save(layer.anothor_forward, path)
        origin_result = layer.anothor_forward(inps)
        load_func = paddle.jit.load(path)
        load_result = load_func(inps)
        np.testing.assert_array_equal(
            origin_result.numpy(), load_result.numpy()
        )
class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase):
    """Save/load a parameterized method decorated with plain @to_static
    (no InputSpec); the method is called once before saving."""
    def setUp(self):
        paddle.disable_static()
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_jit_save_load_function(self):
        class LinearNet(paddle.nn.Layer):
            def __init__(self):
                super().__init__()
                self._linear = paddle.nn.Linear(5, 6)
            def forward(self, x):
                return paddle.tanh(x)
            @paddle.jit.to_static
            def anothor_forward(self, x):
                return self._linear(x)
        layer = LinearNet()
        inps = paddle.rand([3, 5])
        origin = layer.anothor_forward(inps)
        path = os.path.join(
            self.temp_dir.name,
            'test_jit_save_load_function_with_params_case3/func',
        )
        paddle.jit.save(layer.anothor_forward, path)
        load_func = paddle.jit.load(path)
        load_result = load_func(inps)
        np.testing.assert_array_equal(load_result.numpy(), origin.numpy())
class TestJitSaveLoadDataParallel(unittest.TestCase):
    """Layers wrapped in paddle.DataParallel must still be savable and
    reload to an equivalent inference model."""
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def verify_inference_correctness(self, layer, path):
        """Reload path and assert its prediction equals layer's."""
        layer.eval()
        loaded_layer = paddle.jit.load(path)
        loaded_layer.eval()
        # inference & compare
        x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
        pred = layer(x).numpy()
        loaded_pred = loaded_layer(x).numpy()
        np.testing.assert_array_equal(
            pred,
            loaded_pred,
            err_msg='Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'.format(
                pred, loaded_pred
            ),
        )
    def test_jit_save_data_parallel_with_inputspec(self):
        """Non-declarative layer under DataParallel, saved with input_spec."""
        layer = LinearNetNotDeclarative(784, 1)
        layer = paddle.DataParallel(layer)
        path = os.path.join(
            self.temp_dir.name, "jit_save_data_parallel_with_inputspec/model"
        )
        paddle.jit.save(
            layer=layer, path=path, input_spec=[InputSpec(shape=[None, 784])]
        )
        self.verify_inference_correctness(layer, path)
    def test_jit_save_data_parallel_with_to_static(self):
        """to_static layer under DataParallel, saved without input_spec."""
        layer = LinearNetWithInputSpec(784, 1)
        layer = paddle.DataParallel(layer)
        path = os.path.join(
            self.temp_dir.name, "jit_save_data_parallel_with_to_static/model"
        )
        paddle.jit.save(layer, path)
        self.verify_inference_correctness(layer, path)
class InputSepcLayer(paddle.nn.Layer):
    '''
    A layer with InputSpec to test InputSpec compatibility
    '''
    # forward is statically declared with named specs (x: float32 [None, 8],
    # y: float64 [None, 1]) and simply echoes its two inputs back.
    @paddle.jit.to_static(
        input_spec=[
            InputSpec(shape=[None, 8], dtype='float32', name='x'),
            InputSpec(shape=[None, 1], dtype='float64', name='y'),
        ]
    )
    def forward(self, x, y):
        return x, y
class TestInputSpecCompatibility(unittest.TestCase):
    """An input_spec passed to paddle.jit.save must be compatible with the
    spec declared on the layer: equal or more specific shapes are accepted,
    while dtype/rank/dimension mismatches raise ValueError."""
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def _assert_input_spec_layer_return(self, expect_layer, test_layer):
        """Run both layers on the same inputs and compare both outputs."""
        input_x = paddle.uniform([8, 8], dtype='float32')
        input_y = paddle.uniform([8, 1], dtype='float64')
        expected_result = expect_layer(input_x, input_y)
        test_result = test_layer(input_x, input_y)
        np.testing.assert_allclose(
            expected_result[0].numpy(), test_result[0].numpy()
        )
        np.testing.assert_allclose(
            expected_result[1].numpy(), test_result[1].numpy()
        )
    def test_jit_save_compatible_input_sepc(self):
        """No spec, an identical spec, and a more-specific spec all succeed."""
        layer = InputSepcLayer()
        save_dir = os.path.join(
            self.temp_dir.name, "jit_save_compatible_input_spec"
        )
        path = save_dir + "/model"
        paddle.jit.save(layer=layer, path=path)
        no_input_spec_layer = paddle.jit.load(path)
        self._assert_input_spec_layer_return(layer, no_input_spec_layer)
        shutil.rmtree(save_dir)
        paddle.jit.save(
            layer=layer,
            path=path,
            input_spec=[
                InputSpec(shape=[None, 8], dtype='float32', name='x'),
                InputSpec(shape=[None, 1], dtype='float64', name='y'),
            ],
        )
        same_input_spec_layer = paddle.jit.load(path)
        self._assert_input_spec_layer_return(layer, same_input_spec_layer)
        shutil.rmtree(save_dir)
        # Fixed batch dim / -1 dims are compatible refinements of the
        # declared [None, 8] / [None, 1] specs.
        paddle.jit.save(
            layer=layer,
            path=path,
            input_spec=[
                InputSpec(shape=[8, 8], dtype='float32'),
                InputSpec(shape=[8, -1], dtype='float64'),
            ],
        )
        compatible_input_spec_layer = paddle.jit.load(path)
        self._assert_input_spec_layer_return(layer, compatible_input_spec_layer)
        shutil.rmtree(save_dir)
    def test_jit_save_incompatible_input_sepc(self):
        """dtype, rank, and dimension mismatches must all raise ValueError."""
        layer = InputSepcLayer()
        save_dir = os.path.join(
            self.temp_dir.name, "jit_save_compatible_input_spec"
        )
        path = save_dir + "/model"
        with self.assertRaises(ValueError):
            # type mismatch
            paddle.jit.save(
                layer=layer,
                path=path,
                input_spec=[
                    InputSpec(shape=[None, 8], dtype='float64'),
                    InputSpec(shape=[None, 1], dtype='float64'),
                ],
            )
        with self.assertRaises(ValueError):
            # shape len mismatch
            paddle.jit.save(
                layer=layer,
                path=path,
                input_spec=[
                    InputSpec(shape=[None, 8, 1], dtype='float32'),
                    InputSpec(shape=[None, 1], dtype='float64'),
                ],
            )
        with self.assertRaises(ValueError):
            # shape mismatch
            paddle.jit.save(
                layer=layer,
                path=path,
                input_spec=[
                    InputSpec(shape=[None, 8], dtype='float32'),
                    InputSpec(shape=[None, 2], dtype='float64'),
                ],
            )
        if os.path.exists(save_dir):
            shutil.rmtree(save_dir)
class NotJitForward(paddle.nn.Layer):
    """Plain layer whose forward is NOT decorated with @to_static; used to
    exercise paddle.jit.save(..., skip_forward=True)."""
    def __init__(self):
        super().__init__()
    def forward(self, x, y):
        return x + y
class TestNotJitForward(unittest.TestCase):
    """Saving with skip_forward=True must write no forward program files and
    leave the result unloadable."""
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_jit_not_save_forward(self):
        layer = NotJitForward()
        save_dir = os.path.join(self.temp_dir.name, "jit_not_save_forward")
        path = save_dir + "/model"
        paddle.jit.save(layer=layer, path=path, skip_forward=True)
        self.assertTrue(not os.path.exists(path + ".pdmodel"))
        # NOTE(review): paddle's params suffix is usually ".pdiparams"; the
        # ".pdparam" name below may never exist regardless — confirm intent.
        self.assertTrue(not os.path.exists(path + ".pdparam"))
        with self.assertRaises(ValueError):
            paddle.jit.load(path=path)
        shutil.rmtree(save_dir)
# Run the whole suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
628a26377a4ac11054ec002268c2916d3883eccf | 601e6891504cc9da063e3ef9993e7b5f142bbe35 | /examples/wifiStationsAndHosts.py | 8e68f22b85c8a2a65ee1968f5506555a2831560b | [] | no_license | caiqiqi/mininet-wifi | b8a13f83e4fbadea20865faecf6719abf8e68437 | 547cf3c01d85b9bfb38b3e9df3b5c52119b5b5e2 | refs/heads/master | 2021-01-20T16:44:34.270734 | 2016-05-16T12:55:56 | 2016-05-16T12:55:56 | 58,878,807 | 0 | 0 | null | 2016-05-15T19:01:01 | 2016-05-15T19:01:01 | null | UTF-8 | Python | false | false | 1,177 | py | #!/usr/bin/python
"""
This example shows how work with wireless and wired media
"""
from mininet.net import Mininet
from mininet.node import Controller, OVSKernelSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import TCLink
def topology():
    "Create a network."
    # Build one access point serving two wifi stations and two wired hosts,
    # all managed by a local OpenFlow controller; then drop into the CLI.
    net = Mininet( controller=Controller, link=TCLink, switch=OVSKernelSwitch )
    print "*** Creating nodes"
    ap1 = net.addBaseStation( 'ap1', ssid="simplewifi", mode="g", channel="5" )
    sta1 = net.addStation( 'sta1', ip='192.168.0.1/24' )
    sta2 = net.addStation( 'sta2', ip='192.168.0.2/24' )
    h3 = net.addHost( 'h3', ip='192.168.0.3/24' )
    h4 = net.addHost( 'h4', ip='192.168.0.4/24' )
    c0 = net.addController('c0', controller=Controller, ip='127.0.0.1' )
    print "*** Adding Link"
    # Stations associate with the AP; hosts attach over wired links.
    net.addLink(sta1, ap1)
    net.addLink(sta2, ap1)
    net.addLink(h3, ap1)
    net.addLink(h4, ap1)
    print "*** Starting network"
    net.build()
    c0.start()
    ap1.start( [c0] )
    print "*** Running CLI"
    # Interactive CLI blocks until the user exits, then the net is torn down.
    CLI( net )
    print "*** Stopping network"
    net.stop()
# Script entry point: enable info-level logging and build the topology.
if __name__ == '__main__':
    setLogLevel( 'info' )
    topology()
| [
"ramonreisfontes@gmail.com"
] | ramonreisfontes@gmail.com |
3d3ef2ae01cfb64ab347fd0b2059de334af5e8a0 | 0953fdab77be774dc26e933735022e24b0035610 | /src/shop/migrations/0009_auto_20210828_1650.py | df6f9c6bf37eac8def20368d30f9f4e2cfdd7bc0 | [] | no_license | arindamdas612/bigtfox-django-app | 99a682f1708ad622800210c64e1c459c6de6eec7 | 7a4bc437647ddc0b3a0db9481a28bfd4da2171a6 | refs/heads/main | 2023-08-26T01:33:58.955268 | 2021-10-12T17:58:55 | 2021-10-12T17:58:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | # Generated by Django 3.2.6 on 2021-08-28 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (0009).

    Adds a required ``abr`` abbreviation CharField (max 4 chars) to both
    the ``category`` and ``primarycategory`` models.  Existing rows are
    back-filled with the placeholder 'XXX'; ``preserve_default=False``
    means the placeholder is used only for this migration, not kept as a
    model default.  Generated files should not be hand-edited.
    """

    dependencies = [
        ('shop', '0008_productattribute'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='abr',
            field=models.CharField(default='XXX', max_length=4),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='primarycategory',
            name='abr',
            field=models.CharField(default='XXX', max_length=4),
            preserve_default=False,
        ),
    ]
| [
"arindam@Arindams-MacBook-Pro.local"
] | arindam@Arindams-MacBook-Pro.local |
cc6f0d63c7fdb76f60da7a6fd13f80a6561bb2a2 | 96464e20c680bd9b7d8d9f4cc3668b338e92b949 | /Code/tree_project/amazons_game.py | d13f3bc95a77278f137aa060a87866f60d22fa86 | [] | no_license | SWHarrison/CS-2.1-Trees-Sorting | 3a5bc44f30ca9ad8cb2e70546a870ad66f0ad3a9 | 9da835d136c01445c65579c864289c549ecf84a1 | refs/heads/master | 2020-08-26T20:13:41.051958 | 2019-12-17T01:28:05 | 2019-12-17T01:28:05 | 217,133,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,721 | py | class gameBoard:
def __init__(self, size = 5, pieces = 2):
self.board = []
for rows in range(size):
new_row = []
for columns in range(size):
new_row.append(" ")
self.player1_pieces = player_pieces(size,1)
self.player2_pieces = player_pieces(size,2)
for piece in self.player1_pieces
self.board[piece[0]][piece[1]] = "1"
for piece in self.player2_pieces:
self.board[piece[0]][piece[1]] = "2"
def __repr__(self):
"""Return a string representation of this prefix tree."""
to_return = "-" * len(self.board) + "\n"
for r in range(len(self.board)):
to_return += "|"
for c in range(len(self.board)):
to_return += self.board[r][c] + "|"
to_return += "\n" + "-" * len(self.board)
return to_return
def all_available_moves(player_num):
pieces = None
if(player_num == 1):
pieces = self.player1_pieces
else:
pieces = self.player2_pieces
moves = []
for piece in pieces:
moves.append(self.piece_moves(piece))
def piece_moves(piece):
if board[piece[0]][piece[1]] == " ":
#Raise ValueError
print("ERROR")
return
moves = []
current_loc = [piece[0],piece[1]]
current_loc[1] += 1
# up moves
while current_loc[1] < len(self.board) and board[current_loc[0]][current_loc[1]] == " ":
moves.append(current_loc)
current_loc[1] += 1
current_loc = [piece[0],piece[1]]
current_loc[1] -= 1
# down moves
while current_loc[1] >= 0 and board[current_loc[0]][current_loc[1]] == " ":
moves.append(current_loc)
current_loc[1] -= 1
current_loc = [piece[0],piece[1]]
current_loc[0] -= 1
# left moves
while current_loc[0] >= 0 and board[current_loc[0]][current_loc[1]] == " ":
moves.append(current_loc)
current_loc[0] -= 1
current_loc = [piece[0],piece[1]]
current_loc[0] += 1
# right moves
while current_loc[0] < len(self.board) and board[current_loc[0]][current_loc[1]] == " ":
moves.append(current_loc)
current_loc[0] += 1
return moves
class player_pieces:
    """Holds the starting coordinates of one player's pieces.

    Player 1 starts on the two corners of row 0; player 2 on the two
    corners of the last row.  Coordinates are [row, col] pairs stored in
    the ``player_pieces`` attribute.
    """

    def __init__(self, size, player_num, pieces=2):
        """size: board side length; player_num: 1 or 2; pieces: unused,
        kept for interface compatibility (layout is fixed at two pieces).

        The draft read ``self.player_pieces`` before assigning it, which
        raised AttributeError; the stray read is removed.
        """
        if player_num == 1:
            self.player_pieces = [[0, 0], [0, size - 1]]
        else:
            self.player_pieces = [[size - 1, 0], [size - 1, size - 1]]
# Demo entry point: build a default-sized board and print its ASCII form.
if __name__ == '__main__':
    demo_board = gameBoard()
    print(demo_board)
| [
"samuel.harrison@students.makeschool.com"
] | samuel.harrison@students.makeschool.com |
3f62be4bee13a60402b2cf034c3b14ae9504a016 | 68bdd0187747709c2df5258b5b8afb2af339e5c9 | /db_create.py | a1d96c7cb088512cc4fa2856596fa61f06f4ff58 | [
"MIT"
] | permissive | katmentecki/fiction | 3240bed4d4b6a154e1dc9a1b255cfede9d30700d | 05d3ef59959418805bf6f67d0c35c232ad65880c | refs/heads/master | 2021-01-19T18:17:50.898813 | 2016-11-16T22:20:08 | 2016-11-16T22:20:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from application import db
from application.models import *
db.drop_all()
db.create_all()
print("DB created.")
| [
"lavin@pitt.edu"
] | lavin@pitt.edu |
11f6638b642a36c6cd899731274fcab13d234a23 | 14d35454bacfb77b2efd1e70700408ac270e3d58 | /Faire/lib.py | 9b4b1b87b7340cc8526b95a37e69acebd6fd05ee | [] | no_license | yrjie/genomeBackup | e1e17dfa402bdeee60d0a94f2c490e91f2ba3a58 | 3c42ab3eca16954fe438c0e5f56bf6ad135a9a18 | refs/heads/master | 2021-01-23T22:43:23.945250 | 2015-10-17T16:26:32 | 2015-10-17T16:26:32 | 26,046,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89,898 | py | #!/usr/bin/env python
import os,sys,re
from rpy2.robjects import r,FloatVector,StrVector
from glob import glob
from wig import Wig
from wigs import Wigs
from reads import reads
import numpy
from copy import deepcopy
from math import log10
#from functions import merge_peaks_by_head_tail_distance
def batchOccInRegions(wgs,outname=None,groupname='',outmode='w',chrColID=1,nameColID=0,startColIDpos=3,startColIDneg=4,endColIDpos=4,endColIDneg=3,straColID=2,sep='\t',second_sep=None,step=0,\
    lines=None,heatMap=True,flankup=3000,flankdn=3000,vcal='mean',excludeP=1,region_size=3000):
    '''
    Run occInRegions() for every wiggle track in a Wigs collection and
    optionally merge the per-track average curves into one .xls table.

    parameters:
    wgs: the Wigs class object (dict-like: name -> Wig)
    outname: if not None, basename for the merged average-curve table and
        the per-track heatmap directory (<outname>_heatmap/)
    outmode: 'w' writes a fresh table; any other value appends columns to
        an existing <outname>.xls (row counts must match)
    step: common bin size; 0 means take it from the wiggles themselves
    Other column/flank/vcal parameters are forwarded to occInRegions().
    Returns {track_name: {position: value}}.
    '''
    #calculate average wiggle density in regions and their flanking regions,e.g., gene
    #for gene body: occInRegions(wg=wg,chrColID=1,nameColID=0,startColIDpos=3,startColIDneg=4,endColIDpos=4,endColIDneg=3,straColID=2,step=1000,sep='\t',second_sep=None,\
    #    lines=lines,heatmapname=None,avgcurvename=None,flankup=1000000,flankdn=1000000,vcal='mean',excludeP=0.01,bin_count=100)
    #vcal: the method to calculate plot value, could be median or mean
    keys=wgs.keys()
    keys.sort()
    if len(keys)<1:
        print 'at least one wiggle data need to be specified!\n'
        return
    # If no step is given, harmonize on the smallest step found in the tracks.
    if step<1:
        steps={}
        for k in keys:steps[wgs.get(k).step]=1
        steps=steps.keys()
        if len(steps)>1:
            steps.sort()
            print 'step sizes in wiggles are not the same, will be set to a common step size',steps[0]
            step=steps[0]
        else:step=steps[0]
    dic={}
    for k in keys:
        print '\ncalculating for ',k,'...'
        wg=wgs.get(k)
        tHeatMapName=None
        #if heatMap:
        #if groupname!=None:tHeatMapName=os.path.join(os.path.split(groupname)[0],os.path.split(groupname)[-1]+'_'+os.path.split(k)[-1]+'.heatmap')
        # Per-track heatmap files go under <outname>_heatmap/ (or ./heatmap/).
        if outname!=None:tHeatMapName=outname+'_heatmap'
        else:tHeatMapName='heatmap'
        if not os.path.isdir(tHeatMapName):os.mkdir(tHeatMapName)
        if groupname!=None:tHeatMapName=os.path.join(tHeatMapName,os.path.split(groupname)[-1]+'.'+os.path.split(k)[-1]+'.heatmap')
        else:tHeatMapName=k+'.heatmap'
        dic[k]=occInRegions(wg=wg,chrColID=chrColID,nameColID=nameColID,startColIDpos=startColIDpos,startColIDneg=startColIDneg,endColIDpos=endColIDpos,endColIDneg=endColIDneg,straColID=straColID,step=step,sep=sep,second_sep=second_sep,lines=lines,heatmapname=tHeatMapName,avgcurvename=outname,flankup=flankup,flankdn=flankdn,vcal=vcal,excludeP=excludeP,region_size=region_size)
    #print outmode,'outmode'
    # Merge the per-track {pos: value} curves into a tab-separated table.
    if outname!=None:
        if outmode!='w':
            # Append mode: reuse the existing table's rows and add columns.
            fi=open(outname+'.xls')
            lines=fi.readlines()
            lines[0]=lines[0][:-1]
            fi.close()
        else:
            lines=['pos']
        #print outname
        #print lines[0]
        for k in keys:lines[0]+='\t'+k+'.'+groupname
        #print lines[0]
        fo=open(outname+'.xls','w')
        fo.write(lines[0]+'\n')
        poses=dic[keys[0]].keys()
        poses.sort()
        # Bail out if the existing table's row count does not match.
        if outmode!='w' and len(poses)!=len(lines)-1:return {}
        for i in range(len(poses)):
            p=poses[i]
            if outmode=='w':
                # Positions past the scaled region body are written as '+offset'.
                if p<region_size:oline=str(p)
                else:oline='+'+str(p-region_size)
            else:oline=lines[i+1][:-1]
            for k in keys:oline+='\t'+str(dic[k][p])
            fo.write(oline+'\n')
        fo.close()
    return dic
def batchOccAroundPoints(wgs,outname=None,groupname='',outmode='w',chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,sep='\t',second_sep=None,step=0,lines=None,heatMap=True,flankup=3000,flankdn=3000,vcal='mean',excludeP=1):
    '''
    Run occAroundPoints() for every wiggle track in a Wigs collection and
    optionally merge the per-track average curves into one .xls table.

    parameters:
    wgs: the Wigs class object (dict-like: name -> Wig)
    outname: if not None, basename for the merged table and the per-track
        heatmap directory (<outname>_heatmap/)
    outmode: 'w' writes a fresh table; any other value appends columns to
        an existing <outname>.xls (row counts must match)
    Column-ID parameters select the anchor point per strand, e.g. TSS/TTS;
    see the usage comments below.  Returns {track_name: {offset: value}}.
    '''
    #calculate average wiggle density along the flanking regions of Transcription Start sites, Transcription Terminal Sites, or exon/intron junctions
    #for TSS:chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,sep='\t',second_sep=None,
    #for TTS:chrColID=1,nameColID=0,posColIDpos=4,posColIDneg=3,straColID=2,sep='\t',second_sep=None,
    #for CDS_SS:chrColID=1,nameColID=0,posColIDpos=5,posColIDneg=6,straColID=2,sep='\t',second_sep=None,
    #for CDS_TS:chrColID=1,nameColID=0,posColIDpos=6,posColIDneg=5,straColID=2,sep='\t',second_sep=None,
    #for exonSS:chrColID=1,nameColID=0,posColIDpos=8,posColIDneg=9,straColID=2,sep='\t',second_sep=',',
    #for exonTs:chrColID=1,nameColID=0,posColIDpos=9,posColIDneg=8,straColID=2,sep='\t',second_sep=',',
    keys=wgs.keys()
    keys.sort()
    if len(keys)<1:
        print 'at least one wiggle data need to be specified!\n'
        return
    # If no step is given, harmonize on the smallest step found in the tracks.
    if step<1:
        steps={}
        for k in keys:steps[wgs.get(k).step]=1
        steps=steps.keys()
        if len(steps)>1:
            steps.sort()
            print 'step sizes in wiggles are not the same, will be set to a common step size',steps[0]
            step=steps[0]
        else:step=steps[0]
    dic={}
    for k in keys:
        print '\ncalculating for ',k,'...'
        wg=wgs.get(k)
        tHeatMapName=None
        #if heatMap:
        #if groupname!=None:tHeatMapName=os.path.join(os.path.split(groupname)[0],os.path.split(groupname)[-1]+'_'+os.path.split(k)[-1]+'.heatmap')
        # Per-track heatmap files go under <outname>_heatmap/ (or ./heatmap/).
        if outname!=None:tHeatMapName=outname+'_heatmap'
        else:tHeatMapName='heatmap'
        if not os.path.isdir(tHeatMapName):os.mkdir(tHeatMapName)
        if groupname!=None:tHeatMapName=os.path.join(tHeatMapName,os.path.split(groupname)[-1]+'.'+os.path.split(k)[-1]+'.heatmap')
        else:tHeatMapName=k+'.heatmap'
        dic[k]=occAroundPoints(wg=wg,chrColID=chrColID,nameColID=nameColID,posColIDpos=posColIDpos,posColIDneg=posColIDneg,straColID=straColID,step=step,sep=sep,second_sep=second_sep,lines=lines,heatmapname=tHeatMapName,avgcurvename=None,flankup=flankup,flankdn=flankdn,vcal=vcal,excludeP=excludeP)
        #dic[k]=occAroundPoints(wg=wg,chrColID=chrColID,nameColID=nameColID,posColIDpos=posColIDpos,posColIDneg=posColIDneg,straColID=straColID,step=step,sep=sep,second_sep=second_sep,lines=lines,heatmapname=tHeatMapName,avgcurvename=outname,flankup=flankup,flankdn=flankdn,vcal=vcal,excludeP=excludeP)
    # Merge the per-track {offset: value} curves into a tab-separated table.
    if outname!=None:
        if outmode!='w':
            # Append mode: reuse the existing table's rows and add columns.
            fi=open(outname+'.xls')
            lines=fi.readlines()
            lines[0]=lines[0][:-1]
            fi.close()
        else:
            lines=['pos']
        for k in keys:lines[0]+='\t'+k+'.'+groupname
        fo=open(outname+'.xls','w')
        fo.write(lines[0]+'\n')
        poses=dic[keys[0]].keys()
        poses.sort()
        # Bail out if the existing table's row count does not match.
        if outmode!='w' and len(poses)!=len(lines)-1:return {}
        for i in range(len(poses)):
            p=poses[i]
            if outmode=='w':oline=str(p)
            else:oline=lines[i+1][:-1]
            for k in keys:oline+='\t'+str(dic[k][p])
            fo.write(oline+'\n')
        fo.close()
    return dic
def batchOccPSD(wgs,outname=None):
    '''
    Run occPSD() (power spectral density of occupancy, defined elsewhere
    in this module) for every wiggle track in a Wigs collection.

    wgs: the Wigs class object (dict-like: name -> Wig)
    outname: if not None, write a merged <outname>.xls table with one
        'Periodicity' column plus one column per track.
    Returns {track_name: {periodicity: value}}.
    NOTE(review): the output file handle 'fo' is never closed/flushed
    explicitly; CPython closes it on garbage collection.
    '''
    keys=wgs.keys()
    if len(keys)<1:
        print 'at least one wiggle data need to be specified!\n'
        return
    steps={}
    for k in keys:
        print 'calculating for',k,'...'
        steps[wgs.get(k).step]=1
    steps=steps.keys()
    # All tracks must share one step size; rescale to the smallest if not.
    if len(steps)>1:
        steps.sort()
        print 'step sizes in wiggles are not the same, will be set to a common step size',steps[0]
        for k in keys:wgs.get(k).changeStep(step=steps[0])
    dic={}
    for k in keys:dic[k]=occPSD(wgs.get(k),outname=None)
    if outname!=None:
        fo=open(outname+'.xls','w')
        fo.write('Periodicity\t'+'\t'.join(keys)+'\n')
        poses=dic[keys[0]].keys()
        poses.sort()
        for i in poses:
            oline=str(i)
            for k in keys:oline+='\t'+str(dic[k][i])
            fo.write(oline+'\n')
    return dic
def batchPositionDistanceDistribution(data,outname=None,min=100,max=250,step=1):#={'name':[]},outname=None,crColID=0,posColID=3,min=100,max=250,step=1):
    '''
    Run positionDistanceDistribution() (defined elsewhere in this module)
    for each dataset in 'data' and optionally merge the results.

    data: {dataset_name: positions-dict} — exact value format is whatever
        positionDistanceDistribution() expects (see that function).
    outname: if not None, write <outname>.xls with a 'Distance' column
        plus one column per dataset.
    min/max/step shadow the builtins here; they bound the distance bins.
    Returns {dataset_name: {distance: count}}.
    '''
    keys=data.keys()
    dic={}
    for k in keys:dic[k]=positionDistanceDistribution(dic=data[k],min=min,max=max,step=step)#lines=dic[k],outname=None,crColID=crColID,posColID=posColID,min=min,max=max,step=step)
    if outname!=None:
        fo=open(outname+'.xls','w')
        fo.write('Distance\t'+'\t'.join(keys)+'\n')
        # All datasets are assumed to share the first dataset's bin keys.
        poses=dic[keys[0]].keys()
        poses.sort()
        for i in poses:
            oline=str(i)
            for k in keys:oline+='\t'+str(dic[k][i])
            fo.write(oline+'\n')
    return dic
def batchPositionValDistribution(data,outname=None,min=0,max=1500,step=3):
    '''
    Run positionValDistribution() (defined elsewhere in this module) for
    each dataset in 'data' and optionally merge the results.

    data: {dataset_name: positions-dict}
    outname: if not None, write <outname>.xls with a 'Value' column plus
        one column per dataset; bins missing in a dataset are filled with 0.
    min/max/step shadow the builtins here; they bound the value bins.
    Returns {dataset_name: {value_bin: count}}.
    '''
    keys=data.keys()
    dic={}
    for k in keys:dic[k]=positionValDistribution(dic=data[k],outname=None,min=min,max=max,step=step)
    if outname!=None:
        fo=open(outname+'.xls','w')
        fo.write('Value\t'+'\t'.join(keys)+'\n')
        # Unlike the distance variant, take the union of bins over all datasets.
        poses={}
        for name in dic:#[keys[0]].keys()
            for pos in dic[name]:poses[pos]=0
        poses=poses.keys()
        poses.sort()
        for i in poses:
            oline=str(i)
            for k in keys:
                # Fill gaps with zero so every row has a value per dataset.
                if not dic[k].has_key(i):dic[k][i]=0
                oline+='\t'+str(dic[k][i])
            fo.write(oline+'\n')
    return dic
def batchPositionAroundPoints(smts,outname=None,flankup=2500,flankdn=2500,step=10,chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,sep='\t',\
    second_sep=None,lines=[]):
    '''
    Run positionAroundPoint() (defined elsewhere in this module) for each
    summit set in 'smts' and optionally merge the results.

    smts: {dataset_name: summits} — passed through as the first argument
        of positionAroundPoint().
    outname: if not None, write <outname>.xls with a 'pos' column plus
        one column per dataset.
    NOTE(review): 'lines=[]' is a mutable default argument (harmless here
    since it is only forwarded, and forwarded as a fresh [] anyway).
    Returns {dataset_name: {offset: value}}.
    '''
    dic={}
    keys=smts.keys()
    keys.sort()
    for k in smts:dic[k]=positionAroundPoint(smts[k],outname=outname,flankup=flankup,flankdn=flankdn,step=step,chrColID=chrColID,nameColID=nameColID,\
        posColIDpos=posColIDpos,posColIDneg=posColIDneg,straColID=straColID,sep=sep,second_sep=second_sep,\
        lines=[])
    if outname!=None:
        fo=open(outname+'.xls','w')
        fo.write('pos\t'+'\t'.join(keys)+'\n')
        # All datasets are assumed to share the first dataset's offsets.
        poses=dic[keys[0]].keys()
        poses.sort()
        for i in poses:
            oline=str(i)
            for k in keys:oline+='\t'+str(dic[k][i])
            fo.write(oline+'\n')
    return dic
def occAroundPoints(wg,chrColID,nameColID,posColIDpos,posColIDneg,straColID,step=0,sep='\t',second_sep=None,\
    lines=[],heatmapname=None,avgcurvename=None,flankup=3000,flankdn=3000,vcal='mean',excludeP=0):
    '''
    Compute a strand-aware occupancy heatmap and average curve around a
    set of anchor points (e.g. TSSs) for a single wiggle track.

    wg: a Wig object (wg.data: {chr: per-step value array}, wg.step).
    lines: annotation rows; chrColID/nameColID/posColID*/straColID select
        the chromosome, name, anchor position (per strand) and strand
        columns; second_sep splits multi-valued position fields (exons).
    Writes <heatmapname>.xls: one row per anchor, with max/min/sum and the
    per-step values from -flankup to +flankdn (orientation-corrected).
    vcal: 'mean' (with the top/bottom excludeP fraction trimmed) or
        'median' per column.  Returns {offset_bp: value}.
    NOTE(review): mutable default lines=[]; "if line[:-1]=='\\n'" looks like
    it was meant to test line[-1]; the per-column pass re-reads the heatmap
    file once per column, which is O(columns * rows) file I/O.
    '''
    #calculate average wiggle density along the flanking regions of Transcription Start sites, Transcription Terminal Sites, or exon/intron junctions
    #for TSS:chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,sep='\t',second_sep=None,
    #for TTS:chrColID=1,nameColID=0,posColIDpos=4,posColIDneg=3,straColID=2,sep='\t',second_sep=None,
    #for CDS_SS:chrColID=1,nameColID=0,posColIDpos=5,posColIDneg=6,straColID=2,sep='\t',second_sep=None,
    #for CDS_TS:chrColID=1,nameColID=0,posColIDpos=6,posColIDneg=5,straColID=2,sep='\t',second_sep=None,
    #for exonSS:chrColID=1,nameColID=0,posColIDpos=8,posColIDneg=9,straColID=2,sep='\t',second_sep=',',
    #for exonTs:chrColID=1,nameColID=0,posColIDpos=9,posColIDneg=8,straColID=2,sep='\t',second_sep=',',
    #vcal: the method to calculate plot value, could be median or mean
    if step<1:step=wg.step
    else:wg.changeStep(step=step)
    # From here on, flankup/flankdn are measured in steps, not base pairs.
    flankup/=step
    flankdn/=step
    if heatmapname==None:heatmapname='heatmapname'
    outf=open(heatmapname+'.xls',"w")
    outf.write('name\tmax\tmin\tsum')
    for i in range(0-flankup,flankdn):outf.write('\t'+str(i))
    outf.write('\n')
    #if avgcurvename==None:avgcurvename='avgcurvename'
    #outf2=open(avgcurvename+'.xls',"w")
    #outf2.write('pos\tvalue\n')
    #lst=resize([0.0],flankup+flankdn)
    lst={}
    for i in range(flankup+flankdn):lst[i]=[]
    num=0
    for line in lines:
        if line[:-1]=='\n':line=line[:-1]
        col=line.split(sep)
        chr,name,stra=col[chrColID],col[nameColID],col[straColID]
        # Skip anchors on chromosomes absent from the wiggle track.
        if not wg.data.has_key(chr):continue
        if stra=='+':
            if second_sep!=None:poses=col[posColIDpos].split(second_sep)
            else:poses=[col[posColIDpos]]
            #print poses,line
        elif stra=='-':
            if second_sep!=None:poses=col[posColIDneg].split(second_sep)
            else:poses=[col[posColIDneg]]
        for pos in poses:
            if pos=='':continue
            tlst=[0.0]*(flankup+flankdn)
            tss=int(pos)/step
            if stra=='+':
                for i in range(0-flankup,flankdn):
                    # Out-of-bounds flanks are silently left at 0.0.
                    try:tlst[flankup+i]=wg.data[chr][tss+i]
                    except:continue
            else:
                # Negative strand: mirror the window so offsets read 5'->3'.
                for i in range(1-flankdn,flankup+1):
                    try:tlst[flankup-i]=wg.data[chr][tss+i]
                    except:continue
            regionmax,regionmin,regionsum=max(tlst),min(tlst),sum(tlst)
            ol=name+'\t'+'\t'.join([str(regionmax),str(regionmin),str(regionsum)])
            for i in range(flankup+flankdn):ol+='\t'+str(tlst[i])
            outf.write(ol+'\n')
            num+=1
    outf.close()
    print 'caculating average curve ...'
    dic={}
    vec=numpy.array([0.0])
    # refcheck=0 allows in-place resize even though 'vec' is referenced.
    vec.resize(num,refcheck=0)
    # Columns 0-3 are name/max/min/sum; data columns start at index 4.
    for i in range(4,flankup+flankdn+4):
        vec=vec*0
        fi=open(heatmapname+'.xls')
        fi.readline()
        ln=0
        for line in fi:
            vec[ln]=float(line.split()[i])
            ln+=1
        fi.close()
        vec.sort()
        if vcal=='mean':
            # Trimmed mean: drop the lowest/highest excludeP fraction.
            s=sum(vec[int(num*excludeP):int(num-num*excludeP)])*1.0
            v=s/(num-num*excludeP*2)#len(vec)
        elif vcal=='median':v=vec[num/2]
        #outf2.write(str((i-flankup-4)*step)+'\t'+str(v)+'\n')
        dic[(i-flankup-4)*step]=v
    print ''
    return dic
def occInRegions(wg,chrColID,nameColID,startColIDpos,startColIDneg,endColIDpos,endColIDneg,straColID,step=0,sep='\t',second_sep=None,\
    lines=[],heatmapname=None,avgcurvename=None,flankup=3000,flankdn=3000,vcal='mean',excludeP=0,region_size=3000):
    '''
    Compute a strand-aware occupancy heatmap and average curve over whole
    regions (e.g. gene bodies) plus flanks for a single wiggle track.

    Each region body is rescaled to region_size/step bins (read from a
    deep copy of the wiggle at its ORIGINAL step), while the upstream and
    downstream flanks are read at the (possibly rescaled) 'step'.
    Writes <heatmapname>.xls (one row per region: name, max, min, sum,
    then flankup + body-bins + flankdn values) and returns
    {offset: value} where vcal selects trimmed mean or median per column.
    NOTE(review): mutable default lines=[]; "if line[:-1]=='\\n'" looks like
    it was meant to test line[-1]; and "if starts[id]=='':continue" inside
    the while loop does NOT advance 'id', so an empty start field would
    loop forever — TODO confirm inputs never contain empty start fields.
    '''
    #calculate average wiggle density in regions and their flanking regions,e.g., gene
    #for gene body: occInRegions(wg=wg,chrColID=1,nameColID=0,startColIDpos=3,startColIDneg=4,endColIDpos=4,endColIDneg=3,straColID=2,step=1000,sep='\t',second_sep=None,\
    #    lines=lines,heatmapname=None,avgcurvename=None,flankup=1000000,flankdn=1000000,vcal='mean',excludeP=0.01,bin_count=100)
    #vcal: the method to calculate plot value, could be median or mean
    #print flankup,flankdn
    # Keep an unrescaled copy so region bodies sample at the original step.
    owg=deepcopy(wg)
    ostep=owg.step
    if step<1:step=wg.step
    else:wg.changeStep(step=step)
    # From here on, flankup/flankdn/bin_count are measured in steps.
    flankup/=step
    flankdn/=step
    bin_count=region_size/step
    if heatmapname==None:heatmapname='heatmap'
    outf=open(heatmapname+'.xls',"w")
    outf.write('name\tmax\tmin\tsum')
    #if bin_count<max(flankup,flankdn):bin_count=max(flankup,flankdn)
    for i in range(0-flankup,0):outf.write('\t'+str(i))
    for i in range(0,bin_count):outf.write('\t'+str(i))
    for i in range(0,flankdn):outf.write('\t+'+str(i+bin_count))
    outf.write('\n')
    if avgcurvename==None:avgcurvename='avgcurvename'
    #outf2=open(avgcurvename+'.xls',"w")
    #outf2.write('pos\tvalue\n')
    #lst={}
    #for i in range(flankup+flankdn+bin_count):lst[i]=[]
    num=0
    tlst=numpy.array([0.0])
    tlst.resize(flankup+flankdn+bin_count,refcheck=0)
    for line in lines:
        if line[:-1]=='\n':line=line[:-1]
        col=line.split(sep)
        chr,name,stra=col[chrColID],col[nameColID],col[straColID]
        # Skip regions on chromosomes absent from the wiggle track.
        if not wg.data.has_key(chr):continue
        if stra=='+':
            if second_sep!=None:starts,ends=col[startColIDpos].split(second_sep),col[endColIDpos].split(second_sep)
            else:starts,ends=[col[startColIDpos]],[col[endColIDpos]]
        elif stra=='-':
            if second_sep!=None:starts,ends=col[startColIDneg].split(second_sep),col[endColIDneg].split(second_sep)
            else:starts,ends=[col[startColIDneg]],[col[endColIDneg]]
        id,lth=0,len(starts)
        if lth!=len(ends):continue
        while id<lth:
            tlst=tlst*0
            if starts[id]=='':continue
            # tss/tes in rescaled steps (flanks); otss/otes in original steps (body).
            tss,tes,otss,otes=int(starts[id])/step,int(ends[id])/step,int(starts[id])/ostep,int(ends[id])/ostep
            if stra=='+':
                for i in range(0-flankup,0):
                    # Out-of-bounds positions are silently left at 0.0.
                    try:tlst[flankup+i]=wg.data[chr][tss+i]
                    except:continue
                # Stretch/shrink the region body onto bin_count bins.
                bstep=(otes-otss)*1.0/bin_count
                for i in range(0,bin_count):
                    try:tlst[flankup+i]=owg.data[chr][otss+int(i*bstep)]
                    except:continue
                for i in range(0,flankdn):
                    try:tlst[flankup+bin_count+i]=wg.data[chr][tes+i]
                    except:continue
            else:
                # Negative strand: walk leftwards so offsets read 5'->3'.
                for i in range(0-flankup,0):
                    try:tlst[flankup+i]=wg.data[chr][tss-i]
                    except:continue
                bstep=(otss-otes)*1.0/bin_count
                for i in range(0,bin_count):
                    try:tlst[flankup+i]=owg.data[chr][otss-int(i*bstep)]
                    except:continue
                for i in range(0,flankdn):
                    try:tlst[flankup+bin_count+i]=wg.data[chr][tes-i]
                    except:continue
            regionmax,regionmin,regionsum=max(tlst),min(tlst),sum(tlst)
            #if heatmapname!=None:
            ol=name+'\t'+'\t'.join([str(regionmax),str(regionmin),str(regionsum)])
            for i in range(flankup+flankdn+bin_count):ol+='\t'+str(tlst[i])
            outf.write(ol+'\n')
            #outf.write(ol+'\t'+str(otes-otss)+'\t'+str(bstep)+'\n')
            num+=1
            id+=1
    outf.close()
    print 'caculating average curve ...'
    dic={}
    vec=numpy.array([0.0])
    # refcheck=0 allows in-place resize even though 'vec' is referenced.
    vec.resize(num,refcheck=0)
    #print flankup,flankdn,bin_count,flankup+flankdn+bin_count
    # Columns 0-3 are name/max/min/sum; data columns start at index 4.
    for i in range(4,flankup+flankdn+bin_count+4):
        vec=vec*0
        fi=open(heatmapname+'.xls')
        fi.readline()
        ln=0
        for line in fi:
            vec[ln]=float(line.split()[i])
            ln+=1
        fi.close()
        vec.sort()
        if vcal=='mean':
            # Trimmed mean: drop the lowest/highest excludeP fraction.
            s=sum(vec[int(num*excludeP):int(num-num*excludeP)])*1.0
            v=s/(num-num*excludeP*2)#len(vec)
        elif vcal=='median':v=vec[num/2]
        #outf2.write(str((i-flankup-4)*step)+'\t'+str(v)+'\n')
        dic[(i-flankup-4)*step]=v
    print ''
    return dic
def positionSelectorOld(positionLines=[],selection=None,geneFile=None,outGeneFile=None,chrbinsize=10000000000000):
    '''
    Filter DANPOS-style position lines by combined value-range and
    promoter-overlap criteria (older implementation kept for reference).

    positionLines: The first line in positionLines must be the title line, each line should have a '\\n' at the end, positionLines should be in the default output format of DANPOS, see DANPOS documentation for example
    selection: '&'-joined "column:min:max" clauses, e.g.
        promoter:-350:50&control_smt_val:0:1000&0-log10(point_diff_pval):0:1e-10
        The special column name 'promoter' requires geneFile and keeps a
        position only if it overlaps [TSS+min, TSS+max] of some gene
        (strand-aware).  Empty min/max mean unbounded on that side.
    geneFile: gene table (name, chr, strand, start, end, ...) with one
        header line; required when a 'promoter' clause is used.
    outGeneFile: if given, also write each matched gene with its line.
    chrbinsize: genomic bin width used to index TSSs for overlap lookup.
    Returns the kept lines (header first).
    NOTE(review): mutable default positionLines=[].
    '''
    if selection==None:return positionLines
    if len(positionLines)<2:return positionLines
    if outGeneFile!=None:
        ogf=open(outGeneFile,'w')
        ogf.write('genes\t'+positionLines[0])
        oglines={}
    # tld maps each header column name to its index for value lookups.
    retr,tcol,tld=[positionLines[0]],positionLines[0].split('\t'),{}
    for i in range(len(tcol)):tld[tcol[i]]=i
    sels=selection.split('&')
    flank=0
    # Parse each clause into [column, min, max]; track the widest promoter flank.
    for i in range(len(sels)):
        sels[i]=sels[i].split(':')
        if sels[i][1]!='':
            sels[i][1]=float(sels[i][1])
            if sels[i][0]=='promoter':
                if geneFile==None:
                    print 'Error: gene file is not provided.'
                    return []
                if abs(sels[i][1])>flank:flank=abs(sels[i][1])
        if sels[i][2]!='':
            sels[i][2]=float(sels[i][2])
            if sels[i][0]=='promoter':
                if geneFile==None:
                    print 'Error: gene file is not provided.'
                    return []
                if abs(sels[i][2])>flank:flank=abs(sels[i][2])
    if flank>chrbinsize:chrbinsize=flank
    # Index every gene's TSS by chromosome and genomic bin.
    if geneFile!=None:
        gd={}
        for line in open(geneFile).readlines()[1:]:
            col=line.split()
            if col[2]=='+':gname,cr,stra,tss=col[0],col[1],col[2],int(col[3])
            else:gname,cr,stra,tss=col[0],col[1],col[2],int(col[4])
            if not gd.has_key(cr):gd[cr]={}
            bin=int(tss/chrbinsize)
            if not gd[cr].has_key(bin):gd[cr][bin]={}
            gd[cr][bin][tss]=[stra,gname]
    for line in positionLines[1:]:
        genes,ok,col='',True,line.split('\t')
        # First pass: numeric range clauses (all must hold).
        for sel in sels:
            if sel[0]!='promoter':
                v=float(col[tld[sel[0]]])
                if sel[1]!='':
                    if v<sel[1]:ok=False
                if sel[2]!='':
                    if v>sel[2]:ok=False
        # Second pass: promoter overlap clauses.
        for sel in sels:
            if ok and sel[0]=='promoter':
                cr,poses=col[0],[]
                # Columns 1-3 hold start/end/summit; ignore non-numeric fields.
                for i in ([1,2,3]):
                    try:poses.append(int(col[i]))
                    except:continue
                minpos,maxpos=min(poses),max(poses)
                minbin,maxbin=int((minpos-flank)/chrbinsize),int((maxpos+flank)/chrbinsize)
                bins=range(minbin,maxbin+1)
                for bin in bins:
                    if not gd[cr].has_key(bin):continue
                    for tss in gd[cr][bin]:
                        # Strand-aware interval overlap with [TSS+min, TSS+max].
                        if gd[cr][bin][tss][0]=='+' and (maxpos>tss+sel[1] and minpos<tss+sel[2]):
                            if outGeneFile!=None:
                                if not oglines.has_key(gd[cr][bin][tss][1]):oglines[gd[cr][bin][tss][1]]=[]
                                oglines[gd[cr][bin][tss][1]].append(gd[cr][bin][tss][1]+'\t'+line)
                            genes+=gd[cr][bin][tss][1]+','
                        elif gd[cr][bin][tss][0]=='-' and (maxpos>tss-sel[2] and minpos<tss-sel[1]):
                            if outGeneFile!=None:
                                if not oglines.has_key(gd[cr][bin][tss][1]):oglines[gd[cr][bin][tss][1]]=[]
                                oglines[gd[cr][bin][tss][1]].append(gd[cr][bin][tss][1]+'\t'+line)
                            genes+=gd[cr][bin][tss][1]+','
                if genes=='':ok=False
        if ok:retr.append(line)
    if outGeneFile!=None:
        for gene in oglines:
            for line in oglines[gene]:ogf.write(line)
        ogf.close()
    return retr
def GREATdomain(bup=-5000,bdn=1000,eup=-1000000,edn=1000000,geneFile=None,posStartCol=3,posEndCol=3,negStartCol=4,negEndCol=4,chrbinsize=1000000):
    """Placeholder for GREAT-style regulatory-domain assignment. UNFINISHED.

    Intended behavior (per the author's draft): for each gene in geneFile,
    define a basal domain [start+bup, end+bdn] (strand-aware, using the
    posStartCol/posEndCol or negStartCol/negEndCol columns) and extend it
    up to eup/edn toward the neighboring genes' basal domains, indexing
    the resulting domains by chromosome and chrbinsize-sized bins.

    The original implementation existed only inside a triple-quoted
    string (never executed) explicitly marked "this function is not
    finished", so this function has always been a no-op that returned
    None.  The dead draft has been removed (it remains available in
    version control); the implicit None return is now explicit.

    Returns:
        None -- always; callers must not rely on a domain index yet.
    """
    return None
def positionSelectorByGreatTSS(positionLines=[],selection='-5000:1000:1000000',geneFile=None,chrbinsize=None):
    '''
    Keep only position lines whose interval overlaps a GREAT-style TSS
    regulatory domain, and annotate each kept line with the gene(s).

    positionLines: The first line in positionLines must be the title line, each line should have a '\\n' at the end, positionLines should be in the default output format of DANPOS, see DANPOS documentation for example
    selection: "upstream:downstream:extension" in bp relative to the TSS,
        e.g. '-5000:1000:1000000' — the basal domain is [TSS-5000, TSS+1000]
        (strand-aware) extended up to 1 Mb toward neighboring domains.
    geneFile: required gene table (name, chr, strand, start, end, ...)
        with one header line.
    chrbinsize: genomic bin width for the domain index; defaults to the
        largest selector value.
    Returns kept lines, header first, with a 'relatedGenes' column added.
    NOTE(review): mutable default positionLines=[]; the header line of
    the input list is modified in place (trailing newline stripped); the
    first and last gene of each chromosome are skipped by the
    "i,lth=1,len(poses)-1" loop bounds — TODO confirm this is intended.
    '''
    if geneFile==None:
        print 'Error: gene file is not provided.'
        return []
    if selection==None:return positionLines
    sels=selection.split(':')
    if len(sels)<3:
        print 'Wrong! Less than three fields could be detected in the selector:',selection
        return []
    elif sels[0]=='':
        print 'Wrong! Please set a upstream bound in the GREAT selector:',selection
        return []
    elif sels[1]=='':
        print 'Wrong! Please set a downstream bound in the GREAT selector:',selection
        return []
    else:sels[0],sels[1]=int(sels[0]),int(sels[1])
    # Empty extension field means no extension beyond the basal domain.
    if sels[2]=='':sels[2]=0
    else:sels[2]=int(sels[2])
    if chrbinsize==None:chrbinsize=max(sels)
    if len(positionLines)<2:return positionLines
    if positionLines[0][-1]=='\n':positionLines[0]=positionLines[0][:-1]
    retr,tcol,tld=[positionLines[0]+'\trelatedGenes\n'],positionLines[0].split('\t'),{}
    tgd,gd,n={},{},0
    # Pass 1: basal domain per gene, keyed by its genomic start.
    for line in open(geneFile).readlines()[1:]:
        n,col=n+1,line.split()
        gname,cr,stra=col[0],col[1],col[2]
        if not tgd.has_key(cr):tgd[cr]={}
        if stra=='+':
            pos=int(col[3])
            tgd[cr][pos+sels[0]]=[gname,pos,pos+sels[1]]
        else:
            pos=int(col[4])
            # On the minus strand the basal domain's physical start is TSS-downstream.
            tgd[cr][pos-sels[1]]=[gname,pos,pos-sels[0]]
    # Pass 2: extend each basal domain toward its neighbors, capped at sels[2],
    # and index the final domains by chrbinsize-sized genomic bins.
    for cr in tgd:
        if not gd.has_key(cr):gd[cr]={}
        if not gd[cr].has_key('TSS'):gd[cr]['TSS']={}
        poses=tgd[cr].keys()
        poses.sort()
        i,lth=1,len(poses)-1
        while i <lth:
            bstart,prebstart,nxtbstart=poses[i],poses[i-1],poses[i+1]
            bend,prebend,nxtbend=tgd[cr][bstart][2],tgd[cr][prebstart][2],tgd[cr][nxtbstart][2]
            pos,gname=tgd[cr][bstart][1],tgd[cr][bstart][0]
            minpos,maxpos=min(bstart,max(prebend,pos-sels[2])),max(bend,min(nxtbstart,pos+sels[2]))
            for bin in range(minpos/chrbinsize,maxpos/chrbinsize+1):
                if not gd[cr]['TSS'].has_key(bin):gd[cr]['TSS'][bin]={}
                if not gd[cr]['TSS'][bin].has_key(minpos):gd[cr]['TSS'][bin][minpos]=[]
                gd[cr]['TSS'][bin][minpos].append(['',gname,maxpos,pos])
            i+=1
    # Pass 3: keep each position line that overlaps at least one domain.
    for line in positionLines[1:]:
        genes,ok,col='',0,line.split('\t')
        tgenes={}
        cr,poses=col[0],[]
        # Columns 1-3 hold start/end/summit; ignore non-numeric fields.
        for i in ([1,2,3]):
            try:poses.append(int(col[i]))
            except:continue
        minpos,maxpos=min(poses),max(poses)
        minbin,maxbin=minpos/chrbinsize,maxpos/chrbinsize
        bins=range(minbin,maxbin+1)
        for bin in bins:
            if not gd.has_key(cr):continue
            if not gd[cr]['TSS'].has_key(bin):continue
            for start in gd[cr]['TSS'][bin]:
                for i in range(len(gd[cr]['TSS'][bin][start])):
                    gname,end,pos=gd[cr]['TSS'][bin][start][i][1],gd[cr]['TSS'][bin][start][i][2],gd[cr]['TSS'][bin][start][i][3]
                    # Half-open overlap test against the extended domain.
                    if maxpos>start and minpos<end:tgenes[gname+'/'+str(pos)+'/'+str(pos)]=1
        if len(tgenes)!=0:
            ok+=1
            genes+='TSS'+':'+','.join(tgenes.keys())+'|'
        if ok>0:retr.append(line[:-1]+'\t'+genes[:-1]+'\n')
    return retr
def positionSelectorByGeneStructure(positionLines=[],selection=None,geneFile=None,chrbinsize=10000000000000):
'''
positionLines: The first line in positionLines must be the title line, each line should have a '\n' at the end, positionLines should be in the default output format of DANPOS, see DANPOS documentation for example
selection: promoter:-350:50&control_smt_val:0:1000&0-log10(point_diff_pval):0:1e-10
'''
if geneFile==None:
print 'Error: gene file is not provided.'
return []
if selection==None:return positionLines
if len(positionLines)<2:return positionLines
'''
if outGeneFile!=None:
ogf=open(outGeneFile,'w')
ogf.write('genes\t'+positionLines[0])
oglines={}
'''
if positionLines[0][-1]=='\n':positionLines[0]=positionLines[0][:-1]
retr,tcol,tld=[positionLines[0]+'\trelatedGenes\n'],positionLines[0].split('\t'),{}
sels=selection.split(',')
#print sels
if sels[-1]=='and':andor='and'
elif sels[-1]=='or':andor='or'
elif len(sels)>1:
print "Error: the selection must be defined with 'and' or 'or' at the end"
return []
else:
sels.append('and')
andor='and'
sels=sels[:-1]
selsdic,flank,tsels={},0,[]
for i in range(len(sels)):
sels[i]=sels[i].split(':')
if not sels[i][0] in ['TSS','TTS','CSS','CTS','ESS','ETS','exon','intron','gene']:
print 'Error: can not do selection for',sels[i][0]
return []
selsdic[sels[i][0]]=1
if sels[i][1]!='':
sels[i][1]=int(sels[i][1])
if abs(sels[i][1])>flank:flank=abs(sels[i][1])
if sels[i][2]!='':
sels[i][2]=int(sels[i][2])
if abs(sels[i][2])>flank:flank=abs(sels[i][2])
tsels.append(sels[i])
sels=tsels
if flank>chrbinsize:chrbinsize=flank
gd={}
n=0
for line in open(geneFile).readlines()[1:]:
n+=1
col=line.split()
cr,stra=col[1],col[2]
if not gd.has_key(cr):gd[cr]={}
if selsdic.has_key('TSS'):
if not gd[cr].has_key('TSS'):gd[cr]['TSS']={}
if stra=='+':gname,cr,stra,pos=col[0],cr,stra,int(col[3])
else:gname,cr,stra,pos=col[0],cr,stra,int(col[4])
bin=int(pos/chrbinsize)
if not gd[cr]['TSS'].has_key(bin):gd[cr]['TSS'][bin]={}
if not gd[cr]['TSS'][bin].has_key(pos):gd[cr]['TSS'][bin][pos]=[]
gd[cr]['TSS'][bin][pos].append([stra,gname,pos])
if selsdic.has_key('TTS'):
if not gd[cr].has_key('TTS'):gd[cr]['TTS']={}
if stra=='+':gname,cr,stra,pos=col[0],cr,stra,int(col[4])
else:gname,cr,stra,pos=col[0],cr,stra,int(col[3])
bin=int(pos/chrbinsize)
if not gd[cr]['TTS'].has_key(bin):gd[cr]['TTS'][bin]={}
if not gd[cr]['TTS'][bin].has_key(pos):gd[cr]['TTS'][bin][pos]=[]
gd[cr]['TTS'][bin][pos].append([stra,gname,pos])
if selsdic.has_key('CSS'):
if not gd[cr].has_key('CSS'):gd[cr]['CSS']={}
if stra=='+':gname,cr,stra,pos=col[0],cr,stra,int(col[5])
else:gname,cr,stra,pos=col[0],cr,stra,int(col[6])
bin=int(pos/chrbinsize)
if not gd[cr]['CSS'].has_key(bin):gd[cr]['CSS'][bin]={}
if not gd[cr]['CSS'][bin].has_key(pos):gd[cr]['CSS'][bin][pos]=[]
gd[cr]['CSS'][bin][pos].append([stra,gname,pos])
if selsdic.has_key('CTS'):
if not gd[cr].has_key('CTS'):gd[cr]['CTS']={}
if stra=='+':gname,cr,stra,pos=col[0],cr,stra,int(col[6])
else:gname,cr,stra,pos=col[0],cr,stra,int(col[5])
bin=int(pos/chrbinsize)
if not gd[cr]['CTS'].has_key(bin):gd[cr]['CTS'][bin]={}
if not gd[cr]['CTS'][bin].has_key(pos):gd[cr]['CTS'][bin][pos]=[]
gd[cr]['CTS'][bin][pos].append([stra,gname,pos])
if selsdic.has_key('ESS'):
if not gd[cr].has_key('ESS'):gd[cr]['ESS']={}
if stra=='+':gname,cr,stra,poss=col[0],cr,stra,col[8][:-1].split(',')
else:gname,cr,stra,poss=col[0],cr,stra,col[9][:-1].split(',')
for pos in poss:
pos=int(pos)
bin=int(pos/chrbinsize)
if not gd[cr]['ESS'].has_key(bin):gd[cr]['ESS'][bin]={}
if not gd[cr]['ESS'][bin].has_key(pos):gd[cr]['ESS'][bin][pos]=[]
gd[cr]['ESS'][bin][pos].append([stra,gname,pos])
if selsdic.has_key('ETS'):
if not gd[cr].has_key('ETS'):gd[cr]['ETS']={}
if stra=='+':gname,cr,stra,poss=col[0],cr,stra,col[9][:-1].split(',')
else:gname,cr,stra,poss=col[0],cr,stra,col[8][:-1].split(',')
for pos in poss:
pos=int(pos)
bin=int(pos/chrbinsize)
if not gd[cr]['ETS'].has_key(bin):gd[cr]['ETS'][bin]={}
if not gd[cr]['ETS'][bin].has_key(pos):gd[cr]['ETS'][bin][pos]=[]
gd[cr]['ETS'][bin][pos].append([stra,gname,pos])
if selsdic.has_key('exon'):
if not gd[cr].has_key('exon'):gd[cr]['exon']={}
#if stra=='+':
gname,cr,stra,starts,ends=col[0],cr,stra,col[8][:-1].split(','),col[9][:-1].split(',')
#else:gname,cr,stra,starts=col[0],cr,stra,col[9][:-1].split(',')
num=len(ends)
for i in range(num):
#for start in starts:
start,end=int(starts[i]),int(ends[i])
#print start,end,chrbinsize
for pos in range(start,end,chrbinsize):
bin=int(pos/chrbinsize)
if not gd[cr]['exon'].has_key(bin):gd[cr]['exon'][bin]={}
if not gd[cr]['exon'][bin].has_key(start):gd[cr]['exon'][bin][start]=[]
gd[cr]['exon'][bin][start].append([stra,gname,end])
if selsdic.has_key('intron'):
if not gd[cr].has_key('intron'):gd[cr]['intron']={}
#if stra=='+':
gname,cr,stra,starts,ends=col[0],cr,stra,col[8][:-1].split(','),col[9][:-1].split(',')
#else:gname,cr,stra,starts=col[0],cr,stra,col[9][:-1].split(',')
num=len(ends)
for i in range(1,num):
#for start in starts:
start,end=int(ends[i-1]),int(starts[i])
for pos in range(start,end,chrbinsize):
bin=int(pos/chrbinsize)
if not gd[cr]['intron'].has_key(bin):gd[cr]['intron'][bin]={}
if not gd[cr]['intron'][bin].has_key(start):gd[cr]['intron'][bin][start]=[]
gd[cr]['intron'][bin][start].append([stra,gname,end])
if selsdic.has_key('gene'):
if not gd[cr].has_key('gene'):gd[cr]['gene']={}
gname,cr,stra,start,end=col[0],cr,stra,int(col[3]),int(col[4])
for pos in range(start,end,chrbinsize):
bin=int(pos/chrbinsize)
if not gd[cr]['gene'].has_key(bin):gd[cr]['gene'][bin]={}
if not gd[cr]['gene'][bin].has_key(start):gd[cr]['gene'][bin][start]=[]
gd[cr]['gene'][bin][start].append([stra,gname,end])
print n,'genes'
'''
n=0
for cr in gd:
for sel in gd[cr]:
for bin in gd[cr][sel]:
for start in gd[cr][sel][bin]:
n+=len(gd[cr][sel][bin][start])
#print n, 'genic sites'
'''
for line in positionLines[1:]:
genes,ok,col='',0,line.split('\t')
for sel in sels:
tgenes={}#sel[0]+":"
cr,poses=col[0],[]
for i in ([1,2,3]):
try:poses.append(int(col[i]))
except:continue
minpos,maxpos=min(poses),max(poses)
minbin,maxbin=int((minpos-flank)/chrbinsize),int((maxpos+flank)/chrbinsize)
bins=range(minbin,maxbin+1)
for bin in bins:
if not gd.has_key(cr):continue
if not gd[cr][sel[0]].has_key(bin):continue
for start in gd[cr][sel[0]][bin]:
for i in range(len(gd[cr][sel[0]][bin][start])):
end=gd[cr][sel[0]][bin][start][i][2]
if gd[cr][sel[0]][bin][start][i][0]=='+' and (maxpos>start+sel[1] and minpos<end+sel[2]):
tgenes[gd[cr][sel[0]][bin][start][i][1]+'/'+str(start)+'/'+str(end)]=1
elif gd[cr][sel[0]][bin][start][i][0]=='-' and (maxpos>start-sel[2] and minpos<end-sel[1]):
tgenes[gd[cr][sel[0]][bin][start][i][1]+'/'+str(start)+'/'+str(end)]=1
if len(tgenes)!=0:
ok+=1
genes+=sel[0]+':'+','.join(tgenes.keys())+'|'
if andor=='and' and ok==len(sels):
#print andor,ok
retr.append(line[:-1]+'\t'+genes[:-1]+'\n')
elif andor=='or' and ok>0:
#print andor,ok
retr.append(line[:-1]+'\t'+genes[:-1]+'\n')
return retr
def positionSelectorByValue(positionLines=[],selection=None):
    '''
    Filter DANPOS position lines by numeric ranges on named columns.

    PositionLines: The first line in positionLines must be the title line, each line should have a '\n' at the end, positionLines should be in the default output format of DANPOS, see DANPOS documentation for example
    selection: comma separated 'column:low:high' conditions, optionally ended
    with 'and'/'or' to choose how conditions combine (default 'and'),
    e.g. 'control_smt_val:0:1000,point_diff_pval:0:1e-10,and'.
    An empty low or high bound means unbounded on that side.
    Returns the title line plus every data line satisfying the selection;
    returns [] (after printing an error) on a malformed selection.
    NOTE(review): the mutable default 'positionLines=[]' is shared across
    calls; callers always pass their own list in this code base.
    '''
    if selection==None:return positionLines
    if len(positionLines)<2:return positionLines
    # retr starts with the header; tld maps column name -> column index
    retr,tcol,tld=[positionLines[0]],positionLines[0].split('\t'),{}
    if tcol[-1][-1]=='\n':tcol[-1]=tcol[-1][:-1]
    for i in range(len(tcol)):tld[tcol[i]]=i
    sels=selection.split(',')
    # the last comma-separated token may declare how conditions combine
    if sels[-1]=='and':andor='and'
    elif sels[-1]=='or':andor='or'
    elif len(sels)>1:
        print sels
        print "Error: the selection must be defined with 'and' or 'or' at the end"
        return []
    else:
        # a single condition defaults to 'and'
        sels.append('and')
        andor='and'
    sels=sels[:-1]
    for i in range(len(sels)):
        # each condition becomes [column_name, low_bound, high_bound]
        sels[i]=sels[i].split(':')
        if not sels[i][0] in tcol[1:]:
            print "Error:", sels[i][0], "is not a column name in the position file"
            return []
        if sels[i][1]!='':
            sels[i][1]=float(sels[i][1])
        if sels[i][2]!='':
            sels[i][2]=float(sels[i][2])
    for line in positionLines[1:]:
        # ok counts how many conditions this line satisfies
        ok,col=0,line.split('\t')
        for sel in sels:
            v=float(col[tld[sel[0]]])
            ok1,ok2=False,False
            # an empty bound ('') passes automatically
            if sel[1]!='':
                if v>=sel[1]:ok1=True
            else:ok1=True
            if sel[2]!='':
                if v<=sel[2]:ok2=True
            else:ok2=True
            if ok1 and ok2: ok+=1
        if andor=='and' and ok==len(sels):retr.append(line) #all selection condition must be ok
        elif andor=='or' and ok>0:retr.append(line) #at least one selection condition must be ok
    return retr
def retrieve_positions_by_value(in_file=None,out_file=None,cr_col_name='chr',pos_col_name='diff_smt_loca',val_col_name='point_diff_pval',direction_by=['treat_point_val','control_point_val'],top_value=1e-7,bottom_value=0.0,log10trans=False):
    '''
    Select rows of a DANPOS-style table whose value column is strictly
    between bottom_value and top_value (rows outside the range are skipped).

    in_file:      whitespace-separated table, first line is the header.
    out_file:     optional path; selected rows are also written there.
    direction_by: when it names exactly two columns, a kept value is negated
                  if the first column is smaller than the second (encodes
                  gain/loss direction).
    log10trans:   when True, use log10(value+1) instead of the raw value
                  (relies on a module-level 'from math import log10').
    Returns out[chromosome][position] = (possibly signed) value.
    NOTE(review): the out_file handle is never closed explicitly.
    '''
    lines=open(in_file).readlines()
    if len(lines)<2:return {}
    col=lines[0].split()
    ids={}
    for i in range(0,len(col)):ids[col[i]]=i
    crid,posid,vid=ids[cr_col_name],ids[pos_col_name],ids[val_col_name]
    # direction columns are only resolved when exactly two are supplied
    if len(direction_by)==2:dirb=[ids[direction_by[0]],ids[direction_by[1]]]
    out={}
    retrieve=0
    if out_file:
        fo=open(out_file,'w')
        fo.write(lines[0])
    for line in lines[1:]:
        col=line.split()
        if col[posid]=='-':continue  # '-' marks a missing position
        try:
            if log10trans:chr,pos,v=col[crid],int(col[posid]),log10(float(col[vid])+1)
            else:chr,pos,v=col[crid],int(col[posid]),float(col[vid])
        except:
            # malformed row: report it and move on
            print line
            continue
        if top_value!=None and v>=top_value: continue
        if bottom_value!=None and v<=bottom_value:continue
        # attach a sign when the first direction column is the smaller one
        if len(direction_by)==2:
            if v==0:v=1e-323  # smallest positive float keeps the sign meaningful
            if float(col[dirb[0]])<float(col[dirb[1]]):
                v,col[vid]=0-v,str(0-v)
                line='\t'.join(col)+'\n'
        if out_file:fo.write(line)
        if not out.has_key(chr):out[chr]={}
        out[chr][pos]=v
        retrieve+=1
    print '\nretrieved',retrieve,'summits out of',len(lines)-1, 'by',val_col_name,bottom_value,'to',top_value
    return out
def retrieve_positions_by_rank(in_file=None,out_file=None,cr_col_name='chr',pos_col_name='diff_smt_loca',val_col_name='point_diff_pval',toprank=None,bottomrank=None,decreasing=False,direction_by=['treat_point_val','control_point_val']):
    '''
    Select rows of a DANPOS-style table by their rank on one value column.

    Rows are sorted by val_col_name (ascending unless decreasing=True) and
    the rows ranked in [toprank, bottomrank) are kept; None means 0 / all.
    When direction_by names exactly two columns, a kept value is negated if
    the first column is smaller than the second (encodes gain/loss direction).

    in_file:  whitespace-separated table whose first line is the header.
    out_file: optional path; selected rows are also written there.
    Returns out[chromosome][position] = (possibly signed) value.
    '''
    lines=open(in_file).readlines()
    if len(lines)<2:return {}
    col=lines[0].split()
    ids={}
    for i in range(0,len(col)):ids[col[i]]=i
    crid,posid,vid=ids[cr_col_name],ids[pos_col_name],ids[val_col_name]
    # direction columns are only resolved when exactly two are supplied
    if len(direction_by)==2:dirb=[ids[direction_by[0]],ids[direction_by[1]]]
    out={}
    if out_file:
        fo=open(out_file,'w')
        fo.write(lines[0])
    tosort={}
    linesdic={}
    for line in lines[1:]:
        col=line.split()
        try:
            tosort[col[crid]+','+col[posid]]=float(col[vid])
            linesdic[col[crid]+','+col[posid]]=line
        except (IndexError,ValueError):
            # malformed row (e.g. '-' placeholder): report it and move on
            print(line)
            continue
    from operator import itemgetter
    aftersort=sorted(tosort.items(),key=itemgetter(1),reverse=decreasing)
    retrieve=0
    if toprank==None:toprank=0
    if bottomrank==None:bottomrank=len(aftersort)
    for i in range(toprank,bottomrank):
        chr,pos=aftersort[i][0].split(',')
        v=aftersort[i][1]
        if len(direction_by)==2:
            if v==0:v=1e-323  # smallest positive float keeps the sign meaningful
            col=linesdic[aftersort[i][0]].split()
            if float(col[dirb[0]])<float(col[dirb[1]]):
                v,col[vid]=0-v,str(0-v)
                linesdic[aftersort[i][0]]='\t'.join(col)+'\n'
        if out_file:fo.write(linesdic[aftersort[i][0]])
        if not chr in out:out[chr]={}
        out[chr][int(pos)]=v
        retrieve+=1
    if out_file:fo.close()  # the original leaked this handle
    # fixed: this line used to reference the undefined name 'col_name',
    # raising NameError on every successful call
    print('\nretrieved %s summits out of %s by %s'%(retrieve,len(lines)-1,val_col_name))
    return out
def positions2Points(positions={},out_file=None,up=350,down=50,chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,sep='\t',second_sep=None,step=1,\
    neglog=True, rankby='max',lines=[]):
    '''
    Map summit positions onto gene reference points (e.g. TSS) and report,
    per gene, the summit values found within [-up, +down] of the point.

    positions: positions[chr][pos] = value (e.g. a P value when neglog=True).
    lines: gene annotation rows; the suggested source is the UCSC gene table:
    "name chrom strand(+/-) txStart txEnd cdsStart cdsEnd exonCount exonStarts exonEnds proteinID"
    second_sep: when set, the position column holds a list (e.g. exon starts)
    separated by this character.
    Returns the per-gene output lines sorted by max (or min, per rankby)
    mapped value; also written to out_file when given.
    '''
    summits=positions
    tsmit={}
    if neglog:
        # signed -log10 transform: positive values become -log10(v), negative
        # values become log10(-v); zeros cannot be transformed and are reported
        from math import log10
        for cr in summits:
            tsmit[cr]={}
            for pos in summits[cr]:
                if summits[cr][pos]>0:tsmit[cr][pos]=0-log10(summits[cr][pos])
                elif summits[cr][pos]<0:tsmit[cr][pos]=log10(0-summits[cr][pos])
                else:
                    print 'fail to do log transfer at',cr,pos,'due to value',summits[cr][pos]
        summits=tsmit
    # smts: summits snapped onto the step grid; msmts: summits actually mapped
    smts,msmts={},{}
    for chr in summits:
        msmts[chr],smts[chr]={},{}
        for pos in summits[chr]:
            p=pos-(pos%step)#note that the orginal summits located within step will be merged
            if not smts[chr].has_key(p):smts[chr][p]=summits[chr][pos]
            elif rankby=='max':smts[chr][p]=max(smts[chr][p],summits[chr][pos])
            else:smts[chr][p]=min(smts[chr][p],summits[chr][pos])
    if out_file:fo=open(out_file,'w')
    if out_file:fo.write('name\tchr\tpos\tmax_value\tmin_value\tvalues\n')
    out={}
    for line in lines:
        # NOTE(review): 'line[:-1]=="\n"' looks like it was meant to be
        # 'line[-1]=="\n"' (strip a trailing newline); as written it only
        # matches a two-character line ending in '\n' -- confirm intent.
        if line[:-1]=='\n':line=line[:-1]
        col=line.split(sep)
        if not smts.has_key(col[chrColID]):continue
        # flank sizes snapped to the step grid; swapped on the minus strand
        tup,tdn,pos,cr=up-up%step,down-down%step,col[posColIDpos],col[chrColID]
        if col[straColID]!='+':tup,tdn,pos,cr=down-down%step,up-up%step,col[posColIDneg],col[chrColID]
        if second_sep==None:poses=[pos]
        else:poses=pos.split(second_sep)
        for pos in poses:
            pos=int(pos)-int(pos)%step
            ps,vs=[],[]
            for p in range(pos-tup,pos+tdn+step,step):
                if smts[cr].has_key(p):
                    msmts[cr][p]=1  # mark this summit as mapped
                    ps.append(str(p))
                    vs.append(smts[cr][p])
            if len(ps)>0:
                maxv,minv=max(vs),min(vs)
                tvs=[]
                for i in range(0,len(ps)):tvs.append(str(vs[i]))
                line='\t'.join([col[nameColID],cr,str(pos),str(maxv),str(minv),','.join(tvs)])
                if rankby=='min':out[line]=minv
                else:out[line]=maxv
    # summary counts: original summits, merged summits, mapped summits
    mcount,tcount,ocount,=0,0,0
    for chr in msmts:mcount+=len(msmts[chr])
    for chr in smts:tcount+=len(smts[chr])
    for chr in summits:ocount+=len(summits[chr])
    print ocount,'summits merged to',tcount,'by step size',step
    print '\n',mcount,'of',tcount,'summits mapped to',len(out),'of',len(lines)-1,'genes\n'
    from operator import itemgetter
    if rankby=='min':aftersort=sorted(out.items(),key=itemgetter(1))
    else:aftersort=sorted(out.items(),key=itemgetter(1),reverse=True)
    rout=[]
    for item in aftersort:
        if out_file:fo.write(item[0]+'\n')
        rout.append(item[0])
    return rout
def plot(dic={'name':{}},outname='',main='',region_size=0,nrow=2,ncol=2,xmin=None,xmax=None,ymin=None,ymax=None,xlab='Relative distance to TSS',ylab='Average occupancy',colors=['black','gray','red','blue','orange','purple','skyblue','cyan','green','blue4','darkgoldenrod'],names=None):
    '''
    Build and execute R code that draws one line per entry of dic.

    dic: dic[curve_name][x_position] = y_value.
    outname: when non-empty, the R code is saved to <outname>.R and the plot
    rendered to <outname>.pdf; the code is executed through rpy2's r() in
    any case.
    region_size: when non-zero, suppress the default x axis and draw custom
    ticks labeled "Start"/"End" at 0/region_size (gene-body style plots).
    Returns the generated R code string ('' on error).
    '''
    if main=='':main=outname
    rcode=''
    if names==None:
        names=dic.keys()
        names.sort()
    # initialize axis limits from the first curve, then widen over all curves
    xmincal,xmaxcal,ymincal,ymaxcal=min(dic[names[0]].keys()),max(dic[names[0]].keys()),min(dic[names[0]].values()),max(dic[names[0]].values())
    if len(colors)<len(names):
        print 'Wrong:please specify ',len(names),'colors for the curves'
        return ''
    for i in range(len(names)):
        name=names[i]
        poses,vals=dic[name].keys(),dic[name].values()
        txmin,txmax,tymin,tymax=min(poses),max(poses),min(vals),max(vals)
        if xmincal>txmin:xmincal=txmin
        if xmaxcal<txmax:xmaxcal=txmax
        if ymincal>tymin:ymincal=tymin
        if ymaxcal<tymax:ymaxcal=tymax
        # emit the curve's points in x order
        poses,vals=[],[]
        tposes=dic[name].keys()
        tposes.sort()
        for pos in tposes:
            poses.append(str(pos))
            vals.append(str(dic[name][pos]))
        rcode+='lines(c('+','.join(poses)+'),c('+','.join(vals)+'),col="'+colors[i]+'")\n'
    rcode+='legend("topright",legend=c("'+'","'.join(names)+'"),col=c("'+'","'.join(colors[0:len(names)])+'"),lty=1)\n'
    # explicit limits override the computed ones; otherwise add headroom
    # above the curves so the legend does not overlap them
    if xmin!=None:xmincal=xmin
    if xmax!=None:xmaxcal=xmax
    if ymin!=None:ymincal=ymin
    if ymax!=None:ymaxcal=ymax
    else:ymaxcal+=(ymaxcal-ymincal)*(len(names)*0.12+0.1)
    if region_size==0:rcode='plot(0,0,type="n",main="'+main+'",xlim=c('+str(xmincal)+','+str(xmaxcal)+'),'+'ylim=c('+str(ymincal)+','+str(ymaxcal)+'),xlab="'+str(xlab)+'",ylab="'+str(ylab)+'",)\n'+rcode
    else:
        # suppress the default x axis (xaxt="n") and draw custom ticks below
        rcode='plot(0,0,type="n",main="'+main+'",xaxt="n",xlim=c('+str(xmincal)+','+str(xmaxcal)+'),'+'ylim=c('+str(ymincal)+','+str(ymaxcal)+'),xlab="'+str(xlab)+'",ylab="'+str(ylab)+'",)\n'+rcode
        poses=dic[names[0]].keys()
        poses.sort()
        at,lb=['0',str(region_size)],['\"Start\"','\"End\"']
        rcode+='axis(side=1,at=c('+','.join(at)+'),labels=c('+','.join(lb)+'))\n'
        # tick spacing: one sixth of the plotted span
        lth=poses[-1]-poses[0]+poses[1]-poses[0]
        lth=int(lth/6)
        at,lb=[],[]
        if poses[0]<0:
            # ticks upstream of the region start
            for pos in range(lth,0-poses[0]+lth,lth):
                at.append('-'+str(pos))
                lb.append('\"-'+str(pos)+'\"')
            rcode+='axis(side=1,at=c('+','.join(at)+'),labels=c('+','.join(lb)+'))\n'
            at,lb=[],[]
        # ticks inside the region body
        for pos in range(lth,region_size-lth+1,lth):
            at.append(str(pos))
            lb.append('\"'+str(pos)+'\"')
        rcode+='axis(side=1,at=c('+','.join(at)+'),labels=c('+','.join(lb)+'))\n'
        at,lb=[],[]
        # ticks downstream of the region end, labeled relative to it
        for pos in range(region_size+lth,poses[-1]+lth,lth):
            at.append(str(pos))
            lb.append('"+'+str(pos-region_size)+'\"')
        rcode+='axis(side=1,at=c('+','.join(at)+'),labels=c('+','.join(lb)+'))\n'
    if outname!='':
        # multi-panel layout rendered into a PDF device
        rcode='par(mfrow=c('+str(nrow)+','+str(ncol)+'))\n'+rcode
        rcode='pdf("'+outname+'.pdf")\n'+rcode
        rcode+='dev.off()\n'
        fo=open(outname+'.R','w')
        fo.write(rcode)
        fo.close()
    r(rcode)
    return rcode
def vioplot(dic={'name':[]},outname='',main='',nrow=2,ncol=2,ymin=None,ymax=None,xlab='Relative distance to TSS',ylab='Average occupancy',colors=['black','gray','red','blue','orange','purple','skyblue','cyan','green','blue4','darkgoldenrod'],names=None):
    '''
    Draw violin plots (one per entry of dic) via the R 'vioplot' package.

    dic: dic[group_name] = list of numeric values.
    outname: when non-empty, the R code is saved to <outname>.R and the plot
    rendered to <outname>.pdf; the code is executed through rpy2's r() in
    any case.
    Returns the generated R code string ('' on error).
    NOTE(review): 'colors', 'xlab' and 'ylab' are accepted for interface
    parity with plot() but are not used in the generated R code.
    '''
    if main=='':main=outname
    if names==None:
        names=dic.keys()
        names.sort()
    if len(colors)<len(names):
        print 'please specify ',len(names),'colors for the curves'
        return ''
    rcode="library('vioplot')\nvioplot("
    for name in names:
        # one c(...) vector argument per group
        temp=[]
        for value in dic[name]:temp.append(str(value))
        rcode+="c("+','.join(temp)+"),"
    rcode+="ylim=c("+str(ymin)+","+str(ymax)+"),names=c("
    for name in names:rcode+="'"+name+"',"
    rcode=rcode[:-1]  # drop the trailing comma
    rcode+="))\n"
    rcode+="mtext('"+main+"')\n"
    if outname!='':
        # multi-panel layout rendered into a PDF device
        rcode='par(mfrow=c('+str(nrow)+','+str(ncol)+'))\n'+rcode
        rcode='pdf("'+outname+'.pdf")\n'+rcode
        rcode+='dev.off()\n'
        fo=open(outname+'.R','w')
        fo.write(rcode)
        fo.close()
    r(rcode)
    return rcode
def occPSD0(wg,outname=None):
    '''
    Average power spectral density of the occupancy signal over 100kb
    windows, computed in R via spec.pgram (through rpy2's r/FloatVector).
    Returns {period_length_in_bp: average_strength} for periods between
    roughly 100bp and 250bp; also written to <outname>.xls when given.
    NOTE(review): relies on Python 2 integer division for lth/2 and the
    window stride, and on 'r'/'FloatVector' imported at module level.
    '''
    psd=r('''function(q){return(spec.pgram(q,plot = FALSE)$spec)}''')
    lth=100000/wg.step
    d=wg.data
    spe=[0.0]*(lth/2)  # accumulated spectrum, summed over all windows
    wn=0
    for cr in d:
        print cr
        sz=d[cr].size
        # slide a 100kb window across the chromosome with 50% overlap
        for i in range(0,sz-lth,lth/2):
            wn+=1
            if wn%100==0:print wn,'window calculated ...'
            v=psd(FloatVector(d[cr][i:(i+lth)]))
            for j in range(lth/2):spe[j]+=v[j]
    print wn,'window calculated.'
    if outname!=None:fo=open(outname+'.xls','w')
    dic={}
    # convert frequency index j back to a period length in base pairs
    for j in range(int(lth*wg.step/250),int(lth*wg.step/100+1)):
        dic[lth*wg.step*1.0/j]=spe[j]/wn
        if outname!=None:fo.write(str(lth*wg.step*1.0/j)+'\t'+str(spe[j]/wn)+'\n')
    return dic
def occPSD(wg,outname=None):
    '''
    Lag-correlation profile of the occupancy signal: for every lag between
    100 and 249 steps, the genome-wide (size-weighted) Pearson correlation
    between the signal and its shifted copy, computed in R via cor()
    (through rpy2's r/FloatVector).
    NOTE(review): despite the name, this computes an autocorrelation
    ("phasogram"-style) profile, not a power spectral density.
    Returns {lag: average_correlation}; also written to <outname>.xls.
    '''
    cor=r('''function(q1,q2){return(cor(q1,q2))}''')
    d=wg.data
    # tsz: total weight; mi..ma: lag range in wig steps
    tsz,mi,ma,dic=0,100,250,{}
    for i in range(mi,ma):dic[i]=0
    for cr in d:
        print cr
        sz=d[cr].size-ma-1
        tsz+=sz
        for i in range(mi,ma):
            v=cor(FloatVector(d[cr][0:sz]),FloatVector(d[cr][i:(i+sz)]))
            # the R scalar prints as e.g. '[1] 0.42'; take the number
            v=float(str(v).split()[1])
            dic[i]+=v*sz  # weight each chromosome by its length
    if outname!=None:fo=open(outname+'.xls','w')
    for i in range(mi,ma):
        dic[i]=dic[i]/tsz
        if outname!=None:fo.write(str(i)+'\t'+str(dic[i])+'\n')
    return dic
def positionDistanceDistribution(dic,outname=None,min=100,max=250,step=1):
    '''
    Histogram of distances between neighboring positions on each chromosome.

    dic:     dic[chromosome][position] = value (only the keys are used).
    outname: optional basename; the table is also written to <outname>.xls.
    min/max: inclusive distance range (in bp) to report; None means use the
             smallest/largest observed distance.  The parameter names shadow
             the builtins, which is kept for interface compatibility.
    step:    bin width in bp.
    Returns {distance_in_bp: percentage_of_all_positions}.
    '''
    # the upper bound is inclusive; defer the +1 until a None max is resolved
    # (the original 'max=max+1' crashed with TypeError when max was None)
    if max!=None:max=max+1
    ct,dis=0,{}
    for cr in dic:
        poses=sorted(dic[cr].keys())
        ct+=len(poses)
        lth=len(poses)
        for i in range(1,lth):
            d=int((poses[i]-poses[i-1])/step)
            if not d in dis:dis[d]=1
            else:dis[d]+=1
    # nothing to histogram: avoid IndexError/ZeroDivisionError below
    if (min==None or max==None) and len(dis)==0:return {}
    if ct==0:return {}
    if outname!=None:fo=open(outname+'.xls','w')
    # the parameters shadow the builtins, so derive the defaults from the
    # observed distances instead of calling min()/max() (the original
    # 'min=min(dis.keys())' raised TypeError because min was None here)
    if min==None:min=sorted(dis)[0]*step
    if max==None:max=sorted(dis)[-1]*step+1
    odic={}
    for d in range(int(min/step),int(max/step)):
        if not d in dis:dis[d]=0
        dis[d]=dis[d]*100.0/ct #change to percentage
        odic[d*step]=dis[d]
        if outname!=None:fo.write(str(d*step)+'\t'+str(dis[d])+'\n')
    if outname!=None:fo.close()
    return odic
def positionDistance(dic,outname=None,min=0,max=350):
    '''
    For every position, record the gap to the next position on the same
    chromosome.

    dic: dic[chromosome][position] = value (the values are ignored).
    outname/min/max are accepted for interface compatibility but unused by
    the current implementation; max is still bumped by one as before.
    Returns dis[chromosome][position] = distance to the following position
    (the last position on each chromosome gets no entry).
    '''
    max=max+1  # kept for parity with the original implementation
    distances={}
    for chrom in dic:
        ordered=sorted(dic[chrom])
        gaps={}
        # pair each position with its right-hand neighbor
        for left,right in zip(ordered,ordered[1:]):
            gaps[left]=right-left
        distances[chrom]=gaps
    return distances
def positionValDistribution(dic,outname=None,min=0,max=1500,step=3):
    '''
    Distribution of per-position values, binned by 'step'.

    dic:     dic[chromosome][position] = value.
    outname: optional basename; the full table is written to <outname>.xls.
    min/max: value range to report (e.g. 0/1500 step 3 for occupancy,
             15/25 step 0.01 for fuzziness).
    Returns {bin_value: percentage} restricted to the non-zero bins.
    '''
    counts={}
    total=0
    for chrom in dic:
        total+=len(dic[chrom])
        for value in dic[chrom].values():
            bin_id=int(value/step+0.5)  # round to the nearest bin
            counts[bin_id]=counts.get(bin_id,0)+1
    if total<1:return {}
    writer=None
    if outname!=None:writer=open(outname+'.xls','w')
    percentages={}
    for bin_id in range(int(min/step),int(max/step)):
        share=counts.get(bin_id,0)*100.0/total  # count -> percentage
        if writer!=None:writer.write(str(bin_id*step)+'\t'+str(share)+'\n')
        percentages[bin_id*step]=share
    # only report bins that actually contain positions
    nonzero={}
    for key in percentages:
        if percentages[key]>0:nonzero[key]=percentages[key]
    return nonzero
def positionAroundPoint(smts,outname=None,flankup=2500,flankdn=2500,step=10,chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,sep='\t',second_sep=None,lines=None):
    '''
    Density of summit positions around a set of reference points (e.g. TSS).

    smts: smts[chromosome][position] = value (values are ignored; presence
    counts).  lines: annotation rows; the reference coordinate is taken from
    posColIDpos on the plus strand and posColIDneg otherwise, optionally a
    second_sep-separated list.  Results are on a grid of width 'step'.
    Returns {relative_offset: summit_fraction}; also written to
    <outname>.xls when outname is given.  'nameColID' and 'sep' are accepted
    for interface parity but unused (rows are split on any whitespace).
    '''
    # snap the flank sizes and all summit coordinates onto the step grid
    flankup-=flankup%step
    flankdn-=flankdn%step
    snapped={}
    for chrom in smts:
        snapped[chrom]={}
        for position in smts[chrom]:
            snapped[chrom][position-position%step]=smts[chrom][position]
    profile={}
    npoints=0
    for line in lines:
        fields=line.split()
        chrom,strand=fields[chrColID],fields[straColID]
        anchor=fields[posColIDpos]
        if strand!='+':anchor=fields[posColIDneg]
        anchors=[anchor] if second_sep==None else anchor.split(second_sep)
        for anchor in anchors:
            if anchor=='':continue
            point=int(anchor)
            point-=point%step
            if chrom not in snapped:continue
            npoints+=1
            if strand=='+':
                for p in range(point-flankup,point+flankdn,step):
                    if p in snapped[chrom]:
                        profile[p-point]=profile.get(p-point,0)+1
            elif strand=='-':
                # mirror the window so offsets stay upstream-negative
                for p in range(point-flankdn+step,point+flankup+step,step):
                    if p in snapped[chrom]:
                        profile[point-p]=profile.get(point-p,0)+1
    npoints*=step
    outf=None
    if outname!=None:outf=open(outname+'.xls',"w")
    for offset in range(0-flankup,flankdn,step):
        if offset not in profile:profile[offset]=0
        profile[offset]=profile[offset]*1.0/npoints
        if outf!=None:outf.write(str(offset)+"\t"+str(profile[offset])+"\n")
    return profile
def runall(inpath=None,outpath=None,flankup=3000,flankdn=3000,minfuz=15,maxfuz=25,minocc=0,maxocc=1500,step=10,lines=None):
'''
if outpath==None:outpath=inpath
for file in glob(os.path.join(inpath,'*positions.differential.xls')):
pks=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='point_diff_pval',toprank=None,bottomrank=None,decreasing=True,direction_by=['treat_point_val','control_point_val'])
positions2Points(positions=pks,out_file=os.path.join(outpath,os.path.split(file)[-1][:-3]+'2tss.xls'),up=350,down=50,chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,sep='\t',second_sep=None,step=1,\
neglog=True, rankby='max',lines=lines)
'''
'''
if outpath==inpath:outpath=None
pth=os.path.join(inpath,'diff')
if outpath==None:outpath=pth
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
fn=re.sub('\.\w+\_diff','',fn)
smts[fn]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='smt_value',toprank=0,bottomrank=2000,decreasing=True,direction_by=[])
dic=batchPositionAroundPoints(smts,outname=os.path.join(outpath,'diff_position_ard_ESS'),flankup=flankup,flankdn=flankdn,step=step,chrColID=1,nameColID=0,posColIDpos=8,posColIDneg=9,straColID=2,sep='\t',second_sep=',',lines=lines)
plot(dic=dic,outname=os.path.join(outpath,'diff_position_ard_ESS'),xlab='distance to ESS',ylab='position count')
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
fn=re.sub('\.\w+\_diff','',fn)
smts[fn]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='smt_value',toprank=0,bottomrank=2000,decreasing=True,direction_by=[])
dic=batchPositionAroundPoints(smts,outname=os.path.join(outpath,'diff_position_ard_ETS'),flankup=flankup,flankdn=flankdn,step=step,chrColID=1,nameColID=0,posColIDpos=9,posColIDneg=8,straColID=2,sep='\t',second_sep=',',lines=lines)
plot(dic=dic,outname=os.path.join(outpath,'diff_position_ard_ETS'),xlab='distance to ETS',ylab='position count')
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
fn=re.sub('\.\w+\_diff','',fn)
smts[fn]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='smt_value',toprank=0,bottomrank=2000,decreasing=True,direction_by=[])
dic=batchPositionAroundPoints(smts,outname=os.path.join(outpath,'diff_position_ard_TSS'),flankup=flankup,flankdn=flankdn,step=step,chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,sep='\t',second_sep=',',lines=lines)
plot(dic=dic,outname=os.path.join(outpath,'diff_position_ard_TSS'),xlab='distance to TSS',ylab='position count')
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
fn=re.sub('\.\w+\_diff','',fn)
smts[file]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='smt_value',toprank=0,bottomrank=2000,decreasing=True,direction_by=[])
dic=batchPositionAroundPoints(smts,outname=os.path.join(outpath,'diff_position_ard_TTS'),flankup=flankup,flankdn=flankdn,step=step,chrColID=1,nameColID=0,posColIDpos=4,posColIDneg=3,straColID=2,sep='\t',second_sep=',',lines=lines)
plot(dic=dic,outname=os.path.join(outpath,'diff_position_ard_TTS'),xlab='distance to TTS',ylab='position count')
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
fn=re.sub('\.\w+\_diff','',fn)
smts[fn]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='smt_value',toprank=0,bottomrank=2000,decreasing=True,direction_by=[])
dic=batchPositionValDistribution(data=smts,outname=os.path.join(outpath,'diff_position_Value_distribution'),min=minocc,max=maxocc,step=1)
plot(dic=dic,outname=os.path.join(outpath,'diff_position_Value_distribution'),xlab='-log P value',ylab='Percentage')
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
fn=re.sub('\.\w+\_diff','',fn)
smts[fn]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='smt_value',toprank=0,bottomrank=2000,decreasing=True,direction_by=[])
dic=batchPositionDistanceDistribution(data=smts,outname=os.path.join(outpath,'diff_position_distance_distribution'),min=100,max=250,step=1)
plot(dic=dic,outname=os.path.join(outpath,'diff_position_distance_distribution'),xlab='distance',ylab='Percentage')
wgs=Wigs(pth)
for k in wgs.keys():
fn=os.path.split(k)[-1]
fn=re.sub('\.\w+\_diff','',fn)
print fn
twg=deepcopy(wgs.get(k))
twg.rvNeg()
wgs.set(fn[:-3]+'gain',twg)
wgs.get(k).foldChange(-1)
wgs.get(k).rvNeg()
wgs.set(fn[:-3]+'loss',wgs.pop(k))
for wg in wgs.keys():print wg,wgs.get(wg).sum()
tts=batchOccAroundPoints(wgs=wgs,outname=os.path.join(outpath,'diff_value_ard_TTS'),flankup=flankup,flankdn=flankdn,chrColID=1,nameColID=0,posColIDpos=4,posColIDneg=3,straColID=2,sep='\t',second_sep=None,lines=lines)
plot(dic=tts,nrow=2,ncol=2,outname=os.path.join(outpath,'diff_value_ard_TTS'),xlab='Relative distance to TTS',ylab='Average value')
tss=batchOccAroundPoints(wgs=wgs,outname=os.path.join(outpath,'diff_value_ard_TSS'),flankup=flankup,flankdn=flankdn,chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,sep='\t',second_sep=None,lines=lines)
plot(dic=tss,nrow=2,ncol=2,outname=os.path.join(outpath,'diff_value_ard_TSS'),xlab='Relative distance to TSS',ylab='Average value')
ess=batchOccAroundPoints(wgs=wgs,outname=os.path.join(outpath,'diff_value_ard_ESS'),flankup=flankup,flankdn=flankdn,chrColID=1,nameColID=0,posColIDpos=8,posColIDneg=9,straColID=2,sep='\t',second_sep=',',lines=lines)
plot(dic=ess,nrow=2,ncol=2,outname=os.path.join(outpath,'diff_value_ard_ESS'),xlab='Relative distance to ESS',ylab='Average value')
ets=batchOccAroundPoints(wgs=wgs,outname=os.path.join(outpath,'diff_value_ard_ETS'),flankup=flankup,flankdn=flankdn,chrColID=1,nameColID=0,posColIDpos=9,posColIDneg=8,straColID=2,sep='\t',second_sep=',',lines=lines)
plot(dic=ets,nrow=2,ncol=2,outname=os.path.join(outpath,'diff_value_ard_ETS'),xlab='Relative distance to ETS',ylab='Average value')
'''
#if outpath==pth:outpath=None
pth=os.path.join(inpath,'pooled')
if outpath==None:outpath=pth
'''
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
smts[file]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='fuzziness_score',toprank=None,bottomrank=None,decreasing=False,direction_by=[])
dic=batchPositionAroundPoints(smts,outname=os.path.join(outpath,'position_ard_ESS'),flankup=flankup,flankdn=flankdn,step=step,chrColID=1,nameColID=0,posColIDpos=8,posColIDneg=9,straColID=2,sep='\t',second_sep=',',lines=lines)
plot(dic=dic,outname=os.path.join(outpath,'position_ard_ESS'),xlab='distance to ESS',ylab='position count')
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
smts[file]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='fuzziness_score',toprank=None,bottomrank=None,decreasing=False,direction_by=[])
dic=batchPositionAroundPoints(smts,outname=os.path.join(outpath,'position_ard_ETS'),flankup=flankup,flankdn=flankdn,step=step,chrColID=1,nameColID=0,posColIDpos=9,posColIDneg=8,straColID=2,sep='\t',second_sep=',',lines=lines)
plot(dic=dic,outname=os.path.join(outpath,'position_ard_ETS'),xlab='distance to ETS',ylab='position count')
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
smts[file]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='fuzziness_score',toprank=None,bottomrank=None,decreasing=False,direction_by=[])
dic=batchPositionAroundPoints(smts,outname=os.path.join(outpath,'position_ard_TSS'),flankup=flankup,flankdn=flankdn,step=step,chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,sep='\t',second_sep=',',lines=lines)
plot(dic=dic,outname=os.path.join(outpath,'position_ard_TSS'),xlab='distance to TSS',ylab='position count')
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
smts[file]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='fuzziness_score',toprank=None,bottomrank=None,decreasing=False,direction_by=[])
dic=batchPositionAroundPoints(smts,outname=os.path.join(outpath,'position_ard_TTS'),flankup=flankup,flankdn=flankdn,step=step,chrColID=1,nameColID=0,posColIDpos=4,posColIDneg=3,straColID=2,sep='\t',second_sep=',',lines=lines)
plot(dic=dic,outname=os.path.join(outpath,'position_ard_TTS'),xlab='distance to TTS',ylab='position count')
'''
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
smts[file]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='fuzziness_score',toprank=None,bottomrank=None,decreasing=False,direction_by=[])
dic=batchPositionValDistribution(data=smts,outname=os.path.join(outpath,'position_fuzziness_distribution'),min=minfuz,max=maxfuz,step=0.01)
plot(dic=dic,outname=os.path.join(outpath,'position_fuzziness_distribution'),nrow=1,ncol=2,xlab='Fuzziness',ylab='Percentage')
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
smts[file]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='smt_value',toprank=None,bottomrank=None,decreasing=False,direction_by=[])
dic=batchPositionValDistribution(data=smts,outname=os.path.join(outpath,'position_occupancy_distribution'),min=minocc,max=maxocc,step=3)
plot(dic=dic,outname=os.path.join(outpath,'position_occupancy_distribution'),xlab='occupancy',ylab='Percentage')
smts={}
for file in glob(os.path.join(pth,'*positions.xls')):
fn=os.path.split(file)[-1]
fn=re.sub('positions.xls','',fn)
smts[file]=retrieve_positions_by_rank(in_file=file,out_file=None,col_name='smt_value',toprank=None,bottomrank=None,decreasing=False,direction_by=[])
dic=batchPositionDistanceDistribution(data=smts,outname=os.path.join(outpath,'position_distance_distribution'),min=50,max=400,step=step)
plot(dic=dic,outname=os.path.join(outpath,'position_distance_distribution'),xlab='distance',ylab='Percentage')
'''
wgs=Wigs(pth,step=step)
for wg in wgs.keys():print wg,wgs.get(wg).sum()
tts=batchOccAroundPoints(wgs=wgs,outname=os.path.join(outpath,'occ_ard_random'),flankup=flankup,flankdn=flankdn,chrColID=2,nameColID=1,posColIDpos=5,posColIDneg=4,straColID=3,sep='\t',second_sep=None,lines=lines)
plot(dic=tts,nrow=2,ncol=2,outname=os.path.join(outpath,'occ_ard_random'),xlab='Relative distance to random sites',ylab='Average occupancy')
for wg in wgs.keys():print wg,wgs.get(wg).sum()
tts=batchOccAroundPoints(wgs=wgs,outname=os.path.join(outpath,'occ_ard_TTS'),flankup=flankup,flankdn=flankdn,chrColID=2,nameColID=1,posColIDpos=5,posColIDneg=4,straColID=3,sep='\t',second_sep=None,lines=lines)
plot(dic=tts,nrow=2,ncol=2,outname=os.path.join(outpath,'occ_ard_TTS'),xlab='Relative distance to TTS',ylab='Average occupancy')
tss=batchOccAroundPoints(wgs=wgs,outname=os.path.join(outpath,'occ_ard_TSS'),flankup=flankup,flankdn=flankdn,chrColID=2,nameColID=1,posColIDpos=4,posColIDneg=5,straColID=3,sep='\t',second_sep=None,lines=lines)
plot(dic=tss,nrow=2,ncol=2,outname=os.path.join(outpath,'occ_ard_TSS'),xlab='Relative distance to TSS',ylab='Average occupancy')
ess=batchOccAroundPoints(wgs=wgs,outname=os.path.join(outpath,'occ_ard_ESS'),flankup=flankup,flankdn=flankdn,chrColID=2,nameColID=1,posColIDpos=9,posColIDneg=10,straColID=3,sep='\t',second_sep=',',lines=lines)
plot(dic=ess,nrow=2,ncol=2,outname=os.path.join(outpath,'occ_ard_ESS'),xlab='Relative distance to ESS',ylab='Average occupancy')
ets=batchOccAroundPoints(wgs=wgs,outname=os.path.join(outpath,'occ_ard_ETS'),flankup=flankup,flankdn=flankdn,chrColID=2,nameColID=1,posColIDpos=10,posColIDneg=9,straColID=3,sep='\t',second_sep=',',lines=lines)
plot(dic=ets,nrow=2,ncol=2,outname=os.path.join(outpath,'occ_ard_ETS'),xlab='Relative distance to ETS',ylab='Average occupancy')
psd=batchOccPSD(wgs,outname=os.path.join(outpath,'psd'))
plot(dic=psd,outname=os.path.join(outpath,'psd'),xlab='Periodicity unit length',ylab='Strength')
'''
print 'job done\n'
def randomTSS(genomefile=None, genefile=None):
    '''
    Print a copy of a UCSC-style gene table whose columns 5 and 6 (txStart,
    txEnd) are replaced by random coordinates on the gene's own chromosome.

    genomefile: two whitespace-separated columns: chromosome name, length.
    genefile:   tab-separated gene table whose column 3 is the chromosome;
                its header line is echoed unchanged.
    Output goes to stdout (intended to be redirected into a file).
    '''
    gd={}
    from random import randint
    for line in open(genomefile):
        col=line.split()
        gd[col[0]]=int(col[1])-1  # largest valid 0-based coordinate
    '''
    stras=['+','-']
    n=0
    for cr in gd:
        for i in range(gd[col[0]]/10000):
            n+=1
            start,end,stra=str(randint(0,gd[cr])),str(randint(0,gd[cr])),stras[randint(0,1)]
            print '\t'.join([str(n),str(n),cr,stra,start,end,'end'])
    '''
    fi=open(genefile)
    print fi.readline()[:-1]  # echo the header line without its newline
    for line in fi:
        col=line[:-1].split('\t')
        # randomize txStart/txEnd independently (they may end up inverted)
        col[4]=str(randint(0,gd[col[2]]))
        col[5]=str(randint(0,gd[col[2]]))
        print '\t'.join(col)
def positionDicMinMax(dic,lowPercent=0,highPercent=100):
    '''
    Percentile cutoffs and trimmed value lists for a nested position dict.

    dic: dic[sample][chromosome][position] = value.
    lowPercent/highPercent: percentile bounds in [0, 100], computed over the
    values pooled across every sample and chromosome.
    Returns [low_cutoff, high_cutoff, trimmed] where trimmed[sample] lists
    that sample's values strictly between the two cutoffs.
    '''
    pooled=[]
    for sample in dic:
        for chrom in dic[sample]:
            pooled.extend(dic[sample][chrom].values())
    pooled.sort()
    n=len(pooled)
    low=pooled[int(n*lowPercent/100.0)]
    high=pooled[int(n*highPercent/100.0)-1]
    trimmed={}
    for sample in dic:
        kept=[]
        for chrom in dic[sample]:
            kept+=[v for v in dic[sample][chrom].values() if v>low and v<high]
        trimmed[sample]=kept
    return [low,high,trimmed]
def translocationReads(file,bindic={},binSize=1000000,outReadsFile='out.sam',outmode='a',pdis=3000,step=1,mapq=30,clipSize=3,inter=True,intra=True,allWigFile=None,transWigFile=None,readsCount={}):
    """Scan a paired-end SAM file for translocation-supporting read pairs.

    A pair supports a translocation when its two mates map to different
    chromosomes (``inter``) or to the same chromosome farther apart than
    ``pdis`` (``intra``).  Pairs passing the mapping-quality cutoff ``mapq``
    and (when ``clipSize`` > 0) carrying a soft-clip of at least ``clipSize``
    bases are written to ``outReadsFile`` and accumulated into a wiggle track.
    Per-category read counts are accumulated into the (mutable, shared)
    ``readsCount`` dict.  Returns ``[transwig, allwig]``.

    NOTE(review): Python 2 only (``print`` statements, ``has_key``, integer
    division); ``bindic`` and ``readsCount`` are mutable default arguments
    shared across calls -- apparently intentional, as callers pass them in to
    accumulate over multiple files.
    """
    #if inter and intra:outReadsFile,transWigFile,allWigFile=os.path.join(name,file[:-3]+'trans.sam'),os.path.join(name,file[:-3]+'trans.wig'),os.path.join(name,file[:-3]+'all.wig')
    #elif inter:outReadsFile,transWigFile,allWigFile=os.path.join(name,file[:-3]+'inter.sam'),os.path.join(name,file[:-3]+'inter.wig'),os.path.join(name,file[:-3]+'all.wig')
    #elif intra:outReadsFile,transWigFile,allWigFile=os.path.join(name,file[:-3]+'intra.sam'),os.path.join(name,file[:-3]+'intra.wig'),os.path.join(name,file[:-3]+'all.wig')
    #else:return []
    transwig=Wig(step=step)
    fo=open(outReadsFile,outmode)
    infile=open(file)#os.popen('samtools view -XS '+file)
    line=infile.readline()
    hlines=[]
    # Parse the SAM header: allocate per-chromosome bins and wiggle arrays
    # from each @SQ line (chromosome name and length).
    while line[0]=='@':
        col=line.split()
        if col[0]=='@SQ':
            chr,clen=col[1][3:],int(col[2][3:])+step
            if not bindic.has_key(chr):bindic[chr]={}
            for bin in range(clen/binSize+1):
                if not bindic[chr].has_key(bin):bindic[chr][bin]={}
            transwig.data[chr]=numpy.array([0.0])
            transwig.data[chr].resize(clen/step,refcheck=0)
        hlines.append(line)
        line=infile.readline()
    #rdlth=len(line.split('\t')[9])
    # Re-open and skip the header so the body loop starts at the first read.
    infile=open(file)
    for i in range(len(hlines)):infile.readline()
    #if allWigFile!=None:
    allwig=deepcopy(transwig)
    # nt: read counter; ne/na: inter/intra translocated; no: other;
    # nl: low map quality; nu: unmappable.  Counted in units of reads (x2).
    nt,ne,na,no,nl,nu=-1,0,0,0,0,0
    line1,line2='',''
    for line in infile:#.readline()
        nt+=1
        # Reads are assumed to come in consecutive mate pairs (line1, line2).
        if nt%2==0:
            line1=line
            continue
        else:line2=line
        col1,col2=line1[:-1].split('\t'),line2[:-1].split('\t')
        chr1,chr2,mapq1,mapq2,pos1,pos2=col1[2],col2[2],float(col1[4]),float(col2[4]),int(col1[3]),int(col2[3])
        if chr1=='*' or chr2=='*':
            nu+=2 #un-mappable
            continue
        unique=True
        if mapq1<mapq or mapq2<mapq:nl,unique=nl+2,False #low mapping-quality ->non-unique
        # tra: intra-chromosome distant pair; ter: inter-chromosome pair.
        tra,ter=False,False
        if chr1==chr2:
            dis=pos1-pos2
            if dis<0:dis=0-dis
            if dis>pdis:
                tra=True
        else:
            ter=True
        trans=False
        if inter and ter:trans=True
        elif intra and tra:trans=True
        bre=False #will be set to True if clipSize<=0 or any of the reads in the pair has soft clip
        if col1[5]=='*' or col2[5]=='*':continue
        # Walk the CIGAR string of each mate: t1 = lengths, t2 = operations.
        for line in [line1,line2]:
            col=line.split('\t')
            rdlth=len(col[9])
            t1=re.findall('\d+',col[5])
            t2=re.findall('\D+',col[5])
            '''
            if not unique:#len(t1)!=len(t2):
                print line
                continue
            '''
            start=int(col[3])
            tbre=False# will set bre to True if tbre become True later.
            if clipSize<=0:tbre=True
            else:
                # Accept a read with a leading or trailing soft clip ('S')
                # of at least clipSize bases as breakpoint evidence.
                if t2[0]=='S':
                    if int(t1[0])>=clipSize:tbre=True
                elif t2[-1]=='S':
                    if int(t1[-1])>=clipSize:tbre=True
            if tbre:bre=True#set bre to True if tbre become True.
            mlth=0
            for i in range(len(t2)):
                t1[i]=int(t1[i])
                if t2[i]=='M' :mlth+=t1[i]
            for i in range(len(t2)):
                if t2[i]=='M' :#all matched nucleotide will be counted into wiggle format data.
                    end=start+t1[i]
                    # Per-base coverage, normalized by the matched length.
                    allwig.data[col[2]][start/step:end/step]+=1.0/mlth
                    if unique and tbre and trans:transwig.data[col[2]][start/step:end/step]+=1.0#/rdlth
                    start=end
                elif t2[i]=='D':start=start+t1[i]
                elif t2[i]=='N':start=start+t1[i]
        # Emit and count pairs that have both breakpoint evidence and a
        # translocation-compatible geometry.
        if bre and trans:
            fo.write(line1+line2)
            if ter and bre:ne+=2 #inter-chromosome translocation
            elif tra and bre:na+=2 #intra-chromosome translocation
        else:no+=2
    fo.close()
    if allWigFile!=None:allwig.save(allWigFile)
    if transWigFile!=None:transwig.save(transWigFile)
    print 'all raw reads:',nt+1
    print 'unmappable:',nu,nu*100.0/nt,'%'
    print 'low map quality (non-unique):',nl,nl*100.0/nt,'%'
    if inter:print 'inter-chromosome translocated and unique:',ne,ne*100.0/nt,'%'
    if intra:print 'intra-chromosome translocated and unique:',na,na*100.0/nt,'%'
    print 'other unique:',no,no*100.0/nt,'%'
    print 'All unique:',ne+na+no,(ne+na+no)*100.0/nt,'%'
    # Accumulate the per-category totals into the shared readsCount dict.
    if not readsCount.has_key('all'):readsCount['all']=nt
    else:readsCount['all']+=nt
    if not readsCount.has_key('unmappable'):readsCount['unmappable']=nu
    else:readsCount['unmappable']+=nu
    if not readsCount.has_key('non_unique'):readsCount['non_unique']=nl
    else:readsCount['non_unique']+=nl
    if not readsCount.has_key('unique_inter'):readsCount['unique_inter']=ne
    else:readsCount['unique_inter']+=ne
    if not readsCount.has_key('unique_intra'):readsCount['unique_intra']=na
    else:readsCount['unique_intra']+=na
    if not readsCount.has_key('unique_other'):readsCount['unique_other']=no
    else:readsCount['unique_other']+=no
    if not readsCount.has_key('unique'):readsCount['unique']=ne+na+no
    else:readsCount['unique']+=ne+na+no
    if not readsCount.has_key('mappable'):readsCount['mappable']=ne+na+no+nl
    else:readsCount['mappable']+=ne+na+no+nl
    if not readsCount.has_key('trans'):readsCount['trans']=0
    if inter:readsCount['trans']+=ne#readsCount['unique_inter']
    if intra:readsCount['trans']+=na#readsCount['unique_intra']
    #if allWigFile!=None:return [transwig,allwig,]#trans reads count, all reads coverage, trans reads coverage
    return [transwig,allwig]
def translocationLinks(peaks,samFile,linkfile,bindic={},binSize=1000000,wsize=500,wstep=0,fold=0,logP=0):
    """Count read pairs linking every pair of peak windows and write a link
    table with hypergeometric (Fisher-style) P-values.

    ``peaks`` is a ``{chr: {start: end}}`` dict; each peak is expanded to one
    fixed-size window (or a sliding series of windows when ``wstep`` > 0).
    Each pair in ``samFile`` is assigned to the window(s) covering each mate
    (id 0 is the catch-all "no window" bucket), edge counts are accumulated,
    and one line per window pair is written to ``linkfile``.

    Returns the total number of read pairs seen.

    NOTE(review): Python 2 only (``print``, ``has_key``, integer division);
    ``fisherTest`` is an R function compiled through rpy's ``r(...)`` -- an R
    runtime must be available.  ``fold`` and ``logP`` are accepted but unused
    here; filtering appears to happen in ``mergeLinks`` instead.
    """
    #fisherTest=r('''function(x,m,k,t){return(phyper(x - 1, m, t-m, k, lower.tail = FALSE,log.p = TRUE)/log(10))}''')
    #fisherTest=r('''function(ov,n1,n2,n){return(phyper(ov - 1, n1, n-n1, n2, lower.tail = FALSE,log.p = TRUE)/log(10))}''')
    fisherTest=r('''function(ov,n1,n2,n){return(phyper(ov - 1, n1, n-n1, n2, lower.tail = FALSE))}''')
    pks,lks=bindic,{}
    infile=open(samFile)#os.popen('samtools view -XS '+file)
    line=infile.readline()
    hlines=[]
    # Count header lines so they can be skipped after re-opening the file.
    while line[0]=='@':
        hlines.append(line)
        line=infile.readline()
    infile=open(samFile)
    for i in range(len(hlines)):infile.readline()
    pklines=['-\t-\t-\t-\t-']#'\t'.join(title.split('\t')[:3]+['len'])]
    id=0
    bn={}  # window id -> number of pairs with a mate in that window
    bn[0]=0
    # Build the window index: pks[chr][bin][window_start] = [window_end, id].
    for cr in peaks:
        starts=peaks[cr].keys()
        starts.sort()
        for start in starts:
            end = peaks[cr][start]
            if wstep>0:
                # Sliding windows of width wsize across the peak.
                for nstart in range(start-wsize/2,end-wsize/2,wstep):
                    if nstart<0:nstart=0
                    id+=1
                    lks[id],bn[id]={},0
                    pklines.append('\t'.join([cr,str(start),str(end),str(nstart),str(nstart+wsize)]))
                    sBin,eBin=nstart/binSize,(nstart+wsize)/binSize
                    for bin in range(sBin,eBin+1):
                        if not pks[cr].has_key(bin):pks[cr][bin]={}
                        pks[cr][bin][nstart]=[nstart+wsize,id]
            else:
                # One window per peak, widened to at least wsize around the
                # peak midpoint.
                if end-start<wsize:
                    mid=(start+end)/2
                    nstart,nend=mid-wsize/2,mid+wsize/2
                    if nstart<0:nstart,nend=0,wsize
                else:
                    nstart,nend=start,end
                id+=1
                lks[id],bn[id]={},0
                pklines.append('\t'.join([cr,str(start),str(end),str(nstart),str(nend)]))
                sBin,eBin=nstart/binSize,nend/binSize
                for bin in range(sBin,eBin+1):
                    if not pks[cr].has_key(bin):pks[cr][bin]={}
                    pks[cr][bin][nstart]=[nend,id]
    #print 'peaks count:',id
    lks[0]={}
    ids=lks.keys()
    ids.sort()
    tn=0
    # Assign each pair's mates to windows and accumulate edge counts.
    # Counts are in half-units (0.5 per SAM line, i.e. 1.0 per pair).
    for line in infile:
        tn+=0.5
        col=line[:-1].split('\t')
        cr1,cr2,pos1,pos2=col[2],col[6],int(col[3]),int(col[7])
        if cr2=='=':cr2=cr1
        bin1,bin2=pos1/binSize,pos2/binSize
        id1,id2=[],[]
        for start in pks[cr1][bin1]:
            if pos1>start and pos1<pks[cr1][bin1][start][0]:id1.append(pks[cr1][bin1][start][1])
        for start in pks[cr2][bin2]:
            if pos2>start and pos2<pks[cr2][bin2][start][0]:id2.append(pks[cr2][bin2][start][1])
        if len(id2)==0:id2=[0]
        if len(id1)==0:id1=[0]
        for fid in id1:
            bn[fid]+=0.5
            for tid in id2:
                #if pklines[fid]>pklines[tid]:continue
                #elif pklines[fid]==pklines[tid]:adv=0.5#the value to be added
                #bn[fid]+=0.5
                if fid!=tid:bn[tid]+=0.5
                # Store edges with the smaller id first so each pair of
                # windows has a single canonical entry.
                if fid>tid:
                    temp=tid
                    tid=fid
                    fid=temp
                if not lks[fid].has_key(tid):lks[fid][tid]=0
                lks[fid][tid]+=0.5
    #if ln-tn-temp>0.5:
    #    temp=ln-tn
    #if len(id1)>1 or len(id2)>1:print tn,ln,cr1,cr2,pos1,pos2,id1,id2,pklines[fid],pklines[tid]
    #tn=int(tn)
    #print 'total edges:',ln,ln*2
    print 'total edges:',tn,sum(bn.values())#tn may be smaller than the sum of values in bn or lks, due to the fact that there are overlapped bins in bn keys
    lkf=open(linkfile,'w')
    lkf.write('chrA\tstartA\tendA\twindowAstart\twindowAend\tedgeA\tchrB\tstartB\tendB\twindowBstart\twindowBend\tedgeB\tedgeAB\texpected\tlog10P\n')
    ids2=deepcopy(ids)
    # Write one line per (fk, tk) window pair with observed/expected edge
    # counts and the hypergeometric enrichment P-value.
    for fk in ids:
        tks=lks[fk].keys()
        tks.sort()
        for tk in ids2:#lks[fk]:
            if lks[fk].has_key(tk):ov=lks[fk][tk]
            else:ov=0
            pv=str(fisherTest(ov,bn[fk],bn[tk],tn)).split()[-1]
            exp=bn[tk]*bn[fk]*1.0/tn
            lkf.write('\t'.join( [pklines[fk],str(int(bn[fk])),pklines[tk],str(int(bn[tk])),str(int(ov)),str(exp),str(pv)] )+'\n')
    lkf.close()
    #print tn
    return tn
def mergeLinks(inlinkfile,samFile,outlinkfile,binSize=1000000,wsize=500,wstep=100,fold=100,logP=-100):
    """Filter a link table by fold change and log10 P-value, merge the
    surviving regions, write them out, and recompute links on the merged set.

    NOTE(review): the final call passes ``peakFile=`` but ``translocationLinks``
    defined above takes ``peaks`` (a dict), not a file path -- as written this
    call would raise a TypeError; confirm which translocationLinks version
    this function was written against.  Python 2 only (``has_key``).
    """
    pf=open(inlinkfile)
    pf.readline()  # skip the header line
    fdColID,pvColID=9,10  # column indices of fold change and log10 P-value
    peaks={}
    for line in pf:
        col=line.split()
        if float(col[fdColID])<fold:continue
        if float(col[pvColID])>logP:continue
        cr,start,end=col[0],int(col[1]),int(col[2])
        if not peaks.has_key(cr):peaks[cr]={}#dic[col[0]]
        peaks[cr][start]=end
    # merge_peaks_by_head_tail_distance is defined elsewhere in this module.
    peaks=merge_peaks_by_head_tail_distance(peaks,distance=0)
    fo=open(outlinkfile,'w')
    for cr in peaks:
        for start in peaks[cr]:fo.write(cr+'\t'+str(start)+'\t'+str(peaks[cr][start])+'\n')
    fo.close()
    translocationLinks(peakFile=outlinkfile,samFile=samFile,linkfile=outlinkfile,binSize=binSize,wsize=0,wstep=0,fold=0,logP=0)
def peaks2bed(pfile,bfile=None,flank=100):
    """Convert a peak table to a 6-column BED file.

    Parameters:
        pfile: input peak file; whitespace-separated with a header line.
               Columns: chrom, start, end, then a comma-separated list of
               additional positions in column 4.
        bfile: output BED path; defaults to ``pfile`` with its 3-character
               extension replaced by ``bed``.
        flank: bases added on each side of the outermost positions; the left
               edge is clamped at 0.

    Each output line spans min(position) - flank to max(position) + flank,
    with placeholder name/score/strand fields ("1", "1", "+").
    """
    if bfile is None:
        bfile = pfile[:-3] + 'bed'
    # Fix: the original never closed either file handle; use context managers
    # so both are closed (and the output flushed) even on error.
    with open(pfile) as pf, open(bfile, 'w') as bf:
        pf.readline()  # skip the header line
        for line in pf:
            col = line.split()
            cr, ps = col[0], [int(col[1]), int(col[2])]
            for tp in col[3].split(','):
                ps.append(int(tp))
            ps.sort()
            bf.write('\t'.join([cr, str(max(ps[0] - flank, 0)),
                                str(ps[-1] + flank), '1', '1', '+']) + '\n')
def transPair(linkFile):
    """Load a link table into a nested dict keyed by the two linked regions.

    Returns ``{regionA: {regionB: [edgeAB, pvalue]}}`` where each region key
    is the tab-joined first three columns of its side of the link line.
    NOTE(review): columns 12 and 15 are assumed to hold the edge count and
    P-value of the table written by translocationLinks -- confirm against
    that file's actual column layout.  Python 2 only (``has_key``).
    """
    lf=open(linkFile)
    lf.readline()  # skip the header line
    dic={}
    for line in lf:
        col=line.split()
        s1,s2='\t'.join(col[:3]),'\t'.join(col[6:9])
        if not dic.has_key(s1):dic[s1]={}
        dic[s1][s2]=[float(col[12]),float(col[15])]
    # Disabled debugging output kept from the original author:
    '''
    for s1 in dic:
        for s2 in dic[s1]:
            if s1!=s2: print s1+'\t'+s2
    '''
    return dic
def pksDic(peakFile,dic={}):
    """Parse a peak table into ``{chrom: {start: end}}``.

    The first line of ``peakFile`` is treated as a header and skipped.  When
    the same (chrom, start) appears more than once the largest end wins.
    Note: the ``dic`` parameter is immediately rebound and therefore ignored.
    Python 2 only (``has_key``).
    """
    dic={}
    pf=open(peakFile)
    pf.readline()  # skip the header line
    for line in pf:
        col=line.split()
        cr,start,end=col[0],int(col[1]),int(col[2])
        if not dic.has_key(cr):dic[cr]={}
        if not dic[cr].has_key(start):dic[cr][start]=end
        else:
            if dic[cr][start]<end:dic[cr][start]=end
    return dic
if __name__ == "__main__":
    # Reopen stdout unbuffered so progress messages appear immediately.
    # NOTE(review): buffering=0 is Python 2 only; it is invalid for text
    # streams in Python 3.
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # This allow DANPOS to print each message on screen immediately.
    print ''
    # Everything below is the author's scratchpad of previous analysis runs,
    # kept commented out for reference.
    #wg=Wig('bowtie_20120601.Young.unique.wig')
    #lines=open('/lilab/kaifuc/ref_data/genomes/sacCer3/SGD_Genes_sacCer3.2012-04-05.xls').readlines()[:1000]
    #occInRegions(wg=wg,chrColID=1,nameColID=0,startColIDpos=3,startColIDneg=4,endColIDpos=4,endColIDneg=3,straColID=2,step=10,sep='\t',second_sep=None,\
    #             lines=lines,heatmapname=None,avgcurvename=None,flankup=300,flankdn=300,vcal='mean',excludeP=0.01,bin_count=300)
    #occAroundPoints(wg=wg,chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,step=10,sep='\t',second_sep=None,\
    #                lines=lines,heatmapname='bh',avgcurvename='bc',flankup=300,flankdn=300,vcal='mean',excludeP=0.01)
    #lines=open('ucscKnownGene.select.xls').readlines()
    #wname,gname='fetal.mCH','ko4ko-wt4ko.exclude'
    #wname,gname='Nuc_diff','ko4ko-wt4ko.exclude'
    #lines=open('/mount/weili4/kaifu/Huda/RNA/ucscKnowGene/edgeR/mm9.UCSCgenes.knownGene.'+gname+'.xls').readlines()
    #wg=Wig('/mount/weili4/kaifu/Huda/MECP2/clonal1_wig/pooled/nosub/pooled/chip.Fnor.wig')
    #wg=Wig('/mount/weili4/kaifu/Huda/MECP2/wig/result/pooled/MECP2.bgsub.Fnor.ajClonal.smooth.wig')
    #wg=Wig(wname+'.wig')
    #wg=Wig('../wig/wt_pool.Fnor.ajClonal.smooth.wig')
    #wg=Wig('../wig/ko4ko-wt4ko.pois_diff.wig')
    #occAroundPoints(wg=wg,chrColID=1,nameColID=0,posColIDpos=3,posColIDneg=4,straColID=2,step=10,sep='\t',second_sep=None,\
    #                lines=lines,heatmapname='heatmap_promoter',avgcurvename='avgcurve_promoter',flankup=500,flankdn=1000,vcal='mean',excludeP=0)
    #occInRegions(wg=wg,chrColID=1,nameColID=0,startColIDpos=3,startColIDneg=4,endColIDpos=4,endColIDneg=3,straColID=2,step=1000,sep='\t',second_sep=None,\
    #             lines=lines,heatmapname=wname+'.heatmap_genebody.'+gname,avgcurvename=wname+'.avgcurve_genebody.'+gname,flankup=100000,flankdn=100000,vcal='mean',excludeP=0,bin_count=100)
    #print 'job done!'
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # This allow DANPOS to print each message on screen immediately.
    #transPair(linkFile=sys.argv[1])
    #translocationReads(file=sys.argv[1],pdis=3000,step=1,mapq=30)
    #translocationReads0(file='Young.transloc.sam',outfile='temp.sam',pdis=3000,step=1,mapq=30)
    #translocationLinks(peakFile='Old.inter.peaks.xls',samFile='Old.inter.sam',binSize=1000000,readLen=75)
    #translocationReads(file='Old.sam',pdis=1000,step=1)
    #translocationLinks1(samFile='test.sam',linkfile='test.links.xls',binSize=10000,window=500,step=100)
    #translocationLinks2(peakFile='Old.peaks.xls',samFile='Old.transloc.sam',linkfile='out.xls',binSize=10000,wsize=500,wstep=100)
    #o=positionSelectorByGreatTSS(positionLines=open(sys.argv[1]).readlines(),geneFile=sys.argv[2],selection='1000:1000:',chrbinsize=None)
    #for i in o:print i
    #GREATdomain(geneFile=sys.argv[1],bup=-5000,bdn=1000,eup=-1000000,edn=1000000,posStartCol=3,posEndCol=3,negStartCol=4,negEndCol=4,chrbinsize=1000000)
| [
"yrjie0@gmail.com"
] | yrjie0@gmail.com |
def solution(phone_book):
    """Return False if any number in ``phone_book`` is a prefix of another
    (including duplicates), True otherwise.

    Builds a digit trie; a terminal marker ('*') encountered while inserting
    means an earlier number is a prefix of the current one, and a non-empty
    node at the end of an insert means the current number is a prefix of an
    earlier one.
    """
    trie = {}
    for number in phone_book:
        node = trie
        for digit in number:
            node = node.setdefault(digit, {})
            if '*' in node:
                # An earlier number ends here -> it prefixes this one.
                return False
        if node:
            # Longer numbers continue past this point -> this one is a prefix.
            return False
        node['*'] = '*'
    return True
return True | [
"noreply@github.com"
] | noreply@github.com |
344bd38c621d2e0cf736ba0347abf79b7721cd6e | f1818f6ff8269b39892bb3ba98047db8e0370fe6 | /article/migrations/0002_auto_20190901_1918.py | b96a6e0e0c4d46dc0262bb393ba58236863985d9 | [] | no_license | ozythings/blog | 7d3f2c4f769406433d590fc8832b901b2400aad9 | ad4ecf9c4bb0033223e9055cb808433be71450e9 | refs/heads/master | 2020-07-21T10:14:26.899148 | 2019-09-06T16:25:30 | 2019-09-06T16:25:30 | 206,822,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | # Generated by Django 2.2.4 on 2019-09-01 16:18
import ckeditor.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Second migration for the ``article`` app.

    Adds the optional ``article_image`` upload field and re-declares existing
    fields with Turkish ``verbose_name`` labels for the admin UI (the labels
    are user-facing data and must stay as-is).
    """

    dependencies = [
        ('article', '0001_initial'),
    ]

    operations = [
        # New optional file upload for an article's photo.
        migrations.AddField(
            model_name='article',
            name='article_image',
            field=models.FileField(blank=True, null=True, upload_to='', verbose_name='Makaleye Fotoğraf Ekleyin'),
        ),
        # The remaining operations only change field metadata (labels,
        # editor widget), not the underlying schema semantics.
        migrations.AlterField(
            model_name='article',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Yazar'),
        ),
        migrations.AlterField(
            model_name='article',
            name='content',
            field=ckeditor.fields.RichTextField(verbose_name='İçerik'),
        ),
        migrations.AlterField(
            model_name='article',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Oluşturulma Tarihi'),
        ),
        migrations.AlterField(
            model_name='article',
            name='title',
            field=models.CharField(max_length=50, verbose_name='Başlık'),
        ),
    ]
| [
"oguzhanciyiz@gmail.com"
] | oguzhanciyiz@gmail.com |
eb78a4b2dd125da394029b0e4dc4fd7f4c796455 | 68e99da17343937339df7ff5aadcfe0c017697b5 | /catalog/models.py | e19cf903e1379e9065949c0f9c2f2375c31e88d1 | [] | no_license | piusmons/django_local_library | 2fc9ba77529af536199eb769f8bbe55b2bcacace | c34faeb63a53a90cb6d414daccd60b263cdb46a7 | refs/heads/main | 2023-02-09T08:41:37.142106 | 2021-01-03T22:52:20 | 2021-01-03T22:52:20 | 326,514,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,112 | py | from django.db import models
from django.urls import reverse # Used to generate URLs by reversing the URL patterns
import uuid
from django.contrib.auth.models import User
from datetime import date
# Create your models here.
class Genre(models.Model):
    """model representing a book genre"""
    # Free-text genre label; also used as the string representation below.
    name = models.CharField(max_length=200, help_text='Enter a book genre (e.g. Science Fiction)')

    def __str__(self):
        """String for representing the Model object"""
        return self.name
class BookInstance(models.Model):
    """Model representing a specific copy of a book (i.e. that can be borrowed from the library)."""
    # Random UUID primary key so copies are unique across the whole library.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text='Unique ID for this particular book across whole library')
    # SET_NULL keeps the copy record if the Book (or borrower) is deleted.
    book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
    imprint = models.CharField(max_length=200)
    due_back = models.DateField(null=True, blank=True)
    borrower = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)

    # (code, human-readable label) choices for the status field.
    LOAN_STATUS = (
        ('m', 'Maintenance'),
        ('o', 'On loan'),
        ('a', 'Available'),
        ('r', 'Reserved'),
    )

    status = models.CharField(
        max_length=1,
        choices=LOAN_STATUS,
        blank=True,
        default='m',
        help_text='Book availability',
    )

    class Meta:
        # Soonest-due copies first by default.
        ordering = ['due_back']

    def __str__(self):
        """String for representing the Model object."""
        return f'{self.id} ({self.book.title})'

    @property
    def is_overdue(self):
        # Falsy due_back (None) is treated as "not overdue".
        if self.due_back and date.today() > self.due_back:
            return True
        return False
class Author(models.Model):
    """Model representing an author"""
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    date_of_birth = models.DateField(null=True, blank=True)
    # 'Died' overrides the auto-generated verbose name for this field.
    date_of_death = models.DateField('Died', null=True, blank=True)

    class Meta:
        # Alphabetical by surname, then first name.
        ordering = ['last_name', 'first_name']

    def get_absolute_url(self):
        """returns the url to access a particular author instance"""
        return reverse('author-detail', args=[str(self.id)])

    def __str__(self):
        """String for representing the Model object"""
        return f'{self.last_name}, {self.first_name}'
class Language(models.Model):
    """Model representing a Language (e.g. English, French, Japanese, etc.)"""
    name = models.CharField(max_length=200,
                            help_text="Enter the book's natural language (e.g. English, French, Japanese etc.)")

    def __str__(self):
        """String for representing the Model object (in Admin site etc.)"""
        return self.name
class Book(models.Model):
    """Model representing a book (but not a specific copy of a book)."""
    title = models.CharField(max_length=200)

    # Foreign Key used because book can only have one author, but authors can have multiple books
    # Author as a string rather than object because it hasn't been declared yet in the file
    author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)

    summary = models.TextField(max_length=1000, help_text='Enter a brief description of the book')
    isbn = models.CharField('ISBN', max_length=13, unique=True,
                            help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')

    # ManyToManyField used because genre can contain many books. Books can cover many genres.
    # Genre class has already been defined so we can specify the object above.
    genre = models.ManyToManyField(Genre, help_text='Select a genre for this book')

    def __str__(self):
        """String for representing the Model object."""
        return self.title

    def get_absolute_url(self):
        """Returns the url to access a detail record for this book."""
        return reverse('book-detail', args=[str(self.id)])

    def display_genre(self):
        """create a string for the Genre. This is required to display genre in Admin"""
        # Show at most the first three genres, comma-separated.
        return ', '.join(genre.name for genre in self.genre.all()[:3])

    # Column header used by the Django admin for the display_genre column.
    display_genre.short_description = 'Genre'
"piuslchua@gmail.com"
] | piuslchua@gmail.com |
def match(now, needed):
    """Sort both lists in place, then report whether they differ anywhere.

    Returns True on the first position where the sorted lists disagree,
    False when they are identical as multisets.  Note: mutates both inputs.
    """
    now.sort()
    needed.sort()
    for idx, value in enumerate(now):
        if value != needed[idx]:
            return True
    return False
def count(ar, l):
    """Column-wise digit sums over equal-length digit strings.

    For each position 0..l-1, sums int(s[i]) across every string in ``ar``
    and returns the per-position totals as a list of length ``l``.
    """
    return [sum(int(s[i]) for s in ar) for i in range(l)]
def compare(n, o, l):
    """Position-wise mismatch mask of two length-``l`` sequences.

    Returns a tuple with 1 at every index where ``n`` and ``o`` differ and
    0 where they agree.
    """
    return tuple(0 if n[i] == o[i] else 1 for i in range(l))
# Driver: reads Code Jam style input (T cases; each case is "n l" followed by
# the current outlet states and the required device states), finds the switch
# mask (XOR pattern) with the fewest flipped bits that maps every outlet onto
# some device, and writes one answer line per case.
f = open("A-large.in","r")
o = open("A-large-answers.txt","w")
T = int(f.readline())
for t in range(1,T+1):
    inp = [int(a) for a in f.readline().split()]
    n = inp[0]  # number of flows/devices
    l = inp[1]  # bits per flow
    lifts = [0]*l  # unused leftover from an earlier approach
    start = [a for a in f.readline().split()]
    needed = [a for a in f.readline().split()]
    cnow = count(start,l)
    cneeded = count(needed,l)
    print("case",t,cnow,cneeded,start,needed)
    # A mask is feasible only if it works for every start string; intersect
    # the candidate masks (start[i] XOR each needed string) across all i.
    op = set([compare(start[0],n,l) for n in needed])
    for i in range(1,n):
        op1 = set([compare(start[i],n,l) for n in needed])
        op = op&op1
    if len(op) == 0:
        o.write("Case #"+str(t)+": NOT POSSIBLE"+"\n")
    else:
        # Answer is the feasible mask with the fewest switched bits.
        o.write("Case #"+str(t)+": "+str(min([a.count(1) for a in op]))+"\n")
o.close()
#o.write("Case #"+str(t)+": NOT POSSIBLE"+"\n")
#o.write("Case #"+str(t)+": "+str(lifts.count(1))+"\n")
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
2bf0c12af8a63aa130ff5b61c4e8f624b0a23dc3 | 4602c60c5ce05b00355f462e806fdeaac6b578cd | /Classifier_Code/back_import_clean.py | 405b90d775e8201554879d3b055b56fe7b0289c5 | [] | no_license | pranavmrane/News_Headline_Classifier | 8fc57bb5110e7c137789dc02730fd4dad88fb5e9 | 068670e5bd53e92d2408923cbc690f199a489ccb | refs/heads/master | 2020-04-25T14:39:07.793654 | 2019-10-29T21:24:55 | 2019-10-29T21:24:55 | 172,848,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,013 | py | import pandas as pd
import string
import nltk
nltk.download('stopwords')
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
import os
import sys
import traceback
class ImportAndClean:
    """Loads the UCI news-aggregator corpus and normalizes headlines
    (lower-casing, stop-word removal, Porter stemming) for classification."""
    __slots__ = ['dataFrame', 'stemmed', 'stopWordList']

    def __init__(self, file_address=None, columns_required = None):
        """Get dataset address and open file in Python

        Keyword arguments:
        file_address -- (string)
        columns_required -- the required columns from table --
        (list of numbers of columns to be retained)

        NOTE(review): both arguments are overwritten below, so the file is
        always read with columns [1, 4]; also, if read_csv fails the
        exception is only printed and self.dataFrame is never set, so later
        method calls will raise AttributeError.
        """
        # If path is not specified, then read from the same folder as .py file
        if file_address is None:
            dir_path = os.path.dirname(os.path.realpath(__file__))
            dir_separator = os.path.sep
            file_location = dir_path + dir_separator + "newsCorpora.csv"
        else:
            file_location = file_address
        required_columns = [1, 4]
        file_address = file_location
        columns_required = required_columns
        column_names = ["TITLE", "CATEGORY"]
        # Read .csv using pandas
        try:
            self.dataFrame = pd.read_csv(file_address, sep='\t',
                                         names=column_names,
                                         usecols=columns_required)
        except FileNotFoundError:
            print("File not found")
        except Exception:
            print("Exception Found")
            traceback.print_exc(file=sys.stdout)
        # self.dataFrame = self.dataFrame[0:10000]
        self.stemmed = PorterStemmer()
        self.stopWordList = stopwords.words('english')

    def get_text_column(self):
        """Clean Training and Testing Statements

        Return:
        stemmed_headlines -- Cleaned Headlines -- (list)
        """
        list_to_be_cleaned = []
        list_to_be_cleaned = self.dataFrame['TITLE']
        filtered_headlines = self.get_cleaned_list(list_to_be_cleaned)
        stemmed_headlines = self.get_stemmed_list(filtered_headlines)
        return stemmed_headlines

    def process_user_input(self, list_to_be_altered):
        """Clean User Inputs

        Keyword arguments:
        list_to_be_altered -- (list of headlines)

        Return:
        stemmed_headlines -- Cleaned Headlines -- (list)
        """
        filtered_headlines = self.get_cleaned_list(list_to_be_altered)
        stemmed_headlines = self.get_stemmed_list(filtered_headlines)
        return stemmed_headlines

    def get_cleaned_list(self, list_to_be_altered):
        """Remove Punctuations, Numbers

        Keyword arguments:
        list_to_be_altered -- (list of headlines)

        Return:
        filtered_sentences -- Cleaned Headlines -- (list)
        """
        filtered_sentences = []
        lower_clean = []
        # Convert to Lower Case and Remove Punctuation
        # NOTE(review): str.replace takes a literal, not a regex -- the
        # '[{}]'.format(string.punctuation) pattern is treated as a plain
        # string and never matches, so punctuation is NOT stripped here.
        # Punctuation-bearing tokens are instead dropped by the isalpha()
        # filter below; fixing this would change the extracted features.
        for sentence in list_to_be_altered:
            lower_clean.append(sentence.lower()\
                .replace('[{}]'.format(string.punctuation), ''))
        # Tokenize words in a Headline
        # Remove StopWords, Long Words, or Words than contain numbers
        for sentence in lower_clean:
            filtered_sentences.append(" ".join(word for word in sentence.split()
                                      if ((word not in self.stopWordList)
                                          and (word.isalpha())
                                          and (1 < len(word.lower()) < 15))))
        # print("Words in Headlines Cleaned")
        return filtered_sentences

    def get_stemmed_list(self, list_to_be_altered):
        """Stem Words

        Keyword arguments:
        list_to_be_altered -- (list of headlines)

        Return:
        stemmed_sentences -- Cleaned Headlines -- (list)
        """
        stemmed_sentences = []
        for sentence in list_to_be_altered:
            stemmed_word = ""
            for word in sentence.split(' '):
                stemmed_word += " " + self.stemmed.stem(word)
            # Remove Additional Space Added at Beginning
            stemmed_word = stemmed_word[1:]
            stemmed_sentences.append(stemmed_word)
        # print("Stemming of Headlines Completed")
        return stemmed_sentences

    def get_label_column(self):
        """Convert characters to numbers for ease of use

        Return:
        category_numerical -- List of numbers -- (list)

        Mapping: b(usiness)->0, t(ech)->1, e(ntertainment)->2, m(edical)->3.
        """
        category_numerical = self.dataFrame['CATEGORY']. \
            str.replace("b", "0"). \
            str.replace("t", "1"). \
            str.replace("e", "2"). \
            str.replace("m", "3")
        category_numerical = category_numerical.tolist()
        category_numerical = [int(i) for i in category_numerical]
        return category_numerical

    def read_headlines_from_file(self, file_location):
        """Read headlines from file, one headline per line

        Keyword arguments:
        file_location -- (string)

        Return:
        content -- List of headlines -- (list)

        NOTE(review): returns None (implicitly) when the file is missing or
        another exception occurs.
        """
        try:
            with open(file_location) as f:
                content = f.readlines()
                content = [x.strip() for x in content]
                return content
        except FileNotFoundError:
            print("File Not Found")
        except Exception:
            print("Exception Found")
            traceback.print_exc(file=sys.stdout)

    def make_headline_from_user_usable(self, headline):
        """When user gives single headline wrap in list for ease of use

        Keyword arguments:
        headline -- (string)

        Return:
        content -- List of headline -- (list)
        """
        list_of_headlines = []
        list_of_headlines.append(headline)
        return list_of_headlines

    def clean_for_prediction(self, input_for_prediction, multiple_files=None):
        """User input can be file location of headlines or individual headline

        Keyword arguments:
        input_for_prediction -- (string)
        multiple_files -- (boolean) - False for individual headline

        Return:
        combined_lists -- List of 2 Lists -- (original headlines,
        stemmed-cleaned headlines)
        """
        combined_lists = []
        if multiple_files is False:
            # print("Handling input for single Headline:")
            predictor_input = self. \
                make_headline_from_user_usable(input_for_prediction)
        else:
            # print("Handling input for file containing Headlines:")
            predictor_input = self. \
                read_headlines_from_file(input_for_prediction)
        cleaned_list = self.process_user_input(predictor_input)
        combined_lists.append(predictor_input)
        combined_lists.append(cleaned_list)
        return combined_lists
if __name__ == '__main__':
    # Smoke test: construct the loader with the default corpus location.
    value = ImportAndClean()
| [
"pmr5279@g.rit.edu"
] | pmr5279@g.rit.edu |
4c6a4945f123306bcdf31d8b8f17939c2b32cc2f | 094304d0aa7cb6949c0f471d1c432dc7db5a4c2a | /VIRSCAN/vir_scan_db.py | 1358ecd8d77cb2b4438e1bd9cfcaacc0392ee70c | [] | no_license | smallmeet/fangzhen | 7f8e232b87841b88268d14133479846e48e33ba1 | d0cbf09eba98c835a4ea013889a8cf0b34263d0d | refs/heads/master | 2020-12-24T22:28:54.972613 | 2016-08-12T09:24:15 | 2016-08-12T09:24:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | from base_db import MysqlClient
from get_conf import GetConf
class VirScanMysqlClient:
    """Data-access layer for virus-scan records and the APK blacklist.

    Every method delegates to the shared ``MysqlClient`` helper, invoking the
    corresponding stored procedure with the caller-supplied argument tuple.
    """

    def __init__(self, conf):
        # All SQL goes through one MysqlClient built from the given config.
        self.mysql_client = MysqlClient(conf)

    def insert_vir_scan(self, args):
        """Record one virus-scan result."""
        self.mysql_client.insert('PRO_VIR_SCAN_INSERT', args)

    def insert_apk_black_list(self, args):
        """Add an entry to the APK blacklist."""
        self.mysql_client.insert('PRO_APK_BLACK_LIST_INSERT', args)

    def update_apk_black_list(self, args):
        """Update an existing APK blacklist entry (routed via insert())."""
        self.mysql_client.insert('PRO_APK_BLACK_LIST_UPDATE', args)

    def select_vir_scan(self, args):
        """Look up virus-scan results; returns the raw query result."""
        return self.mysql_client.select('PRO_VIR_SCAN_SELECT', args)

    def select_apk_black_list_info(self, args):
        """Look up APK blacklist entries; returns the raw query result."""
        return self.mysql_client.select('PRO_APK_BLACK_LIST_SELECT', args)

    def fetch_apk_black_list_info(self, args):
        """Fetch a batch of APK blacklist entries; returns the raw result."""
        return self.mysql_client.select('PRO_APK_BLACK_LIST_FETCH', args)
if __name__ == '__main__':
    # Ad-hoc wiring for manual testing: build a client from an empty config
    # path; the commented calls below were earlier experiments.
    get_conf = GetConf('')
    mysql_client = VirScanMysqlClient(get_conf)
    # mysql_client.get_app_info()
    # mysql_client.insert_data()
| [
"luojianfeng2011@163.com"
] | luojianfeng2011@163.com |
6e9a46b9a6f69fb1024a1da8c825295da764cf97 | 173016e62bcca69fc36b7888c1784ad599756ed5 | /convert-string-to-camel-case.py | 9e54ba6d581cdd09175f6e86c22cbdfd7f3d8dfc | [] | no_license | dogac00/Python-Problems | c9415a1e3f8fbf5418088bfe94564e3e4d6b388e | 057674477cd3c8fc2de550bb512eeb8ab5edba2e | refs/heads/master | 2020-03-18T00:28:39.154471 | 2019-07-11T20:48:29 | 2019-07-11T20:48:29 | 134,096,575 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # Complete the method/function so that it converts dash/underscore delimited words into camel casing.
# The first word within the output should be capitalized only if the original word was capitalized.
# Examples
# to_camel_case("the-stealth-warrior") # returns "theStealthWarrior"
# to_camel_case("The_Stealth_Warrior") # returns "TheStealthWarrior"
def to_camel_case(text):
output = ''.join(x for x in text.title() if x not in "_-")
return text[0] + output[1:] if text else ''
# Another solution using translate method
def to_camel_case(s):
return s[0] + s.title().translate(None, "-_")[1:] if s else s
| [
"noreply@github.com"
] | noreply@github.com |
4f9cdc759c20a19b123b187ed82e7d01eb37bd48 | 8827574a663cc9d18194eb355dce7ffb676e6d0b | /everest/transit.py | 8958b3c59d794095e0ea42a5548d12f5aa0ef602 | [
"MIT"
] | permissive | mirca/everest | 70a79432f6cd2b604a64fc4c97c7513bbe2a6406 | b96cc5cd1949b81ddc49fb74b90bf5a95c6fca14 | refs/heads/master | 2021-01-13T05:56:00.206244 | 2017-03-17T16:35:49 | 2017-03-17T16:35:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,317 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`transit.py` - Transit models
-------------------------------------
These are routines used to generate a transit model, primarily for
transit injection/recovery tests. These are wrappers around
:py:func:`pysyzygy.Transit`, with the added feature that
the transit :py:obj:`depth` and the transit :py:obj:`duration` can be specified
as input variables (as opposed to the planet-star radius ratio
and the stellar density, which :py:mod:`pysyzygy` expects).
'''
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
import matplotlib.pyplot as pl
import pysyzygy as ps
from scipy.optimize import fmin
import logging
log = logging.getLogger(__name__)
class TransitModel(object):
  '''
  A wrapper around :py:class:`pysyzygy.Transit` that normalizes the model by
  its depth: calling an instance returns ``(model - 1) / depth``, i.e. 0 out
  of transit and -1 at the reference epoch.
  '''

  def __init__(self, name, sig_RpRs = 0.001, **kwargs):
    '''
    :param str name: An identifier for this planet/transit model.
    :param float sig_RpRs: Assumed 1-sigma uncertainty on the planet-star \
           radius ratio, used to approximate the variance on the depth.
    :param dict kwargs: Passed directly to :py:class:`pysyzygy.Transit`. May \
           include `times` (array; the first element is taken as the epoch) \
           or `t0` (epoch, default 0).
    '''

    # The planet/transit model ID
    assert type(name) is str, "Arg `name` must be a string."
    self.name = name

    # The transit model
    self._transit = ps.Transit(**kwargs)

    # Compute the depth
    times = kwargs.get('times', None)
    if times is not None:
      t0 = times[0]
    else:
      t0 = kwargs.get('t0', 0.)
    self.depth = (1. - self._transit([t0]))[0]

    # Approximate variance on the depth
    self.var_depth = (2 * sig_RpRs) ** 2

    # Save the kwargs
    self.params = kwargs

  def __call__(self, time):
    '''
    Evaluate the depth-normalized transit model at the given times: returns
    0 out of transit and -1 at the reference transit center.
    '''

    model = (self._transit(time) - 1) / self.depth
    return model
def Get_RpRs(d, **kwargs):
  '''
  Returns the value of the planet radius over the stellar radius for a given depth :py:obj:`d`, given
  the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.

  '''

  t0 = kwargs.get('t0', 0.)

  def _loss(RpRs):
    # Scaled squared error between the requested depth and the depth of a
    # transit model with radius ratio `RpRs`, evaluated at the epoch.
    model_depth = 1 - ps.Transit(RpRs = RpRs, **kwargs)([t0])
    return 1.e10 * (d - model_depth) ** 2

  # Start the downhill simplex at sqrt(d), the ratio for a box-shaped transit.
  return fmin(_loss, [np.sqrt(d)], disp = False)
def Get_rhos(dur, **kwargs):
  '''
  Returns the value of the stellar density for a given transit duration :py:obj:`dur`, given
  the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.

  '''

  assert dur >= 0.01 and dur <= 0.5, "Invalid value for the duration."

  def Dur(rhos, **kwargs):
    # Numerically measure the transit duration for a trial stellar density
    # by sampling the light curve over one day centered on the epoch and
    # taking the span of in-transit (flux < 1) points.
    t0 = kwargs.get('t0', 0.)
    time = np.linspace(t0 - 0.5, t0 + 0.5, 1000)
    try:
      t = time[np.where(ps.Transit(rhos = rhos, **kwargs)(time) < 1)]
    except:
      # pysyzygy can fail for unphysical densities; treat that (and an
      # empty in-transit set) as zero duration so fmin moves away.
      return 0.
    return t[-1] - t[0]

  def DiffSq(rhos):
    # Squared mismatch between the requested and the modeled duration.
    return (dur - Dur(rhos, **kwargs)) ** 2

  return fmin(DiffSq, [0.2], disp = False)
def Transit(time, t0 = 0., dur = 0.1, per = 3.56789, depth = 0.001, **kwargs):
    '''
    A `Mandel-Agol <http://adsabs.harvard.edu/abs/2002ApJ...580L.171M>`_ transit model,
    parametrized by the transit depth and duration instead of the planet-star
    radius ratio and the stellar density.

    :param numpy.ndarray time: The time array
    :param float t0: The time of first transit in units of :py:obj:`BJD` - 2454833.
    :param float dur: The transit duration in days. Don't go too crazy on this one -- very small \
        or very large values will break the inverter. Default 0.1
    :param float per: The orbital period in days. Default 3.56789
    :param float depth: The fractional transit depth. Default 0.001
    :param dict kwargs: Any additional keyword arguments, passed directly to :py:func:`everest.pysyzygy.Transit`

    :returns tmod: The transit model evaluated at the same times as the :py:obj:`time` array
    '''
    # Invert depth -> RpRs and duration -> rhos. Note that rhos can affect
    # RpRs, so this should really be done iteratively, but the effect is
    # pretty negligible!
    rprs = Get_RpRs(depth, t0 = t0, per = per, **kwargs)
    rho = Get_rhos(dur, t0 = t0, per = per, **kwargs)
    model = ps.Transit(t0 = t0, per = per, RpRs = rprs, rhos = rho, **kwargs)
    return model(time)
class TransitShape(object):
    '''
    A transit template precomputed on a fine grid centered on the transit
    and evaluated later by linear interpolation.
    '''

    def __init__(self, depth = 1, window = 0.5, **kwargs):
        '''
        :param float depth: Depth the template is rescaled to. Default 1
        :param float window: Width (in days) of the precomputed time window, \
            centered on the transit. Default 0.5
        :param dict kwargs: Additional keyword arguments passed directly to \
            :py:class:`pysyzygy.Transit`.
        '''
        # `t0` is fixed at 0 and `times` is replaced by our own grid, so any
        # caller-supplied values are discarded.
        kwargs.pop('t0', None)
        kwargs.pop('times', None)
        t = np.linspace(-window / 2, window / 2, 5000)
        trn = ps.Transit(t0 = 0., **kwargs)
        transit_model = trn(t)
        # Shift so the out-of-transit flux is zero, then rescale so the
        # template reaches -`depth` at the transit center (t = 0).
        transit_model -= 1
        transit_model *= depth / (1 - trn([0.])[0])
        self.x = t
        self.y = transit_model

    def __call__(self, time, t0 = 0.):
        '''
        Evaluate the template at `time` with the transit centered at `t0`.
        '''
        return np.interp(time, self.x + t0, self.y)
"rodluger@gmail.com"
] | rodluger@gmail.com |
8387f13e44b83640b4203424ab42916600e6b426 | 082b62ab33af75154881599d91191aa76ea3d965 | /modelos/modelo_servicios_afiliados.py | de2157a802f265599d017574d949ef1e31aa6f67 | [] | no_license | clau2489/sindicatomunicipalmerlo | 03b55ed0048367c9444a8ca41c4d27e19d656c98 | f019bf40a7055ec3a40caf8f10abecaec3d3d879 | refs/heads/master | 2021-10-09T21:38:25.924117 | 2019-01-03T17:05:15 | 2019-01-03T17:05:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,258 | py | from PyQt5 import QtCore
from libs.db import querier
import cerberus
class ModeloServiciosAfiliados(QtCore.QAbstractTableModel):
__querier = querier.Querier()
__v = cerberus.Validator()
def __init__(self, propiedades = None, parent = None):
super(ModeloServiciosAfiliados, self).__init__()
self.__esquemaServiciosAfiliados = {
'id_servicio' : {'type' : 'integer', 'maxlength' : 8 },
'legajo_afiliado' : {'type' : 'integer', 'maxlength' : 8 },
'fecha' : { 'type': 'date'},
'cantidad' : { 'type': 'integer', 'maxlength' : 20},
'detalle' : { 'type': 'string', 'maxlength' : 80},
}
self.__propiedades = ['fecha', 'nombre', 'cantidad', 'detalle' ]
self.__tablaServicios = []
def asociarServicio(self, servicio):
self.__querier.insertarElemento('servicios_afiliado', servicio)
self.verTablaServicios(servicio['legajo_afiliado'])
return True
def verTablaServicios(self, legajo):
if not legajo:
legajo = "0"
self.__tablaServicios = self.__querier.traerElementos(
tabla = 'servicios_afiliado',
campos = ['fecha', 'nombre', 'detalle', 'cantidad'],
uniones = [('servicios', 'servicios.id = id_servicio')],
condiciones = [('legajo_afiliado', '=', legajo)],
orden = ("fecha", "DESC")
)
self.__tablaServicios = self.__toList()
self._setDates(0)
if self.__tablaServicios:
self.layoutChanged.emit()
return True
return False
def _setDates(self, dateIndex):
for servicio in self.__tablaServicios:
servicio[dateIndex] = QtCore.QDate(servicio[dateIndex])
def __toList(self):
listaServicios = []
for index, debito in enumerate(self.__tablaServicios):
listaServicios.append(list(debito))
return listaServicios
# Estas son las funciones específicas de Qt para las tablas
def rowCount(self, parent):
return len(self.__tablaServicios)
def columnCount(self, parent):
if self.__tablaServicios:
return len(self.__tablaServicios[0])
else:
return 0
def flags(self, index):
return QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
def data(self, index, role):
# Acá es donde definí de dónde (De qué lista) voy a levantar los datos
if role == QtCore.Qt.DisplayRole:
row = index.row()
column = index.column()
value = self.__tablaServicios[row][column] # value contiene la lista de listas que contiene los afiliados
return value # el valor que retorno es el que aparecería en la tabla
def headerData(self, section, orientation, role):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
# Los objetos tipo diccionario no ordenan sus elementos, por eso usar dict.keys() me tira los nombres
# de las columnas en cualquier orden. Acá debería usar la lista propiedades.
# además de salir ordenado, se ajusta la cantidad de columnas correspondiente
keys = list(self.__propiedades)
return keys[section]
| [
"info@oestedev.com"
] | info@oestedev.com |
f4f8dd60ea58ec7d4096eb4aea102d0f080017d0 | 9ccf4964ffa09f285cc4ae90e323034c963b11ed | /rfc822.py | 0e635a767f231120237ddd1dcd416e9d2c7f3658 | [] | no_license | anybow/hyt | ff9ce0d837cb2b2dc275620ff3e2bbf7ede2e04b | 06d916ab38797e98f2ed9fba28ada3a988113a15 | refs/heads/master | 2021-01-20T12:16:47.136140 | 2014-08-07T02:39:58 | 2014-08-07T02:39:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,378 | py | """RFC 2822 message manipulation.
Note: This is only a very rough sketch of a full RFC-822 parser; in particular
the tokenizing of addresses does not adhere to all the quoting rules.
Note: RFC 2822 is a long awaited update to RFC 822. This module should
conform to RFC 2822, and is thus mis-named (it's not worth renaming it). Some
effort at RFC 2822 updates have been made, but a thorough audit has not been
performed. Consider any RFC 2822 non-conformance to be a bug.
RFC 2822: http://www.faqs.org/rfcs/rfc2822.html
RFC 822 : http://www.faqs.org/rfcs/rfc822.html (obsolete)
Directions for use:
To create a Message object: first open a file, e.g.:
fp = open(file, 'r')
You can use any other legal way of getting an open file object, e.g. use
sys.stdin or call os.popen(). Then pass the open file object to the Message()
constructor:
m = Message(fp)
This class can work with any input object that supports a readline method. If
the input object has seek and tell capability, the rewindbody method will
work; also illegal lines will be pushed back onto the input stream. If the
input object lacks seek but has an `unread' method that can push back a line
of input, Message will use that to push back illegal lines. Thus this class
can be used to parse messages coming from a buffered stream.
The optional `seekable' argument is provided as a workaround for certain stdio
libraries in which tell() discards buffered data before discovering that the
lseek() system call doesn't work. For maximum portability, you should set the
seekable argument to zero to prevent that initial \code{tell} when passing in
an unseekable object such as a a file object created from a socket object. If
it is 1 on entry -- which it is by default -- the tell() method of the open
file object is called once; if this raises an exception, seekable is reset to
0. For other nonzero values of seekable, this test is not made.
To get the text of a particular header there are several methods:
str = m.getheader(name)
str = m.getrawheader(name)
where name is the name of the header, e.g. 'Subject'. The difference is that
getheader() strips the leading and trailing whitespace, while getrawheader()
doesn't. Both functions retain embedded whitespace (including newlines)
exactly as they are specified in the header, and leave the case of the text
unchanged.
For addresses and address lists there are functions
realname, mailaddress = m.getaddr(name)
list = m.getaddrlist(name)
where the latter returns a list of (realname, mailaddr) tuples.
There is also a method
time = m.getdate(name)
which parses a Date-like field and returns a time-compatible tuple,
i.e. a tuple such as returned by time.localtime() or accepted by
time.mktime().
See the class definition for lower level access methods.
There are also some utility functions here.
"""
# Cleanup and extensions by Eric S. Raymond <esr@thyrsus.com>
import time
"""
from warnings import warnpy3k
warnpy3k("in 3.x, rfc822 has been removed in favor of the email package",
stacklevel=2)
"""
# Names exported by `from rfc822 import *`.
__all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"]

_blanklines = ('\r\n', '\n') # Optimization for islast()
class Message:
    """Represents a single RFC 2822-compliant message."""

    def __init__(self, fp, seekable = 1):
        """Initialize the class instance and read the headers."""
        if seekable == 1:
            # Exercise tell() to make sure it works
            # (and then assume seek() works, too)
            try:
                fp.tell()
            except (AttributeError, IOError):
                seekable = 0
        self.fp = fp
        self.seekable = seekable
        self.startofheaders = None
        self.startofbody = None
        # Remember where the headers begin, if the stream supports it.
        if self.seekable:
            try:
                self.startofheaders = self.fp.tell()
            except IOError:
                self.seekable = 0
        # Consume the header block (and the blank line that ends it).
        self.readheaders()
        # Remember where the body begins, for rewindbody().
        if self.seekable:
            try:
                self.startofbody = self.fp.tell()
            except IOError:
                self.seekable = 0

    def rewindbody(self):
        """Rewind the file to the start of the body (if seekable)."""
        if not self.seekable:
            raise IOError("unseekable file")
        self.fp.seek(self.startofbody)

    def readheaders(self):
        """Read header lines.

        Read header lines up to the entirely blank line that terminates them.
        The (normally blank) line that ends the headers is skipped, but not
        included in the returned list. If a non-header line ends the headers,
        (which is an error), an attempt is made to backspace over it; it is
        never included in the returned list.

        The variable self.status is set to the empty string if all went well,
        otherwise it is an error message. The variable self.headers is a
        completely uninterpreted list of lines contained in the header (so
        printing them will reproduce the header exactly as it appears in the
        file).
        """
        self.dict = {}
        self.unixfrom = ''
        self.headers = lst = []
        self.status = ''
        headerseen = ""
        firstline = 1
        startofline = unread = tell = None
        # Prefer an `unread` pushback hook if the stream provides one;
        # otherwise fall back to tell()/seek() to undo a bad read.
        if hasattr(self.fp, 'unread'):
            unread = self.fp.unread
        elif self.seekable:
            tell = self.fp.tell
        while 1:
            if tell:
                try:
                    startofline = tell()
                except IOError:
                    startofline = tell = None
                    self.seekable = 0
            line = self.fp.readline()
            if not line:
                self.status = 'EOF in headers'
                break
            # Skip unix From name time lines
            if firstline and line.startswith('From '):
                self.unixfrom = self.unixfrom + line
                continue
            firstline = 0
            if headerseen and line[0] in ' \t':
                # It's a continuation line: fold it into the previous header.
                lst.append(line)
                x = (self.dict[headerseen] + "\n " + line.strip())
                self.dict[headerseen] = x.strip()
                continue
            elif self.iscomment(line):
                # It's a comment. Ignore it.
                continue
            elif self.islast(line):
                # Note! No pushback here! The delimiter line gets eaten.
                break
            headerseen = self.isheader(line)
            if headerseen:
                # It's a legal header line, save it.
                lst.append(line)
                self.dict[headerseen] = line[len(headerseen)+1:].strip()
                continue
            else:
                # It's not a header line; throw it back and stop here.
                if not self.dict:
                    self.status = 'No headers'
                else:
                    self.status = 'Non-header line where header expected'
                # Try to undo the read.
                if unread:
                    unread(line)
                elif tell:
                    self.fp.seek(startofline)
                else:
                    self.status = self.status + '; bad seek'
                break

    def isheader(self, line):
        """Determine whether a given line is a legal header.

        This method should return the header name, suitably canonicalized.
        You may override this method in order to use Message parsing on tagged
        data in RFC 2822-like formats with special header formats.
        """
        i = line.find(':')
        if i > 0:
            return line[:i].lower()
        return None

    def islast(self, line):
        """Determine whether a line is a legal end of RFC 2822 headers.

        You may override this method if your application wants to bend the
        rules, e.g. to strip trailing whitespace, or to recognize MH template
        separators ('--------'). For convenience (e.g. for code reading from
        sockets) a line consisting of \r\n also matches.
        """
        return line in _blanklines

    def iscomment(self, line):
        """Determine whether a line should be skipped entirely.

        You may override this method in order to use Message parsing on tagged
        data in RFC 2822-like formats that support embedded comments or
        free-text data.
        """
        return False

    def getallmatchingheaders(self, name):
        """Find all header lines matching a given header name.

        Look through the list of headers and find all lines matching a given
        header name (and their continuation lines). A list of the lines is
        returned, without interpretation. If the header does not occur, an
        empty list is returned. If the header occurs multiple times, all
        occurrences are returned. Case is not important in the header name.
        """
        name = name.lower() + ':'
        n = len(name)
        lst = []
        hit = 0
        for line in self.headers:
            if line[:n].lower() == name:
                hit = 1
            elif not line[:1].isspace():
                hit = 0
            if hit:
                lst.append(line)
        return lst

    def getfirstmatchingheader(self, name):
        """Get the first header line matching name.

        This is similar to getallmatchingheaders, but it returns only the
        first matching header (and its continuation lines).
        """
        name = name.lower() + ':'
        n = len(name)
        lst = []
        hit = 0
        for line in self.headers:
            if hit:
                # Continuation lines start with whitespace; a non-space
                # line means the header (and its folds) has ended.
                if not line[:1].isspace():
                    break
            elif line[:n].lower() == name:
                hit = 1
            if hit:
                lst.append(line)
        return lst

    def getrawheader(self, name):
        """A higher-level interface to getfirstmatchingheader().

        Return a string containing the literal text of the header but with the
        keyword stripped. All leading, trailing and embedded whitespace is
        kept in the string, however. Return None if the header does not
        occur.
        """
        lst = self.getfirstmatchingheader(name)
        if not lst:
            return None
        lst[0] = lst[0][len(name) + 1:]
        return ''.join(lst)

    def getheader(self, name, default=None):
        """Get the header value for a name.

        This is the normal interface: it returns a stripped version of the
        header value for a given header name, or None if it doesn't exist.
        This uses the dictionary version which finds the *last* such header.
        """
        return self.dict.get(name.lower(), default)
    # Alias: dictionary-style get() is the same operation.
    get = getheader

    def getheaders(self, name):
        """Get all values for a header.

        This returns a list of values for headers given more than once; each
        value in the result list is stripped in the same way as the result of
        getheader(). If the header is not given, return an empty list.
        """
        result = []
        current = ''
        have_header = 0
        for s in self.getallmatchingheaders(name):
            if s[0].isspace():
                # Continuation line: fold into the value being accumulated.
                if current:
                    current = "%s\n %s" % (current, s.strip())
                else:
                    current = s.strip()
            else:
                if have_header:
                    result.append(current)
                current = s[s.find(":") + 1:].strip()
                have_header = 1
        if have_header:
            result.append(current)
        return result

    def getaddr(self, name):
        """Get a single address from a header, as a tuple.

        An example return value:
        ('Guido van Rossum', 'guido@cwi.nl')
        """
        # New, by Ben Escoto
        alist = self.getaddrlist(name)
        if alist:
            return alist[0]
        else:
            return (None, None)

    def getaddrlist(self, name):
        """Get a list of addresses from a header.

        Retrieves a list of addresses from a header, where each address is a
        tuple as returned by getaddr(). Scans all named headers, so it works
        properly with multiple To: or Cc: headers for example.
        """
        raw = []
        for h in self.getallmatchingheaders(name):
            if h[0] in ' \t':
                raw.append(h)
            else:
                if raw:
                    raw.append(', ')
                i = h.find(':')
                if i > 0:
                    addr = h[i+1:]
                # NOTE(review): if the first matching line lacks a ':',
                # `addr` is referenced before assignment here — inherited
                # stdlib behavior; getallmatchingheaders() normally
                # guarantees a colon is present.
                raw.append(addr)
        alladdrs = ''.join(raw)
        a = AddressList(alladdrs)
        return a.addresslist

    def getdate(self, name):
        """Retrieve a date field from a header.

        Retrieves a date field from the named header, returning a tuple
        compatible with time.mktime().
        """
        try:
            data = self[name]
        except KeyError:
            return None
        return parsedate(data)

    def getdate_tz(self, name):
        """Retrieve a date field from a header as a 10-tuple.

        The first 9 elements make up a tuple compatible with time.mktime(),
        and the 10th is the offset of the poster's time zone from GMT/UTC.
        """
        try:
            data = self[name]
        except KeyError:
            return None
        return parsedate_tz(data)

    # Access as a dictionary (only finds *last* header of each type):

    def __len__(self):
        """Get the number of headers in a message."""
        return len(self.dict)

    def __getitem__(self, name):
        """Get a specific header, as from a dictionary."""
        return self.dict[name.lower()]

    def __setitem__(self, name, value):
        """Set the value of a header.

        Note: This is not a perfect inversion of __getitem__, because any
        changed headers get stuck at the end of the raw-headers list rather
        than where the altered header was.
        """
        del self[name] # Won't fail if it doesn't exist
        self.dict[name.lower()] = value
        text = name + ": " + value
        for line in text.split("\n"):
            self.headers.append(line + "\n")

    def __delitem__(self, name):
        """Delete all occurrences of a specific header, if it is present."""
        name = name.lower()
        if not name in self.dict:
            return
        del self.dict[name]
        name = name + ':'
        n = len(name)
        lst = []
        hit = 0
        # Collect the indices of every matching header line (and its
        # continuation lines), then delete them back-to-front so the
        # remaining indices stay valid.
        for i in range(len(self.headers)):
            line = self.headers[i]
            if line[:n].lower() == name:
                hit = 1
            elif not line[:1].isspace():
                hit = 0
            if hit:
                lst.append(i)
        for i in reversed(lst):
            del self.headers[i]

    def setdefault(self, name, default=""):
        """Return the value for name, adding the header with `default` if absent."""
        lowername = name.lower()
        if lowername in self.dict:
            return self.dict[lowername]
        else:
            text = name + ": " + default
            for line in text.split("\n"):
                self.headers.append(line + "\n")
            self.dict[lowername] = default
            return default

    def has_key(self, name):
        """Determine whether a message contains the named header."""
        return name.lower() in self.dict

    def __contains__(self, name):
        """Determine whether a message contains the named header."""
        return name.lower() in self.dict

    def __iter__(self):
        return iter(self.dict)

    def keys(self):
        """Get all of a message's header field names."""
        return list(self.dict.keys())

    def values(self):
        """Get all of a message's header field values."""
        return list(self.dict.values())

    def items(self):
        """Get all of a message's headers.

        Returns a list of name, value tuples.
        """
        return list(self.dict.items())

    def __str__(self):
        return ''.join(self.headers)
# Utility functions
# -----------------
# XXX Should fix unquote() and quote() to be really conformant.
# XXX The inverses of the parse functions may also be useful.
def unquote(s):
    """Remove surrounding double quotes or angle brackets from a string."""
    if len(s) > 1:
        head, tail = s[0], s[-1]
        if head == '"' and tail == '"':
            # Also unescape backslash-escaped backslashes and quotes.
            inner = s[1:-1]
            return inner.replace('\\\\', '\\').replace('\\"', '"')
        if head == '<' and tail == '>':
            return s[1:-1]
    return s
def quote(s):
    """Backslash-escape every backslash and double quote in a string."""
    out = []
    for ch in s:
        if ch in '\\"':
            out.append('\\')
        out.append(ch)
    return ''.join(out)
def parseaddr(address):
    """Parse an address into a (realname, mailaddr) tuple."""
    parsed = AddressList(address).addresslist
    # An unparseable/empty field yields (None, None) rather than raising.
    return parsed[0] if parsed else (None, None)
class AddrlistClass:
    """Address parser class by Ben Escoto.

    To understand what this class does, it helps to have a copy of
    RFC 2822 in front of you.

    http://www.faqs.org/rfcs/rfc2822.html

    Note: this class interface is deprecated and may be removed in the future.
    Use rfc822.AddressList instead.
    """

    def __init__(self, field):
        """Initialize a new instance.

        `field' is an unparsed address header field, containing one or more
        addresses.
        """
        # Character classes used by the recursive-descent parser below.
        self.specials = '()<>@,:;.\"[]'
        self.pos = 0
        self.LWS = ' \t'
        self.CR = '\r\n'
        self.atomends = self.specials + self.LWS + self.CR
        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
        # is obsolete syntax. RFC 2822 requires that we recognize obsolete
        # syntax, so allow dots in phrases.
        self.phraseends = self.atomends.replace('.', '')
        self.field = field
        self.commentlist = []

    def gotonext(self):
        """Parse up to the start of the next address."""
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS + '\n\r':
                self.pos = self.pos + 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            else: break

    def getaddrlist(self):
        """Parse all addresses.

        Returns a list containing all of the addresses.
        """
        result = []
        ad = self.getaddress()
        while ad:
            result += ad
            ad = self.getaddress()
        return result

    def getaddress(self):
        """Parse the next address."""
        self.commentlist = []
        self.gotonext()

        # Save the position so the addr-spec case below can re-parse from
        # the start of this address.
        oldpos = self.pos
        oldcl = self.commentlist
        plist = self.getphraselist()

        self.gotonext()
        returnlist = []

        if self.pos >= len(self.field):
            # Bad email address technically, no domain.
            if plist:
                returnlist = [(' '.join(self.commentlist), plist[0])]

        elif self.field[self.pos] in '.@':
            # email address is just an addrspec
            # this isn't very efficient since we start over
            self.pos = oldpos
            self.commentlist = oldcl
            addrspec = self.getaddrspec()
            returnlist = [(' '.join(self.commentlist), addrspec)]

        elif self.field[self.pos] == ':':
            # address is a group
            returnlist = []

            fieldlen = len(self.field)
            self.pos += 1
            while self.pos < len(self.field):
                self.gotonext()
                if self.pos < fieldlen and self.field[self.pos] == ';':
                    self.pos += 1
                    break
                returnlist = returnlist + self.getaddress()

        elif self.field[self.pos] == '<':
            # Address is a phrase then a route addr
            routeaddr = self.getrouteaddr()

            if self.commentlist:
                returnlist = [(' '.join(plist) + ' (' + \
                    ' '.join(self.commentlist) + ')', routeaddr)]
            else: returnlist = [(' '.join(plist), routeaddr)]

        else:
            if plist:
                returnlist = [(' '.join(self.commentlist), plist[0])]
            elif self.field[self.pos] in self.specials:
                self.pos += 1

        self.gotonext()
        if self.pos < len(self.field) and self.field[self.pos] == ',':
            self.pos += 1
        return returnlist

    def getrouteaddr(self):
        """Parse a route address (Return-path value).

        This method just skips all the route stuff and returns the addrspec.
        """
        if self.field[self.pos] != '<':
            return

        expectroute = 0
        self.pos += 1
        self.gotonext()
        adlist = ""
        while self.pos < len(self.field):
            if expectroute:
                # Domain after '@' in a route: parsed and discarded.
                self.getdomain()
                expectroute = 0
            elif self.field[self.pos] == '>':
                self.pos += 1
                break
            elif self.field[self.pos] == '@':
                self.pos += 1
                expectroute = 1
            elif self.field[self.pos] == ':':
                self.pos += 1
            else:
                adlist = self.getaddrspec()
                self.pos += 1
                break

        self.gotonext()
        return adlist

    def getaddrspec(self):
        """Parse an RFC 2822 addr-spec."""
        aslist = []

        self.gotonext()
        while self.pos < len(self.field):
            if self.field[self.pos] == '.':
                aslist.append('.')
                self.pos += 1
            elif self.field[self.pos] == '"':
                aslist.append('"%s"' % self.getquote())
            elif self.field[self.pos] in self.atomends:
                break
            else: aslist.append(self.getatom())
        self.gotonext()

        if self.pos >= len(self.field) or self.field[self.pos] != '@':
            # Local part only (no domain).
            return ''.join(aslist)

        aslist.append('@')
        self.pos += 1
        self.gotonext()
        return ''.join(aslist) + self.getdomain()

    def getdomain(self):
        """Get the complete domain name from an address."""
        sdlist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] == '[':
                sdlist.append(self.getdomainliteral())
            elif self.field[self.pos] == '.':
                self.pos += 1
                sdlist.append('.')
            elif self.field[self.pos] in self.atomends:
                break
            else: sdlist.append(self.getatom())
        return ''.join(sdlist)

    def getdelimited(self, beginchar, endchars, allowcomments = 1):
        """Parse a header fragment delimited by special characters.

        `beginchar' is the start character for the fragment. If self is not
        looking at an instance of `beginchar' then getdelimited returns the
        empty string.

        `endchars' is a sequence of allowable end-delimiting characters.
        Parsing stops when one of these is encountered.

        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
        within the parsed fragment.
        """
        if self.field[self.pos] != beginchar:
            return ''

        slist = ['']
        quote = 0
        self.pos += 1
        while self.pos < len(self.field):
            if quote == 1:
                # Previous character was a backslash: take this one verbatim.
                slist.append(self.field[self.pos])
                quote = 0
            elif self.field[self.pos] in endchars:
                self.pos += 1
                break
            elif allowcomments and self.field[self.pos] == '(':
                slist.append(self.getcomment())
                continue # have already advanced pos from getcomment
            elif self.field[self.pos] == '\\':
                quote = 1
            else:
                slist.append(self.field[self.pos])
            self.pos += 1

        return ''.join(slist)

    def getquote(self):
        """Get a quote-delimited fragment from self's field."""
        return self.getdelimited('"', '"\r', 0)

    def getcomment(self):
        """Get a parenthesis-delimited fragment from self's field."""
        return self.getdelimited('(', ')\r', 1)

    def getdomainliteral(self):
        """Parse an RFC 2822 domain-literal."""
        return '[%s]' % self.getdelimited('[', ']\r', 0)

    def getatom(self, atomends=None):
        """Parse an RFC 2822 atom.

        Optional atomends specifies a different set of end token delimiters
        (the default is to use self.atomends). This is used e.g. in
        getphraselist() since phrase endings must not include the `.' (which
        is legal in phrases)."""
        atomlist = ['']
        if atomends is None:
            atomends = self.atomends

        while self.pos < len(self.field):
            if self.field[self.pos] in atomends:
                break
            else: atomlist.append(self.field[self.pos])
            self.pos += 1

        return ''.join(atomlist)

    def getphraselist(self):
        """Parse a sequence of RFC 2822 phrases.

        A phrase is a sequence of words, which are in turn either RFC 2822
        atoms or quoted-strings. Phrases are canonicalized by squeezing all
        runs of continuous whitespace into one space.
        """
        plist = []

        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == '"':
                plist.append(self.getquote())
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] in self.phraseends:
                break
            else:
                plist.append(self.getatom(self.phraseends))

        return plist
class AddressList(AddrlistClass):
    """An AddressList encapsulates a list of parsed RFC 2822 addresses."""

    def __init__(self, field):
        AddrlistClass.__init__(self, field)
        # A falsy field (None/empty) yields an empty address list; this is
        # relied on by the set-operation methods below to build results.
        if field:
            self.addresslist = self.getaddrlist()
        else:
            self.addresslist = []

    def __len__(self):
        return len(self.addresslist)

    def __str__(self):
        return ", ".join(map(dump_address_pair, self.addresslist))

    def __add__(self, other):
        # Set union
        newaddr = AddressList(None)
        newaddr.addresslist = self.addresslist[:]
        for x in other.addresslist:
            if not x in self.addresslist:
                newaddr.addresslist.append(x)
        return newaddr

    def __iadd__(self, other):
        # Set union, in-place
        for x in other.addresslist:
            if not x in self.addresslist:
                self.addresslist.append(x)
        return self

    def __sub__(self, other):
        # Set difference
        newaddr = AddressList(None)
        for x in self.addresslist:
            if not x in other.addresslist:
                newaddr.addresslist.append(x)
        return newaddr

    def __isub__(self, other):
        # Set difference, in-place
        for x in other.addresslist:
            if x in self.addresslist:
                self.addresslist.remove(x)
        return self

    def __getitem__(self, index):
        # Make indexing, slices, and 'in' work
        return self.addresslist[index]
def dump_address_pair(pair):
    """Dump a (name, address) pair in a canonicalized form."""
    realname, addr = pair
    if realname:
        # Quote the display name: '"Real Name" <addr>'.
        return '"%s" <%s>' % (realname, addr)
    return addr
# Parse a date field

# Month/day name tokens recognized by parsedate_tz(), all lowercase;
# both abbreviated and full month names are accepted.
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
               'aug', 'sep', 'oct', 'nov', 'dec',
               'january', 'february', 'march', 'april', 'may', 'june', 'july',
               'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']

# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
# Values are in +-HHMM form; parsedate_tz() converts them to seconds.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
              'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
              'EST': -500, 'EDT': -400, # Eastern
              'CST': -600, 'CDT': -500, # Central
              'MST': -700, 'MDT': -600, # Mountain
              'PST': -800, 'PDT': -700 # Pacific
              }
def parsedate_tz(data):
    """Convert a date string to a time tuple.

    Accounts for military timezones.

    Returns a 10-tuple: 9 time.struct_time-like fields plus the timezone
    offset from UTC in seconds (or None if no offset was recognized);
    returns None if the string cannot be parsed.
    """
    if not data:
        return None
    data = data.split()
    if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        # no space after the "weekday,"?
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    if len(data) == 3: # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            # Timezone glued onto the time field: split it off.
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('') # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    # Day and month may be swapped ("6 Nov" vs "Nov 6"); detect and fix.
    if not mm in _monthnames:
        dd, mm = mm, dd.lower()
        if not mm in _monthnames:
            return None
    mm = _monthnames.index(mm)+1
    # Full month names occupy indices 13-24 in _monthnames; fold back to 1-12.
    if mm > 12: mm = mm - 12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        # Year and time fields arrived in swapped order.
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        # Year and timezone fields arrived in swapped order.
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset)
def parsedate(data):
    """Convert a time string to a 9-element time tuple (no timezone)."""
    parsed = parsedate_tz(data)
    # Drop the trailing timezone offset; propagate parse failure as None.
    return None if parsed is None else parsed[:9]
def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
    gmtoff = data[9]
    if gmtoff is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    # Interpret the tuple as local standard time (dst flag 0), then shift
    # by both the stated offset and the local standard offset to get UTC.
    stamp = time.mktime(data[:8] + (0,))
    return stamp - gmtoff - time.timezone
def formatdate(timeval=None):
    """Returns time format preferred for Internet standards.

    Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123

    According to RFC 1123, day and month names must always be in
    English. If not for that, this code could use strftime(). It
    can't because strftime() honors the locale and could generated
    non-English names.
    """
    if timeval is None:
        timeval = time.time()
    now = time.gmtime(timeval)
    days = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
    months = ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
        days[now.tm_wday], now.tm_mday, months[now.tm_mon - 1],
        now.tm_year, now.tm_hour, now.tm_min, now.tm_sec)
# When used as script, run a small test program.
# The first command line argument must be a filename containing one
# message in RFC-822 format.

if __name__ == '__main__':
    import sys, os
    # Default to the first message of an MH-style inbox under $HOME.
    file = os.path.join(os.environ['HOME'], 'Mail/inbox/1')
    if sys.argv[1:]: file = sys.argv[1]
    f = open(file, 'r')
    m = Message(f)
    print('From:', m.getaddr('from'))
    print('To:', m.getaddrlist('to'))
    print('Subject:', m.getheader('subject'))
    print('Date:', m.getheader('date'))
    date = m.getdate_tz('date')
    tz = date[-1]
    date = time.localtime(mktime_tz(date))
    if date:
        print('ParsedDate:', time.asctime(date), end=' ')
        # tz is the offset in seconds; render it as +-HHMM(.SS).
        hhmmss = tz
        hhmm, ss = divmod(hhmmss, 60)
        hh, mm = divmod(hhmm, 60)
        print("%+03d%02d" % (hh, mm), end=' ')
        if ss: print(".%02d" % ss, end=' ')
        print()
    else:
        print('ParsedDate:', None)
    m.rewindbody()
    n = 0
    while f.readline():
        n += 1
    print('Lines:', n)
    print('-'*70)
    print('len =', len(m))
    if 'Date' in m: print('Date =', m['Date'])
    if 'X-Nonsense' in m: pass
    print('keys =', list(m.keys()))
    print('values =', list(m.values()))
    print('items =', list(m.items()))
| [
"digtip@gmail.com"
] | digtip@gmail.com |
544856a2128a2663e2458371b221be88ec516551 | c5b817222506a283bce75d3e8b4fa5482385710b | /python/edu.metrostate.ICS460/Assignment_7/ProxyServer.py | 1fc886a190e50022c2a83206f5875fd3fb2bf5d1 | [] | no_license | DavineChi/_Fall2019 | 38c646da9b0ed49be4ed5423bf232428a314658a | 3f9835f0dd1830cce7cd6fbc5a11421b3fbd1110 | refs/heads/master | 2020-06-26T16:43:32.012994 | 2019-12-05T18:53:49 | 2019-12-05T18:53:49 | 199,689,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,709 | py | from socket import *
import sys

# Validate the command line: the proxy needs the IP address to bind to.
if len(sys.argv) <= 1:
    print('Usage : "python ProxyServer.py server_ip"\n[server_ip] : The IP Address Of Proxy Server')
    sys.exit(2)

# Create a server socket, bind it to a port and start listening.
tcpServerSocket = socket(AF_INET, SOCK_STREAM)
serverPort = 7001
tcpServerSocket.bind((sys.argv[1], serverPort))
tcpServerSocket.listen(1)

try:
    while 1:
        # Start receiving data from the client.
        print('Ready to serve...')
        tcpClientSocket, addr = tcpServerSocket.accept()
        print('Received a connection from:', addr)
        # recv() returns bytes in Python 3: decode before doing str parsing
        # (the original called str methods on the raw bytes).
        message = tcpClientSocket.recv(1024).decode()
        print(message)
        # Extract the filename from the request line (second token of e.g.
        # "GET /www.host.com HTTP/1.0").
        print(message.split()[1])
        filename = message.split()[1].partition("/")[2] + ".cache"
        print(filename)
        fileExist = False
        fileToUse = filename
        print("File to use: " + fileToUse)
        try:
            # Check whether the file exists in the cache.  The cached copy
            # holds the complete upstream response (headers + body), so a hit
            # is replayed verbatim.  Reading "./" + fileToUse matches the path
            # the miss branch writes below (the original read fileToUse[1:],
            # which never matched the written path, so the cache never hit).
            with open("./" + fileToUse, "rb") as cacheFile:
                outputdata = cacheFile.read()
            fileExist = True
            tcpClientSocket.sendall(outputdata)
            print('Read from cache.')
        # Error handling for file not found in cache.
        except IOError:
            if fileExist == False:
                # Cache miss: create a socket towards the origin server.
                proxySocket = socket(AF_INET, SOCK_STREAM)
                hostnTemp = filename.replace("www.", "", 1)
                hostn = hostnTemp.replace(".cache", "", 1)
                print("Hostname: " + hostn)
                try:
                    # Connect to the origin server on port 80 and forward the
                    # request.  sendall()/recv() replace the original
                    # makefile('r', 0).write(...), which is invalid: a
                    # read-mode text file object cannot be written, and
                    # buffering=0 is only allowed in binary mode.
                    proxySocket.connect((hostn, 80))
                    request = "GET " + "http://www." + hostn + "/ HTTP/1.0\r\n\r\n"
                    proxySocket.sendall(request.encode())
                    # Read the full response until the server closes the
                    # connection (HTTP/1.0 semantics).
                    buffer = b""
                    while True:
                        chunk = proxySocket.recv(4096)
                        if not chunk:
                            break
                        buffer += chunk
                    # Store the response in the cache and relay it to the
                    # client.
                    with open("./" + filename, "wb") as tempFile:
                        tempFile.write(buffer)
                    tcpClientSocket.sendall(buffer)
                    proxySocket.close()
                except:
                    print("Illegal request.")
            else:
                # HTTP response message for file not found (bytes, not str).
                tcpClientSocket.send("HTTP/1.0 404 Not Found\r\n".encode())
                tcpClientSocket.send("Content-Type: text/html\r\n".encode())
        # Close the client socket.
        tcpClientSocket.close()
finally:
    # Close the listening socket only when the proxy shuts down.  The original
    # closed it inside the loop body, so the second accept() always failed.
    tcpServerSocket.close()
# Fill in end. | [
"davine_chi@hotmail.com"
] | davine_chi@hotmail.com |
656773742fd28b2e301e5bcb2d716d2c45b39d52 | 05d6e25937f66255cd098771cb0dde36088b9db0 | /makeFeynmanDiagrams | 84fdeae657ee393e081820cbbca0feb4a57a2a48 | [] | no_license | scphall/feynman | 52f761b7a527e327ddd048309ae660f9dd809582 | f8008c8ac99e2487eaf0b75664dcbac1447cc05a | refs/heads/master | 2016-09-05T15:01:55.075675 | 2015-01-20T11:50:17 | 2015-01-20T11:50:17 | 29,527,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | #!/usr/bin/python
from os import system
fd = 'feynman_diagrams'
print 'Making diagrams'
system('make')
cmds = [
'mv {}_1.pdf feynman_dsphi_sm.pdf',
'mv {}_2.pdf feynman_dsphi_susy.pdf',
'mv {}_3.pdf feynman_hhh_kpipimumu.pdf',
'mv {}_4.pdf feynman_hhh_phikmumu.pdf',
'mv {}_5.pdf feynman_theory_penguin.pdf',
'mv {}_6.pdf feynman_theory_box.pdf',
'mv {}_7.pdf feynman_inf.pdf',
'mv {}_8.pdf diagram_ut.pdf',
'mv {}_9.pdf diagram_intro.pdf',
]
cmds = [x.format(fd) for x in cmds]
print '\n Rename files'
print ' ============'
for cmd in cmds:
print cmd
system(cmd)
| [
"samcphall@gmail.com"
] | samcphall@gmail.com | |
1ef6c6b6d66bfbf092460d8bb0caa237ffd3ca09 | fcf6933261c478786f92b30ddbf3633ecc00be78 | /conversion.py | c29ee324b65abb8342ccda74daa4a2ddbf6b1168 | [] | no_license | Chamb3r73/tkintwer | 7de0f3ade5f2d5d683a5d86214134b6d252b812c | 5f079573e50eedb79080e09e96af72f194e743b3 | refs/heads/master | 2023-04-27T04:55:46.781958 | 2021-05-14T19:57:02 | 2021-05-14T19:57:02 | 367,470,665 | 0 | 0 | null | 2021-05-14T20:17:39 | 2021-05-14T20:17:38 | null | UTF-8 | Python | false | false | 12,961 | py | import tkinter
from tkinter import ttk
from tkinter import *
# Main application window for the tabbed unit-conversion tool.
root = tkinter.Tk()
root.title('Conversion')
root.configure(bg = '#000000')  # black window background
# Fallback text shown when no valid input/selection has been made yet.
weightans = 'Please enter an answer'
def weightcalculate():
weightask = float(weightentry1.get())
weightfrom = weighttkvar.get()
weightto = weight2tkvar.get()
if weightfrom == 'Grams':
if weightto == 'Grams':
weightans = weightask
if weightto == 'Kilograms':
weightans = weightask/1000
if weightto == 'Tonnes':
weightans = weightask/1000000
if weightto == 'Ounces':
weightans = weightask/28.35
if weightto == 'Pound':
weightans = weightask/454
if weightto == 'Stone':
weightans = weightask/6350
if weightfrom == 'Kilograms':
if weightto == 'Grams':
weightans = weightask*1000
if weightto == 'Kilograms':
weightans = weightask
if weightto == 'Tonnes':
weightans = weightask/1000
if weightto == 'Ounces':
weightans = weightask*35.274
if weightto == 'Pound':
weightans = weightask*2.205
if weightto == 'Stone':
weightans = weightask/6.35
if weightfrom == 'Tonnes':
if weightto == 'Grams':
weightans = weightask*1000000
if weightto == 'Kilograms':
weightans = weightask*1000
if weightto == 'Tonnes':
weightans = weightask
if weightto == 'Ounces':
weightans = weightask*35274
if weightto == 'Pound':
weightans = weightask*2205
if weightto == 'Stone':
weightans = weightask*157
if weightfrom == 'Ounces':
if weightto == 'Grams':
weightans = weightask*28.35
if weightto == 'Kilograms':
weightans = weightask/35.274
if weightto == 'Tonnes':
weightans = weightask/35274
if weightto == 'Ounces':
weightans = weightask
if weightto == 'Pound':
weightans = weightask/16
if weightto == 'Stone':
weightans = weightask/224
if weightfrom == 'Pounds':
if weightto == 'Grams':
weightans = weightask*454
if weightto == 'Kilograms':
weightans = weightask/2.205
if weightto == 'Tonnes':
weightans = weightask/2205
if weightto == 'Ounces':
weightans = weightask*16
if weightto == 'Pound':
weightans = weightask
if weightto == 'Stone':
weightans = weightask*14
if weightfrom == 'Stone':
if weightto == 'Grams':
weightans = weightask*6350
if weightto == 'Kilograms':
weightans = weightask/6.35
if weightto == 'Tonnes':
weightans = weightask/157
if weightto == 'Ounces':
weightans = weightask*224
if weightto == 'Pound':
weightans = weightask*14
if weightto == 'Stone':
weightans = weightask
weightans2 = ttk.Label(tab1, text = weightans)
weightans2.place(relx = 0.5, rely = 0.7, anchor = CENTER)
def distcalculate():
distask = float(distentry1.get())
distfrom = disttkvar.get()
distto = dist2tkvar.get()
if distfrom == 'Millimetres':
if distto == 'Millimetres':
distans = distask
if distto == 'Centimetres':
distans = distask/10
if distto == 'Metres':
distans = distask/1000
if distto == 'Kilometres':
distans = distask/1000000
if distto == 'Inches':
distans = distask/25.4
if distto == 'Foot':
distans = distask/305
if distto == 'Yard':
distans = distask/914
if distto == 'Mile':
distans = distask/1609000
if distfrom == 'Centimetres':
if distto == 'Millimetres':
distans = distask*10
if distto == 'Centimetres':
distans = distask
if distto == 'Metres':
distans = distask/100
if distto == 'Kilometres':
distans = distask/100000
if distto == 'Inches':
distans = distask/2.54
if distto == 'Foot':
distans = distask/30.48
if distto == 'Yard':
distans = distask/91.44
if distto == 'Mile':
distans = distask/160934
if distfrom == 'Metres':
if distto == 'Millimetres':
distans = distask*1000
if distto == 'Centimetres':
distans = distask*100
if distto == 'Metres':
distans = distask
if distto == 'Kilometres':
distans = distask/1000
if distto == 'Inches':
distans = distask*39.37
if distto == 'Foot':
distans = distask*3.281
if distto == 'Yard':
distans = distask*1.094
if distto == 'Mile':
distans = distask/1609
if distfrom == 'Kilometres':
if distto == 'Millimetres':
distans = distask*1000000
if distto == 'Centimetres':
distans = distask*100000
if distto == 'Metres':
distans = distask*1000
if distto == 'Kilometres':
distans = distask
if distto == 'Inches':
distans = distask*39370
if distto == 'Foot':
distans = distask*3281
if distto == 'Yard':
distans = distask*1094
if distto == 'Mile':
distans = distask/1.609
if distfrom == 'Inch':
if distto == 'Millimetres':
distans = distask*25.4
if distto == 'Centimetres':
distans = distask*2.54
if distto == 'Metres':
distans = distask/39.37
if distto == 'Kilometres':
distans = distask/39370
if distto == 'Inches':
distans = distask
if distto == 'Foot':
distans = distask/12
if distto == 'Yard':
distans = distask/36
if distto == 'Mile':
distans = distask/63360
if distfrom == 'Foot':
if distto == 'Millimetres':
distans = distask*305
if distto == 'Centimetres':
distans = distask*30.48
if distto == 'Metres':
distans = distask/3.281
if distto == 'Kilometres':
distans = distask/3281
if distto == 'Inches':
distans = distask*12
if distto == 'Foot':
distans = distask
if distto == 'Yard':
distans = distask/3
if distto == 'Mile':
distans = distask/5280
if distfrom == 'Yard':
if distto == 'Millimetres':
distans = distask*914
if distto == 'Centimetres':
distans = distask*91.44
if distto == 'Metres':
distans = distask/1.094
if distto == 'Kilometres':
distans = distask/1094
if distto == 'Inches':
distans = distask*36
if distto == 'Foot':
distans = distask*3
if distto == 'Yard':
distans = distask
if distto == 'Mile':
distans = distask/1760
if distfrom == 'Miles':
if distto == 'Millimetres':
distans = distask*1609000
if distto == 'Centimetres':
distans = distask*160934
if distto == 'Metres':
distans = distask*1609
if distto == 'Kilometres':
distans = distask*1.609
if distto == 'Inches':
distans = distask*63360
if distto == 'Foot':
distans = distask*5280
if distto == 'Yard':
distans = distask/1760
if distto == 'Mile':
distans = distask
distans2 = ttk.Label(tab2, text = distans)
distans2.place(relx = 0.5, rely = 0.7, anchor = CENTER)
def compcalculate():
    """Placeholder for the denary/binary/hex tab.

    Conversion logic is not implemented yet; currently just prints a debug
    marker when the Submit button is pressed.
    """
    print('ham')
def tempcalculate():
    """Placeholder for the temperature tab.

    Conversion logic is not implemented yet; currently just prints a debug
    marker when the Submit button is pressed.
    """
    print('ham')
def volcalculate():
    """Placeholder for the volume tab.

    Conversion logic is not implemented yet; currently just prints a debug
    marker when the Submit button is pressed.
    """
    print('ham')
# Tabbed notebook hosting one tab per conversion category.
tabControl = ttk.Notebook(root)
mainframe = Frame(tabControl)
mainframe.configure(bg = '#040000')
mainframe.pack(pady = 125, padx = 225)
tab1 = ttk.Frame(tabControl)
tab2 = ttk.Frame(tabControl)
tab3 = ttk.Frame(tabControl)
tab4 = ttk.Frame(tabControl)
tab5 = ttk.Frame(tabControl)
tabControl.add(tab1, text='Weight',)
tabControl.add(tab2, text='Distance')
tabControl.add(tab3, text='Denary, Binary and Hex')
tabControl.add(tab4, text='Temperature')
tabControl.add(tab5, text='Volume')
tabControl.pack(expand=1, fill="both")
##################################################################
# Tab 1 (Weight): two unit dropdowns, an entry field and a Submit button
# wired to weightcalculate().
weightOPTIONS = ['Grams', 'Kilograms', 'Tonnes', 'Ounces', 'Pounds', 'Stones']
weighttkvar = StringVar(tab1)
weight2tkvar = StringVar(tab1)
weightpopupMenu = OptionMenu(tab1, weighttkvar, *weightOPTIONS)
weightpopupMenu2 = OptionMenu(tab1, weight2tkvar, *weightOPTIONS)
weighttkvar.set('Please pick an option')
weight2tkvar.set('Please pick an option')
weightpopupMenu.place(relx = 0.4, rely = 0.2, anchor = E)
to = ttk.Label(tab1, text = 'to')
to.place(relx = 0.5, rely = 0.2, anchor = CENTER)
weightpopupMenu2.place(relx = 0.6, rely = 0.2, anchor = W)
weightentry1 = Entry(tab1, justify = CENTER )
weightentry1.place(relx = 0.5, rely = 0.4, anchor = CENTER)
weightbutton1 = Button(tab1, text = 'Submit', justify = CENTER, command = weightcalculate)
weightbutton1.place(relx = 0.5, rely = 0.5, anchor = CENTER)
##################################################################
# Tab 2 (Distance): same layout as the weight tab, wired to distcalculate().
distOPTIONS = ['Millimetres', 'Centimetres', 'Metres', 'Kilometres', 'Inches', 'Foot', 'Yard', 'Mile']
disttkvar = StringVar(tab2)
dist2tkvar = StringVar(tab2)
distpopupMenu = OptionMenu(tab2, disttkvar, *distOPTIONS)
distpopupMenu2 = OptionMenu(tab2, dist2tkvar, *distOPTIONS)
disttkvar.set('Please pick an option')
dist2tkvar.set('Please pick an option')
distpopupMenu.place(relx = 0.4, rely = 0.2, anchor = E)
to = ttk.Label(tab2, text = 'to')
to.place(relx = 0.5, rely = 0.2, anchor = CENTER)
distpopupMenu2.place(relx = 0.6, rely = 0.2, anchor = W)
distentry1 = Entry(tab2, justify = CENTER )
distentry1.place(relx = 0.5, rely = 0.4, anchor = CENTER)
distbutton1 = Button(tab2, text = 'Submit', justify = CENTER, command = distcalculate)
distbutton1.place(relx = 0.5, rely = 0.5, anchor = CENTER)
##################################################################
# Tab 3 (Denary/Binary/Hex): layout only; compcalculate() is still a stub.
compOPTIONS = ['Denary', 'Binary', 'Hex']
comptkvar = StringVar(tab3)
comp2tkvar = StringVar(tab3)
comppopupMenu = OptionMenu(tab3, comptkvar, *compOPTIONS)
comppopupMenu2 = OptionMenu(tab3, comp2tkvar, *compOPTIONS)
comptkvar.set('Please pick an option')
comp2tkvar.set('Please pick an option')
comppopupMenu.place(relx = 0.4, rely = 0.2, anchor = E)
to = ttk.Label(tab3, text = 'to')
to.place(relx = 0.5, rely = 0.2, anchor = CENTER)
comppopupMenu2.place(relx = 0.6, rely = 0.2, anchor = W)
compentry1 = Entry(tab3, justify = CENTER )
compentry1.place(relx = 0.5, rely = 0.4, anchor = CENTER)
compbutton1 = Button(tab3, text = 'Submit', justify = CENTER, command = compcalculate)
compbutton1.place(relx = 0.5, rely = 0.5, anchor = CENTER)
##################################################################
# Tab 4 (Temperature): layout only; tempcalculate() is still a stub.
tempOPTIONS = ['Celcius', 'Farenheit', 'Kelvin']
temptkvar = StringVar(tab4)
temp2tkvar = StringVar(tab4)
temppopupmenu = OptionMenu(tab4, temptkvar, *tempOPTIONS)
temppopupmenu2 = OptionMenu(tab4, temp2tkvar, *tempOPTIONS)
temptkvar.set('Please pick an option')
temp2tkvar.set('Please pick an option')
temppopupmenu.place(relx = 0.4, rely = 0.2, anchor = E)
to = ttk.Label(tab4, text = 'to')
to.place(relx = 0.5, rely = 0.2, anchor = CENTER)
temppopupmenu2.place(relx = 0.6, rely = 0.2, anchor = W)
tempentry = Entry(tab4, justify = CENTER )
tempentry.place(relx = 0.5, rely = 0.4, anchor = CENTER)
tempbutton = Button(tab4, text = 'Submit', justify = CENTER, command = tempcalculate)
tempbutton.place(relx = 0.5, rely = 0.5, anchor = CENTER)
##################################################################
# Tab 5 (Volume): layout only; volcalculate() is still a stub.
volOPTIONS = ['Millilitres', 'Litres', 'Centilitres', 'Decilitres', 'Pint', 'Quart', 'Gallon', 'Teaspoon', 'Tablespoon', 'Fluid ounce']
voltkvar = StringVar(tab5)
vol2tkvar = StringVar(tab5)
volpopupmenu = OptionMenu(tab5, voltkvar, *volOPTIONS)
volpopupmenu2 = OptionMenu(tab5, vol2tkvar, *volOPTIONS)
voltkvar.set('Please pick an option')
vol2tkvar.set('Please pick an option')
volpopupmenu.place(relx = 0.4, rely = 0.2, anchor = E)
to = ttk.Label(tab5, text = 'to')
to.place(relx = 0.5, rely = 0.2, anchor = CENTER)
volpopupmenu2.place(relx = 0.6, rely = 0.2, anchor = W)
volentry = Entry(tab5, justify = CENTER )
volentry.place(relx = 0.5, rely = 0.4, anchor = CENTER)
volbutton = Button(tab5, text = 'Submit', justify = CENTER, command = volcalculate)
volbutton.place(relx = 0.5, rely = 0.5, anchor = CENTER)
##################################################################
root.mainloop() | [
"71326878+HugoCotton@users.noreply.github.com"
] | 71326878+HugoCotton@users.noreply.github.com |
11ec425f8be68dda877b4711dab7240438492217 | c1fb485fe32e1c5b73761751da1e52e6a89f83cc | /dl-learnings/CNN/cifar10Traditional.py | 6cc9beb620262ee34ce947ae8edff7182b895f4a | [] | no_license | venkateshwj/Machine-Learning-and-Deep-Learning | 24c7435be44498773ba270ba217e3c0f518f0df8 | 8a164cb67e1480ab8200cf402c95812014081278 | refs/heads/master | 2020-04-22T05:47:28.468051 | 2019-02-11T18:02:08 | 2019-02-11T18:02:08 | 170,168,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,302 | py | # CNN
# Image -> Input Layer > Hidden Layer - Output Layer
#
# Input Layer :
# Accepts the images as part of pixels and form arrays
#
# Hidde Layer
# Feature Extraction
# - Convolution Layer (சுழற்சி)
# - Relu layer
# - Pooling layer
# - Fully connected layer.
#
# CL RL PL
# IL O O O
# O O O O
# O O O O
# O O O O
# O O O O
#
#
# Images will be converted as matrix (Assume white space as 0
# and dark place as 1)
#
# a= [5,3,2,5,9,7]
# b= [1,2,3]
# a * b = [5*1, 3*2, 2*3 ] = Sum = 17
# [3*1, 3*2, 3*2 ] = Sum = 22
# Final matrix - [17 ,22, **]
# Step 1: FITERS:
# Filters the unwanted pixels and forms smaller matrix and gives the features
# Step 2: Relu Layer
# Skip the negative values.
# Gives multiple features and muliple relu layers
# Step 3: Pooling(Edges)
# Down sampling and will give smaller dimensions
#
#Rectified Fetaure map
# 1 4 2 7
# 2 6 8 5
# 3 4 0 7
# 1 2 3 1
# Arriving the max value
# 6 8
# 4 7
# Finally Getting the 2 dimensional
# Step 4 Flattening
# 6
# 8
# 4
# 7
# Step 5 Fully connected layer
# Here the image classification happens
# Lets code
#%%
import os
#%% [markdown]
#
# Let's use CIFAR-10 dataset
#
# The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes,
# with 6000 images per class.
# There are 50000 training images and 10000 test images.
# The dataset is divided into five training batches and one test batch, each with 10000 images.
# The test batch contains exactly 1000 randomly-selected images from each class.
# The training batches contain the remaining images in random order,
# but some training batches may contain more images from one class than another.
# Between them, the training batches contain exactly 5000 images from each class.
#
#%% [markdown]
# Step 0: Get the Data
#%%
# Put file path as a string here
# Local directory holding the extracted "CIFAR-10 python version" batch files.
CIFAR_DIR = 'C:/F-Folder/Machine-Learning---Real-learning/data/cifar-10-batches-py/'
#%% [markdown]
# The archive contains the files data_batch_1, data_batch_2, ..., data_batch_5, as well as test_batch.
# Each of these files is a Python "pickled" object produced with cPickle.
#
# ** Load the Data. Use the Code Below to load the data: **
#%%
def unpickle(file):
    """Load one pickled CIFAR-10 batch file and return its dictionary.

    The batches were pickled under Python 2, so keys come back as bytes
    (hence encoding='bytes').
    """
    import pickle
    with open(file, 'rb') as batch_file:
        return pickle.load(batch_file, encoding='bytes')
# 60000
#%%
# File names of the pickled CIFAR-10 batches inside CIFAR_DIR.
dirs = ['batches.meta','data_batch_1','data_batch_2','data_batch_3','data_batch_4','data_batch_5','test_batch']
#%%
# Placeholder list; each slot is overwritten with an unpickled batch below.
all_data = [0,1,2,3,4,5,6]
#%%
# Load every batch file into its slot (index order matches `dirs`).
for i,direc in zip(all_data,dirs):
    all_data[i] = unpickle(CIFAR_DIR+direc)
#%%
# Give the batches readable names: metadata, five training batches, one test batch.
batch_meta = all_data[0]
data_batch1 = all_data[1]
data_batch2 = all_data[2]
data_batch3 = all_data[3]
data_batch4 = all_data[4]
data_batch5 = all_data[5]
test_batch = all_data[6]
#%%
# Notebook-style inspection cells (no visible effect when run as a script).
batch_meta
# Check for images
data_batch1
#%% [markdown]
# ** Why the 'b's in front of the string? **
# Bytes literals are always prefixed with 'b' or 'B';
# they produce an instance of the bytes type instead of the str type.
# They may only contain ASCII characters;
# bytes with a numeric value of 128 or greater must be expressed with escapes.
# https://stackoverflow.com/questions/6269765/what-does-the-b-character-do-in-front-of-a-string-literal
#%%
data_batch1.keys()
#%% [markdown]
# Loaded in this way, each of the batch files contains a dictionary with the following elements:
# * data -- a 10000x3072 numpy array of uint8s.
# Each row of the array stores a 32x32 colour image.
# The first 1024 entries contain the red channel values, the next 1024 the green,
# and the final 1024 the blue. The image is stored in row-major order, so that the first 32 entries of the array are the red channel values of the first row of the image.
# * labels -- a list of 10000 numbers in the range 0-9.
# The number at index i indicates the label of the ith image in the array data.
#
# The dataset contains another file, called batches.meta.
# It too contains a Python dictionary object. It has the following entries:
#
# * label_names -- a 10-element list which gives meaningful names to the numeric labels
# in the labels array described above. For example, label_names[0] == "airplane", label_names[1] == "automobile", etc.
#%% [markdown]
# ### Display a single image using matplotlib.
#
# ** Grab a single image from data_batch1 and
#%%
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')  # notebook magic; fails outside IPython
import numpy as np
#%%
# Raw pixel rows of the first training batch: flat uint8 rows, one image each,
# stored channel-major (R plane, then G, then B) per the CIFAR-10 layout.
X = data_batch1[b"data"]
# Out of 10,000 images 32 * 32 picture and 3 bits
# 10,000 images are broke down into 3 pieces
# RGB
# Transpose ( 0 - image , 2 is one 32 , 3 is another 32 and 1 is 3 pieces)
# "unit8" - Ram size
#%%
# Reorder to (image, height, width, channel) so matplotlib can display it.
X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("uint8")
# 0 to 255 float values
#%%
# Notebook inspection cells: peek at pixel ranges and a few sample images.
X[0].max()
#%%
plt.imshow(X[4])
#%%
(X[0]/255).max()
#%%
plt.imshow(X[45])
#%%
plt.imshow(X[50])
#%% [markdown]
# # Helper Functions for Dealing With Data.
#
# ** Use the provided code below to help with dealing with grabbing the
# next batch once you've gotten ready to create the Graph Session.
# 10 possible lables ,
# Which denotes data of car or dog [0,1,1,0,1,*****]
#%%
def one_hot_encode(vec, vals=10):
    """Return a (len(vec), vals) float array that one-hot encodes *vec*.

    Row i is all zeros except for a 1 at column vec[i]; `vals` is the number
    of possible labels (10 for CIFAR-10).
    """
    count = len(vec)
    encoded = np.zeros((count, vals))
    encoded[range(count), vec] = 1
    return encoded
#
#%%
class CifarHelper():
    """Collects the unpickled CIFAR-10 batches and serves training mini-batches."""

    def __init__(self):
        # Cursor into the training set for sequential mini-batching.
        self.i = 0

        # Raw batch dictionaries loaded at module level.
        self.all_train_batches = [data_batch1, data_batch2, data_batch3, data_batch4, data_batch5]
        self.test_batch = [test_batch]

        # Populated by set_up_images().
        self.training_images = None
        self.training_labels = None
        self.test_images = None
        self.test_labels = None

    def set_up_images(self):
        """Stack, reshape to NHWC, scale to [0, 1] and one-hot encode both splits."""
        print("Setting Up Training Images and Labels")
        stacked_train = np.vstack([batch[b"data"] for batch in self.all_train_batches])
        self.training_images = (
            stacked_train.reshape(len(stacked_train), 3, 32, 32).transpose(0, 2, 3, 1) / 255
        )
        self.training_labels = one_hot_encode(
            np.hstack([batch[b"labels"] for batch in self.all_train_batches]), 10
        )

        print("Setting Up Test Images and Labels")
        stacked_test = np.vstack([batch[b"data"] for batch in self.test_batch])
        self.test_images = (
            stacked_test.reshape(len(stacked_test), 3, 32, 32).transpose(0, 2, 3, 1) / 255
        )
        self.test_labels = one_hot_encode(
            np.hstack([batch[b"labels"] for batch in self.test_batch]), 10
        )

    def next_batch(self, batch_size):
        """Return the next (images, labels) slice and advance the cursor.

        NOTE(review): the reshape hard-codes 100, so this only works for
        batch_size == 100 -- kept exactly as in the original.
        """
        start, stop = self.i, self.i + batch_size
        x = self.training_images[start:stop].reshape(100, 32, 32, 3)
        y = self.training_labels[start:stop]
        self.i = (self.i + batch_size) % len(self.training_images)
        return x, y
#%% [markdown]
# ** How to use the above code: **
#%%
# Before Your tf.Session run these two lines
# Build the helper and materialize the train/test arrays (loads all batches).
ch = CifarHelper()
ch.set_up_images()
# batch = ch.next_batch(100)
#%% [markdown]
# ## Creating the Model
#
# ** Import tensorflow **
#%%
import tensorflow as tf
#%% [markdown]
# ** Create 2 placeholders, x and y_true. Their shapes should be: **
#
# * x shape = [None,32,32,3]
# * y_true shape = [None,10]
#
#%%
# Input images: NHWC with an open batch dimension, matching CifarHelper's arrays.
x = tf.placeholder(tf.float32,shape=[None,32,32,3])
# One-hot labels for the 10 CIFAR-10 classes.
y_true = tf.placeholder(tf.float32,shape=[None,10])
#%% [markdown]
# ** Create one more placeholder called hold_prob. No need for shape here.
# This placeholder will just hold a single probability for the dropout. **
#%%
# Keep-probability for dropout (fed as 0.5 during training, 1.0 at eval time).
hold_prob = tf.placeholder(tf.float32)
#%% [markdown]
# ### Helper Functions
#
# ** Grab the helper functions from MNIST with CNN (or recreate them here yourself for a hard challenge!). You'll need: **
#
# * init_weights
# * init_bias
# * conv2d
# * max_pool_2by2
# * convolutional_layer
# * normal_full_layer
#
#%%
def init_weights(shape):
    """Create a trainable weight Variable initialized from a truncated normal (stddev 0.1)."""
    init_random_dist = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_random_dist)
def init_bias(shape):
    """Create a trainable bias Variable initialized to the constant 0.1."""
    init_bias_vals = tf.constant(0.1, shape=shape)
    return tf.Variable(init_bias_vals)
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (spatial size preserved)."""
    # reducing 32 * 32 * 3 into 2D
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# getting the max value
def max_pool_2by2(x):
    """2x2 max pooling with stride 2: halves each spatial dimension."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def convolutional_layer(input_x, shape):
    """Convolution + bias + ReLU; `shape` is [filter_h, filter_w, in_channels, out_channels]."""
    W = init_weights(shape)
    b = init_bias([shape[3]])
    return tf.nn.relu(conv2d(input_x, W) + b)
# fully connected layer inuput will be flatten
def normal_full_layer(input_layer, size):
    """Dense layer (input @ W + b) from a flattened 2-D input to `size` units."""
    input_size = int(input_layer.get_shape()[1])
    W = init_weights([input_size, size])
    b = init_bias([size])
    return tf.matmul(input_layer, W) + b
#%% [markdown]
# ### Create the Layers
#
# ** Create a convolutional layer and a pooling layer as we did for MNIST. **
# ** Its up to you what the 2d size of the convolution should be, but the last two digits need to be 3 and 32 because of the 3 color channels and 32 pixels. So for example you could use:**
#
# convo_1 = convolutional_layer(x,shape=[4,4,3,32])
# STEP 1
#%%
# 3 channels , 32 - pixels each, 4 - filter size, 4 filter size
# First conv block: 4x4 filters, 3 input channels -> 32 feature maps, then 2x2 pool.
convo_1 = convolutional_layer(x,shape=[4,4,3,32])
convo_1_pooling = max_pool_2by2(convo_1)
#%% [markdown]
# ** Create the next convolutional and pooling layers. The last two dimensions of the convo_2 layer should be 32,64 **
#%%
# Second conv block: 4x4 filters, 32 -> 64 feature maps, then another 2x2 pool.
convo_2 = convolutional_layer(convo_1_pooling,shape=[4,4,32,64])
convo_2_pooling = max_pool_2by2(convo_2)
#%% [markdown]
# ** Now create a flattened layer by reshaping the pooling layer into [-1,8 \* 8 \* 64] or [-1,4096] **
# STEP 2
# 8*8*64 bytes 4096
#%%
# Flatten the 8x8x64 pooled feature maps into 4096-wide vectors for the dense layers.
convo_2_flat = tf.reshape(convo_2_pooling,[-1,8*8*64])
#%% [markdown]
# ** Create a new full layer using the normal_full_layer function and
# passing in your flattend convolutional 2 layer with size=1024. (You could also choose to reduce this to something like 512)**
# STEP 3
#%%
# Dense layer with 1024 units + ReLU on the flattened features.
full_layer_one = tf.nn.relu(normal_full_layer(convo_2_flat,1024))
#%% [markdown]
# ** Now create the dropout layer with tf.nn.dropout,
# remember to pass in your hold_prob placeholder. **
#%%
# Dropout regularization; the keep probability comes from the hold_prob placeholder.
full_one_dropout = tf.nn.dropout(full_layer_one,keep_prob=hold_prob)
#%% [markdown]
# ** Finally set the output to y_pred by passing in the dropout layer into the normal_full_layer function. The size should be 10 because of the 10 possible labels**
# 10 labels
#%%
# Final logits: one score per CIFAR-10 class (softmax is applied inside the loss).
y_pred = normal_full_layer(full_one_dropout,10)
y_pred
#%% [markdown]
# ### Loss Function
#
# ** Create a cross_entropy loss function **
# Improve gain
# Magic happens heres . Labels is nothing but dog or car
#%%
# Mean softmax cross-entropy between the one-hot labels and the logits.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y_pred))
#%% [markdown]
# ### Optimizer
# ** Create the optimizer using an Adam Optimizer. **
# redues the entropy
#%%
# Adam optimizer minimizing the cross-entropy loss.
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train = optimizer.minimize(cross_entropy)
#%% [markdown]
# ** Create a variable to intialize all the global tf variables. **
#%%
# Variable initializer op.  NOTE(review): the session below calls
# tf.global_variables_initializer() again instead of reusing this `init`.
init = tf.global_variables_initializer()
#%% [markdown]
# ## Graph Session
#
# ** Perform the training and test print outs in a Tf session and run your model! **
#%%
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # 500 * 100 = ?
    # 500 steps x 100 images per step = 50000 images, i.e. one pass over the
    # training set.
    for i in range(500):
        batch = ch.next_batch(100)
        # Training step with dropout active (keep probability 0.5).
        sess.run(train, feed_dict={x: batch[0], y_true: batch[1], hold_prob: 0.5})
        # PRINT OUT A MESSAGE EVERY 100 STEPS
        if i%100 == 0:
            print('Currently on step {}'.format(i))
            print('Accuracy is:')
            # Test the Train Model
            # NOTE(review): these two ops are re-created on every report step,
            # which keeps growing the graph; building them once before the
            # loop would be cleaner.
            matches = tf.equal(tf.argmax(y_pred,1),tf.argmax(y_true,1))
            acc = tf.reduce_mean(tf.cast(matches,tf.float32))
            # Evaluate on the full test set with dropout disabled (keep prob 1.0).
            print(sess.run(acc,feed_dict={x:ch.test_images,y_true:ch.test_labels,hold_prob:1.0}))
            print('\n')
| [
"venkat@masha.io"
] | venkat@masha.io |
72dbd153b494bd8b92ad9480744487c0e206a442 | 4cb43105861cb04d6b4d8c435455c273ba4ba57c | /resources/hello_world_try.py | 8152ca69843ae52e4eace4d41e4402fd3ef8c0cf | [] | no_license | Coqueiro/airflow-sample-dags | cc30dd95097d56323d73111e98e3f2bcde3f8df6 | 5b62fbed79ead02612613962ca230f09477c4fce | refs/heads/master | 2022-12-10T17:53:35.669969 | 2020-09-03T14:29:12 | 2020-09-03T14:29:12 | 283,019,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import time
import os
from random import random
from operator import add
from pyspark.sql import SparkSession
if __name__ == "__main__":
counter = 0
try_file = 0
while try_file==0:
time.sleep(5)
try_file = int(os.popen('[[ -f /opt/spark/try_file ]] && echo "1" || echo "0"').read().replace('\n',''))
if try_file==1:
try:
df = spark.read.json("s3a://default/data/data.json")
df.show()
except:
print(f"Failure number: {counter}.")
os.system("rm /opt/spark/try_file")
try_file=0
spark.stop() | [
"lucas.garcia@LGarcia-Mac-01.local"
] | lucas.garcia@LGarcia-Mac-01.local |
8ea144ad15a989a6fb5c66f893ce896d2f61d1d5 | 0adf575f118216bd40b921732b91099810f233d2 | /tasks/update_customer_invoice/__manifest__.py | 7bfb7e31416b6a5bc8ede980200bcce7b9315707 | [] | no_license | EzzEldinSaleh/addons | a03eecc90ca990d187ba8f98d2db25331dc5821f | 6029591b5191f5587c1f70478fa8c0ccc00dce10 | refs/heads/master | 2023-07-05T07:20:36.705822 | 2021-08-29T11:26:42 | 2021-08-29T11:26:42 | 401,021,206 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 650 | py |
{
'name': 'Change Partner For Draft Invoices !',
'summary': 'Change Partner For Draft Invoices !',
'author': "ITSS , Mahmoud Elfeky",
'company': 'ITSS',
'website': "http://www.itss-c.com",
'version': '14.0.0.1.0',
'category': 'Accounting',
'license': 'AGPL-3',
'sequence': 1,
'depends': [
'base',
'account',
],
'data': [
'security/ir.model.access.csv',
# 'report/',
'wizard/change_partner.xml',
# 'views/',
# 'data/',
],
'demo': [
# 'demo/',
],
'installable': True,
'application': True,
'auto_install': False,
}
| [
"zizosaleh2009@gmail.com"
] | zizosaleh2009@gmail.com |
1390b2d3b283c49021827414a5f0ca6601dd27e8 | 1cfafec5935522b386d40ab7bb7246f39da89fcc | /temp/20201221_naver_ai_handsonsummit.py | ff8784adba43b2a7a15adeb0447977ce5373c919 | [] | no_license | madfalc0n/my_coding_labs | 0d9e13e2d1579607d5481c6a78baa70a2c7c374a | b38fd988a5e3ebb8d8b66bf5a0b15eb3eaa20578 | refs/heads/master | 2021-07-03T17:33:16.801207 | 2021-06-18T06:24:09 | 2021-06-18T06:24:09 | 241,097,976 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | import sys
import requests
client_id = "CID"
client_secret = "CSECRET"
lang = "Kor" # 언어 코드 ( Kor, Jpn, Eng, Chn )
url = "https://naveropenapi.apigw.ntruss.com/recog/v1/stt?lang=" + lang
data = open('filepath', 'rb')
headers = {
"X-NCP-APIGW-API-KEY-ID": client_id,
"X-NCP-APIGW-API-KEY": client_secret,
"Content-Type": "application/octet-stream"
}
response = requests.post(url, data=data, headers=headers)
rescode = response.status_code
if(rescode == 200):
print (response.text)
else:
print("Error : " + response.text)
| [
"chadool116@naver.com"
] | chadool116@naver.com |
9172f750d7849ab7bbc817f35d4f92b13a8c9ac5 | 887448be64b191a9bf0eb80db6aa74e58e7aee9e | /tmdb/models.py | 814da167808c1803c7d8201263b91ea391dbad98 | [] | no_license | YBouz/TMDb | 31e3b041282c18fcc7e7bbeacd91e0eca3eb6e86 | a9b39c480372b2c75fbade28e545a7b46aacb9a0 | refs/heads/master | 2023-05-07T15:31:00.701015 | 2021-05-25T15:58:16 | 2021-05-25T15:58:16 | 364,262,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,882 | py | from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
class Title(models.Model):
TYPES = [
('Movie', 'Movie'),
('Show', 'Show')
]
type = models.CharField(max_length=10, choices=TYPES, default='Movie')
name = models.CharField(max_length=100)
description = models.TextField()
image = models.ImageField(default='default_poster.jpg', upload_to='title_posters')
trailer_url = models.CharField(max_length=250)
year = models.IntegerField()
runtime = models.IntegerField(blank=True, null=True)
class Meta:
ordering = ['-year', 'name']
def __str__(self):
return f'{self.name}'
def get_absolute_url(self):
return reverse('title-detail', kwargs={'pk': self.pk})
class Genre(models.Model):
name = models.CharField(max_length=50, unique=True)
class Meta:
ordering = ['name']
def __str__(self):
return f'{self.name}'
class TitleGenre(models.Model):
title = models.ForeignKey(Title, on_delete=models.CASCADE)
genre = models.ForeignKey(Genre, on_delete=models.CASCADE)
class Meta:
ordering = ['genre__name']
def __str__(self):
return f'{self.title} -- {self.genre}'
class Person(models.Model):
name = models.CharField(max_length=100)
description = models.TextField()
dob = models.DateField()
image = models.ImageField(default='default_user.jpg', upload_to='person_pics')
class Meta:
ordering = ['name']
def __str__(self):
return f'{self.name}'
class TitleCast(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
title = models.ForeignKey(Title, on_delete=models.CASCADE)
character = models.CharField(max_length=100)
class Meta:
ordering = ['person__name']
def __str__(self):
return f'{self.person} -- {self.title}'
class TitleCrew(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
title = models.ForeignKey(Title, on_delete=models.CASCADE)
ROLES = [
('Director', 'Director'),
('Producer', 'Producer'),
('Executive Producer', 'Executive Producer'),
('Screen Writer', 'Screen Writer'),
('Creator', 'Creator'),
]
role = models.CharField(max_length=50, choices=ROLES, default='Director')
class Meta:
ordering = ['person__name']
def __str__(self):
return f'{self.person} -- {self.title}'
class Production(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ['name']
def __str__(self):
return f'{self.name}'
class TitleProduction(models.Model):
company = models.ForeignKey(Production, on_delete=models.CASCADE)
title = models.ForeignKey(Title, on_delete=models.CASCADE)
class Meta:
ordering = ['company__name']
def __str__(self):
return f'{self.company} -- {self.title}'
class TitleReview(models.Model):
STARS = [
(1, 1),
(2, 2),
(3, 3),
(4, 4),
(5, 5)
]
rating = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(5)], choices=STARS, default=5)
content = models.TextField()
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.ForeignKey(Title, on_delete=models.CASCADE)
class Meta:
ordering = ['-date_posted']
verbose_name = 'Review'
verbose_name_plural = 'Reviews'
def __str__(self):
return f'{self.author} - ({self.date_posted})'
def get_absolute_url(self):
return reverse('title-detail', kwargs={'pk': Title.pk})
| [
"youssef@bouz.me"
] | youssef@bouz.me |
3b10596eaeb6c57b1df26cc257017ff071dade92 | 9e949193fa4166beb2acc49fa1d26f53998bc6a7 | /backend/manage.py | 21e45f31ce4106f083035414c4960e509e2f7441 | [] | no_license | chpenaf/TeLlevoAPP | 6a72e1507585fb2aa73c8e2883837f9efb2d0764 | 3f412cc30bb5a716486ec71eb06326993792bd3c | refs/heads/master | 2023-08-26T20:25:59.422049 | 2021-11-05T21:03:27 | 2021-11-05T21:03:27 | 404,098,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TeLlevoApp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"ch.penaf@gmail.com"
] | ch.penaf@gmail.com |
14679579a6dbc0f503f5b3d8562401165ce94756 | 91deb97afda334c5366e560325995cf6b5407bee | /src/command_modules/azure-cli-billing/azure/cli/command_modules/billing/custom.py | 21965f255ffa9f5644f22068a40961f8ca75b5a3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | viananth/azure-cli | ab117c1b0b676026cbb57567544cd70630efe830 | 4d23492ed03e946cfc11bae23b29acb971fb137d | refs/heads/master | 2021-05-23T05:13:51.414113 | 2017-08-17T16:58:10 | 2017-08-17T16:58:10 | 95,239,804 | 0 | 0 | NOASSERTION | 2019-03-19T18:45:16 | 2017-06-23T17:01:34 | Python | UTF-8 | Python | false | false | 891 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def cli_billing_list_invoices(client, generate_url=False):
"""List all available invoices of the subscription"""
invoices = client.list(expand='downloadUrl' if generate_url else None)
return list(invoices)
def cli_billing_get_invoice(client, name=None):
"""Retrieve invoice of specific name of the subscription"""
if name:
return client.get(name)
return client.get_latest()
def cli_billing_list_periods(client):
"""List all available billing periods of the subscription"""
return list(client.list())
| [
"troy.dai@outlook.com"
] | troy.dai@outlook.com |
1c733ac34e31752915353a8e409c649a76ae919c | 46348f2f13617c5364044440bd3a1691fae3f96d | /app.py | 9068c5766a697d973fb57d1cc04402e5affaad96 | [] | no_license | mkmaurya25/carprice | ef4142d4d24a0e708ac95f02693b1df254be17ea | 3cdca98692b7e59cd3970ef55cdf2f0bf2115846 | refs/heads/main | 2023-04-12T06:16:25.948844 | 2021-05-14T22:03:47 | 2021-05-14T22:03:47 | 367,480,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,133 | py | from flask import Flask, render_template, request
from flask import jsonify
import requests
import pickle
import numpy as np
import sklearn
from sklearn.preprocessing import StandardScaler
app = Flask(__name__)
model = pickle.load(open('random_forest_regression_model.pkl', 'rb'))
@app.route('/',methods=['GET'])
def Home():
return render_template('index.html')
standard_to = StandardScaler()
@app.route("/predict", methods=['POST'])
def predict():
Fuel_Type_Diesel=0
if request.method == 'POST':
Year = int(request.form['Year'])
Present_Price=float(request.form['Present_Price'])
Kms_Driven=int(request.form['Kms_Driven'])
Kms_Driven2=np.log(Kms_Driven)
Owner=int(request.form['Owner'])
Fuel_Type_Petrol=request.form['Fuel_Type_Petrol']
if(Fuel_Type_Petrol=='Petrol'):
Fuel_Type_Petrol=1
Fuel_Type_Diesel=0
elif (Fuel_Type_Petrol=='Diesel'):
Fuel_Type_Petrol=0
Fuel_Type_Diesel=1
else:
Fuel_Type_Petrol=0
Fuel_Type_Diesel=0
Year=2020-Year
Seller_Type_Individual=request.form['Seller_Type_Individual']
if(Seller_Type_Individual=='Individual'):
Seller_Type_Individual=1
else:
Seller_Type_Individual=0
Transmission_Mannual=request.form['Transmission_Mannual']
if(Transmission_Mannual=='Mannual'):
Transmission_Mannual=1
else:
Transmission_Mannual=0
prediction=model.predict([[Present_Price,Kms_Driven2,Owner,Year,Fuel_Type_Diesel,Fuel_Type_Petrol,Seller_Type_Individual,Transmission_Mannual]])
output=round(prediction[0],2)
if output<0:
return render_template('index.html',prediction_texts="Sorry you cannot sell this car")
else:
return render_template('index.html',prediction_text="You Can Sell The Car at {}".format(output))
else:
return render_template('index.html')
if __name__=="__main__":
app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
e80086c7681aba8a3e9db60de523efc0dda13b05 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02886/s458066707.py | c314b1e82e4800300ac6e420a37e53e4fca14534 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | n=int(input())
d=list(map(int,input().split()))
d2=[m**2 for m in d]
print((sum(d)**2-sum(d2))//2) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e8238ad90f566718d69fdf452dc3609eabf220b0 | d1e5de46c7bf5000f193f365642149368768a90f | /repository/discriminacion_horas_trabajo.py | 3ad83e73952301d6467f4df7f6ab709f09c8cc6d | [] | no_license | duvangiraldoavendano/sisteInfo | 5fb969cff8da7b6a462ad413f49c324480e3cfa8 | e932a8a0c8ccc355761999fc503d3b6e7be7c2c2 | refs/heads/main | 2023-01-21T19:33:38.666703 | 2020-11-30T17:01:49 | 2020-11-30T17:01:49 | 317,289,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,998 | py | from openpyxl import load_workbook
#Función que obtiene los profesores de ciencias politicas de catedra y los
#de derecho que son de cátedra y a su vez va al archivo de programación para
#mirar cuantas de las horas que aparecen en las horas de profesores, le corresponden a cada pregrado.
def discriminacion_horas_trabajo(cedula_dere, cedula_cp,prof_institu, profes_vincu, profes_cat, prof_institu_cp, profes_vincu_cp, profes_cat_cp,lineas_cedulas,excel_discriminacion,anio,semest):
# -----------------------------------------------PROFESORES DE CIENCIAS POLÍTICAS------------------------------------------------------------
#Procedimiento para obtener los nombres de los profesores de ciencias politicas que
#pertenecen al instituto de ciencias politicas.
wb_prof_institu = load_workbook(prof_institu)
sheet_prof_institu = wb_prof_institu["reporte"]
pos_prof_institu = 'd2'
pos_num_prof_institu = 2
lista_pares = []
par_ced_nombre_doc = []
lista_pares_nom_ced_prof_institu = []
nombre_prof_institu = []
while (sheet_prof_institu[pos_prof_institu].value != None):
pos_nom_prof_institu = 'e' + str(pos_num_prof_institu)
nombre_prof_institu.append(sheet_prof_institu[pos_nom_prof_institu].value)
par_ced_nombre_doc_prof_institu = []
index=0
for ced_dere in cedula_cp:
index+=1
if (sheet_prof_institu[pos_prof_institu].value == ced_dere):
par_ced_nombre_doc_prof_institu.append(ced_dere)
pos_nom_prof_institu = 'e' + str(pos_num_prof_institu)
par_ced_nombre_doc_prof_institu.append(sheet_prof_institu[pos_nom_prof_institu].value)
##pos_doc = 'e' + str(pos_num)
# par_ced_nombre_doc.append("CC")
break
lista_pares_nom_ced_prof_institu.append(par_ced_nombre_doc_prof_institu)
pos_num_prof_institu = pos_num_prof_institu + 1
pos_prof_institu = 'd' + str(pos_num_prof_institu)
nombre_prof_institu = set(nombre_prof_institu)
#Procedimiento para eliminar de lista_pares_nom_ced_prof_institu las listas que sean vacías.
resul = []
for i in lista_pares_nom_ced_prof_institu:
if i not in resul:
resul.append(i)
res = []
for i in resul:
if (i != []):
res.append(i)
lista_pares_nom_ced_prof_institu = res
#Procedimiento para obtener los profesores de ciencias politicas que son VINCULADOS a
#la facultad de Derecho y Ciencias políticas.
wb_profe_vincu = load_workbook(profes_vincu)
sheet_profe_vincu = wb_profe_vincu["reporte"]
pos_profe_vincu = 'd2'
pos_num_profe_vincu = 2
lista_pares_profe_vincu = []
par_ced_nombre_doc_profe_vincu = []
lista_pares_nom_ced_profe_vincu = []
nombre_profe_vincu = []
while (sheet_profe_vincu[pos_profe_vincu].value != None):
pos_nom_profe_vincu = 'e' + str(pos_num_profe_vincu)
nombre_profe_vincu.append(sheet_profe_vincu[pos_nom_profe_vincu].value)
par_ced_nombre_doc_profe_vincu = []
for ced_dere in cedula_cp:
c = sheet_profe_vincu[pos_profe_vincu].value
if (sheet_profe_vincu[pos_profe_vincu].value == ced_dere):
par_ced_nombre_doc_profe_vincu.append(ced_dere)
pos_nom_profe_vincu = 'e' + str(pos_num_profe_vincu)
par_ced_nombre_doc_profe_vincu.append(sheet_profe_vincu[pos_nom_profe_vincu].value)
##pos_doc = 'e' + str(pos_num)
# par_ced_nombre_doc.append("CC")
break
lista_pares_nom_ced_profe_vincu.append(par_ced_nombre_doc_profe_vincu)
pos_num_profe_vincu = pos_num_profe_vincu + 1
pos_profe_vincu = 'd' + str(pos_num_profe_vincu)
nombre_profe_vincu = set(nombre_profe_vincu)
# Procedimiento para eliminar de lista_pares_nom_ced_profe_vincu las listas que sean vacías.
resul = []
for i in lista_pares_nom_ced_profe_vincu:
if i not in resul:
resul.append(i)
res = []
for i in resul:
if (i != []):
res.append(i)
lista_pares_nom_ced_profe_vincu = res
#Procedimiento para obtener los profesores de ciencias politicas que son de CATEDRA en
#la facultad de Derecho y Ciencias Políticas.
wb = load_workbook(profes_cat)
sheet = wb["reporte"]
pos='d2'
pos_num=2
lista_pares=[]
par_ced_nombre_doc=[]
lista_pares_nom_ced=[]
nombre=[]
while(sheet[pos].value!=None):
pos_nom = 'e' + str(pos_num)
nombre.append(sheet[pos_nom].value)
par_ced_nombre_doc = []
for ced_dere in cedula_cp:
if(sheet[pos].value==ced_dere):
par_ced_nombre_doc.append(ced_dere)
pos_nom='e'+str(pos_num)
par_ced_nombre_doc.append(sheet[pos_nom].value)
##pos_doc = 'e' + str(pos_num)
#par_ced_nombre_doc.append("CC")
break
lista_pares_nom_ced.append(par_ced_nombre_doc)
pos_num=pos_num+1
pos='d'+str(pos_num)
nombre=set(nombre)
# Procedimiento para eliminar de lista_pares_nom_ced las listas que sean vacías.
resul = []
for i in lista_pares_nom_ced:
if i not in resul:
resul.append(i)
res=[]
for i in resul:
if(i!=[]):
res.append(i)
lista_pares_nom_ced=res
#Procedimiento para sacar de la lista de CATEDRA a los profesores que son VINCULADOS
catedra_no_vincu_no_ins=[]
for i in lista_pares_nom_ced:
contador_dife=0
for j in lista_pares_nom_ced_profe_vincu:
if(j[0]!=i[0]):
contador_dife+=1
if(contador_dife==len(lista_pares_nom_ced_profe_vincu)):
catedra_no_vincu_no_ins.append(i)
lista_pares_nom_ced=catedra_no_vincu_no_ins
#Procedimiento para sacar de la lista de CATEDRA a los profesores que son del INSTITUTO
for i in lista_pares_nom_ced:
contador_dife = 0
for j in lista_pares_nom_ced_prof_institu:
if(j[0]!=i[0]):
contador_dife+=1
if(contador_dife==len(lista_pares_nom_ced_prof_institu)):
pass
else:
catedra_no_vincu_no_ins.remove(i)
lista_pares_nom_ced=catedra_no_vincu_no_ins
#Procedimiento para sacar de la lista de VINCULADOS los profesores que son del INSTITUTO.
vincu_no_insti=[]
for i in lista_pares_nom_ced_profe_vincu:
contador_dife=0
for j in lista_pares_nom_ced_prof_institu:
if(j[0]!=i[0]):
contador_dife+=1
if(contador_dife==len(lista_pares_nom_ced_prof_institu)):
vincu_no_insti.append(i)
lista_pares_nom_ced_profe_vincu=vincu_no_insti
res = lista_pares_nom_ced
catedras_cp=lista_pares_nom_ced
vinculados_cp=lista_pares_nom_ced_profe_vincu
#-----------------------------------------------PROFESORES DE DERECHO---------------------------------------------------------------
#Procedimiento para obtener los nombres de los profesores de derecho que son del instituto de estudios politicos
wb_prof_institu = load_workbook(prof_institu_cp)
sheet_prof_institu = wb_prof_institu["reporte"]
pos_prof_institu = 'd2'
pos_num_prof_institu = 2
lista_pares = []
par_ced_nombre_doc = []
lista_pares_nom_ced_prof_institu = []
nombre_prof_institu = []
while (sheet_prof_institu[pos_prof_institu].value != None):
pos_nom_prof_institu = 'e' + str(pos_num_prof_institu)
nombre_prof_institu.append(sheet_prof_institu[pos_nom_prof_institu].value)
par_ced_nombre_doc_prof_institu = []
index=0
for ced_dere in cedula_dere:
index+=1
if (sheet_prof_institu[pos_prof_institu].value == ced_dere):
par_ced_nombre_doc_prof_institu.append(ced_dere)
pos_nom_prof_institu = 'e' + str(pos_num_prof_institu)
par_ced_nombre_doc_prof_institu.append(sheet_prof_institu[pos_nom_prof_institu].value)
break
lista_pares_nom_ced_prof_institu.append(par_ced_nombre_doc_prof_institu)
pos_num_prof_institu = pos_num_prof_institu + 1
pos_prof_institu = 'd' + str(pos_num_prof_institu)
nombre_prof_institu = set(nombre_prof_institu)
resul = []
for i in lista_pares_nom_ced_prof_institu:
if i not in resul:
resul.append(i)
res = []
for i in resul:
if (i != []):
res.append(i)
lista_pares_nom_ced_prof_institu = res
#Procedimiento para obtener los nombres de los profesores de derecho que son vinculados
wb_profe_vincu = load_workbook(profes_vincu_cp)
sheet_profe_vincu = wb_profe_vincu["reporte"]
pos_profe_vincu = 'd2'
pos_num_profe_vincu = 2
lista_pares_profe_vincu = []
par_ced_nombre_doc_profe_vincu = []
lista_pares_nom_ced_profe_vincu = []
nombre_profe_vincu = []
while (sheet_profe_vincu[pos_profe_vincu].value != None):
pos_nom_profe_vincu = 'e' + str(pos_num_profe_vincu)
nombre_profe_vincu.append(sheet_profe_vincu[pos_nom_profe_vincu].value)
par_ced_nombre_doc_profe_vincu = []
for ced_dere in cedula_dere:
c = sheet_profe_vincu[pos_profe_vincu].value
if (str(sheet_profe_vincu[pos_profe_vincu].value) == ced_dere):
par_ced_nombre_doc_profe_vincu.append(ced_dere)
pos_nom_profe_vincu = 'e' + str(pos_num_profe_vincu)
par_ced_nombre_doc_profe_vincu.append(sheet_profe_vincu[pos_nom_profe_vincu].value)
break
lista_pares_nom_ced_profe_vincu.append(par_ced_nombre_doc_profe_vincu)
pos_num_profe_vincu = pos_num_profe_vincu + 1
pos_profe_vincu = 'd' + str(pos_num_profe_vincu)
nombre_profe_vincu = set(nombre_profe_vincu)
resul = []
for i in lista_pares_nom_ced_profe_vincu:
if i not in resul:
resul.append(i)
res = []
for i in resul:
if (i != []):
res.append(i)
lista_pares_nom_ced_profe_vincu = res
# Procedimiento para obtener los nombres de los profesores de derecho que son de catedra
wb = load_workbook(profes_cat_cp)
sheet = wb["reporte"]
pos='d2'
pos_num=2
lista_pares=[]
par_ced_nombre_doc=[]
lista_pares_nom_ced=[]
nombre=[]
while(sheet[pos].value!=None):
pos_nom = 'e' + str(pos_num)
nombre.append(sheet[pos_nom].value)
par_ced_nombre_doc = []
for ced_dere in cedula_dere:
if(str(sheet[pos].value)==ced_dere):
par_ced_nombre_doc.append(ced_dere)
pos_nom='e'+str(pos_num)
par_ced_nombre_doc.append(sheet[pos_nom].value)
break
lista_pares_nom_ced.append(par_ced_nombre_doc)
pos_num=pos_num+1
pos='d'+str(pos_num)
nombre=set(nombre)
resul = []
for i in lista_pares_nom_ced:
if i not in resul:
resul.append(i)
res=[]
for i in resul:
if(i!=[]):
res.append(i)
lista_pares_nom_ced=res
#Procedimiento para excluir de la lista de profesores de catedra
#los profesores que son vinculados
vincu_catedra=[]
vinculados_dan_catedra=[]
for i in lista_pares_nom_ced:
if(i not in lista_pares_nom_ced_profe_vincu):
vincu_catedra.append(i)
else:
vinculados_dan_catedra.append(i)
lista_pares_nom_ced=vincu_catedra
res = lista_pares_nom_ced
#Procedimiento para excluir de la lista de profesores del instituto
#los profesores que son de catedra o son vinculados
insti_dere = []
for i in lista_pares_nom_ced_prof_institu:
if ((i not in lista_pares_nom_ced_profe_vincu) and (i not in lista_pares_nom_ced)):
insti_dere.append(i)
lista_pares_nom_ced_prof_institu=insti_dere
catedras_derecho=lista_pares_nom_ced
vinculados_derecho=lista_pares_nom_ced_profe_vincu
instituto_derecho=lista_pares_nom_ced_prof_institu
comunes_catedra=[]
for i in catedras_derecho:
for j in catedras_cp:
if(i[0]==j[0]):
comunes_catedra.append(i)
comunes_vinculados=[]
for i in vinculados_derecho:
for j in vinculados_cp:
if(i[0]==j[0]):
comunes_vinculados.append(i)
for catedras in comunes_catedra:
for linea in lineas_cedulas:
if(linea.find(catedras[0])!=-1):
algovoyahaceraqui=0
vincu_cpa_horas_cat=[]
vincu_cpa_horas_plan=[]
vincu_der_horas_cat = []
vincu_der_horas_plan = []
wb_discri = load_workbook(excel_discriminacion)
sheet_discri= wb_discri["SQL_Results"]
for ced in comunes_vinculados:
sum_vincu_cpa_horas_cat = 0
sum_vincu_cpa_horas_plan = 0
sum_vincu_der_horas_cat = 0
sum_vincu_der_horas_plan = 0
pos_num = 2
pos = 'A' + str(pos_num)
while(sheet_discri[pos].value!=None):
if (str(sheet_discri[pos].value) == str(anio) + str(semest)):
if (ced[0] == str(sheet_discri["P" + str(pos_num)].value)):
if (str(sheet_discri["M" + str(pos_num)].value) == "CPA" or str(sheet_discri["M" + str(pos_num)].value) == "CPT"):
if(str(sheet_discri["S"+str(pos_num)].value)!=""):
sum_vincu_cpa_horas_cat+=int(sheet_discri["S"+str(pos_num)].value)
if(str(sheet_discri["T" + str(pos_num)].value)!=""):
sum_vincu_cpa_horas_plan+=int(sheet_discri["T" + str(pos_num)].value)
elif (str(sheet_discri["M" + str(pos_num)].value) == "DER" or str(sheet_discri["M" + str(pos_num)].value) == "DEP" or str(sheet_discri["M" + str(pos_num)].value) == "DEI"):
if(str(sheet_discri["S"+str(pos_num)].value)!=""):
sum_vincu_der_horas_cat+=int(sheet_discri["S"+str(pos_num)].value)
if(str(sheet_discri["T" + str(pos_num)].value)!=""):
sum_vincu_der_horas_plan+=int(sheet_discri["T" + str(pos_num)].value)
else:
pass
pos_num += 1
pos = 'A' + str(pos_num)
#Horas de catedra de los profesores vinculados CP
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(sum_vincu_cpa_horas_cat)
vincu_cpa_horas_cat.append(lista_ind)
#Horas del plan vinculados CP
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(sum_vincu_cpa_horas_plan)
vincu_cpa_horas_plan.append(lista_ind)
#Horas de catedra de los profesores vinculados DERECHO
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(sum_vincu_der_horas_cat)
vincu_der_horas_cat.append(lista_ind)
#Horas del plan vinculados DERECHO
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(sum_vincu_der_horas_plan)
vincu_der_horas_plan.append(lista_ind)
cate_cpa_horas_cat = []
cate_cpa_horas_plan = []
cate_der_horas_cat = []
cate_der_horas_plan = []
for ced in comunes_catedra:
sum_cate_cpa_horas_cat = 0
sum_cate_cpa_horas_plan = 0
sum_cate_der_horas_cat = 0
sum_cate_der_horas_plan = 0
pos_num = 2
pos = 'A' + str(pos_num)
while(sheet_discri[pos].value!=None):
if (str(sheet_discri[pos].value) == str(anio) + str(semest)):
if (ced[0] == str(sheet_discri["P" + str(pos_num)].value)):
if (str(sheet_discri["M" + str(pos_num)].value) == "CPA" or str(sheet_discri["M" + str(pos_num)].value) == "CPT"):
if(str(sheet_discri["S"+str(pos_num)].value)!=""):
sum_cate_cpa_horas_cat+=int(sheet_discri["S"+str(pos_num)].value)
if(str(sheet_discri["T" + str(pos_num)].value)!=""):
sum_cate_cpa_horas_plan+=int(sheet_discri["T" + str(pos_num)].value)
elif (str(sheet_discri["M" + str(pos_num)].value) == "DER" or str(sheet_discri["M" + str(pos_num)].value) == "DEP" or str(sheet_discri["M" + str(pos_num)].value) == "DEI"):
if(str(sheet_discri["S"+str(pos_num)].value)!=""):
sum_cate_der_horas_cat+=int(sheet_discri["S"+str(pos_num)].value)
if(str(sheet_discri["T" + str(pos_num)].value) != ""):
sum_cate_der_horas_plan+=int(sheet_discri["T" + str(pos_num)].value)
else:
pass
pos_num += 1
pos = 'A' + str(pos_num)
#Horas de catedra de los profesores vinculados CP
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(sum_cate_cpa_horas_cat)
cate_cpa_horas_cat.append(lista_ind)
#Horas del plan vinculados CP
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(sum_cate_cpa_horas_plan)
cate_cpa_horas_plan.append(lista_ind)
#Horas de catedra de los profesores vinculados DERECHO
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(sum_cate_der_horas_cat)
cate_der_horas_cat.append(lista_ind)
#Horas del plan vinculados DERECHO
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(sum_cate_der_horas_plan)
cate_der_horas_plan.append(lista_ind)
if(anio==2018 and semest==2):
print("ojo")
return cate_cpa_horas_cat,cate_cpa_horas_plan, vincu_cpa_horas_cat,vincu_cpa_horas_plan,cate_der_horas_cat,cate_der_horas_plan,vincu_der_horas_cat,vincu_der_horas_plan
'''
while(sheet_discri[pos].value!=None):
if(str(sheet_discri[pos].value)==str(anio)+str(semest)):
for ced in comunes_vinculados:
if(ced[0]==str(sheet_discri["P"+str(pos_num)].value)):
if(str(sheet_discri["M"+str(pos_num)].value)=="CPA" or str(sheet_discri["M"+str(pos_num)].value)=="CPT"):
lista_ind=[]
lista_ind.append(ced[0])
lista_ind.append(str(sheet_discri["S"+str(pos_num)].value))
vincu_cpa_horas_cat.append(lista_ind)
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(str(sheet_discri["T" + str(pos_num)].value))
vincu_cpa_horas_plan.append(lista_ind)
elif(str(sheet_discri["M"+str(pos_num)].value)=="DER" or str(sheet_discri["M"+str(pos_num)].value)=="DEP" or str(sheet_discri["M"+str(pos_num)].value)=="DEI"):
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(str(sheet_discri["S" + str(pos_num)].value))
vincu_der_horas_cat.append(lista_ind)
lista_ind = []
lista_ind.append(ced[0])
lista_ind.append(str(sheet_discri["T" + str(pos_num)].value))
vincu_der_horas_plan.append(lista_ind)
else:
pass
pos_num+=1
pos='A'+str(pos_num)
'''
s = 1
s3 = 2 | [
"noreply@github.com"
] | noreply@github.com |
161ae1ba5c2aee37cddf0bd2e95c6feee60f60ea | e3428ec60a71a90fed3d157c472e561778f830c2 | /apps/main/models.py | e4dcfa55a7bb0452743e5095c075c13a45f49381 | [] | no_license | Rchimedes/travelplanner | 221c3dc5886c71b96070b205919424e546f37d13 | 928d63781fb7b21d819e9e8a6a3d8321628522f0 | refs/heads/master | 2023-02-03T08:12:20.792738 | 2020-12-21T21:58:40 | 2020-12-21T21:58:40 | 320,104,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | from django.db import models
from apps.login.models import User
class TripManager(models.Manager):
def basic_validator(self, post_data):
errors = {}
if len(post_data["destination"]) < 3:
errors["destination"] = "Please enter a destination with at least 3 characters"
if len(post_data["plan"]) < 3:
errors["plan"] = "Please enter more info on the plan"
return errors
def edit_validator(self, post_data):
errors = {}
if len(post_data["destination"]) < 3:
errors["destination"] = "Please enter a destination"
if len(post_data["plan"]) < 3:
errors["plan"] = "Please enter more info on the plan"
return errors
class Trip(models.Model):
destination = models.CharField(max_length=50)
plan = models.CharField(max_length=255)
start_date = models.DateTimeField()
end_date = models.DateTimeField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, related_name="trips")
userassigned = models.ForeignKey(User, related_name="tripsassigned")
objects = TripManager()
| [
"seventh_fret1@yahoo.com"
] | seventh_fret1@yahoo.com |
39963d3f5fd4b2c8772eba64c11eb9018bea8634 | 9aa8fcaff3541e6527314ec04ced80d4aa4767aa | /args.py | 9b4f3ef538a9952fbb511a2bb5a7ff3902535b69 | [] | no_license | einstalek/centertrack_torch | 5cc0d7eea43ac5c3a292e8e32529d35b76af68da | 67816c09b774dc5726dc3927f4dac32d9b70bb63 | refs/heads/main | 2023-01-07T12:04:55.916122 | 2020-11-11T19:39:01 | 2020-11-11T19:39:01 | 312,070,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,832 | py | import torch
class Args:
# Model parameters
device = 'cuda'
device_id = 1
backbone = 'resnet50'
heads = {'hm': 1, 'reg': 2, 'wh': 2, 'tracking': 2, }
weights = {'hm': 1.1, 'reg': 1, 'wh': 0.7, 'tracking': 1, }
use_bias = True
use_conv_transp = False
num_kernels = 2
down_ratio = 8
num_frames = 1
inp_dim = 3 + 3 * num_frames
# Data annotations
train_json = '/home/jovyan/dataset/train-v2.json'
val_json = '/home/jovyan/dataset/val-v2.json'
data_dir = "/home/jovyan/dataset/"
# # coco split for batch size 92
# train_group_rates = {("hand_dataset", "coco/"): 42,
# ("0002_openpose",): 2,
# ("0002_navigation_",): 6,
# ("0004_",): 6, ("0010_",): 16,
# ("0023_",): 16, ("office",): 2,
# ("human_crop",): 2,
# }
# split for batch size 92
train_group_rates = {("hand_dataset", "coco/"): 14,
("0002_openpose",): 2,
("0002_navigation_",): 12,
("0004_",): 12, ("0010_",): 24,
("0023_",): 24, ("office",): 2,
("human_crop",): 2,
}
# Input parameters
pre_hm = False
ret_fpath = True
max_frame_dist = 30
max_dist_proba = 0.7
input_w = 384
input_h = 224
cvt_gray = False
widen_boxes = 1.15
gaussian_min_radius = 2
pre_hm_min_radius = 16
# Validation parameters
val_skip_frame = 2 # frame_dist между frame и prev_frame
val_select_frame = 5 # frame_num % val_select_frame = 0 для подчета метрик
# Augmentation parameters
aug_rot = 1
rotate = 5
crop_near_box = 0.2
crop_min_box_size = 60
fp_disturb = 0.2
lost_disturb = 0.4
hm_disturb = 0.08
no_color_aug = False
use_gamma = True
gamma = (0.3, 2.)
aug_s = (0.7, 1.1)
comment = "resnet backbone, non-coco split"
# Training parameters
batch_size = 92
start_epoch = 49
end_epoch = 400
write_mota_metrics = True
num_iters = {'train': 150, 'val': -1}
gpu = 14
lr = 1.3e-4
clip_value = 50.
lr_step = (start_epoch + 150, start_epoch + 200)
drop = 0.8
max_objs = 15
print_iter = 20
hm_l1_loss = 0.
# Checkpoints
save_dir = "/home/jovyan/CenterTrack/weights_1/"
res_dir = save_dir + 'temp/'
weights_dir = save_dir
load_model = None # save_dir + "model_48.pth"
save_point = range(start_epoch + 1, end_epoch + 1, 3)
# Tracker
new_thresh = 0.4
thresh = 0.3
def __init__(self):
self.device = torch.device('cuda' if self.device == 'cuda' else 'cpu')
| [
"arganaidi@phystech.edu"
] | arganaidi@phystech.edu |
b02b17830771cfde31dd41ff465f4a7e44fcaf1c | 96ad70021009fdd4ee0df52e06ce30e87696cc99 | /Homework4/basic_simulations.py | 18bcbdb87157180dd56983ee8a4dc22dc080f68b | [] | no_license | tthatcher95/Large-Scale-Data-Structures | ee27e60b4e12f816023b57eb198a319d25fcefb4 | a42cd9abeccad407d0273a7aa9f64236848e7aeb | refs/heads/master | 2020-04-16T16:29:16.239421 | 2019-01-14T21:38:49 | 2019-01-14T21:38:49 | 165,739,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,853 | py | import numpy as np
import random
import sys
def get_random_letter(letter):
a = ['A', 'C', 'T', 'G']
a.remove(letter)
return random.choice(a)
def no_error(file, generated_number):
with open(file) as s:
total = 0
unique = []
while (total < generated_number):
current_sequence = ""
rand_number = random.randint(0, 10000)
s.seek(rand_number)
for i in range(0, 16):
char = s.read(1)
if char == '\n':
char = s.read(1)
current_sequence += char
unique.append(current_sequence)
total += 1
unique_frags = set(unique)
# print("Number of Unique Fragments: ", len(unique_frags))
return unique
def with_error(file, frag_array, p_error = 0.05):
total = 0
unique = []
avg_num_of_failuress = int(np.random.geometric(p=p_error, size=100).sum() / 100)
index = int(avg_num_of_failuress / 16)
string_position = int(avg_num_of_failuress % 16)
while (total < len(frag_array)):
x = list(frag_array[total])
x[string_position] = get_random_letter(x[string_position])
x = "".join(x)
frag_array[total] = x
total += index
unique_frags = set(frag_array)
# print("Number of Unique Fragments: ", len(unique_frags))
return len(unique_frags)
if __name__ == '__main__':
if sys.argv[1] == "1A":
avg_num_of_failuress = np.random.geometric(p=0.005, size=100)
print("Failures: {}".format(int(avg_num_of_failuress.sum() / 100)))
elif sys.argv[1] == "1B":
avg_num_of_succeses = np.random.binomial(10000, p=0.005)
print("Succeses: {}".format(int(avg_num_of_succeses)))
elif sys.argv[1] == "2A":
total_unique = 0
for i in range(0, 100):
frags, unique_frags = no_error(str(sys.argv[2]), 100000)
total_unique += len(unique_frags)
print("Avg. Number of Unique Fragments (100 trials): {}".format(int(total_unique/100)))
elif sys.argv[1] == "2B":
total_unique = 0
frags, unique_frags = no_error(str(sys.argv[2]), 100000)
for i in range(0, 100):
unq_frags = with_error(sys.argv[2], frags, p_error=0.01)
total_unique += unq_frags
print("Avg. Number of Unique Fragments (100 trials): {}".format(int(total_unique/100)))
elif sys.argv[1] == "2C":
total_unique = 0
frags, unique_frags = no_error(str(sys.argv[2]), 100000)
for i in range(0, 60):
unq_frags = with_error(sys.argv[2], frags, p_error=0.05)
total_unique += unq_frags
print("Iteration: {} Unique Fragments: {}".format(i, int(total_unique)))
print("Avg. Number of Unique Fragments (100 trials): {}".format(int(total_unique/100)))
| [
"tthatcher84@gmail.com"
] | tthatcher84@gmail.com |
697285250228dcb49cfeba0a6f5ac32856abae93 | 9a09bb914b6d99d5fa6b62b36818e2fafff59262 | /Practice_programmes/numpyExample/example2.py | 4d75d9564974d334a957443a9327ece93b51ae9b | [] | no_license | lgoyal06/LearningPython | c589f02a3457b0d8c656b70e74d9467ece5a7a23 | 14fac94133599db1fab55ab35cf0b74cd2828b60 | refs/heads/master | 2020-04-02T18:23:52.630539 | 2019-06-25T16:32:33 | 2019-06-25T16:32:33 | 154,698,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | import numpy as np
a=np.array([0,1,2,3,4])
print(a)
print(type(a))
print(a.dtype)
b = np.array([3.1, 11.02, 6.2, 213.2, 5.2])
print(b.dtype)
| [
"lgoyal06@gmail.com"
] | lgoyal06@gmail.com |
67558e1d4c168ae6ffe706cae7b73d5b96991949 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /ec2_write_1/client-vpn-client-certificate-revocation-list_export.py | 590e1a0b6582eaa6d0a43363fd3ba344c40e4825 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/export-client-vpn-client-certificate-revocation-list.html
if __name__ == '__main__':
"""
import-client-vpn-client-certificate-revocation-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/import-client-vpn-client-certificate-revocation-list.html
"""
parameter_display_string = """
# client-vpn-endpoint-id : The ID of the Client VPN endpoint.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("ec2", "export-client-vpn-client-certificate-revocation-list", "client-vpn-endpoint-id", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
08d9ef9e0df888661ac7a92b07f1072c7c740bb1 | 45231c60b4f011993975c081cfbf9c0c434e7d0e | /lab6_face_.py | 322431abfd79667f4c98eb121affb2806bd1a061 | [] | no_license | sumi89/Machine_Learning | bae6035e245eb2ef9f429a4a4d10551cbf87ccfd | 617d7ac5b3f8834c98589e77ad41fb448a3f3180 | refs/heads/master | 2020-04-24T01:31:19.797817 | 2019-02-20T04:49:38 | 2019-02-20T04:49:38 | 171,601,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,904 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 02:17:45 2018
@author: sumi
"""
from __future__ import print_function
import sklearn
from sklearn.model_selection import train_test_split
from scipy.special import expit
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.utils import np_utils
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
import cv2
from sklearn.datasets import fetch_lfw_people
def onehot(X):
T = np.zeros((X.shape[0],np.max(X)+1))
T[np.arange(len(X)),X] = 1 #Set T[i,X[i]] to 1
return T
def confusion_matrix(Actual,Pred):
cm=np.zeros((np.max(Actual)+1,np.max(Actual)+1), dtype=np.int)
for i in range(len(Actual)):
cm[Actual[i],Pred[i]]+=1
return cm
def read_data(X,Class):
print("Reading data")
#X = np.loadtxt(xfile, delimiter=",")
#Class = np.loadtxt(yfile, delimiter=",").astype(int)
X /= 255
X = X.reshape(-1,62,47,1)
Y = onehot(Class)
print("Data read")
return X,Y,Class
people = fetch_lfw_people(min_faces_per_person=50, resize=0.5)
mask = np.zeros(people.target.shape, dtype=np.bool)
for target in np.unique(people.target):
mask[np.where(people.target == target)[0][:50]] = 1
X_people = people.data[mask]
y_people = people.target[mask]
# scale the grey-scale values to be between 0 and 1
# instead of 0 and 255 for better numeric stability:
#X_people = X_people / 255.
#Read data
# number 1
X_train, X_test, Y_train, Y_test = train_test_split(X_people, y_people, random_state=99)
x_train, y_train, train_class = read_data(X_train, Y_train)
x_test, y_test, test_class = read_data(X_test, Y_test)
# Set network parameters
np.set_printoptions(threshold=np.inf) #Print complete arrays
batch_size = 32
epochs = 100
learning_rate = 0.0001
first_layer_filters = 16
second_layer_filters = 32
third_layer_filters=32
ks = 3
mp = 2
dense_layer_size = 64
# trial
flipped_image = np.zeros([people.images.shape[0],people.images.shape[1],people.images.shape[2]])
for i in range(people.images.shape[0]):
# ori_image = people.images[i]
# plt.imshow(ori_image)
flipped_image[i] = cv2.flip(people.images[i], 1)
# plt.imshow(flipped_image)
flipped_image = flipped_image.reshape(1560,-1)
new_train_data = np.append(people.data, flipped_image, axis = 0)
new_train_label = np.append(people.target, people.target, axis = 0)
#Read data
#number 3
X_train, X_test, Y_train, Y_test = train_test_split(new_train_data, new_train_label, random_state=99)
x_train, y_train, train_class = read_data(X_train, Y_train)
x_test, y_test, test_class = read_data(X_test, Y_test)
#
#for i in range(3):
# ind = np.random.randint(x_train.shape[0])
# I = (255*x_train[ind]).reshape((65, 87)).astype(int)
# plt.imshow(I, cmap=plt.get_cmap('gray'))
# plt.show()
#Build model
model = Sequential()
model.add(Conv2D(first_layer_filters, kernel_size=(ks, ks),
activation='relu',
input_shape= (62, 47, 1)))
print(model.output_shape)
model.add(MaxPooling2D(pool_size=(mp, mp)))
print(model.output_shape)
model.add(Conv2D(second_layer_filters, (ks, ks), activation='relu'))
print(model.output_shape)
model.add(MaxPooling2D(pool_size=(mp, mp)))
print(model.output_shape)
model.add(Conv2D(third_layer_filters, (ks, ks), activation='relu'))
print(model.output_shape)
model.add(MaxPooling2D(pool_size=(mp, mp)))
print(model.output_shape)
model.add(Flatten())
print(model.output_shape)
model.add(Dense(dense_layer_size, activation='relu'))
print(model.output_shape)
model.add(Dense(12, activation='softmax'))
print(model.output_shape)
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(lr=learning_rate),
metrics=['accuracy'])
print(model.output_shape)
#Train network, store history in history variable
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
verbose=1)
score = model.evaluate(x_test, y_test, verbose=1)
print('\nTest loss:', score[0])
print('Test accuracy:', score[1])
# Summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
pred=model.predict_classes(x_test)
print('\n',confusion_matrix(test_class,pred))
print(model.summary())
| [
"sumi@sumis-MacBook-Pro.local"
] | sumi@sumis-MacBook-Pro.local |
022be4db452f8ecc1c423f41fa77963d1855a30e | 9e21ee282d0a567b42a96f938f61d655eb2d5940 | /chat_room/tests/test_base.py | 1262db10f3b75ee65a9f2fb0e64f06f887ac4d2a | [] | no_license | smolynets/chat-interface | e0ac815639dd993f029f331a545c5c5932785569 | 3b66970c241eb1660b60a612aceffde36223eff4 | refs/heads/master | 2021-06-12T02:19:47.749561 | 2019-07-13T12:42:21 | 2019-07-13T12:42:21 | 191,516,912 | 0 | 0 | null | 2021-06-10T18:21:22 | 2019-06-12T07:06:21 | Python | UTF-8 | Python | false | false | 1,496 | py | """
This test is inherited by tests of other apps.
"""
from django.urls import reverse
from rest_framework.test import APIClient, APITestCase
from rest_framework_simplejwt.settings import api_settings
from ..models import User
class APIRestAuthJWTClient(APIClient):
"""
APIRestAuthJWTClient class.
Login with jwt tokens.
"""
def login(self, login_name="login", **credentials):
"""
Login method.
Get tokens, if successful login.
"""
login_endpoint = reverse(login_name)
login_response = self.post(login_endpoint, credentials, format="json")
if login_response.status_code == 200:
self.credentials(
HTTP_AUTHORIZATION="{0} {1}".format(
api_settings.defaults["AUTH_HEADER_TYPES"][0],
login_response.data["access"]
)
)
return True
else:
return False
class APITestBaseClass(APITestCase):
"""
APITestBaseClass class.
Get APITestBaseClass.
"""
def setUp(self):
"""
Creeate User.
"""
self.user = User.objects.create_user(
username="test_user",
email="test@emil.com",
password="password"
)
self.user_two = User.objects.create_user(
username="test2_user",
email="test@emil2.com",
password="password"
)
client_class = APIRestAuthJWTClient
| [
"smolynets@gmail.com"
] | smolynets@gmail.com |
3bf1de822c4ab67af57788b5f747874e40c1bb8d | 2d4b27060f9870240293e00e9704fcb448bb1644 | /tester.py | a3bb9bcc33a96108e6663db86e279b473a150b19 | [] | no_license | majedomran/capstone | 465cff836f0c2832caa7e723ce38d4dee2e50516 | 244637b2dba560ba67ebbd644a5516b0fdc2838d | refs/heads/master | 2023-01-18T18:34:35.086818 | 2020-11-30T09:58:33 | 2020-11-30T09:58:33 | 308,686,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,973 | py | import requests
import json
data = json.dumps({'name':'SLS','model':'2020','reuseable':'True','capacity':5})
header = {'Authorization': 'Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IncyZVdtNWdrMl9nVjBNMVMtZWtFcCJ9.eyJpc3MiOiJodHRwczovL2Rldi04b2JlaGV1ei5ldS5hdXRoMC5jb20vIiwic3ViIjoiYXV0aDB8NWY4YTg1ZGNiYWFhMWMwMDZmNmM0YzM3IiwiYXVkIjoiY2Fwc3RvbmUiLCJpYXQiOjE2MDY2NDY1MzcsImV4cCI6MTYwNjczMjkzNywiYXpwIjoiNkE0bmg5MkpLY24yenNMdUtTbTZCRkoyQlFxbFZiSU8iLCJzY29wZSI6IiIsInBlcm1pc3Npb25zIjpbImRlbGV0ZTpvbmUiLCJnZXQ6YWxsIiwiZ2V0Om9uZSIsInBvc3Q6b25lIl19.StJRmer_TnduFTmZtj7JUwWO4btu0G_ENz28ayKEudbaPzQs_eoPzEUjcRM2tcI2wBo2dtDcirvaUlsQHmOsvNbpCJvRGcJpxs7sR5j0rUsJPSeR5J2v0AgG1PPPPY8Sq2UcGOGbf0X5pqMQaKo6w_VVaABNfQqmlp5wbrkUP1Gj--NE9Y-8UaXgNN0p5SYrqQ7KFK0Kgg27DkYEMFmbAV6q0gPbucAhOyYfm-mUhH-oGAC2ey2Mtr4x1LoJLKpQHdap3VtkgT3ExpAtzfYEwOcI-wGYnyL0c3f78Yo808ACbSY5rcQJBEpsIBDLTfxwlUcAWouADxGuRsSzmOIb6w'}
data_flight = json.dumps({'spaceship':1,'station':1,'launchingpad':'37A','launchingdate':'2020/11/16'})
data_Station = json.dumps({'crewcapacity':10,'fuelcapacity':500000})
data_astronaut = json.dumps({'name':'majed','job':"IT",'flight':1})
r_Spaceship = requests.post('http://127.0.0.1:5000/Spaceship',data=data,headers=header)
r_Station = requests.post('http://127.0.0.1:5000/Station',data=data_Station,headers=header)
r_Flight = requests.post('http://127.0.0.1:5000/Flight',data=data_flight,headers=header)
r_Astronaut = requests.post('http://127.0.0.1:5000/Astronaut',data=data_astronaut,headers=header)
# r_Astronaut_get_one = requests.get('https://heroku-sample-majed.herokuapp.com/Astronaut/1',headers=header)
# r_Astronaut_get_one = requests.delete('https://heroku-sample-majed.herokuapp.com/Astronaut/2',headers=header)
# print(r_Astronaut_get_one.status_code)
print(r_Station.status_code)
print(r_Spaceship.status_code)
print(r_Flight.status_code)
print(r_Astronaut.status_code)
# print(r_Flight.content)
# print(r_Spaceship.status_code)
# print(r_Spaceship.content) | [
"majedbinomran@gmail.com"
] | majedbinomran@gmail.com |
4b23b94b4f2d585da0f98db4029d3c7ee8ae51b4 | e9ad052050bffec5ce61ab272a7d39336625c687 | /appstore/wsgi.py | 2d5028843b96722e6325e28aba06e2cfe721e774 | [] | no_license | rxlisbest/django-appstore | 8a8946e781fb1e271650dbed6b66b40577cab002 | 2cd9c55fae604c10b90d2dd147bc0527c872e395 | refs/heads/master | 2020-04-05T23:06:30.463090 | 2014-04-27T09:17:24 | 2014-04-27T09:17:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | """
WSGI config for appstore project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import sys
import os
sys.path.append('/var/www/python/appstore/')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "appstore.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"roy@localhost.localdomain"
] | roy@localhost.localdomain |
00b8187a078d6e96f49f7e9472e15e264b6980da | 0a51871ae18acb0ad24a4f4c090cfd805a6d8c41 | /ShareKnowledge/wsgi.py | 0c38c1077c593af8f6f063b41723ed1fca24f68d | [] | no_license | kiudou/ShareKnowledge | beb595c1e0ab0e6bdf8a7f79be184095b95d919c | 568485ee5b29b9bf2e6df9141b095981c74df6fa | refs/heads/master | 2020-03-14T16:51:43.574871 | 2018-05-19T01:54:04 | 2018-05-19T01:54:04 | 131,706,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for ShareKnowledge project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ShareKnowledge.settings")
application = get_wsgi_application()
| [
"qidong120@hotmail.com"
] | qidong120@hotmail.com |
93af9a347d6f279d1e07da3c44e45072e228bc89 | cbfe621b0790157c05cc6ab16fe01cc5db0e8a34 | /game.py | 6a5cfb0eb0d72780d48785782c0d811179b47282 | [] | no_license | sirdigital/korpo_trail | caf1e671ae9ec87e1a272afb79a9841f504e4fae | d70336075db3a04a5500c44648da1eb6c578ae56 | refs/heads/master | 2020-04-22T05:11:06.067409 | 2019-02-12T12:01:04 | 2019-02-12T12:01:04 | 170,150,797 | 0 | 0 | null | 2019-02-12T12:01:06 | 2019-02-11T15:18:21 | null | UTF-8 | Python | false | false | 97 | py | from story_manager import StoryManager
if __name__ == '__main__':
manager = StoryManager()
| [
"Cezar4853"
] | Cezar4853 |
259cf5d639056369c3f945a9da6687d606cf610b | 0eb0e6e65a0e00cdf52f9817ca73990f97644d54 | /01 - janela e fundo/main.py | adddcb015dadc6083645c69c8a0cb9798b065090 | [] | no_license | dudu9999/Tkinter_Projects_dudu9999 | aafffb4e6b298788b8580f44eb0cfb3c713d2f84 | 5bd7f4939ee149c7024013e987879f4b6bba398e | refs/heads/master | 2020-08-05T12:06:42.562741 | 2019-10-03T04:37:23 | 2019-10-03T04:37:23 | 212,497,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | ##import tkinter as tk
##
##janela = tk.Tk()
##
##janela.mainloop
# OU
import tkinter
janela = tkinter.Tk()
janela.title("Janela Principal")
janela["bg"] = "green" # coloca a cor de fundo
janela["background"] = "green" # ou assim.
# largura x Altura + Distancia da esquerda da tela + Distancia do topo da tela
# LxA+E+T
# 500x600+400+200 (medida em pixel)
janela.geometry("500x200+10+30") # tamano e local onde a janela vai ficar
# janela.geometry("+10+30") # ou aasim que só escole o local onde a janela ai ficar
janela.mainloop
| [
"ecaetanocorrea@gmail.com"
] | ecaetanocorrea@gmail.com |
e80392eaa312e8e823423e815e039a6b62a9e4a1 | 461b17f1d781b93838fd4e447506572252991c75 | /Hand Gesture Recognition/segment.py | 2afbae0bc88b544eb9184244157728fb6e0b5474 | [] | no_license | sohamtiwari3120/visionAiTaskphase | 8a50aa9dd2176d9522eed00b1c72a37cefbd703c | 3e979596bf1438df84b092a951fe526b1b87178c | refs/heads/master | 2021-04-14T14:16:39.237531 | 2020-05-08T01:07:58 | 2020-05-08T01:07:58 | 249,236,673 | 0 | 0 | null | 2020-03-22T17:32:50 | 2020-03-22T17:32:49 | null | UTF-8 | Python | false | false | 895 | py | import cv2
import numpy as np
import imutils
kernel = np.ones((5, 5),np.uint8)
bg = None
def run_avg(curr_image, aWeight):
global bg
if bg is None:
bg = curr_image.copy().astype('float')
return
cv2.accumulateWeighted(src = curr_image, dst = bg, alpha=aWeight)
def segmentImage(curr_image, threshold=25):
global bg
kernel = np.ones((5, 5),np.uint8)
diff = cv2.absdiff(bg.astype('uint8'), curr_image)
_, thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)
thresholded = cv2.morphologyEx(thresholded, cv2.MORPH_CLOSE, kernel)
contours, heirarchy = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# print('contours')
# print(contours[0])
if(len(contours)==0):
return
else:
segmented = max(contours, key = cv2.contourArea)
return thresholded, segmented, diff | [
"soham.tiwari800@gmail.com"
] | soham.tiwari800@gmail.com |
977df78d722282409ad0ab4c71cf5d039b03b76d | ccaa16ac556eea4158af2385fb8e1dc2b76eaf91 | /apps/civic_pulse/apps.py | 22f47a4d88b35df40457cc6f2dd6952bf224e96c | [
"MIT"
] | permissive | morisy/GovLens | 9814d65683fa467ca9c8c09b46c5fb2ab8199f27 | efcd37b04b5ccbbe991a53e4a5d0b40754cf8889 | refs/heads/master | 2020-11-24T04:29:01.407047 | 2019-12-14T04:54:49 | 2019-12-14T04:54:49 | 227,965,237 | 1 | 0 | MIT | 2019-12-14T04:23:07 | 2019-12-14T04:23:06 | null | UTF-8 | Python | false | false | 96 | py | from django.apps import AppConfig
class CivicPulseConfig(AppConfig):
name = 'civic_pulse'
| [
"jdansey@bu.edu"
] | jdansey@bu.edu |
3c78d94f96313789cdcab22a4e37af4d6683944f | dcda5ba16474dd8ff650e04e7f4a9bf700f6a9ff | /manage.py | e4582d9147c1e546b5dee4a9c82e5fcceb52ac75 | [] | no_license | 007vict/shopbyexample | 2084d6e53faafb5c7e856cc8b3a5ff43bc3a82e2 | bc7dcfe5818499731c3cbf956c9c0b95cf3791da | refs/heads/master | 2022-12-21T13:05:08.425653 | 2019-04-10T10:30:41 | 2019-04-10T10:30:41 | 177,291,341 | 0 | 0 | null | 2022-12-08T04:58:00 | 2019-03-23T13:18:59 | JavaScript | UTF-8 | Python | false | false | 547 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myshopbyexample.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"super_vg@bk.ru"
] | super_vg@bk.ru |
8a6f357264b2dbc8114efa7cb34e8a02b9be2820 | 63b814265ab49ebc2ed8e62577757991119be83b | /data-quality/kalman-filt.py | d10a0e87d25b724b1f8289d92c13e7a3168ac9bd | [] | no_license | wisecg/mjd-analysis | 7de4e67c34c19215984f528f31f71a8e584e1e91 | ca4f00a767f2dfe6d460b44c700e2b59fe0bb296 | refs/heads/master | 2020-12-07T21:28:34.376478 | 2017-08-28T15:20:17 | 2017-08-28T15:20:17 | 65,919,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | """
The idea is to take plots like "gmax" and "gbase" in data-quality.cc
and apply a Kalman filter to them, to look for "extrema"
http://scipy-cookbook.readthedocs.io/items/KalmanFiltering.html
Intro to Kalman filters:
http://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf
Ben says:
everything is “basically linear,” which in electronics engineering speak means made of gaussians
so you model it with a bunch of kalman filters
and that gives you a statistically robust way to look for discontinuities or other jumps
its how they monitor parameters at like a gas turbine plant or jet engine or shit like that
its called fault detection and is a big component of controls engineering
its like wildly unexciting
but sort of cool math
but, like, say you want to monitor stability of a peak or whatever
you can make a bunch of plots of that peak position and look at them by eye
or you can have a filter that looks at the position vs time and says WOAH WTF BRO if it jumps
kalman filters are markov chain way to do that
and you know we roll markov style up in this bitch
same with rates or whatever
""" | [
"wisecg.neontetra@gmail.com"
] | wisecg.neontetra@gmail.com |
1547ae20bcab955d0bc53826f0c25ebaf5c0ca77 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2645/60768/316508.py | 65bd1d2918995f2d545307313de4c81f073d0279 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | piles = eval(input())
h = int(input())
max_k = max(piles)
min_k = int(sum(piles) / h)
re = max_k
for k in range(min_k, max_k + 1):
time = 0
bananas = [i for i in piles]
while len(bananas) > 0:
for i in range(len(bananas)):
bananas[i] = bananas[i] - k
time += 1
if bananas[i] < 0:
bananas[i] = 0
while 0 in bananas:
bananas.remove(0)
if time <= h:
re = k
break
print(re) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
8114b4cb6593c1b9742ce21db55745a0f2733e00 | 0ca0471d6457d8dcacc5f3433af586bed44cb7af | /python/aBasic/c_module_class/mypackage/mymodule.py | 6ee5dd3ceb7d03117b50fac88b916f3a39884dfb | [] | no_license | ehan831/coding_study | 61f47a8b5a7fe448fc71a868637590821d988729 | 14958b6b4642e6488156091293e854cc36cf9411 | refs/heads/master | 2022-12-21T20:29:10.265425 | 2019-09-05T04:07:22 | 2019-09-05T04:07:22 | 181,843,058 | 0 | 0 | null | 2022-12-16T00:45:37 | 2019-04-17T07:50:37 | Jupyter Notebook | UTF-8 | Python | false | false | 1,204 | py | """
[정리]
* 함수 : 파일 내에서 일정한 작업을 수행하는 코드 블록
* 모듈 : 함수나 클래스들을 파일
모듈이름은 py 확장자를 제외한 파일 이름
* 패키지 : 여러 모듈들을 모아놓은 디렉토리
패키지 = 디렉토리
모듈 = 파일
[ 모듈 ]
- 자주 사용되는 함수를 매번 작성하지 않고 하나의 모듈로 사용하여 재사용
- 모듈 단위로 분리하여 설계함으로 작업의 효율을 높임
- 동일한 함수나 클래스를 모듈로 관리
` 표준 모듈 : 파이썬 안에 기본적으로 제공하는 모듈
` 사용자 정의 모듈 : 개발자가 직접 정의한 모듈
"""
from random import choice
def get_weather():
today = ['맑음', '비', '눈', '폭우', '돌풍', '따뜻']
return choice(today)
def get_date():
today = ['월', '화', '수', '목', '금', '토', '일ㄴ']
return choice(today)
# 프로그램의 시작 점
if __name__ == '__main__':
today = get_weather()
print('오늘의 날씨는', today)
print(get_date(), '요일입니다')
| [
"ehan831@gmail.com"
] | ehan831@gmail.com |
979fc63e25109ceca7ce6c252e93769919babbfb | be989b74573721f4359553d375bd752006f70b5c | /handson_XOR_Classification.py | c85e637283eac0d0b2967060bf5d048aee420b7f | [] | no_license | onegeeknosquad/HandsOn-ML-Tensorflow | a5b94bab252dae68ec3fd22beeb4e7f1da64d747 | fccc700ffcb052f90d990e3dcfcf04241c28bb74 | refs/heads/master | 2020-03-20T22:52:30.816609 | 2018-06-19T00:00:09 | 2018-06-19T00:00:09 | 137,819,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 18 18:08:23 2018
@author: mrpotatohead
XOR Classification Problem
Hands on Machine Learning with Scikit Learn and Tensorflow
Page 262
Figure 10-6
"""
import tensorflow as tf
import numpy as np
X = [[0,0],[0,1],[1,0],[1,1]]
y = [[0],[1],[1],[0]]
n_steps = 50000
#n_epoch = 10000
n_training = len(X)
n_input_nodes = 2
n_hidden_nodes = 5
n_output_nodes = 1
learning_rate = 0.05
X_ = tf.placeholder(tf.float32, shape=[n_training, n_input_nodes], name="x-input")
y_ = tf.placeholder(tf.float32, shape=[n_training, n_output_nodes],name="y-input")
w1 = tf.Variable(tf.random_uniform([n_input_nodes, n_hidden_nodes],-1,1),name="w1")
w2 = tf.Variable(tf.random_uniform([n_hidden_nodes, n_output_nodes],-1,1),name="w2")
bias1 = tf.Variable(tf.zeros([n_hidden_nodes]), name="bias1")
bias2 = tf.Variable(tf.zeros([n_output_nodes]), name="bias2")
layer1 = tf.sigmoid(tf.matmul(X_, w1) + bias1)
output = tf.sigmoid(tf.matmul(layer1, w2) + bias2)
cost = tf.reduce_mean(tf.square(y - output))
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for _ in range(n_steps):
sess.run(train_step, feed_dict={X_: X, y_: y})
if _ % 1000 == 0:
print("Batch: ", _)
print("Inference ", sess.run(output, feed_dict={X_: X, y_: y}))
print("Cost ", sess.run(cost, feed_dict={X_: X, y_: y}))
#Evaluate the Network
test_X = [[0,.17],[1,1],[.9,.1],[.83,.17]] # 0, 0, 1, 1
print(output.eval(feed_dict={X_:test_X}, session=sess)) | [
"onegeeknosquad@gmail.com"
] | onegeeknosquad@gmail.com |
e95391f7262eb77f17c357f16d90808250365044 | 3bd5b9c81bca76565e583dd5e35e9f10966926ac | /ptr_with_no_coverage/BiDAF.py | 0591eee01dd3038286da4c308e8c11c78ae0375c | [] | no_license | FeixLiu/graduation_project | cc06e89098ade093f149291638c3edc044976356 | 42b9de0138446097b5396d6306698be1661b3d3b | refs/heads/master | 2020-04-25T13:53:47.711713 | 2019-04-13T03:37:18 | 2019-04-13T03:37:18 | 172,822,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,947 | py | import tensorflow as tf
class BiDAF():
"""
self._refc (tensor): the context tensor
shape = [paragraph_numbers, max_seq_length, bert_embedding_size]
self._refq (tensor): the query tensor
shape = [paragraph_numbers, max_seq_length, bert_embedding_size]
self._cLength (int): the length of the refc, equal with max_seq_length
self._qLength (int): the length of the refq, equal with max_seq_length
self._hidden_units (int): the hidden units of the embedding layer, equal with bert_embedding_size
self.fuse_vector (tensor): fuse_vector of the BiDAF
shape = [paragraph_numbers, max_seq_length, 4 * bert_embedding_length]
self._sim_Mat (tensor): the similarity matrix between text and query
shape = [paragraph_numbers, max_seq_length, max_seq_length]
self._c2q_attention (tensor): text to query attention
shape = [paragraph_numbers, max_seq_length, bert_embedding_size]
self._q2c_attention (tensor): text to query attention
shape = [paragraph_numbers, max_seq_length, bert_embedding_size]
"""
def __init__(self, refc, refq, cLength, qLength, hidden_units, name):
"""
function: initialize the class
:param refc (tensor): the context tensor
shape = [paragraph_numbers, max_seq_length, bert_embedding_size]
:param refq (tensor): the query tensor
shape = [paragraph_numbers, max_seq_length, bert_embedding_size]
:param cLength (int): the length of the refc, equal with max_seq_length
:param qLength (int): the length of the refq, equal with max_seq_length
:param hidden_units (int): the hidden units of the embedding layer, equal with bert_embedding_size
"""
self._refc = refc
self._refq = refq
self._cLength = cLength
self._qLength = qLength
self._hidden_units = hidden_units
self._name = name
self.fuse_vector = self._biAttention()
def _biAttention(self):
"""
function: the process of the BiDAF
:return fuse_vector (tensor): fuse_vector of the BiDAF
shape = [paragraph_numbers, max_seq_length, 4 * bert_embedding_length]
"""
self._sim_Mat = self._simMat()
self._c2q_attention = self._c2q_attention()
self._q2c_attention = self._q2c_attention()
fuse_vector = self._calculateG()
return fuse_vector
def _simMat(self):
"""
function: calculate the similarity matrix between text and query
:return simMat (tensor): the similarity matrix between text and query
shape = [paragraph_numbers, max_seq_length, max_seq_length]
"""
weights_coMat = tf.Variable(tf.random_normal(dtype=tf.float32, shape=[6 * self._hidden_units, 1]),
name=self._name+'_weights_coMat')
cExp = tf.tile(tf.expand_dims(self._refc, 2), [1, 1, self._qLength, 1])
qExp = tf.tile(tf.expand_dims(self._refq, 1), [1, self._cLength, 1, 1])
simMat = tf.concat([cExp, qExp, tf.math.multiply(cExp, qExp)], axis=3)
simMat = tf.reshape(simMat, [-1, 6 * self._hidden_units])
simMat = tf.matmul(simMat, weights_coMat)
simMat = tf.reshape(simMat, [-1, self._cLength, self._qLength])
return simMat
def _c2q_attention(self):
"""
function: calculate the attention from the text to the query
:return c2q_attention (tensor): text to query attention
shape = [paragraph_numbers, max_seq_length, bert_embedding_size]
"""
soft_sim = tf.nn.softmax(self._sim_Mat, axis=2)
attention_weight = tf.tile(tf.reduce_sum(soft_sim, axis=2, keepdims=True), [1, 1, self._qLength])
c2q_attention = tf.matmul(attention_weight, self._refq)
return c2q_attention
def _q2c_attention(self):
"""
function: calculate the attention from the query to the text
:return q2c_attention (tensor): text to query attention
shape = [paragraph_numbers, max_seq_length, bert_embedding_size]
"""
soft_sim = tf.nn.softmax(tf.reduce_max(self._sim_Mat, axis=2), axis=1)
attented_context_vector = tf.matmul(tf.expand_dims(soft_sim, 1), self._refc)
q2c_attention = tf.tile(attented_context_vector, [1, self._cLength, 1])
return q2c_attention
def _calculateG(self):
"""
function: calculate the bi-direction attention flow fuse_vector with the two side attention
:return fuse_vector (tensor): fuse_vector of the BiDAF
shape = [paragraph_numbers, max_seq_length, 4 * bert_embedding_length]
"""
hu = tf.concat([self._refc, self._c2q_attention], axis=2)
hmu = tf.math.multiply(self._refc, self._c2q_attention)
hmh = tf.math.multiply(self._refc, self._q2c_attention)
fuse_vector = tf.concat([hu, hmu, hmh], axis=2)
return fuse_vector
| [
"1842068905@qq.com"
] | 1842068905@qq.com |
d98d52460289e88e419f5d0a57625556b7e514b8 | 8e8ed90ee59426da5c8a350ded2b635cefcc923c | /robosuite/models/segmentation/split_dataset.py | b5a522743505a5348e3fab91e58b7eb5b254a77a | [
"MIT"
] | permissive | YeWR/robosuite | 6286782294fd922f20bce3e8ff89449ddab34b8a | 49bd80c0e6499299a96b67b5e23cd8903e849d7d | refs/heads/master | 2020-12-05T18:54:38.029260 | 2020-09-12T15:41:08 | 2020-09-12T15:41:08 | 232,214,909 | 2 | 1 | MIT | 2020-11-17T03:04:53 | 2020-01-07T01:09:21 | Python | UTF-8 | Python | false | false | 1,520 | py | import os
import random
def split_by_type(file, path, type=4):
lines = open(file).readlines()
fs = []
for t in range(type):
p = os.path.join(path, 'label_' + str(t) + '.txt')
fs.append(open(p, 'w'))
for line in lines:
path, action, obj_type, reward = line.split()
f = fs[int(obj_type)]
s = path + ' ' + action + ' ' + obj_type + ' ' + reward
f.write(s + '\n')
for f in fs:
f.close()
def split_train_test(file, ratio=0.8):
lines = open(file).readlines()
num = len(lines)
random.shuffle(lines)
train_num = int(num * ratio)
train_lines = lines[:train_num]
test_lines = lines[train_num:]
assert len(train_lines) > len(test_lines)
path = file.split('.')[0]
train_path = path + '_train.txt'
test_path = path + '_test.txt'
train_file = open(train_path, 'w')
test_file = open(test_path, 'w')
for line in train_lines:
train_file.write(line + '\n')
for line in test_lines:
test_file.write(line + '\n')
train_file.close()
test_file.close()
if __name__ == '__main__':
split_by_type('/home/yeweirui/data/8obj_half_in_1m/label.txt', '/home/yeweirui/data/8obj_half_in_1m/')
split_train_test('/home/yeweirui/data/8obj_half_in_1m/label_0.txt')
split_train_test('/home/yeweirui/data/8obj_half_in_1m/label_1.txt')
split_train_test('/home/yeweirui/data/8obj_half_in_1m/label_2.txt')
split_train_test('/home/yeweirui/data/8obj_half_in_1m/label_3.txt') | [
"yeweirui16@gmail.com"
] | yeweirui16@gmail.com |
c3c4498cc89fc0759a18d9c2e42405e1699763d0 | 89771f98d03e9bda96301a08544585061595af6c | /ros_start/src/kinematics.py | e641fd96af68ce4c41e27f77bc5f187fbe4e8421 | [
"BSD-3-Clause"
] | permissive | Ryosuke-YA/kadai2 | c21f6875924a2fbbfef29c40f8451c767b2253c6 | b981a9d02a4d02948e54d371569bf158b8c3212f | refs/heads/master | 2020-12-18T19:41:57.108435 | 2020-01-23T15:34:48 | 2020-01-23T15:34:48 | 235,501,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | #!/usr/bin/env python
import roslib
import rospy
import math
PI = math.pi
def Kinematics_f(x, y, z):
sheta1rad = float(math.atan2(x, y))
sheta1 = float((sheta1rad* 180.0 / PI)*10)
C3 = float(((x-pow(51.2*math.sin(sheta1rad),2) + pow(y - 51.2*math.cos(sheta1rad),2) + pow(z,2)-pow(54.2,2)-pow(118.0,2)) /(2*54.2*118.0)))
sheta3rad = float(math.atan2(math.sqrt(1-pow(C3,2)),C3))
sheta3 = float((sheta3rad*180.0 / PI)*10)
k = float(-math.sqrt(pow(x-math.sin(sheta1rad)*51.2,2)) + pow(y-math.cos(sheta1rad)*51.2,2 ))
g = float(-(pow(math.sin(sheta3rad),2)*pow(118.0,2))-pow(math.cos(sheta3rad)*118.0 + 54.2,2 ))
a = float(math.cos(sheta3rad)*118.0+54.2)
sheta2rad = float(math.atan2(((k*math.sin(sheta3rad)*118.0)-a*z)/g,(-k*a-(math.sin(sheta3rad)*118.0*z))/g))
sheta2 = float((sheta2rad*180.0 / PI)*10)
return sheta1, sheta2, sheta3
if __name__ == '__main__':
pass
| [
"noreply@github.com"
] | noreply@github.com |
5509f28877444ba0ac97b513a2106dbc9ddd0995 | ea0c0b8d67a42086f840149b3dbe1c0e4f58e56f | /members_area/migrations/0005_auto_20200129_2122.py | 12832e8dc63b3301b343c645b65d640d95c3d93b | [
"MIT"
] | permissive | AzeezBello/raodoh | 78b27e0886f8882144a4def160d9c3f53bcc6af9 | 296bd44069bd750557bf49995374601f5052d695 | refs/heads/master | 2022-05-03T05:07:21.632642 | 2020-02-26T10:16:08 | 2020-02-26T10:16:08 | 235,878,080 | 0 | 0 | MIT | 2022-04-22T23:01:27 | 2020-01-23T20:15:39 | JavaScript | UTF-8 | Python | false | false | 481 | py | # Generated by Django 2.2.9 on 2020-01-29 20:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('members_area', '0004_auto_20200128_2330'),
]
operations = [
migrations.AlterField(
model_name='lesson',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members_area.Course'),
),
]
| [
"azeez@scholarx.co"
] | azeez@scholarx.co |
16e3399b6851e6117677a58a73bbfb612061145e | 16bd8ca85a6249975b47fab6513e89de0d737986 | /rooms/migrations/0007_auto_20210323_1545.py | f830de470b2f06ef79b94668c9c29d87bf6901ab | [] | no_license | yessm621/airbnb-clone | ed5ee7b52c290bb6a4e140b1387d66048e41db39 | 76faadc75f00c3b8801e50e2596334d08ff29ff2 | refs/heads/master | 2023-06-01T05:12:29.779071 | 2021-07-13T02:21:00 | 2021-07-13T02:21:00 | 344,806,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Generated by Django 2.2.5 on 2021-03-23 06:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rooms', '0006_auto_20210321_1752'),
]
operations = [
migrations.AlterField(
model_name='photo',
name='file',
field=models.ImageField(upload_to='room_photos'),
),
]
| [
"yessm621@gmail.com"
] | yessm621@gmail.com |
3225468d05a0d7d8ccb6a3150949dc7e5a44ca89 | cab6d7986b8a001a1e41a8f9063d5b91e0b3215b | /Exercício 10/ex10.py | 1f58afc7995f23ed34d64b6c2b543ac0e515ecac | [] | no_license | DevVictorr/Python-Exercicios | 4f59527d7be7fbf5be46c2344feee8876f10464d | 4b449505e0a94877da512a80e4e17b69020d143c | refs/heads/master | 2023-06-04T22:19:32.211581 | 2021-06-16T22:14:13 | 2021-06-16T22:14:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | frase = 'Completando o desafio EX 10 em python'
print(frase[:18]) | [
"victordsantunes@gmail.com"
] | victordsantunes@gmail.com |
b08e83d8310b1e996d5490e5a676d7b291570857 | 91be5031ae0ed8fa9adc1ad91b2f23d5e780c02e | /app/my_app.py | 2c1d0f7d594f994eef715fdabe1cda9490b0033c | [] | no_license | chadkendziorski/flask_app | 190bbf60a73ffc81b343a4879144b9008dcd8e95 | 5ee0d8f795e09d1fa1afebdd49070c6a8487da05 | refs/heads/master | 2023-08-06T23:08:12.343286 | 2021-09-30T02:28:51 | 2021-09-30T02:28:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | from flask import Flask, render_template, url_for
app = Flask(__name__)
@app.route("/")
@app.route("/home")
def home():
return render_template('home.html')
@app.route("/about")
def about():
return render_template('about.html', title='About')
if __name__ == '__main__':
app.run(debug=True) | [
"btwist540@yahoo.com"
] | btwist540@yahoo.com |
974c211fc0aafd7072d0d9fe4abf58e736c2437e | 9bc228372e586a1f90bb0685c43e744be9638ecd | /19_송지영/session09/blogproject/blogproject/urls.py | 3d9666b58bf08aa36fe65c0a9c7c339174ab6a5b | [
"MIT"
] | permissive | LikeLionSCH/9th_ASSIGNMENT | 3e58862a76e3232aed7e19e8939da23330ff2e22 | c211995ad12f404833ffec7fd80e1229b82a3bfa | refs/heads/master | 2023-07-03T10:27:11.843177 | 2021-08-02T14:52:02 | 2021-08-02T14:52:02 | 379,633,279 | 7 | 18 | MIT | 2021-08-02T14:52:03 | 2021-06-23T14:36:59 | Python | UTF-8 | Python | false | false | 1,054 | py | """blogproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from study import views as s
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('account/', include('account.urls')),
path('study/',include('study.urls')),
]+static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"thdwldud8@gmail.com"
] | thdwldud8@gmail.com |
838ea1d1a76bac8e500e2182ffc6165bcc297aab | 507bd189770e324c9961f5e6b1cc429f8f56013f | /maths.py | a3dfa48a1cb31612731b6c654d800d8cb6c75d11 | [] | no_license | aditijhaa9901/git-tutorial | 48abf9d16273c9101a01a02368955e0f8e7701a5 | f7306368a5a0bca5287efd6c3a47b72c0abb2c81 | refs/heads/master | 2022-11-04T21:40:52.159567 | 2020-06-28T07:04:43 | 2020-06-28T07:04:43 | 275,533,962 | 0 | 0 | null | 2020-06-28T07:45:31 | 2020-06-28T07:45:30 | null | UTF-8 | Python | false | false | 32 | py | a=5
b=4
print(a+b)
print(a-b)
| [
"noreply@github.com"
] | noreply@github.com |
5cab032420aeef10362adc5c232ceff023afc26a | 4aa7c3c3b9b8bd898411a8409b4ac2ede1914914 | /Fileutils.py | 8be4ed73c4e2d0b71f2732c775a20c1410873550 | [
"Apache-2.0"
] | permissive | davidhstocker/Intentsity | c2848867874f84ba6be84516d9b153025f849686 | c8e0c6a5ecfb910bf456f6218a3d2eae3b6b8486 | refs/heads/master | 2021-10-11T23:40:42.665897 | 2021-10-06T19:15:55 | 2021-10-06T19:15:55 | 170,385,686 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,039 | py | '''
Created on June 13, 2018
@author: David Stocker
'''
import re
import os
import codecs
import zipfile
import importlib
import sys
import Graphyne.Graph as Graph
#globals
logType = Graph.logTypes.ENGINE
logLevel = Graph.LogLevel()
tiogaHome = os.path.dirname(os.path.abspath(__file__))
def modulePathToFilePath(modulePath):
splitPath = re.split('\.', modulePath)
filePath = os.path.join(tiogaHome, splitPath[0])
splitPath.remove(splitPath[0])
for fragment in splitPath:
filePath = os.path.join(tiogaHome, fragment)
return filePath
def getModuleFromResolvedPath(fullModuleName):
try:
x = importlib.import_module(fullModuleName)
for fragment in fullModuleName.split('.')[1:]:
x = getattr(x, fragment)
return x
except Exception as e:
unused_errorMsg = "unable to resolve module at path %s" %fullModuleName
raise e
def ensureDirectory(targetDir):
'''Endure that targetDir exists, by creating whatever part of the tree is required '''
firstGoodAncestor = targetDir
badAncestors = []
while not os.access(firstGoodAncestor, os.F_OK):
tempTuple = os.path.split(firstGoodAncestor)
firstGoodAncestor = tempTuple[0]
badAncestors.insert(1, tempTuple[1])
for badAncestor in badAncestors:
targetDir = os.path.join(firstGoodAncestor, badAncestor)
print(("creating %s" %targetDir))
try:
os.mkdir(targetDir)
except OSError as e:
catch = "me"
def getCodePageFromFile(fileURI):
return "utf-8"
# A recursive examiner for package subdirectories
def walkDirectory(workingDir, packagePath):
#Go through the subdirectory and load the files up
#method = sys.modules[__name__] + '.' + 'walkDirectory'
if packagePath is None:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Branch is directly off the root'])
pass
pathSet = {} # A dict object containing all modules. Key = module path. Data = filedata
dirList = os.listdir(workingDir)
#Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Child dirs of package path %s ' % packagePath])
#for nextdir in dirList:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '... %s ' % dir])
#pass
for dirEntity in dirList:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Examining %s' % dirEntity])
trimmedfile = re.split('\.', dirEntity)
if packagePath is not None:
localPackagePath = packagePath + '.' + trimmedfile[0]
else:
localPackagePath = trimmedfile
fileName = os.path.join(workingDir, dirEntity)
#Graph.logQ.put( [logType , logLevel.DEBUG , method , 'package path = %s' % localPackagePath])
#Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Examining %s' % fileName])
#logging.logger.logDebug( method, 'Examining %s' % fileName)
fileData = {}
if (os.path.isdir(fileName)) and (re.search( '.', localPackagePath) is None):
# ensuring that there are no dots in localPackagePath is a workaround to prevent
# the engine from choking on repositories that are in versioning repositories, such as svn
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is a directory' % fileName])
pathSubSet = walkDirectory(fileName, localPackagePath)
pathSet.update(pathSubSet)
elif re.search( '.py', fileName) is not None:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is a python file' % fileName])
pass
elif re.search( '.xml', fileName) is not None:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is an xml file' % fileName])
codepage = getCodePageFromFile(fileName)
fileObj = codecs.open( fileName, "r", codepage )
fileStream = fileObj.read() # Returns a Unicode string from the UTF-8 bytes in the file
fileData[fileStream] = codepage
pathSet[localPackagePath] = fileData
else:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is not a directory, xml or python file and will be ignored' % fileName])
pass
#Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return pathSet
def walkRepository():
method = sys.modules[__name__] + '.' + 'walkRepository'
dataLocation = os.path.join(tiogaHome, 'IntentsityRepository', 'IntentsitySchema')
#Graph.logQ.put( [logType , logLevel.DEBUG , method , 'RML Repository location is %s' % dataLocation])
#Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Ready to start walking the files of the repository'])
#Go through the condition repository directory and load the files up
pathSet = {}
packageList = os.listdir(dataLocation)
for package in packageList:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Examining %s' % package])
fileName = os.path.join(dataLocation, package)
fileData = {}
fileStream = None
trimmedPackage = re.split('\.', package)
packagePath = trimmedPackage[0]
#packages will be zip files. Free modules wll not be
try:
z = zipfile.ZipFile(fileName)
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is a zip archve' % fileName])
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s contains the following files: %s' % (fileName, z.namelist())])
for nextFile in z.namelist():
trimmedfile = re.split('\.', nextFile)
localPackagePath = packagePath + '.' + trimmedfile[0]
#Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Examining %s' % localPackagePath])
try:
#if os.path.isdir(file):
##Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is a directory' % file)
#pathSubSet = walkDirectory(file, localPackagePath)
#pathSet.update(pathSubSet)
if re.search( '.py', nextFile) is not None:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is a python file' % file])
pass
elif re.search( '.xml', nextFile) is not None:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is an xml file' % file])
codepage = getCodePageFromFile(nextFile)
fileObj = z.read(nextFile)
fileStream = str(fileObj, codepage)
fileData[fileStream] = codepage
pathSet[localPackagePath] = fileData
##Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s fileStream = %s' % (fileName, fileStream))
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s codepage = %s' % (fileName, codepage)])
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s packagePath = %s' % (fileName, localPackagePath)])
else:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is not a directory, xml or python file and will be ignored' % file])
pass
except Exception as e:
Graph.logQ.put( [logType , logLevel.WARNING , method , u'Problem reading file %s. "Traceback = %s' % (localPackagePath, e)])
except:
# if the file is not a zip, then we'll get this exception
if os.path.isdir(fileName):
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is a directory' % fileName])
pathSubSet = walkDirectory(fileName, packagePath)
pathSet.update(pathSubSet)
elif re.search( '.xml', fileName) is not None:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is an xml file' % fileName])
codepage = getCodePageFromFile(fileName)
fileObj = codecs.open( fileName, "r", codepage )
fileStream = fileObj.read() # Returns a Unicode string from the UTF-8 bytes in the file
fileData[fileStream] = codepage
pathSet[packagePath] = fileData
##Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s fileStream = %s' % (fileName, fileStream))
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s codepage = %s' % (fileName, codepage)])
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s packagePath = %s' % (fileName, packagePath)])
else:
#Graph.logQ.put( [logType , logLevel.DEBUG , method , '%s is not a directory or xml file and will be ignored' % fileName])
pass
#Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Finished walking directories under %s' % dataLocation])
#Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return pathSet
def defaultCSS():
''' A default CSS stylesheet for formatting HTML generated by Angela Utilities'''
subdivision = "table.subdivision = {border-style:solid}"
tableheader = "thead.tableheader {font-size:1.35em;font-weight:bolder}"
badOVCell = "td.badOVCell {background-color:LightPink}"
goodOVCell = "td.goodOVCell {background-color:LightGreen}"
tableHeaderRow = "th.tableHeaderRow {text-align:center;padding-right:50px}"
badDRow = "tr.badDRow {background-color:LightPink;color:black;font-weight:bold;padding-right:50px;padding-left:10px;padding-top:10px;text-align:top}"
goodDRow = "tr.goodDRow {background-color:white;color:black;padding-right:50px;padding-left:10px;padding-top:10px;text-align:top}"
badOverviewRow = "tr.badOverviewRow {background-color:LightPink;color:black;font-weight:bold;padding-right:10px;padding-left:10px;padding-top:10px;text-align:top}"
goodOverviewRow = "tr.goodOverviewRow {background-color:LightGreen;color:black;padding-right:10px;padding-left:10px;padding-top:10px;text-align:top}"
detailsCell = "td.detailsCell {padding-right:50px;padding-left:10px;padding-top:10px;text-align:top}"
vBlankSpace = "div.vBlankSpace {padding-top:100px}"
hBlankSpace = "div.hBlankSpace {padding-left:100px}"
vAlignment = "div.vAlignment {margin-top:10px}"
defaultCSS = "<!--\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n-->" %(subdivision, tableheader, badOVCell, goodOVCell, badDRow, goodDRow, badOverviewRow, goodOverviewRow, tableHeaderRow, detailsCell, vBlankSpace, hBlankSpace, vAlignment)
return defaultCSS
| [
"david.stocker@sap.com"
] | david.stocker@sap.com |
5afae695f4d0d8c66f3a8d64f55c514f3919824c | a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea | /airflow/providers/microsoft/azure/operators/data_factory.py | 488ccbced070222c2fa5c3d5046514b7ee751015 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ishiis/airflow | 4305794e36b611d01f49e3f2401be3dc49782670 | 292440d54f4db84aaf0c5a98cf5fcf34303f2fa8 | refs/heads/master | 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 | Apache-2.0 | 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null | UTF-8 | Python | false | false | 9,684 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator, BaseOperatorLink, XCom
from airflow.providers.microsoft.azure.hooks.data_factory import (
AzureDataFactoryHook,
AzureDataFactoryPipelineRunException,
AzureDataFactoryPipelineRunStatus,
)
if TYPE_CHECKING:
from airflow.models.taskinstance import TaskInstanceKey
from airflow.utils.context import Context
class AzureDataFactoryPipelineRunLink(BaseOperatorLink):
"""Constructs a link to monitor a pipeline run in Azure Data Factory."""
name = "Monitor Pipeline Run"
def get_link(
self,
operator,
dttm=None,
*,
ti_key: Optional["TaskInstanceKey"] = None,
) -> str:
if ti_key is not None:
run_id = XCom.get_value(key="run_id", ti_key=ti_key)
else:
assert dttm
run_id = XCom.get_one(
key="run_id",
dag_id=operator.dag.dag_id,
task_id=operator.task_id,
execution_date=dttm,
)
conn = BaseHook.get_connection(operator.azure_data_factory_conn_id)
subscription_id = conn.extra_dejson["extra__azure_data_factory__subscriptionId"]
# Both Resource Group Name and Factory Name can either be declared in the Azure Data Factory
# connection or passed directly to the operator.
resource_group_name = operator.resource_group_name or conn.extra_dejson.get(
"extra__azure_data_factory__resource_group_name"
)
factory_name = operator.factory_name or conn.extra_dejson.get(
"extra__azure_data_factory__factory_name"
)
url = (
f"https://adf.azure.com/en-us/monitoring/pipelineruns/{run_id}"
f"?factory=/subscriptions/{subscription_id}/"
f"resourceGroups/{resource_group_name}/providers/Microsoft.DataFactory/"
f"factories/{factory_name}"
)
return url
class AzureDataFactoryRunPipelineOperator(BaseOperator):
"""
Executes a data factory pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureDataFactoryRunPipelineOperator`
:param azure_data_factory_conn_id: The connection identifier for connecting to Azure Data Factory.
:param pipeline_name: The name of the pipeline to execute.
:param wait_for_termination: Flag to wait on a pipeline run's termination. By default, this feature is
enabled but could be disabled to perform an asynchronous wait for a long-running pipeline execution
using the ``AzureDataFactoryPipelineRunSensor``.
:param resource_group_name: The resource group name. If a value is not passed in to the operator, the
``AzureDataFactoryHook`` will attempt to use the resource group name provided in the corresponding
connection.
:param factory_name: The data factory name. If a value is not passed in to the operator, the
``AzureDataFactoryHook`` will attempt to use the factory name name provided in the corresponding
connection.
:param reference_pipeline_run_id: The pipeline run identifier. If this run ID is specified the parameters
of the specified run will be used to create a new run.
:param is_recovery: Recovery mode flag. If recovery mode is set to `True`, the specified referenced
pipeline run and the new run will be grouped under the same ``groupId``.
:param start_activity_name: In recovery mode, the rerun will start from this activity. If not specified,
all activities will run.
:param start_from_failure: In recovery mode, if set to true, the rerun will start from failed activities.
The property will be used only if ``start_activity_name`` is not specified.
:param parameters: Parameters of the pipeline run. These parameters are referenced in a pipeline via
``@pipeline().parameters.parameterName`` and will be used only if the ``reference_pipeline_run_id`` is
not specified.
:param timeout: Time in seconds to wait for a pipeline to reach a terminal status for non-asynchronous
waits. Used only if ``wait_for_termination`` is True.
:param check_interval: Time in seconds to check on a pipeline run's status for non-asynchronous waits.
Used only if ``wait_for_termination`` is True.
"""
template_fields: Sequence[str] = (
"azure_data_factory_conn_id",
"resource_group_name",
"factory_name",
"pipeline_name",
"reference_pipeline_run_id",
"parameters",
)
template_fields_renderers = {"parameters": "json"}
ui_color = "#0678d4"
operator_extra_links = (AzureDataFactoryPipelineRunLink(),)
def __init__(
self,
*,
pipeline_name: str,
azure_data_factory_conn_id: str = AzureDataFactoryHook.default_conn_name,
wait_for_termination: bool = True,
resource_group_name: Optional[str] = None,
factory_name: Optional[str] = None,
reference_pipeline_run_id: Optional[str] = None,
is_recovery: Optional[bool] = None,
start_activity_name: Optional[str] = None,
start_from_failure: Optional[bool] = None,
parameters: Optional[Dict[str, Any]] = None,
timeout: int = 60 * 60 * 24 * 7,
check_interval: int = 60,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.azure_data_factory_conn_id = azure_data_factory_conn_id
self.pipeline_name = pipeline_name
self.wait_for_termination = wait_for_termination
self.resource_group_name = resource_group_name
self.factory_name = factory_name
self.reference_pipeline_run_id = reference_pipeline_run_id
self.is_recovery = is_recovery
self.start_activity_name = start_activity_name
self.start_from_failure = start_from_failure
self.parameters = parameters
self.timeout = timeout
self.check_interval = check_interval
def execute(self, context: "Context") -> None:
self.hook = AzureDataFactoryHook(azure_data_factory_conn_id=self.azure_data_factory_conn_id)
self.log.info("Executing the %s pipeline.", self.pipeline_name)
response = self.hook.run_pipeline(
pipeline_name=self.pipeline_name,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
reference_pipeline_run_id=self.reference_pipeline_run_id,
is_recovery=self.is_recovery,
start_activity_name=self.start_activity_name,
start_from_failure=self.start_from_failure,
parameters=self.parameters,
)
self.run_id = vars(response)["run_id"]
# Push the ``run_id`` value to XCom regardless of what happens during execution. This allows for
# retrieval the executed pipeline's ``run_id`` for downstream tasks especially if performing an
# asynchronous wait.
context["ti"].xcom_push(key="run_id", value=self.run_id)
if self.wait_for_termination:
self.log.info("Waiting for pipeline run %s to terminate.", self.run_id)
if self.hook.wait_for_pipeline_run_status(
run_id=self.run_id,
expected_statuses=AzureDataFactoryPipelineRunStatus.SUCCEEDED,
check_interval=self.check_interval,
timeout=self.timeout,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
):
self.log.info("Pipeline run %s has completed successfully.", self.run_id)
else:
raise AzureDataFactoryPipelineRunException(
f"Pipeline run {self.run_id} has failed or has been cancelled."
)
def on_kill(self) -> None:
if self.run_id:
self.hook.cancel_pipeline_run(
run_id=self.run_id,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
)
# Check to ensure the pipeline run was cancelled as expected.
if self.hook.wait_for_pipeline_run_status(
run_id=self.run_id,
expected_statuses=AzureDataFactoryPipelineRunStatus.CANCELLED,
check_interval=self.check_interval,
timeout=self.timeout,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
):
self.log.info("Pipeline run %s has been cancelled successfully.", self.run_id)
else:
raise AzureDataFactoryPipelineRunException(f"Pipeline run {self.run_id} was not cancelled.")
| [
"noreply@github.com"
] | noreply@github.com |
0455597e32ed947b92e63329538060ec63a7cb7e | 861fdf0e6ee0d12c7fecc1449ff9e878dc5bb4cb | /1 Basics of Neural Network/1.2 Logistic Regression for Binary Classification/3. ProblemSet-Prog1.py | ffc085f046cef5fe035cbece0767da15f414aa43 | [] | no_license | PromitR99/Deep-Learning | 74ef0cb95e329987b7cbf49bfc8c3d27f3ad0025 | f2efefd6c1a5cc75be8918f86d7a21bb9718742a | refs/heads/master | 2022-12-11T14:39:03.742034 | 2020-08-31T08:45:21 | 2020-08-31T08:45:21 | 287,965,679 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | #numpy is the fundamental package for scientific computing with Python.
import numpy as np
#h5py is a common package to interact with a dataset that is stored on an H5 file.
import matplotlib.pyplot as plt
#matplotlib is a famous library to plot graphs in Python.
import h5py
#PIL and scipy are used here to test your model with your own picture at the end.
import scipy
from PIL import Image
from scipy import ndimage
#from lr_utils import load_dataset
#%matplotlib inline
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# Example of a picture
index = 20
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.") | [
"pr18u10260@btech.nitdgp.ac.in"
] | pr18u10260@btech.nitdgp.ac.in |
656bb960ef1d2fd531df0a667c4d97135b95bcb1 | dd5ee6d1e88527cd22f1b64443320ba8ef751b59 | /rlcard3/envs/mocsar.py | 2b3edd2b9df42da117998cd3dd3b41bf88e15885 | [
"MIT"
] | permissive | sorata2894/rlcard3 | 42a2587e3ab00f3a33c684fb76efbc334a835359 | e9bbd36b789e670f96622a3a2ba8327f0d897561 | refs/heads/master | 2022-11-05T00:08:10.809055 | 2020-06-11T03:28:41 | 2020-06-11T03:28:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,854 | py | """
Mocsár Environment
File name: envs/gmocsar.py
Author: József Varga
Date created: 3/27/2020
"""
from rlcard3 import models
from rlcard3.envs.env import Env
from rlcard3.games.mocsar.game import MocsarGame as Game
from rlcard3.games.mocsar.utils import action_to_string, \
string_to_action, payoff_func, print_state, encode_to_obs
from typing import List
class MocsarEnv(Env):
""" GinRummy Environment
"""
state_shape: List[int] # Dimensions of state numpy array
def __init__(self, config):
self.game = Game()
self.state_shape = [3, 9, 14]
super().__init__(config=config)
def _extract_state(self, state): # 200213 don't use state ???
"""
Extract useful information from state for RL. Must be implemented in the child class.
numpy(3,9,14)
Menaing: x,y,z
z: 1/0, 1 means, the hand contains y amount of card.
y: rank of cards in some hand.
x=0: player's hand
x=1: others hand
x=2: target
x>2: history, not implemented....
:param state: dict, the raw state
:return: dict: 'obs':the extracted state, numpy.array, 'legal_actions': list of actions
"""
obs = encode_to_obs(state=state)
extracted_state = {'obs': obs,
'legal_actions': self._get_legal_actions(),
'is_extract': True # State is extracted>
}
return extracted_state
def get_payoffs(self):
"""
Get the payoffs of players. Must be implemented in the child class.
First one scores 1, Last one scores 0. Other ith player scores 0.5 ^^i
:return: A list of payoffs for each player.
"""
num_players = self.game.num_players
# winnersben a győzelmek sorrendje van
# List indexed by PlayerID instead of OrderId, pl [1,3,2,0]
win_id = [self.game.players.winners.index(i) for i in range(num_players)]
# win_id-ben, meg az, hogy az adott indexű játékos hányadik, pl [3,0,2,1], mivel a 0-ik indexű játékos utolsó=3
payoffs = [payoff_func(position=win_id[i], num_players=num_players) for i in range(num_players)]
return payoffs
def _decode_action(self, action_id):
"""
Decode Action id to the action in the game.
:param action_id: The id of the action
:return: The action that will be passed to the game engine.
"""
return action_to_string(action=action_id)
def _get_legal_actions(self):
"""
Get all legal actions for current state.
:return: A list of legal actions' id.
"""
return [string_to_action(action) for action in self.game.get_legal_actions()]
def _load_model(self):
"""
Load pretrained/rule model
:return: A Model object
"""
return models.load('mocsar-rule-v1', num_players=self.game.get_player_num())
def print_state(self, player: int):
"""
Print out the state of a given player
:param player: Player Id to print
"""
state = self.game.get_state(player)
print_state(state)
def print_result(self, player):
"""
Print the game result when the game is over
:param player: Player Id to print
"""
payoffs = self.get_payoffs()
for player_ in self.game.players.players:
print(f"Player {player_.__str__()} : points {payoffs[player_.player_id]}")
@staticmethod
def print_action(action: str):
"""
Print out an action in a nice form
:param action: Code of the action
"""
if type(action) is tuple:
action, _ = action
print(f"\nAction code:{string_to_action(action)}, action:{action}")
| [
"31209755+cogitoergoread@users.noreply.github.com"
] | 31209755+cogitoergoread@users.noreply.github.com |
76faf3fd6ac8be77e3a1174cf85ff9d069e4638a | 96c970ebacd9ade1493f4d01537005788b43a49b | /pychron/experiment/tasks/experiment_actions.py | 1449ab2015d4a6e355f16e188813b9cb6753b314 | [
"Apache-2.0"
] | permissive | OSUPychron/pychron | d2da9051b68024200d0009de634da810ccef2a0d | fe0ba9daff9548fa8bebab26db66a1cefff7c1d6 | refs/heads/master | 2021-01-14T12:47:26.389887 | 2015-12-18T22:27:02 | 2015-12-18T22:27:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,537 | py | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.message_dialog import warning
from pyface.tasks.task_window_layout import TaskWindowLayout
from pychron.core.helpers.filetools import get_path
from pychron.envisage.tasks.actions import PAction as Action, PTaskAction as TaskAction
# ============= standard library imports ========================
import os
# ============= local library imports ==========================
from pychron.envisage.resources import icon
from pychron.paths import paths
EXP_ID = 'pychron.experiment.task'
class ResetSystemHealthAction(Action):
name = 'Reset System Health'
dname = 'Reset System Health'
def perform(self, event):
from pychron.experiment.health.series import reset_system_health_series
reset_system_health_series()
class ExperimentAction(Action):
task_id = EXP_ID
# def _get_experimentor(self, event):
# return self._get_service(event, 'pychron.experiment.experimentor.Experimentor')
def _get_service(self, event, name):
app = event.task.window.application
return app.get_service(name)
def _open_editor(self, event):
application = event.task.window.application
application.open_task(self.task_id)
class ConfigureEditorTableAction(TaskAction):
name = 'Configure Experiment Table'
dname = 'Configure Experiment Table'
method = 'configure_experiment_table'
class BasePatternAction(TaskAction):
_enabled = None
def _task_changed(self):
if self.task:
if hasattr(self.task, 'open_pattern'):
enabled = True
if self.enabled_name:
if self.object:
enabled = bool(self._get_attr(self.object,
self.enabled_name, False))
if enabled:
self._enabled = True
else:
self._enabled = False
def _enabled_update(self):
"""
reimplement ListeningAction's _enabled_update
"""
if self.enabled_name:
if self.object:
self.enabled = bool(self._get_attr(self.object,
self.enabled_name, False))
else:
self.enabled = False
elif self._enabled is not None:
self.enabled = self._enabled
else:
self.enabled = bool(self.object)
class OpenPatternAction(BasePatternAction):
name = 'Open Pattern...'
dname = 'Open Pattern'
method = 'open_pattern'
class NewPatternAction(BasePatternAction):
name = 'New Pattern...'
dname = 'New Pattern'
method = 'new_pattern'
class SendTestNotificationAction(TaskAction):
name = 'Send Test Notification'
dname = 'Send Test Notification'
method = 'send_test_notification'
# accelerator = 'Ctrl+Shift+N'
class DeselectAction(TaskAction):
name = 'Deselect'
dname = 'Deselect'
method = 'deselect'
tooltip = 'Deselect the selected run(s)'
id = 'pychron.deselect'
class UndoAction(TaskAction):
name = 'Undo'
dname = 'Undo'
method = 'undo'
accelerator = 'Ctrl+Z'
class QueueConditionalsAction(Action):
name = 'Edit Queue Conditionals'
dname = 'Edit Queue Conditionals'
def perform(self, event):
task = event.task
if hasattr(task, 'edit_queue_conditionals'):
# edit the current queue's conditionals
task.edit_queue_conditionals()
else:
# choose a conditionals file to edit
from pychron.experiment.conditional.conditionals_edit_view import edit_conditionals
dnames = None
spec = task.application.get_service(
'pychron.spectrometer.base_spectrometer_manager.BaseSpectrometerManager')
if spec:
dnames = spec.spectrometer.detector_names
edit_conditionals(None, detectors=dnames, app=task.application)
class SystemConditionalsAction(Action):
    """Open an editor for the system-wide conditionals file."""
    name = 'Edit System Conditionals'
    dname = 'Edit System Conditionals'

    def perform(self, event):
        from pychron.experiment.conditional.conditionals_edit_view import edit_conditionals
        task = event.task
        manager = task.application.get_service(
            'pychron.spectrometer.base_spectrometer_manager.BaseSpectrometerManager')
        detector_names = manager.spectrometer.detector_names if manager else None
        path = get_path(paths.spectrometer_dir, '.*conditionals', ('.yaml', '.yml'))
        if not path:
            warning(None, 'No system conditionals file at {}'.format(path))
        else:
            edit_conditionals(path, detectors=detector_names, app=task.application)
def open_experiment(event, path):
    """Open *path* in the experiment task, creating its window if needed."""
    task = event.task
    if task.id == EXP_ID:
        task.open(path)
    else:
        app = task.window.application
        exp_task = app.get_task(EXP_ID, False)
        if exp_task.open(path):
            exp_task.window.open()
# class QueueAction(ExperimentAction):
# def _open_experiment(self, event, path=None):
# open_experiment(event, path)
class NewExperimentQueueAction(ExperimentAction):
    """Create a new experiment queue, opening the experiment task if needed."""
    description = 'Create a new experiment queue'
    name = 'New Experiment'
    dname = 'New Experiment'
    id = 'pychron.new_experiment'

    def perform(self, event):
        task = event.task
        if task.id == EXP_ID:
            task.new()
            return
        application = task.window.application
        window = application.create_window(TaskWindowLayout(EXP_ID))
        if window.active_task.new():
            window.open()
class OpenExperimentHistoryAction(Action):
    """Show the launch-history dialog and open the selected experiment."""
    name = 'Experiment Launch History'
    dname = 'Experiment Launch History'

    def perform(self, event):
        from pychron.experiment.experiment_launch_history import ExperimentLaunchHistory
        history = ExperimentLaunchHistory()
        history.load()
        info = history.edit_traits()
        if info.result and history.selected:
            open_experiment(event, history.selected.path)
class OpenLastExperimentQueueAction(ExperimentAction):
    """Re-open the most recently executed experiment queue.

    The last experiment's path is persisted in ``paths.last_experiment``;
    the action is disabled when that record is missing or stale.
    """
    description = 'Open last executed experiment'
    name = 'Open Last Experiment...'
    dname = 'Open Last Experiment'
    id = 'pychron.open_last_experiment'

    def __init__(self, *args, **kw):
        super(OpenLastExperimentQueueAction, self).__init__(*args, **kw)
        # Grey the menu item out when no recoverable experiment exists.
        self.enabled = bool(self._get_last_experiment())

    def perform(self, event):
        path = self._get_last_experiment()
        if path:
            open_experiment(event, path)
        else:
            warning(None, 'No last experiment available')

    def _get_last_experiment(self):
        """Return the recorded last-experiment path if it still exists, else None."""
        if os.path.isfile(paths.last_experiment):
            with open(paths.last_experiment, 'r') as rfile:
                # NOTE(review): readline() keeps any trailing newline; confirm
                # the writer strips it, otherwise isfile() would always fail.
                path = rfile.readline()
                if os.path.isfile(path):
                    return path
class OpenExperimentQueueAction(ExperimentAction):
    # Opens a fixed experiment file in the experiment task.
    description = 'Open experiment'
    name = 'Open Experiment...'
    dname = 'Open Experiment'
    image = icon('project-open')
    id = 'pychron.open_experiment'
    def perform(self, event):
        # NOTE(review): hard-coded developer path; should prompt for a file or
        # come from configuration -- confirm before shipping.
        path = '/Users/ross/Pychron_dev/experiments/Current Experiment.txt'
        # path = '/Users/ross/Pychrondata_dev/experiments/test.txt'
        open_experiment(event, path)
# ===============================================================================
# Utilities
# ===============================================================================
class SignalCalculatorAction(ExperimentAction):
    """Open the signal-calculator utility window."""
    name = 'Signal Calculator'
    dname = 'Signal Calculator'

    def perform(self, event):
        calculator = self._get_service(
            event, 'pychron.experiment.signal_calculator.SignalCalculator')
        event.task.window.application.open_view(calculator)
class ResetQueuesAction(TaskAction):
    # Delegates to the task's ``reset_queues`` method.
    method = 'reset_queues'
    name = 'Reset Queues'
    dname = 'Reset Queues'
class LastAnalysisRecoveryAction(Action):
    # Attempts to recover the most recent analysis (e.g. after a crash).
    name = 'Recover Last Analysis'
    dname = 'Recover Last Analysis'
    def perform(self, event):
        from pychron.experiment.analysis_recovery import AnalysisRecoverer
        a = AnalysisRecoverer()
        a.recover_last_analysis()
# ============= EOF ====================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
682fb58ece5814588e15685a98e46a2f6dfcc2f7 | 7f2ce7e465b434dae7158f0a53b247ba74e43024 | /powergenome/externals/cpi/cpi/parsers.py | 473e8b16a334937b9c693268400da4ac0e5b67c1 | [
"MIT",
"CC-BY-4.0"
] | permissive | nspatank/PowerGenome | 14ba0f2a61535f6e125790f91e91a67a6b61dfda | 780e6ab5aa56da9abbbaadb6230af6304d9ecc06 | refs/heads/master | 2023-06-24T08:54:45.355887 | 2021-07-22T21:02:28 | 2021-07-22T21:02:28 | 273,345,726 | 0 | 1 | MIT | 2021-07-22T21:02:29 | 2020-06-18T21:42:46 | Python | UTF-8 | Python | false | false | 6,364 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Parse and prepare the Consumer Price Index (CPI) dataset.
"""
import os
import sqlite3
import logging
from .models import MappingList, SeriesList
from .models import Area, Item, Period, Periodicity, Index, Series
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class BaseParser(object):
    """Common plumbing for the CPI parsers: read raw BLS tables from SQLite."""
    # The bundled database lives next to this module.
    THIS_DIR = os.path.dirname(__file__)

    def get_file(self, file):
        """
        Returns the CPI data file provided as a list of dictionaries.

        Each row of the table named *file* becomes a dict keyed by the
        table's column names.
        """
        # Connect to database
        db_path = os.path.join(self.THIS_DIR, "cpi.db")
        conn = sqlite3.connect(db_path)
        try:
            cursor = conn.cursor()
            try:
                # Query this file
                query = cursor.execute('SELECT * FROM "{}"'.format(file))
                columns = [d[0] for d in query.description]
                return [dict(zip(columns, row)) for row in query.fetchall()]
            finally:
                cursor.close()
        finally:
            # Close even when the query raises (e.g. unknown table name) so a
            # failed call does not leak the connection.
            conn.close()
class ParseArea(BaseParser):
    """
    Parses the raw list of CPI areas.
    """
    def parse(self):
        """
        Returns a list Area objects.
        """
        logger.debug("Parsing area file")
        areas = MappingList()
        for record in self.get_file("cu.area"):
            areas.append(Area(record["area_code"], record["area_name"]))
        return areas
class ParseItem(BaseParser):
    """
    Parses the raw list of CPI items.
    """
    def parse(self):
        """
        Returns a list Item objects.
        """
        logger.debug("Parsing item file")
        items = MappingList()
        for record in self.get_file("cu.item"):
            items.append(Item(record["item_code"], record["item_name"]))
        return items
class ParsePeriod(BaseParser):
    """
    Parses the raw list of CPI periods.
    """
    def parse(self):
        """
        Returns a list Period objects.
        """
        logger.debug("Parsing period file")
        periods = MappingList()
        for record in self.get_file("cu.period"):
            periods.append(Period(record["period"],
                                  record["period_abbr"],
                                  record["period_name"]))
        return periods
class ParsePeriodicity(BaseParser):
    """
    Parses the raw list of CPI periodicities.
    """
    def parse(self):
        """
        Returns a list Periodicity objects.
        """
        logger.debug("Parsing periodicity file")
        periodicities = MappingList()
        for record in self.get_file("cu.periodicity"):
            periodicities.append(Periodicity(record["periodicity_code"],
                                             record["periodicity_name"]))
        return periodicities
class ParseSeries(BaseParser):
    """
    Parses the raw list of CPI series from the BLS.

    A series combines a survey, seasonality flag, periodicity, area and item;
    `parse` returns the series list with their index values attached.
    """
    # Human-readable names for the two-letter survey prefix of a series id.
    SURVEYS = {
        "CU": "All urban consumers",
        "CW": "Urban wage earners and clerical workers",
    }
    # Data tables to load; only the "current" table is enabled by default.
    FILE_LIST = [
        "cu.data.0.Current",
        # "cu.data.1.AllItems",
        # "cu.data.2.Summaries",
        # "cu.data.3.AsizeNorthEast",
        # "cu.data.4.AsizeNorthCentral",
        # "cu.data.5.AsizeSouth",
        # "cu.data.6.AsizeWest",
        # "cu.data.7.OtherNorthEast",
        # "cu.data.8.OtherNorthCentral",
        # "cu.data.9.OtherSouth",
        # "cu.data.10.OtherWest",
        # "cu.data.11.USFoodBeverage",
        # "cu.data.12.USHousing",
        # "cu.data.13.USApparel",
        # "cu.data.14.USTransportation",
        # "cu.data.15.USMedical",
        # "cu.data.16.USRecreation",
        # "cu.data.17.USEducationAndCommunication",
        # "cu.data.18.USOtherGoodsAndServices",
        # "cu.data.19.PopulationSize",
        # "cu.data.20.USCommoditiesServicesSpecial"
    ]
    def __init__(self, periods=None, periodicities=None, areas=None, items=None):
        # Lookup tables are parsed on demand when not supplied by the caller.
        self.periods = periods or ParsePeriod().parse()
        self.periodicities = periodicities or ParsePeriodicity().parse()
        self.areas = areas or ParseArea().parse()
        self.items = items or ParseItem().parse()
    def parse_id(self, id):
        # Split a BLS series id into its fixed-width components.
        # (`id` shadows the builtin; kept for interface compatibility.)
        return dict(
            survey_code=id[:2],
            seasonal_code=id[2:3],
            periodicity_code=id[3:4],
            area_code=id[4:8],
            item_code=id[8:],
        )
    def parse(self):
        """Parse the series table, attach index values, and return the list."""
        self.series_list = self.parse_series()
        self.parse_indexes()
        return self.series_list
    def parse_series(self):
        """
        Returns a list Series objects.
        """
        logger.debug("Parsing series file")
        object_list = SeriesList(
            periodicities=self.periodicities, areas=self.areas, items=self.items
        )
        for row in self.get_file("cu.series"):
            parsed_id = self.parse_id(row["series_id"])
            obj = Series(
                row["series_id"],
                row["series_title"],
                self.SURVEYS[parsed_id["survey_code"]],
                row["seasonal"] == "S",
                self.periodicities.get_by_id(row["periodicity_code"]),
                self.areas.get_by_id(row["area_code"]),
                self.items.get_by_id(row["item_code"]),
            )
            object_list.append(obj)
        return object_list
    def parse_indexes(self):
        """Load index values from every enabled data table onto the series."""
        logger.debug("Parsing index files")
        # Loop through all the files ...
        for file in self.FILE_LIST:
            # ... and for each file ...
            for row in self.get_file(file):
                # Get the series
                series = self.series_list.get_by_id(row["series_id"])
                # Create an object
                index = Index(
                    series,
                    int(row["year"]),
                    self.periods.get_by_id(row["period"]),
                    float(row["value"]),
                )
                # If the value has already been loaded ...
                if index.date in series._indexes[index.period.type]:
                    # ... verify this value matches what we have ...
                    assert index == series._indexes[index.period.type][index.date]
                else:
                    # ... and if the series doesn't have the index yet, add it.
                    series._indexes[index.period.type][index.date] = index
| [
"greg.schivley@gmail.com"
] | greg.schivley@gmail.com |
008383cce6aa8a6910ad5b4da236d93f9721d8ec | 3ff40164604372340944477eaace1ecaaf755de4 | /Directory/www/views.py | 276364e1a5582872043631c518d7977955dce681 | [] | no_license | sigkillchris/djangoteamapp | 8eaa497963a93ac3206c74325a9356e27fc4dc73 | fc0e339c0b8cfcd0d5867aaff0cd82ce6cb901cb | refs/heads/master | 2021-08-28T00:28:47.246947 | 2017-12-10T21:17:21 | 2017-12-10T21:17:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py |
from django.shortcuts import render
from . models import Person
# Create your views here.
def index(request):
    """Render the directory index page with every Person record."""
    people = Person.objects.all()
    context = {'people': people}
    return render(request, 'index.html', context)
| [
"cvg.developer@gmail.com"
] | cvg.developer@gmail.com |
66dfe6b365de0000540bc736f3ffb564d3fe2321 | 8c7138f50bdf002e6bd5282eafc7cc6b5976110a | /twitter/cmdline.py | dcc5255fa1f3b55dcb201e5a166e7604d1a72e6a | [] | no_license | lgrs05/bigdata-project2 | d11f1a1b982d15c2df4177e1c8ec7647003893f4 | fefb627854eb60f8f1157d7bc05c3e7895e4bc23 | refs/heads/master | 2020-12-03T04:14:51.696092 | 2017-06-30T03:09:43 | 2017-06-30T03:09:43 | 95,838,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,304 | py | # encoding: utf-8
"""
USAGE:
twitter [action] [options]
ACTIONS:
authorize authorize the command-line tool to interact with Twitter
follow follow a user
friends get latest tweets from your friends (default action)
help print this help text that you are currently reading
leave stop following a user
list get list of a user's lists; give a list name to get
tweets from that list
mylist get list of your lists; give a list name to get tweets
from that list
pyprompt start a Python prompt for interacting with the twitter
object directly
replies get latest replies to you
search search twitter (Beware: octothorpe, escape it)
set set your twitter status
shell login to the twitter shell
rate get your current rate limit status (remaining API reqs)
OPTIONS:
-r --refresh run this command forever, polling every once
in a while (default: every 5 minutes)
-R --refresh-rate <rate> set the refresh rate (in seconds)
-f --format <format> specify the output format for status updates
-c --config <filename> read username and password from given config
file (default ~/.twitter)
-l --length <count> specify number of status updates shown
(default: 20, max: 200)
-t --timestamp show time before status lines
-d --datestamp show date before status lines
--no-ssl use less-secure HTTP instead of HTTPS
--oauth <filename> filename to read/store oauth credentials to
FORMATS for the --format option
default one line per status
verbose multiple lines per status, more verbose status info
json raw json data from the api on each line
urls nothing but URLs
ansi ansi colour (rainbow mode)
CONFIG FILES
The config file should be placed in your home directory and be named .twitter.
It must contain a [twitter] header, and all the desired options you wish to
set, like so:
[twitter]
format: <desired_default_format_for_output>
prompt: <twitter_shell_prompt e.g. '[cyan]twitter[R]> '>
OAuth authentication tokens are stored in the file .twitter_oauth in your
home directory.
"""
from __future__ import print_function
try:
input = __builtins__['raw_input']
except (AttributeError, KeyError):
pass
CONSUMER_KEY = 'uS6hO2sV6tDKIOeVjhnFnQ'
CONSUMER_SECRET = 'MEYTOS97VvlHX7K1rwHPEqVpTSqZ71HtvoK4sVuYk'
from getopt import gnu_getopt as getopt, GetoptError
from getpass import getpass
import json
import locale
import os.path
import re
import string
import sys
import time
try:
from ConfigParser import SafeConfigParser
except ImportError:
from configparser import ConfigParser as SafeConfigParser
import datetime
try:
from urllib.parse import quote
except ImportError:
from urllib2 import quote
try:
import HTMLParser
except ImportError:
import html.parser as HTMLParser
import webbrowser
from .api import Twitter, TwitterError
from .oauth import OAuth, write_token_file, read_token_file
from .oauth_dance import oauth_dance
from . import ansi
from .util import smrt_input, printNicely
OPTIONS = {
'action': 'friends',
'refresh': False,
'refresh_rate': 600,
'format': 'default',
'prompt': '[cyan]twitter[R]> ',
'config_filename': os.environ.get('HOME', os.environ.get('USERPROFILE', '')) + os.sep + '.twitter',
'oauth_filename': os.environ.get('HOME', os.environ.get('USERPROFILE', '')) + os.sep + '.twitter_oauth',
'length': 20,
'timestamp': False,
'datestamp': False,
'extra_args': [],
'secure': True,
'invert_split': False,
'force-ansi': False,
}
gHtmlParser = HTMLParser.HTMLParser()
hashtagRe = re.compile(r'(?P<hashtag>#\S+)')
profileRe = re.compile(r'(?P<profile>\@\S+)')
ansiFormatter = ansi.AnsiCmd(False)
def parse_args(args, options):
    """Parse command-line *args* into the *options* dict (mutated in place).

    The first non-option argument becomes ``options['action']`` (unless help
    was requested) and the rest become ``options['extra_args']``.
    """
    long_opts = ['help', 'format=', 'refresh', 'oauth=',
                 'refresh-rate=', 'config=', 'length=', 'timestamp',
                 'datestamp', 'no-ssl', 'force-ansi']
    short_opts = "e:p:f:h?rR:c:l:td"
    opts, extra_args = getopt(args, short_opts, long_opts)
    if extra_args and hasattr(extra_args[0], 'decode'):
        # Python 2 passes byte strings; decode them with the terminal encoding.
        encoding = locale.getpreferredencoding()
        extra_args = [arg.decode(encoding) for arg in extra_args]
    for opt, arg in opts:
        # Value-carrying options first, then flags.
        if opt in ('-f', '--format'):
            options['format'] = arg
        elif opt in ('-c', '--config'):
            options['config_filename'] = arg
        elif opt == '--oauth':
            options['oauth_filename'] = arg
        elif opt in ('-R', '--refresh-rate'):
            options['refresh_rate'] = int(arg)
        elif opt in ('-l', '--length'):
            options["length"] = int(arg)
        elif opt in ('-r', '--refresh'):
            options['refresh'] = True
        elif opt in ('-t', '--timestamp'):
            options["timestamp"] = True
        elif opt in ('-d', '--datestamp'):
            options["datestamp"] = True
        elif opt == '--no-ssl':
            options['secure'] = False
        elif opt == '--force-ansi':
            options['force-ansi'] = True
        elif opt in ('-?', '-h', '--help'):
            options['action'] = 'help'
    if extra_args and options.get('action') != 'help':
        options['action'] = extra_args[0]
        options['extra_args'] = extra_args[1:]
def get_time_string(status, options, format="%a %b %d %H:%M:%S +0000 %Y"):
    """Return a local-time prefix for a status line, or '' when disabled.

    The ``created_at`` field (UTC) is shifted into local time; the
    ``timestamp``/``datestamp`` options select the rendered fields.
    """
    show_time = options["timestamp"]
    show_date = options["datestamp"]
    parsed = time.strptime(status['created_at'], format)
    # Offset of local time from UTC, preferring the DST offset when defined.
    utc_offset = time.altzone if time.daylight else time.timezone
    local_dt = datetime.datetime(*parsed[:-3]) - datetime.timedelta(
        seconds=utc_offset)
    local_tuple = local_dt.timetuple()
    if show_time and show_date:
        return time.strftime("%Y-%m-%d %H:%M:%S ", local_tuple)
    if show_time:
        return time.strftime("%H:%M:%S ", local_tuple)
    if show_date:
        return time.strftime("%Y-%m-%d ", local_tuple)
    return ""
def reRepl(m):
    """re.sub callback: wrap a matched hashtag/@profile in ANSI codes."""
    decorations = {
        'clear': ansiFormatter.cmdReset(),
        'hashtag': ansiFormatter.cmdBold(),
        'profile': ansiFormatter.cmdUnderline(),
    }
    result = None
    try:
        group_name = m.lastgroup
        text = m.group(group_name)
        if text:
            result = '%s%s%s' % (decorations[group_name], text,
                                 decorations['clear'])
    except IndexError:
        pass
    return result
def replaceInStatus(status):
    """Unescape HTML entities and ANSI-highlight hashtags and @mentions."""
    text = gHtmlParser.unescape(status)
    text = hashtagRe.sub(reRepl, text)
    return profileRe.sub(reRepl, text)
class StatusFormatter(object):
    """Default renderer: one line with author and unescaped text."""
    def __call__(self, status, options):
        prefix = get_time_string(status, options)
        screen_name = status['user']['screen_name']
        text = gHtmlParser.unescape(status['text'])
        return "%s%s %s" % (prefix, screen_name, text)
class AnsiStatusFormatter(object):
    # One-line renderer that colours each author consistently (rainbow mode).
    def __init__(self):
        self._colourMap = ansi.ColourMap()
    def __call__(self, status, options):
        # Pick a stable colour per screen name so authors stay distinguishable.
        colour = self._colourMap.colourFor(status['user']['screen_name'])
        return ("%s%s%s%s %s" % (
            get_time_string(status, options),
            ansiFormatter.cmdColour(colour), status['user']['screen_name'],
            ansiFormatter.cmdReset(), replaceInStatus(status['text'])))
class VerboseStatusFormatter(object):
    """Multi-line renderer with author, location and creation time."""
    def __call__(self, status, options):
        user = status['user']
        body = gHtmlParser.unescape(status['text'])
        return "-- %s (%s) on %s\n%s\n" % (
            user['screen_name'], user['location'], status['created_at'], body)
class JSONStatusFormatter(object):
    # Emits each raw API status as one JSON line.
    def __call__(self, status, options):
        # NOTE(review): mutates the caller's status dict in place -- confirm
        # no later consumer depends on the original escaped text.
        status['text'] = gHtmlParser.unescape(status['text'])
        return json.dumps(status)
class URLStatusFormatter(object):
    """Print only the URLs contained in each status (one per line)."""
    urlmatch = re.compile(r'https?://\S+')

    def __call__(self, status, options):
        found = self.urlmatch.findall(status['text'])
        if not found:
            return ""
        return '\n'.join(found)
class ListsFormatter(object):
    """One line per list: padded name plus optional description."""
    def __call__(self, list):
        if list['description']:
            line = "%-30s (%s)" % (list['name'], list['description'])
        else:
            line = "%-30s" % (list['name'])
        return line + "\n"
class ListsVerboseFormatter(object):
    """Multi-line list renderer including member count and mode."""
    def __call__(self, list):
        template = "%-30s\n description: %s\n members: %s\n mode:%s\n"
        return template % (list['name'], list['description'],
                           list['member_count'], list['mode'])
class AnsiListsFormatter(object):
    # Coloured list renderer; each list name gets a stable colour.
    def __init__(self):
        self._colourMap = ansi.ColourMap()
    def __call__(self, list):
        colour = self._colourMap.colourFor(list['name'])
        return ("%s%-15s%s %s" % (
            ansiFormatter.cmdColour(colour), list['name'],
            ansiFormatter.cmdReset(), list['description']))
class AdminFormatter(object):
    """Human-readable confirmation line for follow/leave actions."""
    def __call__(self, action, user):
        who = "%s (%s)" % (user['screen_name'], user['name'])
        if action == "follow":
            return "You are now following %s.\n" % (who)
        return "You are no longer following %s.\n" % (who)
class VerboseAdminFormatter(object):
    """Verbose single-line confirmation for follow/leave actions."""
    def __call__(self, action, user):
        verb = "Following" if action == "follow" else "Leaving"
        return "-- %s: %s (%s): %s" % (
            verb, user['screen_name'], user['name'], user['url'])
class SearchFormatter(object):
    """One-line renderer for search results (search API date format)."""
    def __call__(self, result, options):
        prefix = get_time_string(result, options, "%a, %d %b %Y %H:%M:%S +0000")
        return "%s%s %s" % (prefix, result['from_user'], result['text'])
class VerboseSearchFormatter(SearchFormatter):
    pass  # Default to the regular one
class URLSearchFormatter(object):
    """Print only the URLs contained in each search result."""
    urlmatch = re.compile(r'https?://\S+')

    def __call__(self, result, options):
        found = self.urlmatch.findall(result['text'])
        return '\n'.join(found) if found else ""
class AnsiSearchFormatter(object):
    # Coloured search-result renderer; each author gets a stable colour.
    def __init__(self):
        self._colourMap = ansi.ColourMap()
    def __call__(self, result, options):
        colour = self._colourMap.colourFor(result['from_user'])
        return ("%s%s%s%s %s" % (
            get_time_string(result, options, "%a, %d %b %Y %H:%M:%S +0000"),
            ansiFormatter.cmdColour(colour), result['from_user'],
            ansiFormatter.cmdReset(), result['text']))
_term_encoding = None
def get_term_encoding():
    """Return the terminal encoding from $LANG, cached after the first call.

    Falls back to UTF-8 when $LANG carries no codeset suffix.
    """
    global _term_encoding
    if not _term_encoding:
        lang_parts = os.getenv('LANG', 'unknown.UTF-8').split('.')
        _term_encoding = lang_parts[1] if lang_parts[1:] else 'UTF-8'
    return _term_encoding
# Registry of formatter classes, keyed first by action type ('status',
# 'admin', 'search', 'lists') and then by the value of the --format option.
formatters = {}
status_formatters = {
    'default': StatusFormatter,
    'verbose': VerboseStatusFormatter,
    'json': JSONStatusFormatter,
    'urls': URLStatusFormatter,
    'ansi': AnsiStatusFormatter
}
formatters['status'] = status_formatters
# Most formats share the plain follow/leave confirmation renderer.
admin_formatters = {
    'default': AdminFormatter,
    'verbose': VerboseAdminFormatter,
    'urls': AdminFormatter,
    'ansi': AdminFormatter
}
formatters['admin'] = admin_formatters
search_formatters = {
    'default': SearchFormatter,
    'verbose': VerboseSearchFormatter,
    'urls': URLSearchFormatter,
    'ansi': AnsiSearchFormatter
}
formatters['search'] = search_formatters
# 'urls' is meaningless for lists; get_formatter rejects the None entry.
lists_formatters = {
    'default': ListsFormatter,
    'verbose': ListsVerboseFormatter,
    'urls': None,
    'ansi': AnsiListsFormatter
}
formatters['lists'] = lists_formatters
def get_formatter(action_type, options):
    """Instantiate the formatter registered for *action_type* and --format.

    Raises TwitterError when either lookup fails.
    """
    by_format = formatters.get(action_type)
    if not by_format:
        raise TwitterError(
            "There was an error finding a class of formatters for your type (%s)"
            % (action_type))
    formatter_cls = by_format.get(options['format'])
    if not formatter_cls:
        raise TwitterError(
            "Unknown formatter '%s' for status actions" % (options['format']))
    return formatter_cls()
class Action(object):
    # Base class: dispatches the configured action and offers a yes/no prompt.
    def ask(self, subject='perform this action', careful=False):
        '''
        Ask the user via `raw_input` whether `subject` should be performed.
        When `careful`, the default answer is NO, otherwise YES.
        Returns the user answer as `True` or `False`.
        '''
        sample = '(y/N)'
        if not careful:
            sample = '(Y/n)'
        prompt = 'You really want to %s %s? ' % (subject, sample)
        try:
            answer = input(prompt).lower()
            if careful:
                return answer in ('yes', 'y')
            else:
                return answer not in ('no', 'n')
        except EOFError:
            print(file=sys.stderr)  # Put Newline since Enter was never pressed
            # TODO:
            # Figure output why on OS X the raw_input keeps raising
            # EOFError and is never able to reset and get more input
            # Hint: Look at how IPython implements their console
            default = True
            if careful:
                default = False
            return default
    def __call__(self, twitter, options):
        # Look up the concrete action; unknown names fall through to
        # NoSuchAction, which raises with a helpful message.
        action = actions.get(options['action'], NoSuchAction)()
        try:
            doAction = lambda : action(twitter, options)
            # Only status-producing actions support polling via --refresh.
            if (options['refresh'] and isinstance(action, StatusAction)):
                while True:
                    doAction()
                    sys.stdout.flush()
                    time.sleep(options['refresh_rate'])
            else:
                doAction()
        except KeyboardInterrupt:
            print('\n[Keyboard Interrupt]', file=sys.stderr)
            pass
class NoSuchActionError(Exception):
    # Raised when the requested command-line action is unknown.
    pass
class NoSuchAction(Action):
    # Fallback action: always fails with a NoSuchActionError.
    def __call__(self, twitter, options):
        raise NoSuchActionError("No such action: %s" % (options['action']))
class StatusAction(Action):
    """Base for actions that fetch statuses and print them one per line."""
    def __call__(self, twitter, options):
        formatter = get_formatter('status', options)
        for status in self.getStatuses(twitter, options):
            line = formatter(status, options)
            if line.strip():
                printNicely(line)
class SearchAction(Action):
    """Run a twitter search over the joined positional arguments."""
    def __call__(self, twitter, options):
        # We need to be pointing at search.twitter.com to work, and it is less
        # tangly to do it here than in the main()
        twitter.domain = "search.twitter.com"
        twitter.uriparts = ()
        # Quote each term ourselves: TwitterCall's parameter encoding would
        # also encode the '+' separators between terms.
        terms = [quote(term) for term in options['extra_args']]
        query_string = "+".join(terms)
        results = twitter.search(q=query_string)['results']
        formatter = get_formatter('search', options)
        for result in results:
            line = formatter(result, options)
            if line.strip():
                printNicely(line)
class AdminAction(Action):
    # Base for follow/leave: resolves the target user then prints confirmation.
    def __call__(self, twitter, options):
        if not (options['extra_args'] and options['extra_args'][0]):
            raise TwitterError("You need to specify a user (screen name)")
        af = get_formatter('admin', options)
        try:
            user = self.getUser(twitter, options['extra_args'][0])
        except TwitterError as e:
            # The API does not distinguish these failure modes, so list them all.
            print("There was a problem following or leaving the specified user.")
            print("You may be trying to follow a user you are already following;")
            print("Leaving a user you are not currently following;")
            print("Or the user may not exist.")
            print("Sorry.")
            print()
            print(e)
        else:
            printNicely(af(options['action'], user))
class ListsAction(StatusAction):
    """Show a user's lists, or the timeline of one named list."""
    def getStatuses(self, twitter, options):
        extra = options['extra_args']
        if not extra:
            raise TwitterError("Please provide a user to query for lists")
        screen_name = extra[0]
        if extra[1:]:
            # A list name was given: return that list's statuses, oldest first.
            return reversed(twitter.user.lists.list.statuses(
                user=screen_name, list=extra[1]))
        # No list name: print the user's lists and return nothing to format.
        lists = twitter.lists.list(screen_name=screen_name)
        if not lists:
            printNicely("This user has no lists.")
        lf = get_formatter('lists', options)
        for item in lists:
            printNicely(lf(item))
        return []
class MyListsAction(ListsAction):
    # Same as ListsAction, implicitly targeting the authenticated user.
    def getStatuses(self, twitter, options):
        # Prepend our own screen name so ListsAction sees it as the target.
        screen_name = twitter.account.verify_credentials()['screen_name']
        options['extra_args'].insert(0, screen_name)
        return ListsAction.getStatuses(self, twitter, options)
class FriendsAction(StatusAction):
    # Latest tweets from the home timeline, oldest first.
    def getStatuses(self, twitter, options):
        return reversed(twitter.statuses.home_timeline(count=options["length"]))
class RepliesAction(StatusAction):
    # Latest mentions of the authenticated user, oldest first.
    def getStatuses(self, twitter, options):
        return reversed(twitter.statuses.mentions_timeline(count=options["length"]))
class FollowAction(AdminAction):
    # Start following the given screen name.
    def getUser(self, twitter, user):
        return twitter.friendships.create(screen_name=user)
class LeaveAction(AdminAction):
    # Stop following the given screen name.
    def getUser(self, twitter, user):
        return twitter.friendships.destroy(screen_name=user)
class SetStatusAction(Action):
    """Post a status update, splitting messages longer than 140 characters.

    Leading @mentions are extracted once and re-prepended to every chunk so
    all parts of a split message stay addressed to the same recipients.
    """

    @staticmethod
    def _split_status(statusTxt):
        """Split *statusTxt* into <=140-char chunks in posting order.

        Each chunk is the extracted mention prefix joined to a text slice
        with a single space (matching the historical output format, which
        yields a leading space when there are no mentions).
        """
        replies = []
        ptr = re.compile("@[\w_]+")
        # Peel leading @mentions off the front of the message.
        while statusTxt:
            s = ptr.match(statusTxt)
            if s and s.start() == 0:
                replies.append(statusTxt[s.start():s.end()])
                statusTxt = statusTxt[s.end() + 1:]
            else:
                break
        replies = " ".join(replies)
        if len(replies) >= 140:
            # The mentions alone exceed the limit; treat them as plain text.
            statusTxt = replies
            replies = ""
        splitted = []
        while statusTxt:
            limit = 140 - len(replies)
            if len(statusTxt) > limit:
                # Break at the last space that fits.  BUG FIX: the original
                # called string.rfind(...), a module-level helper that was
                # removed in Python 3; the str method is equivalent.
                end = statusTxt.rfind(' ', 0, limit)
            else:
                end = limit
            splitted.append(" ".join((replies, statusTxt[:end])))
            statusTxt = statusTxt[end:]
        return splitted

    def __call__(self, twitter, options):
        statusTxt = (" ".join(options['extra_args'])
                     if options['extra_args']
                     else str(input("message: ")))
        splitted = self._split_status(statusTxt)
        if options['invert_split']:
            splitted.reverse()
        for status in splitted:
            twitter.statuses.update(status=status)
class TwitterShell(Action):
    # Interactive shell: reads an action per line and dispatches it.
    def render_prompt(self, prompt):
        '''Parses the `prompt` string and returns the rendered version'''
        # Strip wrapping quotes from config values, then expand [colour]/[R]
        # markers into ANSI escape sequences.
        prompt = prompt.strip("'").replace("\\'", "'")
        for colour in ansi.COLOURS_NAMED:
            if '[%s]' % (colour) in prompt:
                prompt = prompt.replace(
                    '[%s]' % (colour), ansiFormatter.cmdColourNamed(colour))
        prompt = prompt.replace('[R]', ansiFormatter.cmdReset())
        return prompt
    def __call__(self, twitter, options):
        prompt = self.render_prompt(options.get('prompt', 'twitter> '))
        while True:
            # Clear the previous action so an empty line is a no-op.
            options['action'] = ""
            try:
                args = input(prompt).split()
                parse_args(args, options)
                if not options['action']:
                    continue
                elif options['action'] == 'exit':
                    raise SystemExit(0)
                elif options['action'] == 'shell':
                    print('Sorry Xzibit does not work here!', file=sys.stderr)
                    continue
                elif options['action'] == 'help':
                    print('''\ntwitter> `action`\n
The Shell Accepts all the command line actions along with:
exit Leave the twitter shell (^D may also be used)
Full CMD Line help is appended below for your convinience.''', file=sys.stderr)
                Action()(twitter, options)
                options['action'] = ''
            except NoSuchActionError as e:
                print(e, file=sys.stderr)
            except KeyboardInterrupt:
                print('\n[Keyboard Interrupt]', file=sys.stderr)
            except EOFError:
                # ^D: confirm before leaving the shell.
                print(file=sys.stderr)
                leaving = self.ask(subject='Leave')
                if not leaving:
                    print('Excellent!', file=sys.stderr)
                else:
                    raise SystemExit(0)
class PythonPromptAction(Action):
    # Drop into an interactive prompt with `twitter` and `options` in scope.
    def __call__(self, twitter, options):
        try:
            while True:
                smrt_input(globals(), locals())
        except EOFError:
            # ^D leaves the prompt.
            pass
class HelpAction(Action):
    # Print the module usage text (the module docstring).
    def __call__(self, twitter, options):
        print(__doc__)
class DoNothingAction(Action):
    # Used for actions (like 'authorize') whose work happens before dispatch.
    def __call__(self, twitter, options):
        pass
class RateLimitStatus(Action):
    # Print remaining API requests and when the rate-limit window resets.
    def __call__(self, twitter, options):
        rate = twitter.account.rate_limit_status()
        print("Remaining API requests: %s / %s (hourly limit)" % (rate['remaining_hits'], rate['hourly_limit']))
        print("Next reset in %ss (%s)" % (int(rate['reset_time_in_seconds'] - time.time()),
                                          time.asctime(time.localtime(rate['reset_time_in_seconds']))))
# Maps each command-line action name to the Action subclass that handles it.
actions = {
    'authorize' : DoNothingAction,
    'follow' : FollowAction,
    'friends' : FriendsAction,
    'list' : ListsAction,
    'mylist' : MyListsAction,
    'help' : HelpAction,
    'leave' : LeaveAction,
    'pyprompt' : PythonPromptAction,
    'replies' : RepliesAction,
    'search' : SearchAction,
    'set' : SetStatusAction,
    'shell' : TwitterShell,
    'rate' : RateLimitStatus,
}
def loadConfig(filename):
    """Return a copy of OPTIONS overridden by values read from *filename*.

    Only a known subset of [twitter] settings is honoured; a missing file
    simply yields the defaults.
    """
    options = dict(OPTIONS)
    if os.path.exists(filename):
        parser = SafeConfigParser()
        parser.read([filename])
        # String-valued settings.
        for option in ('format', 'prompt'):
            if parser.has_option('twitter', option):
                options[option] = parser.get('twitter', option)
        # Boolean-valued settings.
        for option in ('invert_split',):
            if parser.has_option('twitter', option):
                options[option] = parser.getboolean('twitter', option)
    return options
def main(args=sys.argv[1:]):
    # Entry point: merge options, run the OAuth dance if needed, dispatch.
    # NOTE(review): the default binds sys.argv at import time -- confirm no
    # caller mutates sys.argv before invoking main().
    arg_options = {}
    try:
        parse_args(args, arg_options)
    except GetoptError as e:
        print("I can't do that, %s." % (e), file=sys.stderr)
        print(file=sys.stderr)
        raise SystemExit(1)
    config_path = os.path.expanduser(
        arg_options.get('config_filename') or OPTIONS.get('config_filename'))
    config_options = loadConfig(config_path)
    # Apply the various options in order, the most important applied last.
    # Defaults first, then what's read from config file, then command-line
    # arguments.
    options = dict(OPTIONS)
    for d in config_options, arg_options:
        for k, v in list(d.items()):
            if v: options[k] = v
    if options['refresh'] and options['action'] not in (
        'friends', 'replies'):
        print("You can only refresh the friends or replies actions.", file=sys.stderr)
        print("Use 'twitter -h' for help.", file=sys.stderr)
        return 1
    oauth_filename = os.path.expanduser(options['oauth_filename'])
    # First run (or explicit 'authorize'): obtain and store OAuth tokens.
    if (options['action'] == 'authorize'
        or not os.path.exists(oauth_filename)):
        oauth_dance(
            "the Command-Line Tool", CONSUMER_KEY, CONSUMER_SECRET,
            options['oauth_filename'])
    global ansiFormatter
    ansiFormatter = ansi.AnsiCmd(options["force-ansi"])
    oauth_token, oauth_token_secret = read_token_file(oauth_filename)
    twitter = Twitter(
        auth=OAuth(
            oauth_token, oauth_token_secret, CONSUMER_KEY, CONSUMER_SECRET),
        secure=options['secure'],
        api_version='1.1',
        domain='api.twitter.com')
    try:
        Action()(twitter, options)
    except NoSuchActionError as e:
        print(e, file=sys.stderr)
        raise SystemExit(1)
    except TwitterError as e:
        print(str(e), file=sys.stderr)
        print("Use 'twitter -h' for help.", file=sys.stderr)
        raise SystemExit(1)
| [
"luis.rivera157@upr.edu"
] | luis.rivera157@upr.edu |
e7286393de85a9ea1daeff4f6a590f0d35dd069b | f50f1aa1f8f139d546db3230a1cb1f53043fd9e6 | /hardware/mobile/ifuse/actions.py | 4253ce5b51f00a7f0adaacfac0ba3a76d71890d3 | [] | no_license | pars-linux/corporate2 | 7887961d1552d39bc3b0bef4a60fd3413d9b82bb | 14d1eacfc824fb8d0bff8173e7ac06b36b88d10d | refs/heads/master | 2020-05-26T15:02:12.005654 | 2017-02-27T03:07:14 | 2017-02-27T03:07:14 | 82,476,084 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    # Regenerate the autotools build system, then configure with defaults.
    autotools.autoreconf("-vfi")
    autotools.configure()
def build():
    # Build with a single job (-j1); presumably parallel make is unreliable
    # for this package -- unconfirmed.
    autotools.make("-j1")
def install():
    # Install into the packaging sandbox and ship the standard docs.
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "COPYING", "README")
| [
"ozancaglayan@users.noreply.github.com"
] | ozancaglayan@users.noreply.github.com |
369e5de6978cd855314fe286b88ec95c0f367146 | 19bdbe1c4aa00ba9799764681f16e09f65d6ea2b | /np/lib/smtp.py | 0065154141cf437ba3588749e4b816c5fc03783f | [] | no_license | invisibleroads/networkplanner | b4a3c7b3c0c169c3cd6610a6fb77125434dcb1c4 | 7ad8c0f2b4078f6cca681205e1671d060a937c18 | refs/heads/master | 2023-08-11T17:33:44.458438 | 2012-05-31T13:41:04 | 2012-05-31T13:41:04 | 961,674 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | 'Routines for sending messages'
# Import system modules
import smtplib
import email.message
import email.utils
import socket
def sendMessage(fromByValue, toByValue, subject, body, headerByName=None):
    """Send a message using SMTP.

    fromByValue and toByValue are dicts with at least 'nickname' and 'email';
    fromByValue additionally carries 'smtp' (hostname or 'localhost'),
    'username' and 'password'.  headerByName optionally maps extra header
    names to values.  Raises SMTPError when the transport fails at the
    socket level.
    """
    # Prepare the message
    message = email.message.Message()
    message.add_header('from', email.utils.formataddr((fromByValue['nickname'], fromByValue['email'])))
    message.add_header('to', email.utils.formataddr((toByValue['nickname'], toByValue['email'])))
    message.add_header('subject', subject)
    message.set_payload(body)
    if headerByName:
        for key, value in headerByName.iteritems():
            message.add_header(key, value)
    # Connect to server: plain SMTP for localhost, SMTPS on port 465 otherwise
    if fromByValue['smtp'] == 'localhost':
        server = smtplib.SMTP('localhost')
    else:
        server = smtplib.SMTP_SSL(fromByValue['smtp'], 465)
    # Log in (when credentials are configured) and send the mail.  Both steps
    # sit inside try/finally so the connection is always closed -- previously
    # a failing login left the connection open.
    try:
        if len(fromByValue['username']):
            server.login(fromByValue['username'], fromByValue['password'])
        server.sendmail(fromByValue['email'], toByValue['email'], message.as_string())
    except socket.error as error:  # 'as' syntax works on Python 2.6+ and 3.x
        raise SMTPError(error)
    finally:
        server.quit()
class SMTPError(Exception):
    """Raised by sendMessage when mail delivery fails at the socket level."""
    pass
| [
"support@invisibleroads.com"
] | support@invisibleroads.com |
afa233f76cb5afeb5878c1f8371c6ee8b5e88667 | 5ed795f324b1f94ded479a22f60580d9f41a114b | /dashboard/migrations/0007_auto_20190212_1753.py | 231eeaf5940eeed0afb26eda070c777986ca996d | [] | no_license | ashutoshdev/Django-Main-news-wesbite | 907f52a131e136072a585c903c906adb19457765 | 9a934255465d73ab12e16031fb99ad5847b65b55 | refs/heads/master | 2023-08-23T20:27:40.286701 | 2021-10-21T02:03:49 | 2021-10-21T02:03:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py | # Generated by Django 2.0 on 2019-02-12 12:23
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: creates DashboardCompany and adds date/time columns
    to RFCompany (auto_now_add, so existing rows get the migration time)."""
    dependencies = [
        ('dashboard', '0006_rfcompany'),
    ]
    operations = [
        migrations.CreateModel(
            name='DashboardCompany',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('companyname', models.TextField(default='', verbose_name='companyname')),
                ('bannerloads', models.PositiveIntegerField(default=0)),
                ('clicks', models.PositiveIntegerField(default=0)),
                ('date', models.DateField(auto_now_add=True)),
                ('time', models.TimeField(auto_now_add=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.AddField(
            model_name='rfcompany',
            name='date',
            field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='rfcompany',
            name='time',
            field=models.TimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
"looklikeme05@gmail.com"
] | looklikeme05@gmail.com |
535593d22f261c2160a28ef49bdc94ee12184408 | e4537b92847c8489215338265a1410de37aeed0a | /run/runner_generate_literature_edges.py | 5e59eeb820d2b1f1b2f88b8dea998dd87669db1e | [] | no_license | KnowledgeLab/bm_support | 4e0b7712d01161480c7bbf3412445a5c14616a3a | b7b28b2485564a491c933c1a4c7233335b5297a1 | refs/heads/master | 2023-08-18T05:39:32.476129 | 2021-10-13T21:09:33 | 2021-10-13T21:09:33 | 50,460,089 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | import gzip
import pickle
from os.path import expanduser
import pandas as pd
from datahelpers.constants import ye, up, dn
df_type = "lit"
versions = [8, 11]
df_types = ["lit", "gw"]
version = 11
dfs = []
for ty, v in zip(df_types, versions):
with gzip.open(
expanduser("~/data/kl/claims/df_{0}_{1}.pgz".format(ty, v)), "rb"
) as fp:
df = pickle.load(fp)
dfs.append(df)
for ty, v, df in zip(df_types, versions, dfs):
years = sorted(df[ye].unique())
print(len(years))
h5_fname = expanduser("~/data/kl/comms/edges_all_{0}{1}.h5".format(ty, v))
store = pd.HDFStore(h5_fname)
for y in years[:]:
mask = df[ye] <= y
print(df[mask].shape)
dfe = df.loc[mask].groupby([up, dn]).apply(lambda x: x.shape[0])
store.put("y{0}".format(y), dfe.reset_index(), format="t")
store.close()
| [
"abelikov@gmail.com"
] | abelikov@gmail.com |
f37af5154d5298728a9393b150cbf139ca619504 | 92afc7a9140bdc392ae52256f70962917ade4615 | /phonereviewsapp/migrations/0002_store_phone.py | cdce932971c8f251b775c0bb7da674b40242922e | [] | no_license | Mateuzs/phone-reviews | 9b901a15e98dc625c26efdb064eac01f1f40af20 | 84cb4e88e92fab420b38940dd5b9f768a2696d47 | refs/heads/master | 2020-03-19T18:43:42.635748 | 2018-06-10T15:45:18 | 2018-06-10T15:45:18 | 136,821,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | # Generated by Django 2.0.5 on 2018-06-08 09:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: links Store to Phone via a nullable protected FK."""
    dependencies = [
        ('phonereviewsapp', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='store',
            name='phone',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='phones', to='phonereviewsapp.Phone'),
        ),
    ]
"mateusz.zembol@gmail.com"
] | mateusz.zembol@gmail.com |
1f6142ec058d2e3a37ed2bb2ff5227382cd9b409 | 0bee73e5bde6b4899db8e38e3fb59a75caf71ad9 | /tst/person_detection/person_predictor/test_spline.py | ff75c8c9238f51e430c13185e4982b2cc1a9be77 | [] | no_license | mycal-tucker/stalker-drone | 8e7710045b4d57fb4ddf19197371f55c1e5acfac | 245695009a1a1a70a009c4acbaf8533ef6877ced | refs/heads/master | 2020-04-04T11:54:01.756265 | 2018-12-04T22:00:39 | 2018-12-04T22:02:55 | 155,907,258 | 0 | 0 | null | 2018-11-27T19:58:50 | 2018-11-02T18:34:44 | Python | UTF-8 | Python | false | false | 5,821 | py | import time
import unittest
from person_detection.person_predictor.spline_predictor import SplinePredictor
from utils.person_state import PersonState
# Shared fixtures used across several tests below: a person standing at the
# origin, and the prediction horizons (in seconds) passed to the predictor.
origin_person_state = PersonState(0, 0)
second_intervals = [1, 2, 3, 4, 5, 6]
class TestSpline(unittest.TestCase):
    """Tests for SplinePredictor.

    NOTE(review): these tests feed states separated by real time.sleep(1)
    calls -- presumably the predictor timestamps states on arrival, so the
    sleeps create the 1 s spacing.  This makes the suite slow by design.
    """
    # Before every test, reset the SplinePredictor object that will be tested.
    def setUp(self):
        self.spline_predictor = SplinePredictor(bc_type='natural', extrapolation_type=None)
    def test_one_state_given(self):
        # SETUP
        self.spline_predictor.add_person_state(origin_person_state)
        # EXECUTE
        predicted_states = self.spline_predictor.predict_next_person_state(second_intervals)
        # VERIFY
        # Given only a single state, the best prediction over any time horizon is to stay in the same place.
        assert len(predicted_states) == len(second_intervals)
        for predicted_state in predicted_states:
            assert predicted_state == origin_person_state
    def test_cubic_standard(self):
        # SETUP
        # All default (not-a-knot and normal extrapolation based on last interval)
        self.spline_predictor = SplinePredictor()
        # Feed in states going through (-1, 1), (0, 0), (1, 1), (2, 1), (3, 1)
        self.spline_predictor.add_person_state(PersonState(-1, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(origin_person_state)
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(1, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(2, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(3, 1))
        # EXECUTE
        predicted_states = self.spline_predictor.predict_next_person_state(second_intervals)
        # VERIFY
        assert len(predicted_states) == len(second_intervals)
        # Only checks that predicted x keeps increasing past the last input.
        prev_x = 1
        for predicted_state in predicted_states:
            # Very loose. Fix me?
            assert predicted_state.x > prev_x
            prev_x = predicted_state.x
        # Plot things for visual inspection
        self.spline_predictor.plot_projections(predicted_states)
    def test_cubic_natural_end_points(self):
        # SETUP
        self.spline_predictor = SplinePredictor(bc_type='natural')
        # Feed in states going through (-1, 1), (0, 0), (1, 1), (2, 1), (3, 1)
        self.spline_predictor.add_person_state(PersonState(-1, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(origin_person_state)
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(1, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(2, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(3, 1))
        # EXECUTE
        predicted_states = self.spline_predictor.predict_next_person_state(second_intervals)
        # VERIFY
        assert len(predicted_states) == len(second_intervals)
        prev_x = 1
        for predicted_state in predicted_states:
            # Very loose. Fix me?
            assert predicted_state.x > prev_x
            prev_x = predicted_state.x
        # Plot things for visual inspection
        self.spline_predictor.plot_projections(predicted_states)
    # Use periodic extrapolation. Almost certainly dumb.
    def test_cubic_periodic_extrapolation(self):
        # SETUP
        self.spline_predictor = SplinePredictor(bc_type='natural', extrapolation_type='periodic')
        # Feed in states going through (-1, 1), (0, 0), (1, 1), (2, 1), (3, 1)
        self.spline_predictor.add_person_state(PersonState(-1, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(origin_person_state)
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(1, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(2, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(3, 1))
        # EXECUTE
        predicted_states = self.spline_predictor.predict_next_person_state(second_intervals)
        # VERIFY
        assert len(predicted_states) == len(second_intervals)
        prev_x = 1
        for predicted_state in predicted_states:
            # Very loose. Fix me?
            assert predicted_state.x > prev_x
            prev_x = predicted_state.x
        # Plot things for visual inspection
        self.spline_predictor.plot_projections(predicted_states)
    def test_cubic_many_datapoints(self):
        # SETUP
        self.spline_predictor = SplinePredictor(num_states_to_track=100, bc_type='natural')
        # Feed in tons of states.
        self.spline_predictor.add_person_state(PersonState(-1, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(origin_person_state)
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(1, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(2, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(3, 1))
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(4, 2))
        time.sleep(1)
        self.spline_predictor.add_person_state(PersonState(5, 1))
        # EXECUTE
        predicted_states = self.spline_predictor.predict_next_person_state(second_intervals)
        # VERIFY
        assert len(predicted_states) == len(second_intervals)
        prev_x = 1
        for predicted_state in predicted_states:
            # Very loose. Fix me?
            assert predicted_state.x > prev_x
            prev_x = predicted_state.x
        # Plot things for visual inspection
        self.spline_predictor.plot_projections(predicted_states)
| [
"mycal@mit.edu"
] | mycal@mit.edu |
00d9d5011339462d1de81b32c298106c9f81c7b6 | 52c6415bea542cee6a043986a28ac8c2baee58ca | /test_app_3/wsgi.py | 90dd1f72167a0e487ac3ca494686865cfdfc4002 | [] | no_license | KarstenPoddig/django_test_app | 5380f037353f0a637a5dd6e2099024a3a75820a1 | 0b4a7490ef5a93a382ff739b375614bd2584c5a3 | refs/heads/master | 2020-11-28T21:12:21.332733 | 2019-12-29T11:25:12 | 2019-12-29T11:25:12 | 229,921,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for test_app_3 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_app_3.settings')
# Module-level WSGI callable picked up by WSGI servers (e.g. gunicorn).
application = get_wsgi_application()
| [
"karsten_poddig@web.de"
] | karsten_poddig@web.de |
3558ffcff95890aa4e2acd17a5a1708e19ca0ca4 | f6c08dc702f39dbe3693d325327eeeddd65bb6df | /SNN_TUT/backup/stimAlt2.py | af186c2a3fc507689f05fa4ec01215d9adff84d0 | [] | no_license | christofrancois/BioSWdev4ASN | fae10e98d494379cd7ea206504977f924b229846 | 380d0ce5f3568deda434ca609ff30ccb8309299e | refs/heads/master | 2021-01-13T16:39:26.162383 | 2017-10-11T10:14:35 | 2017-10-11T10:14:35 | 78,402,746 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | #script generating SIMPLE stimtime format
from random import randint
start = 25
length = 2000
#only 2 patterns
i = 0
while i < length:
if i % 5 == 0:
print("%s %s %s"% (i + 0.01 - 0.01 + start, 1, 0))
print("%s %s %s"% (i + 0.01 + start, 0, 0))
if i % 5 == 0.5:
print("%s %s %s"% (i + 0.01 - 0.01 + start, 0, 0))
print("%s %s %s"% (i + 0.01 + start, 1, 0))
if i % 5 == 2.5:
print("%s %s %s"% (i + 0.01 - 0.01 + start, 1, 1))
print("%s %s %s"% (i + 0.01 + start, 0, 1))
if i % 5 == 3:
print("%s %s %s"% (i + 0.01 - 0.01 + start, 0, 1))
print("%s %s %s"% (i + 0.01 + start, 1, 1))
i += 0.5
print("%s %s %s"% (i + 0.01 - 0.01 + start, 1, 0))
print("%s %s %s"% (i + 0.01 - 0.01 + start, 1, 1))
print("%s %s %s"% (3600 + start, 1, 0))
print("%s %s %s"% (3600 + start, 1, 1))
| [
"francois.christophe@helsinki.fi"
] | francois.christophe@helsinki.fi |
3fd599f2dd2b120dfc1fa457dd87c9056ade3f26 | 00a9295409b78a53ce790f7ab44931939f42c0e0 | /FPGA/apio/iCEBreaker/FIR_Filter/sympy/venv/lib/python3.8/site-packages/sympy/multipledispatch/utils.py | 11cea683ed08448b11c2efecaea1b7e234934cc4 | [
"Apache-2.0"
] | permissive | klei22/Tech-OnBoarding-Class | c21f0762d2d640d5e9cb124659cded5c865b32d4 | 960e962322c37be9117e0523641f8b582a2beceb | refs/heads/master | 2022-11-10T13:17:39.128342 | 2022-10-25T08:59:48 | 2022-10-25T08:59:48 | 172,292,871 | 2 | 3 | Apache-2.0 | 2019-05-19T00:26:32 | 2019-02-24T03:50:35 | C | UTF-8 | Python | false | false | 3,042 | py | def expand_tuples(L):
"""
>>> from sympy.multipledispatch.utils import expand_tuples
>>> expand_tuples([1, (2, 3)])
[(1, 2), (1, 3)]
>>> expand_tuples([1, 2])
[(1, 2)]
"""
if not L:
return [()]
elif not isinstance(L[0], tuple):
rest = expand_tuples(L[1:])
return [(L[0],) + t for t in rest]
else:
rest = expand_tuples(L[1:])
return [(item,) + t for t in rest for item in L[0]]
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
    """ Topological sort algorithm by Kahn [1] - O(nodes + vertices)

    inputs:
        edges - a dict of the form {a: {b, c}} where b and c depend on a
    outputs:
        L - an ordered list of nodes that satisfy the dependencies of edges

    >>> from sympy.multipledispatch.utils import _toposort
    >>> _toposort({1: (2, 3), 2: (3, )})
    [1, 2, 3]

    Closely follows the wikipedia page [2]

    [1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
    Communications of the ACM
    [2] https://en.wikipedia.org/wiki/Toposort#Algorithms
    """
    # For every node, the set of nodes that still point at it.
    pending = {node: set(parents)
               for node, parents in reverse_dict(edges).items()}
    # Nodes with no incoming edge can be emitted immediately.
    ready = {node for node in edges if node not in pending}
    ordered = []
    while ready:
        current = ready.pop()
        ordered.append(current)
        for child in edges.get(current, ()):
            assert current in pending[child]
            pending[child].remove(current)
            if not pending[child]:
                ready.add(child)
    if any(pending.get(node, None) for node in edges):
        raise ValueError("Input has cycles")
    return ordered
def reverse_dict(d):
    """Reverses direction of dependence dict

    >>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
    >>> reverse_dict(d)  # doctest: +SKIP
    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}

    :note: output ordering follows the iteration order of the input dict,
           so it should be treated as unspecified.
    """
    flipped = {}
    for source, targets in d.items():
        for target in targets:
            flipped[target] = flipped.get(target, ()) + (source,)
    return flipped
# Taken from toolz
# Avoids licensing issues because this version was authored by Matthew Rocklin
def groupby(func, seq):
    """ Group a collection by a key function

    >>> from sympy.multipledispatch.utils import groupby
    >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
    >>> groupby(len, names)  # doctest: +SKIP
    {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}

    >>> iseven = lambda x: x % 2 == 0
    >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8])  # doctest: +SKIP
    {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}

    See Also:
        ``countby``
    """
    grouped = {}
    for item in seq:
        grouped.setdefault(func(item), []).append(item)
    return grouped
| [
"kaunalei@gmail.com"
] | kaunalei@gmail.com |
d1d37d55ddd2effdd725fcd0b86dd12c8c294955 | bc92e1b18d73259349bd813eb5199182c8ac12fa | /src/detect_peaks.py | db8acd67ed3031e5a46980357b2570d3bcecb0c2 | [] | no_license | IanVlasov/OTUS_epileptic_seizure_detection | e88f0e95700ddd5a8c8b6e195b42362c63ee2c9e | d18ae0c3c09366abce94362c1eb1fa6c27323638 | refs/heads/main | 2023-04-11T07:55:38.091808 | 2021-04-29T12:52:05 | 2021-04-29T12:52:05 | 362,772,523 | 0 | 0 | null | 2021-04-29T12:52:06 | 2021-04-29T10:05:37 | Jupyter Notebook | UTF-8 | Python | false | false | 5,267 | py | import numpy as np
# Copied From http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
# Thank you Marcos Duarte
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
                 kpsh=False, valley=False, show=False, ax=None):

    """Detect peaks in data based on their amplitude and other features.

    Parameters
    ----------
    x : 1D array_like
        data.
    mph : {None, number}, optional (default = None)
        detect peaks that are greater than minimum peak height.
    mpd : positive integer, optional (default = 1)
        detect peaks that are at least separated by minimum peak distance (in
        number of data).
    threshold : positive number, optional (default = 0)
        detect peaks (valleys) that are greater (smaller) than `threshold`
        in relation to their immediate neighbors.
    edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
        for a flat peak, keep only the rising edge ('rising'), only the
        falling edge ('falling'), both edges ('both'), or don't detect a
        flat peak (None).
    kpsh : bool, optional (default = False)
        keep peaks with same height even if they are closer than `mpd`.
    valley : bool, optional (default = False)
        if True (1), detect valleys (local minima) instead of peaks.
    show : bool, optional (default = False)
        if True (1), plot data in matplotlib figure.
    ax : a matplotlib.axes.Axes instance, optional (default = None).

    Returns
    -------
    ind : 1D array_like
        indeces of the peaks in `x`.

    Notes
    -----
    The detection of valleys instead of peaks is performed internally by simply
    negating the data: `ind_valleys = detect_peaks(-x)`

    The function can handle NaN's

    See this IPython Notebook [1]_.

    References
    ----------
    .. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb

    Examples
    --------
    >>> from src.detect_peaks import detect_peaks
    >>> x = np.random.randn(100)
    >>> x[60:81] = np.nan
    >>> # detect all peaks and plot data
    >>> ind = detect_peaks(x, show=True)
    >>> print(ind)

    >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
    >>> # set minimum peak height = 0 and minimum peak distance = 20
    >>> detect_peaks(x, mph=0, mpd=20, show=True)

    >>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]
    >>> # set minimum peak distance = 2
    >>> detect_peaks(x, mpd=2, show=True)

    >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
    >>> # detection of valleys instead of peaks
    >>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True)

    >>> x = [0, 1, 1, 0, 1, 1, 0]
    >>> # detect both edges
    >>> detect_peaks(x, edge='both', show=True)

    >>> x = [-2, 1, -2, 2, 1, 1, 3, 0]
    >>> # set threshold = 2
    >>> detect_peaks(x, threshold = 2, show=True)
    """

    x = np.atleast_1d(x).astype('float64')
    # fewer than 3 samples cannot contain an interior peak
    if x.size < 3:
        return np.array([], dtype=int)
    if valley:
        x = -x
    # find indices of all peaks
    dx = x[1:] - x[:-1]
    # handle NaN's
    indnan = np.where(np.isnan(x))[0]
    if indnan.size:
        x[indnan] = np.inf
        dx[np.where(np.isnan(dx))[0]] = np.inf
    ine, ire, ife = np.array([[], [], []], dtype=int)
    if not edge:
        # strict local maxima only (no flat tops)
        ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
    else:
        if edge.lower() in ['rising', 'both']:
            ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
        if edge.lower() in ['falling', 'both']:
            ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
    ind = np.unique(np.hstack((ine, ire, ife)))
    # handle NaN's
    if ind.size and indnan.size:
        # NaN's and values close to NaN's cannot be peaks
        ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]
    # first and last values of x cannot be peaks
    if ind.size and ind[0] == 0:
        ind = ind[1:]
    if ind.size and ind[-1] == x.size-1:
        ind = ind[:-1]
    # remove peaks < minimum peak height
    if ind.size and mph is not None:
        ind = ind[x[ind] >= mph]
    # remove peaks - neighbors < threshold
    if ind.size and threshold > 0:
        dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)
        ind = np.delete(ind, np.where(dx < threshold)[0])
    # detect small peaks closer than minimum peak distance
    if ind.size and mpd > 1:
        ind = ind[np.argsort(x[ind])][::-1]  # sort ind by peak height
        idel = np.zeros(ind.size, dtype=bool)
        for i in range(ind.size):
            if not idel[i]:
                # mark every peak within mpd of the current (taller) one
                # keep peaks with the same height if kpsh is True
                idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
                    & (x[ind[i]] > x[ind] if kpsh else True)
                idel[i] = 0  # Keep current peak
        # remove the small peaks and sort back the indices by their occurrence
        ind = np.sort(ind[~idel])

    if show:
        if indnan.size:
            x[indnan] = np.nan
        if valley:
            x = -x
        # NOTE(review): _plot is not defined in this module view -- confirm it
        # exists (the upstream notebook defines it) before using show=True.
        _plot(x, mph, mpd, threshold, edge, valley, ax, ind)

    return ind
"noreply@github.com"
] | noreply@github.com |
6602f0f5a7e9c69451cabe5de3227b1c51e36e9f | 3509b286919369502f0a53b0e2cef2f654470d81 | /ScotteLogger/getsystemdata.py | b85a703d2c431f4abed61c8ce7c1bb7f474ada44 | [
"MIT"
] | permissive | fkierbye/scottepi | ffce524014904dfc477a79b8192562babcd59d5a | 372ad44fc54ac458e7be51644e470c028e2a6f01 | refs/heads/master | 2021-01-13T08:04:58.624818 | 2016-11-08T23:17:47 | 2016-11-08T23:17:47 | 71,733,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,211 | py | #!/usr/bin/python
import subprocess
import os
import json
from pprint import pprint
project_path="/home/scotte/scottepi/ScotteLogger/"
json_file=project_path+"systemdata.json"
def get_mem_usage():
    """Returns memory usage in percent, see http://www.linuxatemyram.com/ for available ram"""
    try:
        s = subprocess.check_output(["free","-m"])
        lines = s.split('\n')
        # lines[1] is the "Mem:" row; column 1 is total memory in MB.
        mem_total=float(lines[1].split()[1])
        # NOTE(review): assumes the old two-row `free` layout where lines[2]
        # is "-/+ buffers/cache:"; on newer procps lines[2] is "Swap:" and
        # this would read swap-free instead -- confirm on the target system.
        mem_free=int(lines[2].split()[3])
        mem_used=mem_total-mem_free
        mem_percent_used=int(round(mem_used*100/mem_total))
        return ( mem_percent_used )
    except:
        # any subprocess/parsing failure is reported as 0% used
        return 0
def get_disk_usage():
    """Returns disk usage in percent"""
    try:
        s = subprocess.check_output(["df","-m"])
        lines = s.split('\n')
        # lines[1] is the first filesystem row -- presumably the root
        # filesystem on the target device; TODO confirm mount ordering.
        disk_total=float(lines[1].split()[1])
        disk_free=int(lines[1].split()[3])
        disk_used=disk_total-disk_free
        disk_percent_used=int(round(disk_used*100/disk_total))
        return ( disk_percent_used )
    except:
        # any subprocess/parsing failure is reported as 0% used
        return 0
def get_process_count():
    """Returns the number of processes (0 on failure)"""
    try:
        s = subprocess.check_output(["ps","-e"])
        # `ps -e` prints a header row, and the output ends with a newline,
        # so a raw len(s.split('\n')) over-counted by two.  Strip trailing
        # whitespace and drop the header line.
        return max(len(s.strip().split('\n')) - 1, 0)
    except:
        # keep the fail-soft convention used by the other get_* helpers
        return 0
def get_uptime():
    """Returns uptime as a string (tbd tuple? )"""
    try:
        s = subprocess.check_output(["uptime"])
        # Everything before "load average: " holds the clock, uptime and
        # user count, e.g. " 10:02:11 up 3 days, 1:02,  2 users, ".
        load_split = s.split('load average: ')
        up = load_split[0]
        # Trim the trailing ", N users" part, then keep what follows "up ".
        up_pos = up.rfind(',',0,len(up)-4)
        up = up[:up_pos].split('up ')[1]
        return ( up )
    except:
        # any subprocess/parsing failure yields an empty string
        return ( "" )
def get_cpu_load_average():
    """Returns a tuple (1 min load, 5 min load, 15 min load )"""
    try:
        s = subprocess.check_output(["uptime"])
        # The text after "load average: " is "L1, L5, L15".
        load_split = s.split('load average: ')
        load_1 = float(load_split[1].split(',')[0])
        load_5 = float(load_split[1].split(',')[1])
        load_15 = float(load_split[1].split(',')[2])
        return (load_1, load_5, load_15)
    except:
        # any subprocess/parsing failure is reported as zero load
        return ( 0, 0, 0 )
def get_ipconnections():
    """Return the number of established network connections (0 on failure)."""
    try:
        # netstat -tun lists TCP/UDP sockets with numeric addresses; count
        # how many rows carry the ESTABLISHED state token.
        output = subprocess.check_output(["netstat", "-tun"])
        return output.split().count('ESTABLISHED')
    except:
        return 0
def get_cpu_temperature():
    """Return the CPU temperature in degrees C (0 on failure)."""
    try:
        # vcgencmd prints something like "temp=48.3'C"; keep the number only.
        output = subprocess.check_output(["/opt/vc/bin/vcgencmd", "measure_temp"])
        value = output.split('=')[1]
        return float(value[:-3])
    except:
        return 0
def get_ipaddress():
    """Returns the current IP address"""
    # Parse `ip route list` and take the token following 'src' -- the local
    # source address of the first route.  Unlike the other helpers there is
    # no try/except here, so a missing 'src' token raises ValueError.
    arg='ip route list'
    p=subprocess.Popen(arg,shell=True,stdout=subprocess.PIPE)
    data = p.communicate()
    split_data = data[0].split()
    ipaddr = split_data[split_data.index('src')+1]
    return ipaddr
def get_cpu_speed():
    """Returns the configured ARM CPU speed in MHz (0 on failure)"""
    # vcgencmd only exists on Raspberry Pi systems.  Every other get_*
    # helper in this module fails soft; this one previously raised and
    # crashed the whole snapshot, so wrap it the same way.
    try:
        s = subprocess.check_output(["/opt/vc/bin/vcgencmd", "get_config", "arm_freq"])
        return int(s.split('=')[1])
    except:
        return 0
def get_wireless_info():
    """Returns a touple with ssid and quality in %"""
    try:
        s = subprocess.check_output(["wavemon","-iwlan0","-d"])
        lines = s.split('\n')
        # NOTE(review): hard-coded line/column offsets into wavemon's dump
        # output (line 5 = ESSID, line 18 = link quality) -- fragile across
        # wavemon versions; confirm against the installed version.
        ssid = lines[5].split()[1].strip("\"")
        quality_str = lines[18].split()[2]
        quality = int(quality_str.split('/')[0])
        quality_max = float(quality_str.split('/')[1]) # make a float to calculate floating point factor below
        factor = 100/quality_max
        quality_percent = int(round(quality*factor)) # make in % usually max is 70
        return ( ssid, quality_percent )
    except:
        # no wireless interface / parse failure: empty SSID, 0% quality
        return ("", 0)
# Collect one snapshot of every metric into a dict, dump it as pretty-printed
# JSON for the web UI, and echo it to stdout for debugging.
systeminfo = {}
systeminfo['mem_usage'] = get_mem_usage()
systeminfo['disk_usage'] = get_disk_usage()
systeminfo['process_count'] = get_process_count()
systeminfo['cpu_load'] = get_cpu_load_average()
systeminfo['uptime'] = get_uptime()
systeminfo['cpu_temperature'] = get_cpu_temperature()
systeminfo['cpu_speed'] = get_cpu_speed()
systeminfo['ip_address'] = get_ipaddress()
systeminfo['ip_connections'] = get_ipconnections()
systeminfo['wireless_info'] = get_wireless_info()
# export data to json file
with open(json_file, 'w') as outfile:
    json.dump(systeminfo, outfile, sort_keys=True, indent=4, separators=(',', ': '))
pprint( systeminfo)
"frederik@kierbye.dk"
] | frederik@kierbye.dk |
632bcfd9791ccbdc1e14fd7487c231c3e8ccd408 | 29b6a856a81a47ebab7bfdba7fe8a7b845123c9e | /dingtalk/python/alibabacloud_dingtalk/conv_file_1_0/models.py | 4327631ff59f6b9e1c0b1d34a2c61fedcfc59b67 | [
"Apache-2.0"
] | permissive | aliyun/dingtalk-sdk | f2362b6963c4dbacd82a83eeebc223c21f143beb | 586874df48466d968adf0441b3086a2841892935 | refs/heads/master | 2023-08-31T08:21:14.042410 | 2023-08-30T08:18:22 | 2023-08-30T08:18:22 | 290,671,707 | 22 | 9 | null | 2021-08-12T09:55:44 | 2020-08-27T04:05:39 | PHP | UTF-8 | Python | false | false | 30,852 | py | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict
class GetSpaceHeaders(TeaModel):
    """Auto-generated Tea model: request headers for the GetSpace call
    (common headers plus the x-acs-dingtalk-access-token auth header)."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
    def validate(self):
        # No required fields for this model.
        pass
    def to_map(self) -> dict:
        # Serialize to the wire-format dict; only set fields are emitted.
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result
    def from_map(self, m: dict = None):
        # Populate fields from a wire-format dict; returns self for chaining.
        m = m or dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class GetSpaceRequest(TeaModel):
    """Auto-generated Tea model: GetSpace query parameters -- the target
    conversation id and the calling user's unionId."""
    def __init__(
        self,
        open_conversation_id: str = None,
        union_id: str = None,
    ):
        self.open_conversation_id = open_conversation_id
        self.union_id = union_id
    def validate(self):
        # No required fields for this model.
        pass
    def to_map(self) -> dict:
        # Serialize to the wire-format dict; only set fields are emitted.
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.open_conversation_id is not None:
            result['openConversationId'] = self.open_conversation_id
        if self.union_id is not None:
            result['unionId'] = self.union_id
        return result
    def from_map(self, m: dict = None):
        # Populate fields from a wire-format dict; returns self for chaining.
        m = m or dict()
        if m.get('openConversationId') is not None:
            self.open_conversation_id = m.get('openConversationId')
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        return self
class GetSpaceResponseBodySpace(TeaModel):
    """Auto-generated Tea model: the storage space record returned by
    GetSpace (owner corp, create/modify timestamps, space id)."""
    def __init__(
        self,
        corp_id: str = None,
        create_time: str = None,
        modified_time: str = None,
        space_id: str = None,
    ):
        self.corp_id = corp_id
        self.create_time = create_time
        self.modified_time = modified_time
        self.space_id = space_id
    def validate(self):
        # No required fields for this model.
        pass
    def to_map(self) -> dict:
        # Serialize to the wire-format dict; only set fields are emitted.
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.corp_id is not None:
            result['corpId'] = self.corp_id
        if self.create_time is not None:
            result['createTime'] = self.create_time
        if self.modified_time is not None:
            result['modifiedTime'] = self.modified_time
        if self.space_id is not None:
            result['spaceId'] = self.space_id
        return result
    def from_map(self, m: dict = None):
        # Populate fields from a wire-format dict; returns self for chaining.
        m = m or dict()
        if m.get('corpId') is not None:
            self.corp_id = m.get('corpId')
        if m.get('createTime') is not None:
            self.create_time = m.get('createTime')
        if m.get('modifiedTime') is not None:
            self.modified_time = m.get('modifiedTime')
        if m.get('spaceId') is not None:
            self.space_id = m.get('spaceId')
        return self
class GetSpaceResponseBody(TeaModel):
    """Auto-generated Tea model: GetSpace response body wrapping the nested
    space record."""
    def __init__(
        self,
        space: GetSpaceResponseBodySpace = None,
    ):
        self.space = space
    def validate(self):
        # Recursively validate the nested model when present.
        if self.space:
            self.space.validate()
    def to_map(self) -> dict:
        # Serialize to the wire-format dict; nested model is serialized too.
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.space is not None:
            result['space'] = self.space.to_map()
        return result
    def from_map(self, m: dict = None):
        # Populate fields from a wire-format dict; returns self for chaining.
        m = m or dict()
        if m.get('space') is not None:
            temp_model = GetSpaceResponseBodySpace()
            self.space = temp_model.from_map(m['space'])
        return self
class GetSpaceResponse(TeaModel):
    """Auto-generated Tea model: full GetSpace HTTP response (headers,
    status code, parsed body).  All three fields are required."""
    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetSpaceResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body
    def validate(self):
        # Unlike the request models, every field of the response is required.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()
    def to_map(self) -> dict:
        # Serialize to the wire-format dict; nested body is serialized too.
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result
    def from_map(self, m: dict = None):
        # Populate fields from a wire-format dict; returns self for chaining.
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            temp_model = GetSpaceResponseBody()
            self.body = temp_model.from_map(m['body'])
        return self
class SendHeaders(TeaModel):
    """Auto-generated Tea model: request headers for the Send call
    (common headers plus the x-acs-dingtalk-access-token auth header)."""
    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
    def validate(self):
        # No required fields for this model.
        pass
    def to_map(self) -> dict:
        # Serialize to the wire-format dict; only set fields are emitted.
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.common_headers is not None:
            result['commonHeaders'] = self.common_headers
        if self.x_acs_dingtalk_access_token is not None:
            result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
        return result
    def from_map(self, m: dict = None):
        # Populate fields from a wire-format dict; returns self for chaining.
        m = m or dict()
        if m.get('commonHeaders') is not None:
            self.common_headers = m.get('commonHeaders')
        if m.get('x-acs-dingtalk-access-token') is not None:
            self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
        return self
class SendRequest(TeaModel):
    """Auto-generated Tea model: Send query parameters -- the file (dentry),
    its space, the target conversation and the calling user's unionId."""
    def __init__(
        self,
        dentry_id: str = None,
        open_conversation_id: str = None,
        space_id: str = None,
        union_id: str = None,
    ):
        self.dentry_id = dentry_id
        self.open_conversation_id = open_conversation_id
        self.space_id = space_id
        self.union_id = union_id
    def validate(self):
        # No required fields for this model.
        pass
    def to_map(self) -> dict:
        # Serialize to the wire-format dict; only set fields are emitted.
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.dentry_id is not None:
            result['dentryId'] = self.dentry_id
        if self.open_conversation_id is not None:
            result['openConversationId'] = self.open_conversation_id
        if self.space_id is not None:
            result['spaceId'] = self.space_id
        if self.union_id is not None:
            result['unionId'] = self.union_id
        return result
    def from_map(self, m: dict = None):
        # Populate fields from a wire-format dict; returns self for chaining.
        m = m or dict()
        if m.get('dentryId') is not None:
            self.dentry_id = m.get('dentryId')
        if m.get('openConversationId') is not None:
            self.open_conversation_id = m.get('openConversationId')
        if m.get('spaceId') is not None:
            self.space_id = m.get('spaceId')
        if m.get('unionId') is not None:
            self.union_id = m.get('unionId')
        return self
class SendResponseBodyFile(TeaModel):
    """Metadata of the file that was sent, as returned by the API."""

    # (wire key, python attribute name) pairs, in declaration order.
    _FIELDS = (
        ('conversationId', 'conversation_id'),
        ('createTime', 'create_time'),
        ('creatorId', 'creator_id'),
        ('extension', 'extension'),
        ('id', 'id'),
        ('modifiedTime', 'modified_time'),
        ('modifierId', 'modifier_id'),
        ('name', 'name'),
        ('parentId', 'parent_id'),
        ('path', 'path'),
        ('size', 'size'),
        ('spaceId', 'space_id'),
        ('status', 'status'),
        ('type', 'type'),
        ('uuid', 'uuid'),
        ('version', 'version'),
    )

    def __init__(
        self,
        conversation_id: str = None,
        create_time: str = None,
        creator_id: str = None,
        extension: str = None,
        id: str = None,
        modified_time: str = None,
        modifier_id: str = None,
        name: str = None,
        parent_id: str = None,
        path: str = None,
        size: int = None,
        space_id: str = None,
        status: str = None,
        type: str = None,
        uuid: str = None,
        version: int = None,
    ):
        self.conversation_id = conversation_id
        self.create_time = create_time
        self.creator_id = creator_id
        self.extension = extension
        self.id = id
        self.modified_time = modified_time
        self.modifier_id = modifier_id
        self.name = name
        self.parent_id = parent_id
        self.path = path
        self.size = size
        self.space_id = space_id
        self.status = status
        self.type = type
        self.uuid = uuid
        self.version = version

    def validate(self):
        # Plain data holder: nothing to validate.
        pass

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                serialized[wire_key] = value
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        for wire_key, attr in self._FIELDS:
            if payload.get(wire_key) is not None:
                setattr(self, attr, payload.get(wire_key))
        return self
class SendResponseBody(TeaModel):
    """Response payload wrapping the metadata of the sent file."""

    def __init__(
        self,
        file: SendResponseBodyFile = None,
    ):
        self.file = file

    def validate(self):
        # Cascade validation into the nested file model when present.
        if self.file:
            self.file.validate()

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        if self.file is not None:
            serialized['file'] = self.file.to_map()
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        if payload.get('file') is not None:
            self.file = SendResponseBodyFile().from_map(payload['file'])
        return self
class SendResponse(TeaModel):
    """Complete response: HTTP headers, status code and the parsed body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: SendResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        # All three parts are mandatory on a decoded response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        if self.headers is not None:
            serialized['headers'] = self.headers
        if self.status_code is not None:
            serialized['statusCode'] = self.status_code
        if self.body is not None:
            serialized['body'] = self.body.to_map()
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        if payload.get('headers') is not None:
            self.headers = payload.get('headers')
        if payload.get('statusCode') is not None:
            self.status_code = payload.get('statusCode')
        if payload.get('body') is not None:
            self.body = SendResponseBody().from_map(payload['body'])
        return self
class SendByAppHeaders(TeaModel):
    """Request headers for the sendByApp API: generic pass-through headers
    plus the DingTalk gateway access-token header."""

    # (wire header name, python attribute name) pairs.
    _FIELDS = (
        ('commonHeaders', 'common_headers'),
        ('x-acs-dingtalk-access-token', 'x_acs_dingtalk_access_token'),
    )

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        # Plain data holder: nothing to validate.
        pass

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                serialized[wire_key] = value
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        for wire_key, attr in self._FIELDS:
            if payload.get(wire_key) is not None:
                setattr(self, attr, payload.get(wire_key))
        return self
class SendByAppRequest(TeaModel):
    """Parameters for sending a storage file on behalf of the app."""

    # (wire key, python attribute name) pairs, in declaration order.
    _FIELDS = (
        ('dentryId', 'dentry_id'),
        ('spaceId', 'space_id'),
        ('unionId', 'union_id'),
    )

    def __init__(
        self,
        dentry_id: str = None,
        space_id: str = None,
        union_id: str = None,
    ):
        self.dentry_id = dentry_id
        self.space_id = space_id
        self.union_id = union_id

    def validate(self):
        # Plain data holder: nothing to validate.
        pass

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                serialized[wire_key] = value
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        for wire_key, attr in self._FIELDS:
            if payload.get(wire_key) is not None:
                setattr(self, attr, payload.get(wire_key))
        return self
class SendByAppResponseBodyFile(TeaModel):
    """Metadata of the file sent by the app, as returned by the API."""

    # (wire key, python attribute name) pairs, in declaration order.
    _FIELDS = (
        ('conversationId', 'conversation_id'),
        ('createTime', 'create_time'),
        ('creatorId', 'creator_id'),
        ('extension', 'extension'),
        ('id', 'id'),
        ('modifiedTime', 'modified_time'),
        ('modifierId', 'modifier_id'),
        ('name', 'name'),
        ('parentId', 'parent_id'),
        ('path', 'path'),
        ('size', 'size'),
        ('spaceId', 'space_id'),
        ('status', 'status'),
        ('type', 'type'),
        ('uuid', 'uuid'),
        ('version', 'version'),
    )

    def __init__(
        self,
        conversation_id: str = None,
        create_time: str = None,
        creator_id: str = None,
        extension: str = None,
        id: str = None,
        modified_time: str = None,
        modifier_id: str = None,
        name: str = None,
        parent_id: str = None,
        path: str = None,
        size: int = None,
        space_id: str = None,
        status: str = None,
        type: str = None,
        uuid: str = None,
        version: int = None,
    ):
        self.conversation_id = conversation_id
        self.create_time = create_time
        self.creator_id = creator_id
        self.extension = extension
        self.id = id
        self.modified_time = modified_time
        self.modifier_id = modifier_id
        self.name = name
        self.parent_id = parent_id
        self.path = path
        self.size = size
        self.space_id = space_id
        self.status = status
        self.type = type
        self.uuid = uuid
        self.version = version

    def validate(self):
        # Plain data holder: nothing to validate.
        pass

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                serialized[wire_key] = value
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        for wire_key, attr in self._FIELDS:
            if payload.get(wire_key) is not None:
                setattr(self, attr, payload.get(wire_key))
        return self
class SendByAppResponseBody(TeaModel):
    """Response payload wrapping the metadata of the sent file."""

    def __init__(
        self,
        file: SendByAppResponseBodyFile = None,
    ):
        self.file = file

    def validate(self):
        # Cascade validation into the nested file model when present.
        if self.file:
            self.file.validate()

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        if self.file is not None:
            serialized['file'] = self.file.to_map()
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        if payload.get('file') is not None:
            self.file = SendByAppResponseBodyFile().from_map(payload['file'])
        return self
class SendByAppResponse(TeaModel):
    """Complete response: HTTP headers, status code and the parsed body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: SendByAppResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        # All three parts are mandatory on a decoded response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        if self.headers is not None:
            serialized['headers'] = self.headers
        if self.status_code is not None:
            serialized['statusCode'] = self.status_code
        if self.body is not None:
            serialized['body'] = self.body.to_map()
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        if payload.get('headers') is not None:
            self.headers = payload.get('headers')
        if payload.get('statusCode') is not None:
            self.status_code = payload.get('statusCode')
        if payload.get('body') is not None:
            self.body = SendByAppResponseBody().from_map(payload['body'])
        return self
class SendLinkHeaders(TeaModel):
    """Request headers for the sendLink API: generic pass-through headers
    plus the DingTalk gateway access-token header."""

    # (wire header name, python attribute name) pairs.
    _FIELDS = (
        ('commonHeaders', 'common_headers'),
        ('x-acs-dingtalk-access-token', 'x_acs_dingtalk_access_token'),
    )

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        # Plain data holder: nothing to validate.
        pass

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                serialized[wire_key] = value
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        for wire_key, attr in self._FIELDS:
            if payload.get(wire_key) is not None:
                setattr(self, attr, payload.get(wire_key))
        return self
class SendLinkRequest(TeaModel):
    """Parameters for sending a file link into a conversation."""

    # (wire key, python attribute name) pairs, in declaration order.
    _FIELDS = (
        ('dentryId', 'dentry_id'),
        ('openConversationId', 'open_conversation_id'),
        ('spaceId', 'space_id'),
        ('unionId', 'union_id'),
    )

    def __init__(
        self,
        dentry_id: str = None,
        open_conversation_id: str = None,
        space_id: str = None,
        union_id: str = None,
    ):
        self.dentry_id = dentry_id
        self.open_conversation_id = open_conversation_id
        self.space_id = space_id
        self.union_id = union_id

    def validate(self):
        # Plain data holder: nothing to validate.
        pass

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                serialized[wire_key] = value
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        for wire_key, attr in self._FIELDS:
            if payload.get(wire_key) is not None:
                setattr(self, attr, payload.get(wire_key))
        return self
class SendLinkResponseBodyFile(TeaModel):
    """Metadata of the linked file, as returned by the API."""

    # (wire key, python attribute name) pairs, in declaration order.
    _FIELDS = (
        ('conversationId', 'conversation_id'),
        ('createTime', 'create_time'),
        ('creatorId', 'creator_id'),
        ('extension', 'extension'),
        ('id', 'id'),
        ('modifiedTime', 'modified_time'),
        ('modifierId', 'modifier_id'),
        ('name', 'name'),
        ('parentId', 'parent_id'),
        ('path', 'path'),
        ('size', 'size'),
        ('spaceId', 'space_id'),
        ('status', 'status'),
        ('type', 'type'),
        ('uuid', 'uuid'),
        ('version', 'version'),
    )

    def __init__(
        self,
        conversation_id: str = None,
        create_time: str = None,
        creator_id: str = None,
        extension: str = None,
        id: str = None,
        modified_time: str = None,
        modifier_id: str = None,
        name: str = None,
        parent_id: str = None,
        path: str = None,
        size: int = None,
        space_id: str = None,
        status: str = None,
        type: str = None,
        uuid: str = None,
        version: int = None,
    ):
        self.conversation_id = conversation_id
        self.create_time = create_time
        self.creator_id = creator_id
        self.extension = extension
        self.id = id
        self.modified_time = modified_time
        self.modifier_id = modifier_id
        self.name = name
        self.parent_id = parent_id
        self.path = path
        self.size = size
        self.space_id = space_id
        self.status = status
        self.type = type
        self.uuid = uuid
        self.version = version

    def validate(self):
        # Plain data holder: nothing to validate.
        pass

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        for wire_key, attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                serialized[wire_key] = value
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        for wire_key, attr in self._FIELDS:
            if payload.get(wire_key) is not None:
                setattr(self, attr, payload.get(wire_key))
        return self
class SendLinkResponseBody(TeaModel):
    """Response payload wrapping the metadata of the linked file."""

    def __init__(
        self,
        file: SendLinkResponseBodyFile = None,
    ):
        self.file = file

    def validate(self):
        # Cascade validation into the nested file model when present.
        if self.file:
            self.file.validate()

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        if self.file is not None:
            serialized['file'] = self.file.to_map()
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        if payload.get('file') is not None:
            self.file = SendLinkResponseBodyFile().from_map(payload['file'])
        return self
class SendLinkResponse(TeaModel):
    """Complete response: HTTP headers, status code and the parsed body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: SendLinkResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        # All three parts are mandatory on a decoded response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.status_code, 'status_code')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        # Honor a map pre-built by the TeaModel base class, if any.
        base = super().to_map()
        if base is not None:
            return base
        serialized = dict()
        if self.headers is not None:
            serialized['headers'] = self.headers
        if self.status_code is not None:
            serialized['statusCode'] = self.status_code
        if self.body is not None:
            serialized['body'] = self.body.to_map()
        return serialized

    def from_map(self, m: dict = None):
        payload = m or dict()
        if payload.get('headers') is not None:
            self.headers = payload.get('headers')
        if payload.get('statusCode') is not None:
            self.status_code = payload.get('statusCode')
        if payload.get('body') is not None:
            self.body = SendLinkResponseBody().from_map(payload['body'])
        return self
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
936fe33888460fe111915ebee493e4b636140d10 | b42957e496e5c9447b858d7382caea83ce9ea431 | /packtml/__init__.py | d3f0e325a23686c69f7eec073c983de5f0695885 | [
"MIT"
] | permissive | PacktPublishing/Supervised-Machine-Learning-with-Python | 153b9f5248fd4ca79896a277c7f703cf5899ac07 | 00d6ce2451547a73e6358d85937f8cbf2af762a4 | refs/heads/master | 2023-02-02T21:20:35.889344 | 2023-01-30T08:34:13 | 2023-01-30T08:34:13 | 187,639,872 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # -*- coding: utf-8 -*-
import os
# global namespace:
from packtml import clustering
from packtml import decision_tree
from packtml import metrics
from packtml import neural_net
from packtml import recommendation
from packtml import regression
from packtml import utils
# set the version
# The package version is kept in a plain-text VERSION file shipped next to
# this __init__.py, so it can be read here and by setup tooling alike.
packtml_location = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(packtml_location, "VERSION")) as vsn:
    __version__ = vsn.read().strip()
# remove from global namespace
# Deleted so that `packtml.os` etc. are not exposed as package attributes.
del os
del packtml_location
del vsn
# Public API of the package: only the submodules listed here.
__all__ = [
    'clustering',
    'decision_tree',
    'metrics',
    'neural_net',
    'recommendation',
    'regression',
    'utils'
]
| [
"packt.suwarnar@gmail.com"
] | packt.suwarnar@gmail.com |
5897ad30e799ace1a772be841481c713f069e46d | 552d688a9f5b39df5791b65f6f09240190337936 | /Core_Python/Data_science/Numpy/Lec1/Assignment_1.py | a13a01832f816c9b4586e1d26100723796e04270 | [] | no_license | dev7796/MVC_Architecture- | b999bab77a7b0674c85cbe9455e9ea2e6f65cf51 | e34ae3bdb1b81501c65390aed77dc8d811baa8f1 | refs/heads/main | 2023-08-16T17:55:42.279859 | 2021-09-12T15:04:00 | 2021-09-12T15:04:00 | 405,671,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,881 | py | import numpy as np
import re
from itertools import islice
# Matrix dimensions are taken interactively from the user.
rows=int(input("Please Enter number of Rows: "))
columns=int(input("Please Enter number of Columns: "))
# Uniform random values in [0, 1) with the requested shape.
nparray = np.random.random((rows,columns))
print(nparray)
'''def slice_from_string(slice_string):
slices1=re.split(",|\[|\]",slice_string)
while ("" in slices1):
slices1.remove("")
ls1 = slices1[0].split(':')
ls2 = slices1[1].split(':')
while ("" in ls1):
ls1.remove("")
while ("" in ls2):
ls2.remove("")
for i in range(len(ls1)):
ls1[i] = int(ls1[i])
for i in range(len(ls2)):
ls2[i] = int(ls2[i])
if len(ls1)>1 and len(ls2)>1:
split_value=nparray[ls1[0]:ls1[1],ls2[0]:ls2[1]]
print(split_value)
print("The shape of the sliced array :", split_value.shape)
elif len(ls1)<=1 and len(ls2)>1:
split_value = nparray[0:ls1[0], ls2[0]:ls2[1]]
print(split_value)
print("The shape of the sliced array :", split_value.shape)
elif len(ls1)>1 and len(ls2)<=1:
split_value = nparray[ls1[0]:ls1[1], 0:ls2[0]]
print(split_value)
print("The shape of the sliced array :",split_value.shape)
else:
print(nparray)
print("The shape of the sliced array :", nparray.shape)
no_of_times=int(input("Please enter the no. of times you want to do slicing: "))
for i in range(no_of_times):
resp = input("Enter 'Y' or 'N' for Slicing: ")
if resp == 'Y' or resp == 'y':
silicing_values = (input("Please enter value for silicing: "))
slice_from_string(silicing_values)
else:
if resp == 'N' or resp == 'n':
break
def mutation(mutate):
slices1 = re.split(",|\[|\]", mutate)
while ("" in slices1):
slices1.remove("")
for i in range(len(slices1)):
slices1[i] = int(slices1[i])
b = np.array(slices1)
print(nparray[np.arange(len(b)), b])
# Mutate one element from each row of a using the indices in b
resp = input("Enter 'Y' or 'N' for Mutating: ")
if resp == 'Y' or resp == 'y':
mutate_values = int(input("Please enter value for mutating ONE element in each row: "))
operation=input("Please enter the operation to be performed for mutation: ")
if '+' in operation:
nparray[np.arange(len(b)), b]+=mutate_values
print(nparray)
elif '-' in operation:
nparray[np.arange(len(b)), b] -= mutate_values
print(nparray)
elif '*' in operation:
nparray[np.arange(len(b)), b] *= mutate_values
print(nparray)
elif '/' in operation:
nparray[np.arange(len(b)), b] /= mutate_values
print(nparray)
elif '**' in operation:
nparray[np.arange(len(b)), b] **= mutate_values
print(nparray)
elif '//' in operation:
nparray[np.arange(len(b)), b] //= mutate_values
print(nparray)
else:
if resp == 'N' or resp == 'n':
print("No input")
mutate=(input("Enter the value of indexes in list form: "))
mutation(mutate)'''
# An example of integer array indexing.
def array_indexing(slice):
    """Integer-array indexing demo on the module-level ``nparray``.

    Parses index numbers out of a string such as ``"[0,1],[2,3]"``; the
    first half selects row indices, the second half column indices, and
    the elements ``nparray[rows, cols]`` are printed.

    Raises:
        ValueError: if the string does not contain an even number of
            indices.  (The original implementation silently dropped the
            leftover index, producing confusing results.)
    """
    tokens = re.split(",|\[|\]", slice)
    # Drop the empty fragments that re.split leaves around delimiters.
    indices = [int(tok) for tok in tokens if tok != ""]
    if len(indices) % 2 != 0:
        raise ValueError(
            "expected an even number of indices (rows then columns), "
            "got %d" % len(indices))
    half = len(indices) // 2
    rows, cols = indices[:half], indices[half:]
    print(nparray[rows, cols])
# Example input: "[0,1],[2,3]" -> rows [0, 1] and columns [2, 3].
slicer=(input("Enter the value of indexes in list form: "))
array_indexing(slicer)
'''def slice_from_string(slice_string):
slices = slice_string.split(',')
if len(slices) > 1:
return [slice_from_string(s.strip()) for s in slices]
return slice(*[int(x) for x in slice_string.split(':')])''' | [
"dev7796@gmail.com"
] | dev7796@gmail.com |
9574c1a6631802cab39ed7556c4b041f29946c38 | ce7e4aca111ddfb9c947d473a0e3ffb3edb3a4e4 | /model/imagenet_multitrain/data.py | 3c0c45fed12e39af2bdb943fa76d3a4ce5ad1cbc | [] | no_license | xarion/masters | 6825a15f7dda3a153dc7cd35d5d9032c7dd32371 | b9eba0c30c66a59486439997eacef4f29031a181 | refs/heads/develop | 2021-01-12T16:50:04.683082 | 2017-09-01T10:08:10 | 2017-09-01T10:08:10 | 71,443,978 | 5 | 0 | null | 2016-11-24T14:57:38 | 2016-10-20T08:56:57 | Python | UTF-8 | Python | false | false | 10,305 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Routine for decoding the CIFAR-10 binary file format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from six.moves import xrange # pylint: disable=redefined-builtin
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24
# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
def read_cifar10(filename_queue):
  """Reads and parses examples from CIFAR10 data files.

  Recommendation: if you want N-way read parallelism, call this function
  N times. This will give you N independent Readers reading different
  files & positions within those files, which will give better mixing of
  examples.

  Args:
    filename_queue: A queue of strings with the filenames to read from.

  Returns:
    An object representing a single example, with the following fields:
      height: number of rows in the result (32)
      width: number of columns in the result (32)
      depth: number of color channels in the result (3)
      key: a scalar string Tensor describing the filename & record number
        for this example.
      label: an int32 Tensor with the label in the range 0..9.
      uint8image: a [height, width, depth] uint8 Tensor with the image data
  """

  class CIFAR10Record(object):
    # Simple attribute container for one decoded example.
    pass
  result = CIFAR10Record()

  # Dimensions of the images in the CIFAR-10 dataset.
  # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
  # input format.
  label_bytes = 1  # 2 for CIFAR-100
  result.height = 32
  result.width = 32
  result.depth = 3
  image_bytes = result.height * result.width * result.depth
  # Every record consists of a label followed by the image, with a
  # fixed number of bytes for each.
  record_bytes = label_bytes + image_bytes

  # Read a record, getting filenames from the filename_queue.  No
  # header or footer in the CIFAR-10 format, so we leave header_bytes
  # and footer_bytes at their default of 0.
  reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
  result.key, value = reader.read(filename_queue)

  # Convert from a string to a vector of uint8 that is record_bytes long.
  # NOTE: the name `record_bytes` is reused here for the decoded tensor.
  record_bytes = tf.decode_raw(value, tf.uint8)

  # The first bytes represent the label, which we convert from uint8->int32.
  result.label = tf.cast(
      tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

  # The remaining bytes after the label represent the image, which we reshape
  # from [depth * height * width] to [depth, height, width].
  depth_major = tf.reshape(
      tf.strided_slice(record_bytes, [label_bytes],
                       [label_bytes + image_bytes]),
      [result.depth, result.height, result.width])
  # Convert from [depth, height, width] to [height, width, depth].
  result.uint8image = tf.transpose(depth_major, [1, 2, 0])

  return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type.float32.
    label: 1-D Tensor of type.int32
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides of batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  # 16 threads each run the upstream preprocessing ops independently.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    # Deterministic FIFO batching, used for evaluation.
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.summary.image('images', images)

  return images, tf.reshape(label_batch, [batch_size])
def distorted_inputs(data_dir, batch_size):
  """Construct distorted input for CIFAR training using the Reader ops.

  Args:
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: if any of the expected data-batch files is missing.
  """
  filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
               for i in xrange(1, 6)]
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  # Read examples from files in the filename queue.
  read_input = read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Image processing for training the network. Note the many random
  # distortions applied to the image.

  # Randomly crop a [height, width] section of the image.
  distorted_image = tf.random_crop(reshaped_image, [height, width, 3])

  # Randomly flip the image horizontally.
  distorted_image = tf.image.random_flip_left_right(distorted_image)

  # Because these operations are not commutative, consider randomizing
  # the order their operation.
  distorted_image = tf.image.random_brightness(distorted_image,
                                               max_delta=63)
  distorted_image = tf.image.random_contrast(distorted_image,
                                             lower=0.2, upper=1.8)

  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_standardization(distorted_image)

  # Set the shapes of tensors.
  float_image.set_shape([height, width, 3])
  read_input.label.set_shape([1])

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                           min_fraction_of_examples_in_queue)
  print('Filling queue with %d CIFAR images before starting to train. '
        'This will take a few minutes.' % min_queue_examples)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples, batch_size,
                                         shuffle=True)
def inputs(eval_data, data_dir, batch_size):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: if any of the expected data files is missing.
  """
  if not eval_data:
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
                 for i in xrange(1, 6)]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
  else:
    filenames = [os.path.join(data_dir, 'test_batch.bin')]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  # Read examples from files in the filename queue.
  read_input = read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Image processing for evaluation.
  # Crop the central [height, width] of the image.
  # No random distortions here, unlike distorted_inputs().
  resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                         height, width)

  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_standardization(resized_image)

  # Set the shapes of tensors.
  float_image.set_shape([height, width, 3])
  read_input.label.set_shape([1])

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(num_examples_per_epoch *
                           min_fraction_of_examples_in_queue)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples, batch_size,
                                         shuffle=False)
| [
"erdicalli@gmail.com"
] | erdicalli@gmail.com |
648453f9015e6f615977b841101a1da4263b24e9 | c94c652ffda7b6d352e40f8a3b3de65b4896b7a3 | /Modules/TestingAndCI/bsearch.py | 18d7c0b797606f66ec9b4c8084f24c0ea54c953c | [
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0",
"BSD-2-Clause"
] | permissive | jputlock/CSCI-49XX-OpenSource | e4d6365a152e814d22bc3af271544029736cb3b7 | 3d3542156339dc8e325718a8c4b87f55f349da8e | refs/heads/master | 2020-06-04T09:40:56.463707 | 2019-07-23T14:54:11 | 2019-07-23T14:54:11 | 191,970,285 | 0 | 0 | NOASSERTION | 2019-06-14T15:53:18 | 2019-06-14T15:53:18 | null | UTF-8 | Python | false | false | 251 | py | def bsearch(a,x):
n = len(a)
i = 0
j = n-1
while (i <= j):
mid = i+(j-i)/2
if (a[mid] == x):
return(1)
if (a[mid]<x):
i = mid+1
else:
j = mid -1
return(0)
| [
"wdturner@gmail.com"
] | wdturner@gmail.com |
2da48d4fe2ab88ad57d4bc2ce4b47d37ade84327 | 00c14f5816c3ef6a9ff5652af89c27c12bcf023c | /example/jspm_0_17/jspm_0_17/urls.py | 9ef86b2113613f0783470d90f157872b78c2522d | [
"MIT",
"ISC"
] | permissive | ilyashupta/django-systemjs | 148fd7de73aeb2cf562a07d3bb392436f3a78010 | f4d26794c06449d4d3ae2a6f7ab0bc550b35b0c7 | refs/heads/master | 2023-04-27T14:41:45.265046 | 2016-09-19T09:15:35 | 2016-09-19T09:15:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | """jspm_0_17 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
# URL routes for the example project: the Django admin under /admin/ and a
# bare TemplateView rendering base.html at the site root.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),  # admin site
    url(r'^$', TemplateView.as_view(template_name='base.html')),  # homepage
]
| [
"sergeimaertens@gmail.com"
] | sergeimaertens@gmail.com |
286d4837a392a3730412cc78c44d91c56603e5b6 | dd8227454b817ccf2ceb24b3dfd4260d4ded7a72 | /scripts/item/consume_2434546.py | fd4214fd1a7ed1410ab093ba89cb8ce96fcf7213 | [
"MIT"
] | permissive | Snewmy/swordie | 0dd3c17808b064c2cb2bd9576b51daf01ae5d686 | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | refs/heads/master | 2023-06-30T21:14:05.225798 | 2021-07-06T14:32:39 | 2021-07-06T14:32:39 | 389,497,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # Scribbler Damage Skin
# Try to register the Scribbler Damage Skin on this account; only announce
# it in chat when the skin was actually added.
skin_added = sm.addDamageSkin(2434546)
if skin_added:
    sm.chat("The Scribbler Damage Skin has been added to your account's damage skin collection.")
| [
"vcalheirosdoc@gmail.com"
] | vcalheirosdoc@gmail.com |
4858680842e0e7d094d838872bbf72d6eb10b4f2 | d9ced71adb4ae21931de9376700de4af540acdad | /principal.py | b1d34cee5d3984b8fd5581898397b656b68f123b | [] | no_license | PauloRobertoVieiraDeCastro/CRUD- | f70adff1784614630a0fc5b1a932c6432ffb5840 | 7ec6fe17c9ba4c78ee277ebdabd37130819abac9 | refs/heads/main | 2023-03-21T15:55:37.561141 | 2021-03-12T17:53:45 | 2021-03-12T17:53:45 | 347,150,590 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,070 | py | from tkinter import *
import sqlite3
from tkinter import ttk
from tkinter import tix
from tkinter import messagebox
from tkinter import scrolledtext
import tkinter as tk
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter, A4
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import SimpleDocTemplate
import webbrowser
from PIL import ImageTk, Image
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
import funcionalidades
import relatorio
root = tix.Tk() #instanciando a classe tk
class Principal(funcionalidades.Funcionalidades, relatorio.GeraRelatorio):
    """Main window of the sample-registration ("Cadastro de amostras") app.

    Builds the whole Tkinter GUI: a notebook with a sample-entry tab and a
    statistics tab, a Treeview listing backed by an SQLite database
    (amostras.db), and the menu bar.  CRUD/search behaviour comes from the
    funcionalidades.Funcionalidades mixin and report generation from
    relatorio.GeraRelatorio (neither is visible in this file).
    """
    def __init__(self):
        """Build every widget, load the listing and enter the Tk main loop."""
        self.root = root  # the module-level tix.Tk() root window
        # StringVars presumably consumed by the mixin form methods -- TODO confirm.
        self.nome_fantasia3 = StringVar()
        self.poco3 = StringVar()
        self.data_rec3 = StringVar()
        self.inf3 = StringVar()
        self.contador3 = StringVar()
        self.texto3 = StringVar()
        self.tipo3 = StringVar()
        self.tela_principal()
        self.frame_da_tela()
        self.w_frame_1()
        self.lista_frame2()
        self.montaTabelas()  # from the mixins (not visible in this file)
        self.select_lista()  # from the mixins (not visible in this file)
        self.Menus()
        self.root.mainloop()  # blocks until the window is closed
    def tela_principal(self):
        """
        Configure the main window: title, colours, geometry and icon.
        """
        self.root.title("Cadastro de amostras")
        self.a = 'slate blue'  # base background colour reused by most widgets
        self.root.configure(background=self.a)#'#1e3743')
        self.root.geometry("1000x700") # initial geometry
        self.root.resizable(True,True) # resizable on both axes
        self.root.maxsize(width=1100,height=750) # maximum dimensions
        self.root.minsize(width=900,height=700)# minimum dimensions
        # swap the window icon
        self.favicon = PhotoImage(file = "petro.png")
        self.root.iconphoto(False,self.favicon)
    def frame_da_tela(self):
        """Lay out the two main frames: frame1 (tabs, top) and frame2 (listing, bottom)."""
        self.frame1 = Frame(self.root, bd=4,bg=self.a) # border width and background colour
        self.frame1.place(relx = 0.01, rely = 0.0, relwidth = 0.98, relheight=0.53)
        self.frame2 = Frame(self.root, bd=4,bg='#dfe3ee',
                           highlightbackground = 'white',
                           highlightthickness=3) # border width, background colour, highlight colour/thickness
        self.frame2.place(relx = 0.01, rely = 0.54, relwidth = 0.98, relheight=0.44)
    def w_frame_1(self):
        """Populate frame1 with a two-tab notebook: sample entry (aba1) and statistics (aba2)."""
        self.abas = ttk.Notebook(self.frame1)
        self.aba1 = Frame(self.abas)
        self.aba2 = Frame(self.abas)
        self.aba1.configure(background='slate blue')
        self.aba2.configure(background="slate blue")
        self.abas.add(self.aba1, text = "Entrada de amostra")
        self.abas.add(self.aba2, text = "Estatísticas")
        self.abas.place(relx=0,rely=0,relwidth=1.0,relheight=1.0)
        # Next sample number = distinct registered names + 1; fall back to 1
        # when the database/table does not exist yet (bare except kept as-is).
        try:
            self.df = self.consulta()
            self.contagem = len(self.df['Nome_Fantasia'].unique())+1
        except:
            self.contagem = 1
        self.vol = []    # StringVars of the per-recipient volume entries
        self.peso = []   # StringVars of the per-recipient weight entries
        self.local = []  # StringVars of the per-recipient location entries
        self.qtd_cads = 30  # vertical offset counter for dynamically added rows
        # Entries and labels -----------------------------------------------------------------------------------------------------------------------------------
        Label(self.aba1,text="Contagem",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.02,rely=0.02)
        #------------------------------------------- sample-counter entry --------------------------------------------
        self.codigo_entry1 = Entry(self.aba1,font = ('verdana', 8),justify='center')
        self.codigo_entry1.place(relx=0.18,rely=0.02)
        self.codigo_entry1.insert(END,self.contagem)
        #--------------------------------------AUTOCOMPLETE----------------------------------------------------------------------------------
        def match_string():
            # Return the entries of self.chiefs that start with the typed prefix.
            hits = []
            got = self.auto.get()
            for item in self.chiefs:
                if item.startswith(got):
                    hits.append(item)
            return hits
        def get_typed(event):
            # On every printable key release, refresh the completion candidates.
            if len(event.keysym) == 1:
                hits = match_string()
                show_hit(hits)
        def show_hit(lst):
            # With a single remaining candidate, auto-fill it into the entry.
            if len(lst) == 1:
                self.auto.set(lst[0])
                detect_pressed.filled = True
        def detect_pressed(event):
            # After an auto-fill, the next printable key trims the completed tail.
            key = event.keysym
            if len(key) == 1 and detect_pressed.filled is True:
                pos = self.f_entry.index(tk.INSERT)
                # NOTE(review): self.pos is never assigned anywhere -- this
                # looks like it should use the local `pos`; as written it
                # would raise AttributeError when triggered. Verify.
                self.f_entry.delete(self.pos, tk.END)
                # NOTE(review): self.rr is replaced by a fresh StringVar here,
                # so self.rrr is always '' and none of the branches below fire.
                self.rr = StringVar()
                self.rrr = self.rr.get()
                if self.rrr == 'Nome Fantasia':
                    self.rrr = 'Nome_Fantasia'
                if self.rrr == 'Poço':
                    self.rrr = 'Data'
                if self.rrr == 'Tipo':
                    self.rrr = 'Nome_Poco'
                if self.rrr == 'Local':
                    self.rrr = 'Local'
        detect_pressed.filled = False
        self.auto = StringVar()
        # Initial candidate list for the autocomplete (best effort: ignore DB errors).
        try:
            self.chiefs = self.consultando('Nome_Fantasia') # initial source column
        except:
            pass
        Label(self.aba1,text="Nome fantasia",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.02,rely=0.1)
        self.fantasia_entry = Entry(self.aba1,font = ('verdana', 8),justify="center",textvariable=self.auto)
        self.fantasia_entry.place(relx=0.18,rely=0.1)
        #self.fantasia_entry.focus_set()
        #self.fantasia_entry.bind('<KeyRelease>', get_typed)
        #self.fantasia_entry.bind('<Key>', detect_pressed)
        #------------------------------------------------------- end of autocomplete-by-name ----------------------------------------------
        Label(self.aba1,text="Data de recebimento",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.02,rely=0.18)
        self.data_entry = Entry(self.aba1,font = ('verdana', 8),justify="center")
        self.data_entry.place(relx=0.18,rely=0.18)
        Label(self.aba1,text="Poço",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.02,rely=0.26)
        self.poco_entry = Entry(self.aba1,font = ('verdana', 8),justify="center")
        self.poco_entry.place(relx=0.18,rely=0.26)
        Label(self.aba1,text="Tipo de amostra",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.02,rely=0.34)
        self.tipo_entry = Entry(self.aba1,font = ('verdana', 8),justify="center")
        self.tipo_entry.place(relx=0.18,rely=0.34)
        #Label(self.root,text="I",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.05,rely=0.24)
        Label(self.aba1,text="Informações extras",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.02,rely=0.42)
        self.textoa = scrolledtext.ScrolledText(self.aba1,bg='white',relief=GROOVE,height=60,width=40,font='TkFixedFont')
        self.textoa.place(relx=0.18,rely=0.42,relwidth= 0.2,relheight= 0.4)
        # Column headers for the two recipient columns (left and right).
        Label(self.aba1,text="Quantidade de recipientes",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.45,rely=0.04)
        Label(self.aba1,text="Peso (kgs)",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.4,rely=0.2)
        Label(self.aba1,text="Volume (L)",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.5,rely=0.2)
        Label(self.aba1,text="Local",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.6,rely=0.2)
        Label(self.aba1,text="Peso (kgs)",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.71,rely=0.2)
        Label(self.aba1,text="Volume (L)",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.81,rely=0.2)
        Label(self.aba1,text="Local",bg=self.a,font = ('Helvetica', 9, 'bold'),fg='white').place(relx=0.91,rely=0.2)
        # Buttons ----------------------------------------------------------------------------------------------------------------------------------------------
        Button(self.aba1,command=self.adicionars,text="Adicionar",width=8,font=('verdana',9,'bold')).place(relx=0.44,rely=0.11)
        Button(self.aba1,command=self.removers,text="Remover",width=8,font=('verdana',9,'bold')).place(relx=0.54,rely=0.11)
        Button(self.aba1,command=self.add_amostra,text="Cadastrar amostra",width=16,font=('verdana',9,'bold'),bg='blue',fg='white').place(relx=0.02,rely=0.6)
        Button(self.aba1,command=self.deleta_amostra,text="Remover amostra",width=16,font=('verdana',9,'bold'),bg='blue',fg='white').place(relx=0.02,rely=0.7)
        Button(self.aba1,command=self.altera_amostra,text="Alterar amostra",width=16,font=('verdana',9,'bold'),bg='blue',fg='white').place(relx=0.02,rely=0.8)
        Button(self.aba1,command=self.busca_amostra,text="Buscar amostra",width=16,font=('verdana',9,'bold'),bg='blue',fg='white').place(relx=0.02,rely=0.9)
        # NOTE(review): tooltip text below is never attached to a Balloon widget.
        texto_balao_limpar = "Clique aqui para buscar amostra pelo seu nome fantasia"
        # first (fixed) recipient row ---------------------------------------------------------------------------------------------------------------------------
        self.peso_am = StringVar()
        self.vol_am = StringVar()
        self.local_am = StringVar()
        self.e2 = Entry(self.aba1,font = ("verdana",8), width = 7,relief = "sunken",justify = 'center'
                        ,textvariable = self.peso_am)
        self.e2.place(relx=0.405,rely=0.26 + 0.001)
        self.peso.append(self.peso_am)
        self.v2 = Entry(self.aba1,font = ("verdana",8), width = 7,relief = "sunken",justify = 'center'
                        ,textvariable = self.vol_am)
        self.v2.place(relx=0.505,rely=0.26 + 0.001)
        self.vol.append(self.vol_am)
        self.a2 = Entry(self.aba1,font = ("verdana",8), width = 7,relief = "sunken",justify = 'center'
                        ,textvariable = self.local_am)
        self.a2.place(relx=0.595,rely=0.26 + 0.001)
        self.local.append(self.local_am)
        #----------------------------------------------------------------------------------------------------------------------------------------------------------------------
        #---------------------------------------------------------------- tab 2 - statistics ---------------------------------------------------------------------------------
        #----------------------------------------------------------------------------------------------------------------------------------------------------------------------
        Label(self.aba2,text="Contagem",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.02)
        self.codigo_entry2 = Entry(self.aba2,font = ('verdana', 8),justify='center')
        self.codigo_entry2.place(relx=0.18,rely=0.02)
        self.codigo_entry2.insert(END,self.contagem)
        Label(self.aba2,text="Filtrar por",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.1)
        self.rr = StringVar()
        self.met1 = ['Nome Fantasia','Tipo','Local']
        Spinbox(self.aba2,values=self.met1,justify = "center",textvariable=self.rr,command = self.evento).place(relx = 0.18,rely = 0.1,relwidth= 0.15)
        Label(self.aba2,text="Nome fantasia",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.18)
        self.f_entry = Entry(self.aba2,font = ('verdana', 8),justify="center",textvariable=self.auto)
        self.f_entry.place(relx=0.18,rely=0.18)
        self.f_entry.focus_set()
        #self.f_entry.bind('<Enter>',self.volume_parcial)
        self.f_entry.bind('<KeyRelease>', get_typed)
        self.f_entry.bind('<Key>', detect_pressed)
        Label(self.aba2,text="Volume total disponível (L)",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.26)
        Label(self.aba2,text=self.volume_total(),bg=self.a,font = ('Helvetica', 11, 'bold'),fg='white').place(relx=0.24,rely=0.26)
        Label(self.aba2,text="Volume total da amostra (L)",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.34)
        Label(self.aba2,text="Massa total da amostra (Kgs)",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.42)
        Button(self.aba2,command=self.volume_parcial,text="Calcular",width=16,font=('verdana',9,'bold'),bg='blue',fg='white').place(relx=0.13,rely=0.6)
        # Chart-type selectors; each Spinbox redraws via self.graficando.
        self.tipo_graf = StringVar()
        self.tipo_graf2 = StringVar()
        self.met1 = ['5 Maiores Volumes por Nome','5 Maiores Volumes por Poço',
                     "5 Menores Volumes por Nome","Volumes acumulado por data","Volumes de amostra por ano de entrada"]
        Spinbox(self.aba2,values=self.met1,justify = "center",textvariable=self.tipo_graf,command = self.graficando).place(relx = 0.41,rely = 0.9,relwidth= 0.23)
        # NOTE(review): self.tipo_graf2 is re-created here (already set above).
        self.tipo_graf2 = StringVar()
        self.met2 = ['Volume acumulado por Tipo','Volume acumulado por Local',"Quantidade de amostra por Local",
                     "Proporção de Volume por Tipo","Proporção de Volume por Local"]
        Spinbox(self.aba2,values=self.met2,justify = "center",textvariable=self.tipo_graf2,command = self.graficando).place(relx = 0.74,rely = 0.9,relwidth= 0.23)
        # Two embedded matplotlib figures rendered into the statistics tab.
        self.fig = Figure(figsize=(6.5,5.5),dpi=53)
        self.graf = self.fig.add_subplot(111)
        self.fig2 = Figure(figsize=(6.5,5.5),dpi=53)
        self.graf2 = self.fig2.add_subplot(111)
        self.graficando()
        self.canvas = FigureCanvasTkAgg(self.fig,master=self.aba2)
        self.canvas.get_tk_widget().place(relx=0.66,rely=0.0)
        self.canvas.draw()
        self.canvas2 = FigureCanvasTkAgg(self.fig2,master=self.aba2)
        self.canvas2.get_tk_widget().place(relx=0.33,rely=0.0)
        self.canvas2.draw()
        #Button(self.aba2,command=self.busca_amostra,text="Buscar amostra",width=16,font=('verdana',9,'bold'),bg='blue',fg='white').place(relx=0.02,rely=0.9)
        #----------------------------------------------------------------------------------------------------------------------------------------------------------------------
        #----------------------------------------------------------------------------------------------------------------------------------------------------------------------
    def evento(self): # switch the autocomplete source list based on the "Filtrar por" spinbox
        """Rebuild the statistics-tab filter entry and its autocomplete source."""
        conn = sqlite3.connect("amostras.db")
        df = pd.read_sql_query("select * from amostras",conn)
        def match_string():
            # Candidates from self.chiefs that start with the typed prefix.
            hits = []
            got = self.auto.get()
            for item in self.chiefs:
                if item.startswith(got):
                    hits.append(item)
            return hits
        def get_typed(event):
            # Refresh candidates on every printable key release.
            if len(event.keysym) == 1:
                hits = match_string()
                show_hit(hits)
        def show_hit(lst):
            # Auto-fill when only one candidate remains.
            if len(lst) == 1:
                self.auto.set(lst[0])
                detect_pressed.filled = True
        def detect_pressed(event):
            key = event.keysym
            if len(key) == 1 and detect_pressed.filled is True:
                pos = self.f_entry.index(tk.INSERT)
                # NOTE(review): same suspected bug as in w_frame_1 -- self.pos
                # is never assigned; the local `pos` was probably intended.
                self.f_entry.delete(self.pos, tk.END)
                detect_pressed.filled = False
        # Map the spinbox label to the database column used for completion.
        self.rrr = self.rr.get()
        if self.rrr == 'Nome Fantasia':
            self.rrr = 'Nome_Fantasia'
            self.chiefs = self.consultando(self.rrr)
            #print('self.chiefs',self.chiefs)
            # Blank label overwrites the previous caption before re-labelling.
            Label(self.aba2, text="              ",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.18,relwidth=0.12)
            Label(self.aba2,text="Nome fantasia",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.18)
            self.auto = StringVar()
            self.f_entry = Entry(self.aba2,font = ('verdana', 8),justify="center",textvariable=self.auto)
            self.f_entry.place(relx=0.18,rely=0.18)
            self.f_entry.place(relx=0.18,rely=0.18)
            self.f_entry.bind('<KeyRelease>', get_typed)
            self.f_entry.bind('<Key>', detect_pressed)
        if self.rrr == 'Tipo':
            self.rrr = 'Nome_Poco'
            self.chiefs = self.consultando(self.rrr)
            self.auto = StringVar()
            Label(self.aba2, text="              ",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.18,relwidth=0.12)
            Label(self.aba2,text="Tipo de amostra",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.18)
            self.f_entry = Entry(self.aba2,font = ('verdana', 8),justify="center",textvariable=self.auto)
            self.f_entry.place(relx=0.18,rely=0.18)
            self.f_entry.bind('<KeyRelease>', get_typed)
            self.f_entry.bind('<Key>', detect_pressed)
        if self.rrr == 'Local':
            self.rrr = 'Local'
            self.chiefs = self.consultando(self.rrr)
            self.auto = StringVar()
            Label(self.aba2, text="              ",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.18,relwidth=0.12)
            Label(self.aba2,text="Local de amostra",bg=self.a,font = ('Helvetica', 10, 'bold'),fg='white').place(relx=0.02,rely=0.18)
            self.f_entry = Entry(self.aba2,font = ('verdana', 8),justify="center",textvariable=self.auto)
            self.f_entry.place(relx=0.18,rely=0.18)
            self.f_entry.bind('<KeyRelease>', get_typed)
            self.f_entry.bind('<Key>', detect_pressed)
    #------------------------------ adding a recipient row -----------------------------------------------------------------------------------------------------------------
    def adicionars(self):
        """Append one weight/volume/location entry row (left column first, then right)."""
        if self.qtd_cads <=300:
            # Left column: rows offset vertically by qtd_cads steps of 30.
            self.peso_am = StringVar()
            self.vol_am = StringVar()
            self.local_am = StringVar()
            self.e1 = Entry(self.aba1,font = ("verdana",8), width = 7,relief = "sunken",justify = 'center'
                            ,textvariable = self.peso_am)
            self.e1.place(relx=0.405,rely=0.26 + 0.0022*self.qtd_cads)
            self.peso.append(self.peso_am)
            self.v1 = Entry(self.aba1,font = ("verdana",8), width = 7,relief = "sunken",justify = 'center'
                            ,textvariable = self.vol_am)
            self.v1.place(relx=0.505,rely=0.26 + 0.0022*self.qtd_cads)
            self.vol.append(self.vol_am)
            self.a1 = Entry(self.aba1,font = ("verdana",8), width = 7,relief = "sunken",justify = 'center'
                            ,textvariable = self.local_am)
            self.a1.place(relx=0.595,rely=0.26 + 0.0022*self.qtd_cads)
            self.local.append(self.local_am)
            self.qtd_cads += 30
        if self.qtd_cads >300 and self.qtd_cads <= 630:
            # Right column: same layout shifted right, offset re-based at 330.
            self.peso_am = StringVar()
            self.vol_am = StringVar()
            self.local_am = StringVar()
            self.e1 = Entry(self.aba1,font = ("verdana",8), width = 7,relief = "sunken",justify = 'center'
                            ,textvariable = self.peso_am)
            self.e1.place(relx=0.715,rely=0.26 + 0.0022*(self.qtd_cads-330))
            self.peso.append(self.peso_am)
            self.v1 = Entry(self.aba1,font = ("verdana",8), width = 7,relief = "sunken",justify = 'center'
                            ,textvariable = self.vol_am)
            self.v1.place(relx=0.815,rely=0.26 + 0.0022*(self.qtd_cads-330))
            self.vol.append(self.vol_am)
            self.a1 = Entry(self.aba1,font = ("verdana",8), width = 7,relief = "sunken",justify = 'center'
                            ,textvariable = self.local_am)
            self.a1.place(relx=0.905,rely=0.26 + 0.0022*(self.qtd_cads-330))
            self.local.append(self.local_am)
            self.qtd_cads += 30
    #------------------------------ removing a recipient row ---------------------------------------------------------------------------------------------------
    def removers(self):
        """Remove the most recently added entry row, covering it with blank labels."""
        if self.qtd_cads >30:
            self.qtd_cads -= 30
        if len(self.peso)>0:
            if self.qtd_cads<=300:
                # Left column: drop the last StringVars and paint over the widgets.
                del(self.peso[-1])
                del(self.vol[-1])
                del(self.local[-1])
                Label(self.aba1,text = "          ",bg = self.a,font = ("Arial",10), width = 10).place(relx=0.4,rely = 0.26 + 0.0022*self.qtd_cads)
                Label(self.aba1,text = "          ",bg = self.a,font = ("Arial",10), width = 10).place(relx=0.5,rely = 0.26 + 0.0022*self.qtd_cads)
                Label(self.aba1,text = "          ",bg = self.a,font = ("Arial",10), width = 10).place(relx=0.59,rely = 0.26 + 0.0022*self.qtd_cads)
            if self.qtd_cads>300 and self.qtd_cads<=630:
                # Right column: same, with the re-based offset.
                del(self.peso[-1])
                del(self.vol[-1])
                del(self.local[-1])
                Label(self.aba1,text = "          ",bg = self.a,font = ("Arial",10), width = 10).place(relx=0.71,rely = 0.26 + 0.0022*(self.qtd_cads-330))
                Label(self.aba1,text = "          ",bg = self.a,font = ("Arial",10), width = 10).place(relx=0.81,rely = 0.26 + 0.0022*(self.qtd_cads-330))
                Label(self.aba1,text = "          ",bg = self.a,font = ("Arial",10), width = 8).place(relx=0.9,rely = 0.26 + 0.0022*(self.qtd_cads-330))
    #---------------------------------------------------------------------------------------------------------------------------------------------------------------------
    #---------------------------------------------------------------------------- listing table backed by the database --------------------------------------------------
    def lista_frame2(self):
        """Build the Treeview listing of registered samples inside frame2."""
        # build the table
        style = ttk.Style()
        style.configure("Treeview.Heading", font=("Helvetica",10,"bold"),bg="blue",justify='center')
        self.listaCli = ttk.Treeview(self.frame2, height=2,
                                     column=("Codigo","Nome Fantasia","Data","Poço","Tipo","Peso","Volume","Local"),selectmode='browse')
        self.listaCli["columns"] = ("1", "2","3","4","5","6","7","8")
        self.listaCli['show'] = 'headings'
        self.listaCli.heading("#1", text="Codigo")
        self.listaCli.heading("#2", text="Nome Fantasia")
        self.listaCli.heading("#3", text="Data")
        self.listaCli.heading("#4", text="Poço")
        self.listaCli.heading("#5", text="Tipo")
        self.listaCli.heading("#6", text="Peso (kgs)")
        self.listaCli.heading("#7", text="Volume (L)")
        self.listaCli.heading("#8", text="Local")
        self.listaCli.column("#1", width=70, anchor='c')
        self.listaCli.column("#2", width=160, anchor='c')
        self.listaCli.column("#3", width=120, anchor='c')
        self.listaCli.column("#4", width=120, anchor='c')
        self.listaCli.column("#5", width=100, anchor='c')
        self.listaCli.column("#6", width=100, anchor='c')
        self.listaCli.column("#7", width=100, anchor='c')
        self.listaCli.column("#8", width=100, anchor='c')
        self.listaCli.place(relx=0.01, rely=0.05, relwidth=0.95, relheight=0.88)
        self.scroolLista = Scrollbar(self.frame2, orient='vertical',command=self.listaCli.yview)
        self.listaCli.configure(yscroll=self.scroolLista.set)
        self.scroolLista.place(relx=0.96, rely=0.05, relwidth=0.03, relheight=0.88)
        # bind double-click on the table to its handler
        self.listaCli.bind('<Double-1>',self.OnDoubleClick) # calls the handler when a row is double-clicked
    def Menus(self):
        """Build the menu bar: app options and report actions."""
        menubar = Menu(self.root)
        self.root.config(menu=menubar)
        filemenu = Menu(menubar)
        filemenu2 = Menu(menubar)
        def Quit():
            # Close the window and end mainloop.
            self.root.destroy()
        menubar.add_cascade(label = "Opções", menu = filemenu) # cascade menus on the menu bar
        menubar.add_cascade(label = "Relatórios", menu = filemenu2)
        filemenu.add_command(label="Sair",command = Quit) # entries of the options menu
        filemenu.add_command(label="Limpa Tela",command = self.limpa_amostra)
        filemenu2.add_command(label="Imprimir relatório",command = self.geraRelatAmostra)
        filemenu2.add_command(label="Download de cadastro de amostras",command = self.download_excel)
Principal()
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.