blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8852e9dcd8cde183a336da575c9de3ddf255095c | 15a2a8c612545e61dab18a5d0673b1cef95a9638 | /Part/神龙天女.py | a09f4df92188101b24dd402950e0a0ce29b7c469 | [] | no_license | YICHENG-LAI/DNFCalculating | 6fa10b692580dad119446307508a3bf32ff46d1a | 426375e4e0034e435a8f38974ce81323c8ea7f9c | refs/heads/master | 2022-11-17T00:18:06.650791 | 2020-07-05T07:28:50 | 2020-07-05T07:28:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,360 | py | from PublicReference.base import *
class 神龙天女主动技能(主动技能):
    # Shared base for this character's active skills: overrides the effective
    # cooldown to include the rosary (念珠) weapon's 1.05 factor.
    def 等效CD(self, 武器类型):
        # CD scaled by the recovery stat (self.恢复, inherited — TODO confirm
        # it is always non-zero), times the 1.05 念珠 factor, one decimal place.
        return round(self.CD / self.恢复 * 1.05, 1)
    # rosary (念珠) multiplier: 1.05
class 神龙天女技能0(神龙天女主动技能):
名称 = '罪业加身'
所在等级 = 10
等级上限 = 60
基础等级 = 48
基础 = 2014 - 204.4468
成长 = 204.4468
CD = 6.0
TP成长 = 0.08
TP上限 = 7
class 神龙天女技能1(神龙天女主动技能):
名称 = '唤雷符'
所在等级 = 15
等级上限 = 60
基础等级 = 46
基础 = 1721 - 174.644
成长 = 174.644
CD = 5.0
TP成长 = 0.08
TP上限 = 7
class 神龙天女技能2(神龙天女主动技能):
名称 = '念珠连射'
备注 = '(TP为基础精通)'
所在等级 = 15
等级上限 = 1
基础等级 = 1
基础 = 9195.58 / 9.362
成长 = 0
CD = 1.0
TP成长 = 0.1
TP上限 = 5
class 神龙天女技能3(神龙天女主动技能):
名称 = '木槵子经'
所在等级 = 15
等级上限 = 60
基础等级 = 46
基础 = 1602 - 163.6
成长 = 163.6
CD = 4.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能4(神龙天女主动技能):
名称 = '束灵符'
所在等级 = 20
等级上限 = 60
基础等级 = 43
基础 = 2052 - 208.214
成长 = 208.214
CD = 7.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能5(神龙天女主动技能):
    # 驱邪咒 — unlike the other actives, its TP bonus is a per-level lookup
    # table (TP倍率) rather than the linear TP成长 formula, so the damage
    # percentage computation is overridden here.
    名称 = '驱邪咒'
    所在等级 = 25
    等级上限 = 60
    基础等级 = 41
    基础 = 5100 - 519
    成长 = 519
    CD = 12.0
    TP上限 = 5
    TP倍率 = [1, 1.125, 1.228, 1.330, 1.433, 1.535]
    def 等效百分比(self, 武器类型):
        # Total percent at the current skill level, scaled by the TP table
        # entry for the current TP level and the (buffable) 倍率 factor.
        # self.等级 / self.TP等级 / self.倍率 are inherited from 主动技能.
        if self.等级 == 0:
            return 0
        else:
            return int((self.基础 + self.成长 * self.等级)* self.TP倍率[self.TP等级] * self.倍率)
class 神龙天女技能6(被动技能):
名称 = '祈雨祭'
所在等级 = 25
等级上限 = 20
基础等级 = 10
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.00 + 0.02 * self.等级, 5)
class 神龙天女技能7(被动技能):
    # 神术强化 — damage-multiplier passive with a two-segment growth curve.
    名称 = '神术强化'
    所在等级 = 30
    等级上限 = 20
    基础等级 = 10
    def 加成倍率(self, 武器类型):
        # NOTE(review): at 等级 == 0 this returns 1.05, not 1.0 as the sibling
        # passives (e.g. 祈雨祭) do — confirm against game data whether a
        # level-0 skill should really keep the 5% base bonus.
        if self.等级 <= 10:
            return round(1.05 + 0.015 * self.等级, 5)
        else:
            return round(1.00 + 0.02 * self.等级, 5)
class 神龙天女技能8(神龙天女主动技能):
名称 = '和合之玉'
所在等级 = 30
等级上限 = 60
基础等级 = 38
基础 = 5233 - 531.108
成长 = 531.108
CD = 15.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能9(神龙天女主动技能):
名称 = '聚魂吸星符'
所在等级 = 35
等级上限 = 60
基础等级 = 36
基础 = 6004 - 609.629
成长 = 609.629
CD = 15.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.14
self.CD *= 0.95
class 神龙天女技能10(神龙天女主动技能):
名称 = '龙魂之怒'
所在等级 = 40
等级上限 = 60
基础等级 = 33
基础 = 8116 - 823.406
成长 = 823.406
CD = 20.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能11(神龙天女主动技能):
名称 = '百八念珠'
所在等级 = 40
等级上限 = 60
基础等级 = 33
基础 = 13060 - 1326.25
成长 = 1326.25
CD = 25.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.18
self.CD *= 0.83
class 神龙天女技能12(神龙天女主动技能):
名称 = '不动珠箔阵'
所在等级 = 45
等级上限 = 60
基础等级 = 31
基础 = 16138 - 1635.567
成长 = 1635.567
CD = 45.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.09
self.CD *= 0.9
class 神龙天女技能13(神龙天女主动技能):
名称 = '神龙如意珠'
备注 = '(1次)'
是否主动 = 0
所在等级 = 48
等级上限 = 40
基础等级 = 20
基础 = 526 - 83.947
成长 = 83.947
CD = 0.5
关联技能 = ['所有']
def 等效CD(self, 武器类型):
return 0.5
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.115 + 0.015 * self.等级, 5)
class 神龙天女技能14(神龙天女主动技能):
名称 = '神谕:神龙雷雨祭'
所在等级 = 50
等级上限 = 40
基础等级 = 12
基础 = 45113 - 10407
成长 = 10407
CD = 140
class 神龙天女技能15(神龙天女主动技能):
名称 = '因果业火符'
所在等级 = 60
等级上限 = 40
基础等级 = 23
基础 = 13346 - 1354.864
成长 = 1354.864
CD = 30.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.24
class 神龙天女技能16(神龙天女主动技能):
名称 = '夺命大念阵'
所在等级 = 70
等级上限 = 40
基础等级 = 18
基础 = 24291 - 2464.235
成长 = 2464.235
CD = 50.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.24
class 神龙天女技能17(被动技能):
名称 = '龙神之力'
所在等级 = 75
等级上限 = 40
基础等级 = 11
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.23 + 0.02 * self.等级, 5)
class 神龙天女技能18(神龙天女主动技能):
名称 = '退魔阴阳符'
所在等级 = 75
等级上限 = 40
基础等级 = 16
基础 = 42399 - 4303.067
成长 = 4303.067
CD = 40.0
class 神龙天女技能19(神龙天女主动技能):
名称 = '天坠阴阳玉'
所在等级 = 80
等级上限 = 40
基础等级 = 13
基础 = 40585 - 4117.917
成长 = 4117.917
CD = 45.0
class 神龙天女技能20(神龙天女主动技能):
名称 = '龙威如狱·龙恩如海'
所在等级 = 85
等级上限 = 40
基础等级 = 5
基础 = 92783 - 21518
成长 = 21518
CD = 180.0
class 神龙天女技能21(被动技能):
名称 = '卓越之力'
所在等级 = 95
等级上限 = 40
基础等级 = 4
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.18 + 0.02 * self.等级, 5)
class 神龙天女技能22(被动技能):
名称 = '超卓之心'
所在等级 = 95
等级上限 = 11
基础等级 = 1
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.045 + 0.005 * self.等级, 5)
class 神龙天女技能23(被动技能):
名称 = '觉醒之抉择'
所在等级 = 100
等级上限 = 40
基础等级 = 2
关联技能 = ['无']
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.10 + 0.05 * self.等级, 5)
class 神龙天女技能24(被动技能):
名称 = '基础精通'
所在等级 = 1
等级上限 = 200
基础等级 = 100
关联技能 = ['念珠连射']
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(0.463 + 0.089 * self.等级, 5)
# ---- module-level skill registry -----------------------------------------
# Instantiate 神龙天女技能0, 神龙天女技能1, ... in order until the numbering
# stops. The original used exec() inside a bare try/except, which silently
# swallowed any error raised while constructing a skill; an explicit
# globals() lookup keeps the same behaviour without hiding real bugs.
神龙天女技能列表 = []
_技能编号 = 0
while True:
    _技能类 = globals().get('神龙天女技能' + str(_技能编号))
    if _技能类 is None:
        break
    神龙天女技能列表.append(_技能类())
    _技能编号 += 1
# Map skill display name -> position in 神龙天女技能列表.
神龙天女技能序号 = {技能.名称: 序号 for 序号, 技能 in enumerate(神龙天女技能列表)}
# Indices of the three awakening skills, identified by the level they unlock at.
神龙天女一觉序号 = 0
神龙天女二觉序号 = 0
神龙天女三觉序号 = 0
for 技能 in 神龙天女技能列表:
    if 技能.所在等级 == 50:
        神龙天女一觉序号 = 神龙天女技能序号[技能.名称]
    if 技能.所在等级 == 85:
        神龙天女二觉序号 = 神龙天女技能序号[技能.名称]
    if 技能.所在等级 == 100:
        神龙天女三觉序号 = 神龙天女技能序号[技能.名称]
# Talisman (护石) choices: damaging skills that have a talisman.
神龙天女护石选项 = ['无']
for 技能 in 神龙天女技能列表:
    if 技能.是否有伤害 == 1 and 技能.是否有护石 == 1:
        神龙天女护石选项.append(技能.名称)
# Rune (符文) choices: damaging skills learned between level 20 and 80,
# excluding the level-50 awakening.
神龙天女符文选项 = ['无']
for 技能 in 神龙天女技能列表:
    if 20 <= 技能.所在等级 <= 80 and 技能.所在等级 != 50 and 技能.是否有伤害 == 1:
        神龙天女符文选项.append(技能.名称)
class 神龙天女角色属性(角色属性):
    # Static character sheet for 神龙天女.
    职业名称 = '神龙天女'
    武器选项 = ['念珠']
    # Available damage types: physical %, magic %, physical fixed, magic fixed.
    伤害类型选择 = ['魔法百分比']
    # Default damage type.
    伤害类型 = '魔法百分比'
    防具类型 = '布甲'
    防具精通属性 = ['智力']
    主BUFF = 2.08
    # Base stats (awakening bonuses included).
    基础力量 = 793.0
    基础智力 = 952.0
    # Values the party-buffer (奶) bonuses are applied to.
    力量 = 基础力量
    智力 = 基础智力
    # Character base + awakening.
    物理攻击力 = 65.0
    魔法攻击力 = 65.0
    独立攻击力 = 1045.0
    火属性强化 = 13
    冰属性强化 = 13
    光属性强化 = 13
    暗属性强化 = 13
    远古记忆 = 0
    def __init__(self):
        # Deep-copy the shared module-level templates so each instance can be
        # mutated (skill levels, TP, talismans) without affecting the others.
        self.技能栏= deepcopy(神龙天女技能列表)
        self.技能序号= deepcopy(神龙天女技能序号)
class 神龙天女(角色窗口):
def 窗口属性输入(self):
self.初始属性 = 神龙天女角色属性()
self.角色属性A = 神龙天女角色属性()
self.角色属性B = 神龙天女角色属性()
self.一觉序号 = 神龙天女一觉序号
self.二觉序号 = 神龙天女二觉序号
self.三觉序号 = 神龙天女三觉序号
self.护石选项 = deepcopy(神龙天女护石选项)
self.符文选项 = deepcopy(神龙天女符文选项) | [
"wxh_email@yeah.net"
] | wxh_email@yeah.net |
d33b2b4cab54b838414fd70c755f3bcd6fb1580f | 5d34d74965504c363dc294c1ba97a46393759995 | /channels/tech_weekly_radar/app.py | c5371872c92041105e68e3e47f6e22824e230e65 | [
"MIT"
] | permissive | Nalorokk/reddit2telegram | 7f898b7d17771e9de98c7f176a5a1d071f6d47d9 | 28bfc1271f40b219ee7a34e8338fa93f0d44cbd2 | refs/heads/master | 2020-03-18T08:29:33.946768 | 2018-05-23T04:25:52 | 2018-05-23T04:25:52 | 134,513,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,893 | py | #encoding:utf-8
import csv
import importlib
import random
import datetime
import pymongo
import yaml
from utils import SupplyResult
subreddit = 'all'
t_channel = '@r_channels'
def get_active_period(r2t, channel_name):
    """Number of days between the oldest and newest stats records of a channel."""
    query = {'channel': channel_name.lower()}
    oldest_cursor = r2t.stats.find(query).sort([('ts', pymongo.ASCENDING)]).limit(1)
    earliest_ts = oldest_cursor.next()['ts']
    newest_cursor = r2t.stats.find(query).sort([('ts', pymongo.DESCENDING)]).limit(1)
    latest_ts = newest_cursor.next()['ts']
    return (latest_ts - earliest_ts).days
def get_newly_active(r2t, channels_list):
    """Channels whose stats history spans 31 days or fewer (i.e. about a month old)."""
    return [channel for channel in channels_list
            if get_active_period(r2t, channel) <= 31]
def get_top_growers_for_last_week(r2t, channels_list):
    # Channels that gained at least 10 members over the last 7 days,
    # best three first (by absolute growth, descending).
    top_growers = dict()
    now = datetime.datetime.now()
    for channel in channels_list:
        # Oldest record within the last week that carries a members count.
        week_ago_cursor = r2t.stats.find({
            'channel': channel.lower(),
            'ts': {'$gte': now - datetime.timedelta(days=7)}
        }).sort([('ts', pymongo.ASCENDING)]).limit(100)
        for stat_record in week_ago_cursor:
            if 'members_cnt' in stat_record:
                week_ago_members_cnt = stat_record['members_cnt']
                break
        # Newest record overall that carries a members count.
        current_cursor = r2t.stats.find({'channel': channel.lower()}).sort([('ts', pymongo.DESCENDING)]).limit(100)
        for stat_record in current_cursor:
            if 'members_cnt' in stat_record:
                current_members_cnt = stat_record['members_cnt']
                break
        # NOTE(review): if no record with 'members_cnt' exists in a window,
        # the variable stays unbound (NameError) or silently reuses the
        # previous channel's value — presumably every active channel has such
        # records; confirm against production data.
        grow = current_members_cnt - week_ago_members_cnt
        if grow >= 10:
            top_growers[channel] = grow
    return sorted(top_growers, key=top_growers.get, reverse=True)[:3]
def send_post(submission, r2t):
    # Weekly digest post: ignores `submission` and builds a status message
    # about all registered channels instead of reposting reddit content.
    config_filename = 'configs/prod.yml'
    with open(config_filename) as config_file:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # (PyYAML >= 5.1) and unsafe on untrusted input; the file is local,
        # but yaml.safe_load would be the safer choice.
        config = yaml.load(config_file.read())
    # Collect every channel handle from the cron TSV, importing each channel's
    # app module to read its `t_channel`; skip the meta/test channels.
    channels_list = list()
    with open(config['cron_file']) as tsv_file:
        tsv_reader = csv.DictReader(tsv_file, delimiter='\t')
        for row in tsv_reader:
            submodule_name = row['submodule_name']
            submodule = importlib.import_module('channels.{}.app'.format(submodule_name))
            channel_name = submodule.t_channel
            if ('@' in channel_name) and (channel_name not in ['@r_channels_test', '@r_channels']):
                channels_list.append(channel_name)
    newly_active = get_newly_active(r2t, channels_list)
    text_to_send = '<b>Weekend news</b>\n\n'
    if len(newly_active) > 0:
        text_to_send += '🎉 Welcome to newly active channels: {channels_list}. 🎈🎈\n\n'.format(channels_list=', '.join(newly_active))
    # Channel of the week is picked uniformly at random.
    text_to_send += '🏆 Channel of the week: {channel_name}. Join and enjoy!\n\n'.format(channel_name=random.choice(channels_list))
    top_growers = get_top_growers_for_last_week(r2t, channels_list)
    if len(top_growers) > 0:
        text_to_send += '🔥 Hottest channels of the week: {channels}.\n\n'.format(channels=', '.join(top_growers))
    # Shuffled, zero-padded numbered listing (01., 02., ...) of all channels.
    list_of_channels = ['{n}. {channel}'.format(n=str(i + 1).zfill(2), channel=channel)
                        for i, channel in enumerate(random.sample(channels_list, k=len(channels_list)))]
    text_to_send += '⬇️ All active channels:\n{list_of_channels}\n\n'.format(list_of_channels='\n'.join(list_of_channels))
    text_to_send += '🙋\nQ: How can I help?\nA: Promote your favorite channels!\n\n'
    text_to_send += 'Q: How to make similar channels?\nA: Ask here or use manual at https://github.com/Fillll/reddit2telegram.\n\n'
    text_to_send += 'Q: Where to donate?\nA: http://bit.ly/r2t_donate'
    r2t.send_text(text_to_send, parse_mode='HTML')
    # It's not a proper supply, so just stop.
    return SupplyResult.STOP_THIS_SUPPLY
| [
"git@fillll.ru"
] | git@fillll.ru |
907f98fef1b84d6a040c99b9d5262c3edc00a844 | 2314ca98a6ac70de728aa8fbe8dd921c785606d8 | /gibbs.py | 485277d4238d3c172cc7850615c9f8f501971069 | [] | no_license | Khoa100/RegularCode | 479e19664eb94185634d7bfff2bcfe8e3c96f1bd | 4122432c92ef85956ae5120cf7780a82ba8e1e2e | refs/heads/master | 2020-05-29T18:29:41.778738 | 2019-05-29T22:42:44 | 2019-05-29T22:42:44 | 189,301,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,425 | py | # -*- coding: utf-8 -*-
"""
@author: Minhkhoa Vu, 014007797
"""
import sys
import random as r
import math
from functools import reduce
""" To understand how the reduce function works, I went to this link:
https://www.geeksforgeeks.org/reduce-in-python/ """
""" A Bayesian Network is not necessary because we already have information
about each of the nodes. For example, we know that the markov blanket of 'cloudy' is
'sprinkler' and 'rain'. The parents of the node 'rain' is cloudy.
Wetgrass has
In addition, this Bayesian network has been illustrated and
discussed in lecture. As a result, no object-oriented programming is necessary for this
project."""
parentData = {
'cloudy': [],
'sprinkler': ['cloudy'],
'rain': ['cloudy'],
'wetgrass': ['sprinkler', 'rain']
}
childrenData = {
'cloudy': ['sprinkler', 'rain'],
'sprinkler': ['wetgrass'],
'rain': ['wetgrass'],
'wetgrass': []
}
"""these 4 functions return values based on what is provided;
cloudyConstant() is an exception"""
def cloudyConstant():
    """Prior P(cloudy = true); 'cloudy' is the root node and has no parents."""
    return 0.5
def getSprinklerVal(cloudy):
    """Conditional probability P(sprinkler = true | cloudy)."""
    if cloudy:
        return 0.1
    return 0.5
def getRainVal(cloudy):
    """Conditional probability P(rain = true | cloudy)."""
    if cloudy:
        return 0.8
    return 0.2
def getWetgrassVal(sprinkler, rain):
    """P(wetgrass = true | sprinkler, rain) for the classic sprinkler network."""
    if sprinkler and rain:
        return 0.99
    if sprinkler or rain:
        # exactly one of the two causes is active
        return 0.90
    # neither sprinkler nor rain: the grass cannot be wet
    return 0.00
#convert parent list to a dictionary and return it
def convertVarListToMap(variables, state):
varDictionary = {} #new dictionary to be returned
for curVar in variables:
varDictionary[curVar] = state[curVar]
return varDictionary
#calculate the parent probability of a queried variable
"""This portion is needed because the values obtained from getSprinklerVal()
or getWetgrassVal() is not static. Although we are given both of them to be true statements,
the provided probabilities are not the same, since 4 different scenarios can happen when given
sprinkler to be true and wetgrass to be true"""
def calculateParentProb(varQueried, state):
    """Return P(varQueried = state[varQueried] | its parents' values in state).

    The lookup tables give P(var = true | parents); when `state` records the
    variable as false (0), the complement probability is returned instead.
    """
    # Current values of this node's parents, pulled from the sample state.
    parents = convertVarListToMap(parentData[varQueried], state)
    if varQueried == 'cloudy':
        # Root node: flat prior, no complement handling needed.
        return cloudyConstant()
    if varQueried == 'rain':
        p_true = getRainVal(parents['cloudy'])
    elif varQueried == 'sprinkler':
        p_true = getSprinklerVal(parents['cloudy'])
    elif varQueried == 'wetgrass':
        p_true = getWetgrassVal(sprinkler=parents['sprinkler'], rain=parents['rain'])
    else:
        # Unknown variable name: sentinel value signalling a programming error.
        return -math.inf
    return p_true if state[varQueried] else 1 - p_true
# compute conditional probability of a queried variable
def calculateConditionalProb(queryVar, state):
    """Compute normalized P(queryVar = true | Markov blanket of queryVar).

    Mutates state[queryVar] while evaluating both candidate values; it is
    left set to 0 (the last candidate tried) when this returns.
    """
    unnormalized = []
    # important loop - the query variable can be true (1) or false (0)
    for candidate in (1, 0):
        state[queryVar] = candidate
        # P(x | parents(x))
        prior = calculateParentProb(queryVar, state)
        # Product of P(y | parents(y)) over the children y of queryVar.
        # The original folded the child list with functools.reduce, which
        # raises TypeError for a childless node (e.g. 'wetgrass'); starting
        # the product at 1 handles that edge case.
        likelihood = 1
        for child in childrenData[queryVar]:
            likelihood *= calculateParentProb(child, state)
        unnormalized.append(prior * likelihood)
    # Normalize over the two candidates; index 0 is the "true" probability.
    return normalize(unnormalized)[0]
variables = ['cloudy','rain','sprinkler','wetgrass']
# Gibbs sampling implementation, based on the algorithm provided in the book
# (GIBBS-ASK in Russell & Norvig, "Artificial Intelligence: A Modern Approach").
def gibbsSampling(queryVariable,numTrials, evidenceVariables):
    # Counts of how often the query variable was sampled true (1) / false (0).
    N = {1: 0, 0: 0}
    Z = [x for x in variables if x not in evidenceVariables] # non-evidence vars, e.g. ['cloudy', 'rain']
    # NOTE(review): `state` aliases evidenceVariables and is mutated below —
    # callers must pass a copy (main() does: evidence_variables.copy()).
    state = evidenceVariables
    for z in Z: # randomly init the non-evidence vars
        state[z] = r.randint(0, 1)
    for j in range(int(numTrials)):
        for zVal in Z:
            # Resample zVal from P(zVal | Markov blanket) and write it back.
            probZ = calculateConditionalProb(zVal, state)
            zInitial = int(r.uniform(0.0, 1.0) < probZ)
            state[zVal] = zInitial
        # Tally the query variable's value once per full sweep.
        N[state[queryVariable]] += 1
    return normalize(list(N.values()))
def normalize(arr):
    """Scale *arr* so its entries sum to 1, rounding each to 4 decimal places."""
    total = sum(arr)
    return [round(float(value) / total, 4) for value in arr]
# answer to P(R | s, w)
if __name__ == "__main__":
    evidence_variables = {'sprinkler': 1, 'wetgrass': 1}
    #probabilityVector = gibbsSampling(sys.argv(1), evidence_variables.copy())
    # Query variables[1] == 'rain' with 10,000 Gibbs sweeps; pass a copy so
    # the evidence dict itself is not mutated by the sampler.
    probabilityVector = gibbsSampling(variables[1],10000, evidence_variables.copy())
    # Prints <P(rain=true | s,w), P(rain=false | s,w)>, each rounded to 4 dp.
    print('<{0}, {1}>'.format(probabilityVector[0], probabilityVector[1]))
| [
"noreply@github.com"
] | Khoa100.noreply@github.com |
644e18339d7fb2aff15af4424e10767a972ab174 | 607ce7825d3a238cdba7f9cccb2004aa80d81abf | /setup.py | 258b840e179cfff631b4ad6213382a4e7e63b5a4 | [] | no_license | majuscule/concrete_requirements | 3fd3bb4395356fe93a1a74d38fb276c584a3ce17 | 5b876fa92be7777b4f57dd4668a0063074c5aeb0 | refs/heads/master | 2020-12-03T01:42:18.660360 | 2017-07-07T18:45:50 | 2017-07-07T18:45:50 | 95,853,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='concrete-requirements',
use_scm_version=True,
description='setuptools compatible concrete requirement & semver tooling',
url='https://github.com/majuscule',
author='Dylan Lloyd',
author_email='dylan@disinclined.org',
packages=find_packages(),
zip_safe=True,
setup_requires=['setuptools_scm'],
install_requires=[],
extras_require={
'dev': [
'tox~=2.5.0',
'pytest~=3.0.3',
'pylint~=1.6.4',
'pytest-pylint~=0.6.0',
]
},
)
| [
"dylan@disinclined.org"
] | dylan@disinclined.org |
49d7ea2459d53c2a0a23811c37bc338f8ef04c95 | 82c6caf55c5ba1f46c02c2605533b8fdacd2c862 | /PythonIES-SelfTaught/Chapter4/incomplete.py | d7a7ed9ee1cc6954662e1f3d93eece36c2345025 | [] | no_license | jandrews2014/Projects | 3a227bb6145c9b1f72ebc73abbe53f12177e71c2 | 5d046e2e80ce0f8f654b7be385d33555ed9f3913 | refs/heads/master | 2023-03-04T09:51:41.265843 | 2021-07-08T22:39:09 | 2021-07-08T22:39:09 | 160,982,485 | 1 | 0 | null | 2023-02-28T05:30:05 | 2018-12-08T22:03:10 | PHP | UTF-8 | Python | false | false | 276 | py | #Chapter 4 - Exercise #5
#Incomplete.py
#.py is a Python file
#Start a new Python script by initializing a variable with a
#Boolean value then add an incomplete conditional test
# NOTE: the variable name mirrors the book exercise but shadows the builtin
# bool() — avoid this pattern in real code.
bool = True
if bool:
    print('Python In Easy Steps')
else:
    #Statements to be inserted here
    # `pass` keeps the file valid Python until the exercise is completed;
    # without it the bare `else:` is a SyntaxError.
    pass
| [
"jandrews2014@fau.edu"
] | jandrews2014@fau.edu |
f31d7216c6a914eb32fd915d060e378819abeaa9 | 45346fb34514011be98ded60e5c1d8d0f82240e5 | /old model/version_3/bookmarks_parser_iteration.py | 409a2fc762f4dd51d71592fc9b05dbda4d8f5f51 | [
"MIT"
] | permissive | radam9/bookmarks_parser | 768ebb73089da767b40034ce2acfb0f32467d0d2 | fc508908fe4b5551d517e7da7120bcc2480200f7 | refs/heads/master | 2023-02-24T14:22:58.879715 | 2021-01-27T20:41:25 | 2021-01-27T20:41:25 | 286,231,512 | 0 | 0 | MIT | 2021-01-28T16:43:10 | 2020-08-09T12:20:35 | Python | UTF-8 | Python | false | false | 22,056 | py | import json
import os
import re
import time
from bs4 import BeautifulSoup
from .models import Base, Bookmark, create_engine, Folder, sessionmaker, Url
class HTMLMixin:
def save_to_html(self):
"""
Export the bookmarks as HTML.
"""
output_file = os.path.splitext(self.new_filepath)[0] + ".html"
with open(output_file, "w", encoding="Utf-8") as f:
f.write(self.bookmarks)
    def parse_root_html(self):
        # Render the folder tree on self.stack into a single Netscape-format
        # HTML string stored on self.bookmarks.
        header = """<!DOCTYPE NETSCAPE-Bookmark-file-1>\n<!-- This is an automatically generated file.\n It will be read and overwritten.\n DO NOT EDIT! -->\n<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">\n<TITLE>Bookmarks</TITLE>\n<H1>Bookmarks Menu</H1>\n<DL><p>\n"""
        footer = "</DL>"
        # Folders are rendered depth-first: a nested folder is first emitted
        # as a '<folderN>' placeholder inside its parent's markup, then
        # substituted with its real markup once it is popped off the stack.
        self.proccessed = []
        while self.stack:
            stack_item = self.stack.pop()
            folder = self.iterate_folder_html(stack_item)
            if folder:
                placeholder = f'<folder{stack_item.get("id")}>'
                if self.proccessed and (placeholder in self.proccessed[-1]):
                    self.proccessed[-1] = self.proccessed[-1].replace(
                        placeholder, folder
                    )
                else:
                    self.proccessed.append(folder)
        temp = [header]
        temp.extend(self.proccessed)
        temp.append(footer)
        self.bookmarks = "".join(temp)
    def iterate_folder_html(self, stack_item):
        # Render one folder's own header plus its direct children; child
        # folders are deferred to the stack and stand in as placeholders.
        folder = [self._create_folder_as_html(stack_item), "<DL><p>\n"]
        list_end = "</DL><p>\n"
        children = stack_item.get("children")
        if children:
            for child in children:
                if child.get("type") in ("folder", "text/x-moz-place-container"):
                    item = f'<folder{child.get("id")}>'
                    self.stack.append(child)
                else:
                    item = self._create_url_as_html(child)
                folder.append(item)
        folder.append(list_end)
        result = "".join(folder)
        return result
    def _create_folder_as_html(self, folder):
        # Build the <H3> folder header; toolbar / unfiled folders carry their
        # special marker attributes so browsers re-import them correctly.
        date_added = self.get_date_added(folder)
        title = self.get_title(folder)
        if title in ("Bookmarks Toolbar", "Bookmarks bar", "toolbar"):
            return f'<DT><H3 ADD_DATE="{date_added}" LAST_MODIFIED="0" PERSONAL_TOOLBAR_FOLDER="true">{title}</H3>\n'
        elif title in ("Other Bookmarks", "unfiled"):
            return f'<DT><H3 ADD_DATE="{date_added}" LAST_MODIFIED="0" UNFILED_BOOKMARKS_FOLDER="true">{title}</H3>\n'
        else:
            return f'<DT><H3 ADD_DATE="{date_added}" LAST_MODIFIED="0">{title}</H3>\n'
    def _create_url_as_html(self, url):
        # Build the <A> element. NOTE(review): a missing icon/icon_uri renders
        # literally as "None" in the attribute values — confirm acceptable.
        return f'<DT><A HREF="{self.get_url(url)}" ADD_DATE="{self.get_date_added(url)}" LAST_MODIFIED="0" ICON_URI="{url.get("icon_uri")}" ICON="{url.get("icon")}">{self.get_title(url)}</A>\n'
def get_title(self, item):
if self.source == "Chrome":
return item.get("name")
else:
return item.get("title")
def get_date_added(self, item):
if self.source == "Firefox":
return item.get("dateAdded")
else:
return item.get("date_added")
def get_url(self, item):
if self.source == "Firefox":
return item.get("uri")
else:
return item.get("url")
class JSONMixin:
    def save_to_json(self):
        """Serialize self.bookmarks to a .json file next to new_filepath."""
        target = os.path.splitext(self.new_filepath)[0] + ".json"
        with open(target, "w", encoding="Utf-8") as out:
            json.dump(self.bookmarks, out, ensure_ascii=False)
class DBMixin:
    def save_to_db(self):
        """
        Function to export the bookmarks as SQLite3 DB.
        """
        # Build an sqlite:/// URL next to the output file, swapping the extension.
        database_path = "sqlite:///" + os.path.splitext(self.new_filepath)[0] + ".db"
        engine = create_engine(database_path, echo=True)  # echo=True logs every SQL statement
        Session = sessionmaker(bind=engine)
        session = Session()
        # Create the schema, then bulk-insert the prepared model objects.
        Base.metadata.create_all(engine)
        session.commit()
        session.bulk_save_objects(self.bookmarks)
        session.commit()
        # NOTE(review): the session/engine are never closed — presumably fine
        # for a one-shot export, but worth confirming.
class BookmarksParserHTML(JSONMixin, DBMixin):
def __init__(self, filepath):
self.new_filepath = (
os.path.dirname(filepath) + "/output_" + os.path.basename(filepath)
)
self.format_html_file(filepath)
with open(self.new_filepath, "r", encoding="Utf-8") as f:
self.soup = BeautifulSoup(
markup=f, features="html.parser", from_encoding="Utf-8"
)
self.tree = self.soup.find("h3")
del self.soup
self.source = "Chrome" if self.tree.get("title") == "Bookmarks" else "Firefox"
self.id = 2
# stack containes a tuple of (folder, node).
# folder being the parsed data, and node being the folder data from the tree.
self.stack = []
def format_html_file(self, filepath):
"""
Takes in an absolute path to a HTML Bookmarks file, it creates a new Bookmarks file with the text "output_" prepeneded to the filename. where,
- The main "<H1>" header is converted to "<H3>" and acts as the root folder.
- All "<DT>" tags are removed.
- "<H3>" acts as folders and list containers instead of "<DL>".
- All "<H3>" and "<A>" tag's inner text are added as a "title" attribute within the html element.
:param file_path: absolute path to bookmarks html file
:type file_path: str
"""
with open(filepath, "r") as f:
lines = f.readlines()
# regex to select an entire H1/H3/A HTML element
element = re.compile(r"(<(H1|H3|A))(.*?(?=>))>(.*)(<\/\2>)\n")
# NOTE: maybe change the list comprehensions to Generator Comprehension for better efficiency
lines1 = [element.sub(r'\1\3 TITLE="\4">\5', line) for line in lines]
lines2 = [line.replace("<DT>", "") for line in lines1 if "<DL><p>" not in line]
lines3 = [
line.replace("<H1", "<H3")
.replace("</H1>", "")
.replace("</H3>", "")
.replace("</DL><p>\n", "</H3>")
.replace("\n", "")
.strip()
for line in lines2
]
with open(self.new_filepath, "w") as f:
f.writelines(lines3)
def convert_to_json(self):
self.mode = "json"
self.bookmarks = {
"type": "folder",
"id": 1,
"index": 0,
"parent_id": None,
"title": "root",
"date_added": None,
"date_modified": None,
"children": [],
}
if self.source == "Chrome":
self.parse_chrome_root_to_json()
elif self.source == "Firefox":
self.parse_firefox_root_to_json()
while self.stack:
stack_item = self.stack.pop()
self.iterate_folder(mode="folder", stack_item=stack_item)
def convert_to_db(self):
self.mode = "db"
self.bookmarks = []
root = Folder(_id=1, title="root", parent_id="0", index=0)
self.bookmarks.append(root)
if self.source == "Chrome":
self.parse_chrome_root_to_db(root)
elif self.source == "Firefox":
self.parse_firefox_root_to_db(root)
while self.stack:
stack_item = self.stack.pop()
self.iterate_folder(mode="folder", stack_item=stack_item)
def parse_firefox_root_to_json(self):
"""
Function that will format and iterate through a Firefox bookmarks file.
"""
bookmarks_menu = {
"type": "folder",
"id": self.id_manager(),
"index": 0,
"parent_id": self.bookmarks.get("id"),
"title": "Bookmarks Menu",
"date_added": time.time(),
"date_modified": None,
"children": [],
}
menu_children = []
root_children = [bookmarks_menu]
for child in self.tree:
if child.get("personal_toolbar_folder") == "true":
index = len(root_children)
bookmarks_toolbar = self.parse_folder(
child, index, self.bookmarks.get("id")
)
self.add_to_stack((bookmarks_toolbar, child))
root_children.append(bookmarks_toolbar)
elif child.get("unfiled.bookmarks.folder") == "true":
index = len(root_children)
other_bookmarks = self.parse_folder(
child, index, self.bookmarks.get("id")
)
self.add_to_stack((other_bookmarks, child))
root_children.append(other_bookmarks)
else:
menu_children.append(child)
if menu_children:
self.iterate_folder(
mode="root", folder=bookmarks_menu, children=menu_children
)
self.bookmarks.get("children").extend(root_children)
def parse_chrome_root_to_json(self):
"""
Function that will format and iterate through a Chrome bookmarks file.
"""
other_children = []
for child in self.tree.children:
if child.get("personal_toolbar_folder") == "true":
bookmarks_bar = self.parse_folder(child, 0, self.bookmarks.get("id"))
self.bookmarks["children"].append(bookmarks_bar)
self.add_to_stack((bookmarks_bar, child))
else:
other_children.append(child)
if other_children:
other_bookmarks = {
"type": "folder",
"id": self.id_manager(),
"index": 1,
"parent_id": self.bookmarks.get("id"),
"title": "Other Bookmarks",
"date_added": time.time(),
"date_modified": None,
"children": [],
}
self.iterate_folder(
mode="root", folder=other_bookmarks, children=other_children
)
self.bookmarks.get("children").append(other_bookmarks)
def parse_firefox_root_to_db(self, root):
bookmarks_menu = Folder(
_id=self.id_manager(), index=0, parent_id=root.id, title="Bookmarks Menu"
)
self.bookmarks.append(bookmarks_menu)
menu_children = []
for child in self.tree.children:
if child.get("parsonal_toolbar_folder") == "true":
index = len(self.bookmarks) - 1
bookmarks_toolbar = self.parse_folder(
item=child, index=index, parent_id=root.id
)
self.add_to_stack((bookmarks_toolbar, child))
self.bookmarks.append(bookmarks_toolbar)
elif child.get("unfiled.bookmarks.folder") == "true":
index = len(self.bookmarks) - 1
other_bookmarks = self.parse_folder(
item=child, index=index, parent_id=root.id
)
self.add_to_stack((other_bookmarks, child))
self.bookmarks.append(other_bookmarks)
else:
menu_children.append(child)
if menu_children:
self.iterate_folder(
mode="root", folder=bookmarks_menu, children=menu_children
)
def parse_chrome_root_to_db(self, root):
other_children = []
for child in self.tree.children:
if child.get("personal_toolbar_folder") == "true":
index = len(self.bookmarks) - 1
bookmarks_bar = self.parse_folder(
item=child, index=index, parent_id=root.id
)
self.add_to_stack((bookmarks_bar, child))
self.bookmarks.append(bookmarks_bar)
else:
other_children.append(child)
if other_children:
index = len(self.bookmarks) - 1
other_bookmarks = Folder(
_id=self.id_manager(),
index=index,
parent_id=root.id,
title="Other Bookmarks",
)
self.bookmarks.append(other_bookmarks)
self.iterate_folder(
mode="root", folder=other_bookmarks, children=other_children
)
def iterate_folder(self, mode, stack_item=None, folder=None, children=None):
"""
Function that appends the folders children, and adds any new folders to the stack.
"""
if mode == "root":
folder = folder
children = children
elif mode == "folder":
folder, node = stack_item
children = node.children
if self.mode == "json":
parent_id = folder.get("id")
for index, child in enumerate(children):
item = self.child_type_check(child, index, parent_id)
folder.get("children").append(item)
else:
parent_id = folder.id
for index, child in enumerate(children):
item = self.child_type_check(child, index, parent_id)
self.bookmarks.append(item)
def child_type_check(self, child, index, parent_id):
"""
Function checks if the child element is a hyperlink <A> or a folder <H3>, parses the child, and adds to stack if child is a folder.
"""
if child.name == "a":
item = self.parse_url(child, index, parent_id)
elif child.name == "h3":
item = self.parse_folder(child, index, parent_id)
self.add_to_stack((item, child))
return item
def add_to_stack(self, stack_item):
"""
Function to check that the node has contents before adding it to the stack
"""
node = stack_item[1]
if node.contents:
self.stack.append(stack_item)
def parse_folder(self, item, index, parent_id):
"""
Function to parse a given folder into a dictionary object.
"""
if self.mode == "json":
folder = {
"type": "folder",
"id": self.id_manager(),
"index": index,
"parent_id": parent_id,
"title": item.get("title"),
"date_added": item.get("add_date"),
"children": [],
}
else:
folder = Folder(
_id=self.id_manager(),
index=index,
parent_id=parent_id,
title=item.get("title"),
date_added=item.get("add_date"),
)
return folder
def parse_url(self, item, index, parent_id):
"""
Function to parse a given hyperlink into a dictionary object.
"""
if self.mode == "json":
url = {
"type": "url",
"id": self.id_manager(),
"index": index,
"parent_id": parent_id,
"url": item.get("href"),
"title": item.get("title"),
"date_added": item.get("add_date"),
"icon": item.get("icon"),
"iconuri": item.get("icon_uri"),
"tags": item.get("tags"),
}
else:
url = Url(
_id=self.id_manager(),
index=index,
parent_id=parent_id,
url=item.get("href"),
title=item.get("title"),
date_added=item.get("add_date"),
icon=item.get("icon"),
icon_uri=item.get("icon_uri"),
tags=item.get("tags"),
)
return url
def id_manager(self):
"""
Function to increment the id of the folders/hyperlinks.
"""
the_id = self.id
self.id += 1
return the_id
class BookmarksParserJSON(HTMLMixin, DBMixin):
    """Parse a JSON bookmarks export (Chrome, Firefox or Bookmarkie) and
    convert it to the HTML or database representation via the mixins."""

    def __init__(self, filepath):
        self.new_filepath = (
            os.path.dirname(filepath) + "/output_" + os.path.basename(filepath)
        )
        with open(filepath, "r", encoding="Utf-8") as source:
            self.tree = json.load(source)
        if self.tree.get("checksum"):
            # Chrome exports carry a "checksum" key; wrap their roots in a
            # synthetic root node so traversal is uniform across sources.
            self.source = "Chrome"
            self.tree = {
                "title": "root",
                "id": 0,
                "children": list(self.tree.get("roots").values()),
            }
        elif self.tree.get("root"):
            # Firefox exports use short root titles; expand them to the
            # human-readable folder names.
            self.source = "Firefox"
            readable = {
                "menu": "Bookmarks Menu",
                "toolbar": "Bookmarks Toolbar",
                "unfiled": "Other Bookmarks",
                "mobile": "Mobile Bookmarks",
            }
            for node in self.tree.get("children"):
                node["title"] = readable[node.get("title")]
        else:
            self.source = "Bookmarkie"

    def convert_to_html(self):
        """Seed the traversal stack and emit the HTML output."""
        if self.source == "Firefox":
            # Children of "Bookmarks Menu" are hoisted to the top level.
            self.stack = []
            for node in reversed(self.tree.get("children")):
                if node.get("title") == "Bookmarks Menu":
                    self.stack.extend(reversed(node.get("children")))
                else:
                    self.stack.append(node)
        else:
            self.stack = list(reversed(self.tree.get("children")))
        self.parse_root_html()

    def convert_to_db(self):
        """Flatten the JSON tree into Folder/Url records in self.bookmarks."""
        self.bookmarks = []
        # Stack of (folder, index, parent_id) tuples still to process.
        self.stack = []
        root = Folder(_id=1, title="root", index=0, parent_id=0, date_added=time.time())
        self.bookmarks.append(root)
        for position, top in enumerate(self.tree.get("children")):
            self.stack.append((top, position, root.get("id")))
        while self.stack:
            self.iterate_folder_db(self.stack.pop())

    def iterate_folder_db(self, stack_item):
        """Record one stacked folder, its hyperlinks, and queue sub-folders."""
        node, position, parent_id = stack_item
        _id, title, position, date_added = self.get_properties(node, position)
        self.bookmarks.append(
            Folder(
                _id=_id,
                title=title,
                index=position,
                parent_id=parent_id,
                date_added=date_added,
            )
        )
        parent_id = _id
        children = node.get("children")
        if not children:
            return
        for child_pos, child in enumerate(children):
            _id, title, child_pos, date_added = self.get_properties(child, child_pos)
            if child.get("type") in ("folder", "text/x-moz-place-container"):
                # Only folders with contents need to be walked further.
                if child.get("children"):
                    self.stack.append((child, child_pos, parent_id))
            else:
                href = child.get("uri") if self.source == "Firefox" else child.get("url")
                self.bookmarks.append(
                    Url(
                        _id=_id,
                        title=title,
                        url=href,
                        index=child_pos,
                        parent_id=parent_id,
                        date_added=date_added,
                        icon=child.get("icon"),
                        icon_uri=child.get("iconuri"),
                        tags=child.get("tags"),
                    )
                )

    def get_properties(self, item, index):
        """Return (_id, title, index, date_added) normalized across sources."""
        _id = int(item.get("id"))
        if self.source == "Chrome":
            _id += 1  # shift Chrome ids so the synthetic root keeps id 1
            title = item.get("name")
        else:
            title = item.get("title")
            index = item.get("index")
        if self.source == "Firefox":
            date_added = item.get("dateAdded")
        else:
            date_added = item.get("date_added")
        return (_id, title, index, date_added)
class BookmarksParserDB(HTMLMixin, JSONMixin):
    """Load a bookmarks SQLite database and convert it to HTML or JSON."""

    def __init__(self, filepath):
        self.new_filepath = (
            os.path.dirname(filepath) + "/output_" + os.path.basename(filepath)
        )
        engine = create_engine("sqlite:///" + filepath)
        session = sessionmaker(bind=engine)()
        # The bookmark row with id 1 is the root of the whole tree.
        self.tree = session.query(Bookmark).get(1)
        self.source = "Database"

    def convert_to_html(self):
        """Seed the traversal stack and emit the HTML output."""
        self.stack = list(reversed(self.tree.get("children")))
        self.parse_root_html()

    def convert_to_json(self):
        """Build a nested dict structure (self.bookmarks) from the ORM tree."""
        self.stack = []
        self.bookmarks = self.parse_folder(self.tree)
        self.stack.append((self.bookmarks, self.tree))
        while self.stack:
            parent_dict, node = self.stack.pop()
            for child in node.children:
                if child.type == "url":
                    entry = self.parse_url(child)
                elif child.type == "folder":
                    entry = self.parse_folder(child)
                    if child.children:
                        self.stack.append((entry, child))
                parent_dict.get("children").append(entry)

    def parse_folder(self, folder):
        """Map a folder record onto a plain dictionary."""
        return {
            "type": "folder",
            "id": folder.id,
            "index": folder.index,
            "parent_id": folder.parent_id,
            "title": folder.title,
            "date_added": folder.date_added,
            "children": [],
        }

    def parse_url(self, url):
        """Map a hyperlink record onto a plain dictionary."""
        return {
            "type": "url",
            "id": url.id,
            "index": url.index,
            "parent_id": url.parent_id,
            "url": url.url,
            "title": url.title,
            "date_added": url.date_added,
            "icon": url.icon,
            "iconuri": url.icon_uri,
            "tags": url.tags,
        }
| [
"radam9@gmail.com"
] | radam9@gmail.com |
ca5730f42424db91567d350a28af2eb48aad2b8c | a3c150743b493fedfd2cca5abebc471a1be3a4f0 | /hello.py | 24b8a6aa2f2b9c3edcb0dab11e1b3815447f164b | [] | no_license | krjukOFF/stepic_web_project | 048257c8294f23e974d77624b4dda464b3aca2c3 | 822a2d44b6b7d5e8a34e1b60d605f8cd5051a869 | refs/heads/master | 2021-01-10T07:59:39.483963 | 2016-03-09T12:31:02 | 2016-03-09T12:31:02 | 52,791,351 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | def app(env, start_response):
    # Minimal WSGI app: echo the request's query string, one parameter per line.
    status = '200 OK'
    headers = [('Content-Type', 'text/plain')]
    # '&' separates parameters; swap it for newlines before echoing back.
    body = env['QUERY_STRING'].replace('&', '\n')
    start_response(status, headers)
    # NOTE(review): WSGI bodies must be bytes under Python 3 — this returns a
    # str, which suggests the script targets Python 2; confirm before porting.
    return [body]
| [
"k.krjukovs@gmail.com"
] | k.krjukovs@gmail.com |
503ab499313a3e8b8ddfbc9225bc2978dbe218b8 | 028f04462a5c12a63095028596ebc574aa432eaa | /Day5/T_2_High_score_finder.py | 830630390dc32254e7adb2ec5946f6d0775cb3c2 | [] | no_license | Akshay-Murali/100_day_of_python | e5e18b4e836514ca424c71c9a21aa7c6dd2e37f3 | 72dca7db9df739bce858ab400f1fdea9d2bf2eae | refs/heads/main | 2023-05-02T07:33:55.081804 | 2021-05-18T09:25:44 | 2021-05-18T09:25:44 | 357,737,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | # High Score Finder
# High-score finder: read whitespace-separated scores and report the largest.
print("Welcome to high score finder")
marks = input("Enter the Marks: ").split()
high_score = 0
# Walk the entered scores, keeping the largest value seen so far.
for mark in marks:
    high_score = max(high_score, int(mark))
print(f"High score is : {high_score}")
"m.akshay1998@gmail.com"
] | m.akshay1998@gmail.com |
71c30464092ca759bbb801e6282b2bc827f19be1 | ceb620c4be8b34f4aa08156226187db081fc3b55 | /loca_13/vat_retention/models/retention_vat.py | a78c159f4bcd7d8ea5616ae7b08ad7533a48d860 | [] | no_license | hjrhjr/entrenamiento_13_odoo_ref | f73e292b91d085473283f63a88ccd2363a03d9bf | 9a492c006d9c0aab68d0b095281dafda97ebdfda | refs/heads/main | 2023-08-25T06:46:39.075724 | 2021-10-19T14:51:27 | 2021-10-19T14:51:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,751 | py | # -*- coding: utf-8 -*-
import logging
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
from datetime import datetime
_logger = logging.getLogger('__name__')
class InvoiceLineInherit(models.Model):
    """Extend account.move.line so each journal item can point back at the
    VAT retention voucher that withheld it."""
    _inherit = 'account.move.line'
    # Back-reference to the vat.retention voucher this journal item belongs to.
    retention_id = fields.Many2one('vat.retention', string='VAT Retention')
class VatRetentionTaxLines(models.Model):
    """Per-tax detail of the VAT withheld on one retention invoice line."""
    _name = 'vat.retention.tax.lines'
    name = fields.Char(string='Tax name', size=40)
    tax_id = fields.Many2one('account.tax', string="Tax")
    company_id = fields.Many2one('res.company', string='Company')
    # Parent retention line; this record is cascade-deleted with it.
    vat_ret_line_id = fields.Many2one('vat.retention.invoice.line', ondelete="cascade",string='vat_ret_line_id')
    # Tax base the tax was computed on.
    base_tax = fields.Float(string='Base tax')
    # Tax computed on the base.
    tax_amount = fields.Float(string='Tax amount')
    # Portion of the tax actually withheld.
    amount_withheld = fields.Float(string='Amount withheld')
class VatRetentionInvoiceLine(models.Model):
    """One withheld invoice line of a VAT retention voucher."""
    _name = 'vat.retention.invoice.line'
    def formato_fecha(self):
        """Return the invoice date re-formatted as dd/mm/yyyy (from the ISO
        yyyy-mm-dd string representation of the Date field)."""
        fecha = str(self.invoice_id.invoice_date)
        fecha_aux=fecha
        ano=fecha_aux[0:4]
        mes=fecha[5:7]
        dia=fecha[8:10]
        resultado=dia+"/"+mes+"/"+ano
        return resultado
    def float_format(self,valor):
        """Format `valor` with Venezuelan conventions: '.' as thousands
        separator and ',' as decimal mark (e.g. 1234.5 -> '1.234,50')."""
        #valor=self.base_tax
        if valor:
            result = '{:,.2f}'.format(valor)
            # Swap ',' and '.' using '*' as a temporary placeholder.
            result = result.replace(',','*')
            result = result.replace('.',',')
            result = result.replace('*','.')
        else:
            result = "0,00"
        return result
    def valida_excento(self,id_tax,id_retention):
        """Return the formatted total of exempt (0% tax) bases among the
        sibling lines of this line's retention voucher, or '' when this
        line's aliquot is not the highest one present on the voucher."""
        tipo=self.tax_id.aliquot
        valor_excento=0
        cant_reduced=0
        cant_general=0
        cant_additional=0
        resultado=''
        lista_det = self.env['vat.retention.invoice.line'].search([('retention_id','=',self.retention_id.id)])
        for det in lista_det:
            if det.tax_id.amount==0:
                valor_excento=valor_excento+det.amount_untaxed
            if det.tax_id.aliquot=='reduced':
                cant_reduced=cant_reduced+1
            if det.tax_id.aliquot=='general':
                cant_general=cant_general+1
            if det.tax_id.aliquot=='additional':
                cant_additional=cant_additional+1
        # Only the "highest-priority" aliquot present reports the exempt total:
        # general beats reduced, which beats additional.
        if tipo=='general' and cant_general>0:
            resultado=str(self.float_format(valor_excento))
        if tipo=='reduced' and cant_reduced>0 and cant_general==0:
            resultado=str(self.float_format(valor_excento))
        if tipo=='additional' and cant_additional>0 and cant_reduced==0 and cant_general==0:
            resultado=str(self.float_format(valor_excento))
        return str(resultado)
    #@api.depends('amount_vat_ret', 'retention_rate')
    def _compute_amount_withheld(self):
        # NOTE(review): the real computation below is disabled; this compute
        # currently always yields 0 — confirm this is intentional.
        return 0
        """This function compute the VAT retention."""
        #amount = (self.amount_vat_ret * self.retention_rate) / 100
        #_logger.info('\n\n\n amount %s \n\n\n', amount)
        #self.retention_amount = amount
        #voucher = self.env['vat.retention'].search([('id', '=', self.retention_id.id)])
        #_logger.info("\n\n\n voucher %s\n\n\n",voucher)
        #voucher.vat_retentioned = amount
    name = fields.Char(string='Description')
    retention_id = fields.Many2one('vat.retention', string='Vat retention')
    # Tax base of the line (amount before tax).
    amount_untaxed = fields.Float(string='Amount untaxed')
    invoice_number = fields.Char(string='Invoice number')
    # VAT amount of the line.
    amount_vat_ret = fields.Float(string='Amount tax')
    # VAT actually withheld on the line.
    retention_amount = fields.Float(string='Retention', readonly=True, store=True)
    retention_rate = fields.Float(string='Rate', help="The retention rate can vary between 75% al 100% depending on the taxpayer.")
    move_id = fields.Many2one('account.move', string='Asiento')
    invoice_id = fields.Many2one('account.move', string='Invoice', ondelete='restrict', help="Retention invoice")
    tax_line_ids = fields.One2many('vat.retention.tax.lines', 'vat_ret_line_id', string='tax lines')
    # field still to be added
    # tax_book_id = fields.Many2one('tax.book', string="Tax book")
    # fields to be removed
    tax_id = fields.Many2one('account.tax', string='Tax')
    # SQL constraint still to be added
    # _sql_constraints = [
    #     ('one_name', 'unique (invoice_id)', 'message')
    # ]
class RetentionVat(models.Model):
    """Main model for VAT retention vouchers (Venezuelan localization).

    A voucher collects the VAT withheld on one invoice, generates the
    corresponding accounting entry when posted, and carries the
    SENIAT-mandated correlative number.
    """
    _name = 'vat.retention'
    _inherit = ['mail.thread', 'mail.activity.mixin']
    journal_id=fields.Char(string='journal_id')
    move_id = fields.Many2one('account.move', string='Id del movimiento')
    """def unlink(self):
        for vat in self:
            if vat.state=='posted':
                raise UserError(_("El comprobante de retencion IVA ya esta Publicado, No se puede eliminar"))
        return super(RetentionVat,self).unlink() """
    def formato_fecha2(self):
        """Return the voucher delivery date formatted as dd/mm/yyyy."""
        fecha = str(self.voucher_delivery_date)
        fecha_aux=fecha
        ano=fecha_aux[0:4]
        mes=fecha[5:7]
        dia=fecha[8:10]
        resultado=dia+"/"+mes+"/"+ano
        return resultado
    def periodo(self):
        """Return the fiscal period (yyyy-mm) of the delivery date."""
        fecha = str(self.voucher_delivery_date)
        fecha_aux=fecha
        ano=fecha_aux[0:4]
        mes=fecha[5:7]
        dia=fecha[8:10]
        resultado=ano+"-"+mes
        return resultado
    def float_format2(self,valor):
        """Format `valor` with Venezuelan conventions: '.' thousands
        separator and ',' decimal mark (e.g. 1234.5 -> '1.234,50')."""
        #valor=self.base_tax
        if valor:
            result = '{:,.2f}'.format(valor)
            # Swap ',' and '.' using '*' as a temporary placeholder.
            result = result.replace(',','*')
            result = result.replace('.',',')
            result = result.replace('*','.')
        else:
            result = "0,00"
        return result
    def doc_cedula(self,aux):
        """Return the partner `aux`'s fiscal id as '<TYPE>-<digits>',
        stripping any embedded type letters/dashes from the VAT field.

        NOTE(review): if no partner matches `aux`, nro_doc/tipo_doc are
        never bound and this raises NameError — verify callers.
        """
        #nro_doc=self.partner_id.vat
        busca_partner = self.env['res.partner'].search([('id','=',aux)])
        for det in busca_partner:
            tipo_doc=busca_partner.doc_type
            nro_doc=str(busca_partner.vat)
        nro_doc=nro_doc.replace('V','')
        nro_doc=nro_doc.replace('v','')
        nro_doc=nro_doc.replace('E','')
        nro_doc=nro_doc.replace('e','')
        nro_doc=nro_doc.replace('G','')
        nro_doc=nro_doc.replace('g','')
        nro_doc=nro_doc.replace('J','')
        nro_doc=nro_doc.replace('j','')
        nro_doc=nro_doc.replace('P','')
        nro_doc=nro_doc.replace('p','')
        nro_doc=nro_doc.replace('-','')
        # Normalize the document type letter to upper case.
        if tipo_doc=="v":
            tipo_doc="V"
        if tipo_doc=="e":
            tipo_doc="E"
        if tipo_doc=="g":
            tipo_doc="G"
        if tipo_doc=="j":
            tipo_doc="J"
        if tipo_doc=="p":
            tipo_doc="P"
        resultado=str(tipo_doc)+"-"+str(nro_doc)
        return resultado
        #raise UserError(_('cedula: %s')%resultado)
    #@api.depends('retention_line_ids.retention_amount')
    def _amount_all(self):
        """ It shows total in this form view"""
        # NOTE(review): totals computation below is disabled; this always
        # returns 0 — confirm this is intentional.
        return 0
        #amount = 0
        #retention = 0
        #for invoice in self.retention_line_ids:
        #    amount += invoice.amount_untaxed
        #    retention += invoice.retention_amount
        #self.amount_untaxed = amount
        #self.vat_retentioned = retention
    @api.model
    def _type(self):
        """Return invoice type (default for the `type` field, taken from
        the context)."""
        return self._context.get('type', 'in_refund')
    # Correlative voucher number, as required by SENIAT.
    name = fields.Char(string='Voucher number', default='New')
    # partner (supplier/customer) data
    partner_id = fields.Many2one('res.partner', string='Partner')
    rif = fields.Char(string='RIF')
    # voucher issue and delivery dates (field name "accouting" is sic)
    accouting_date = fields.Date(string='Accounting date', help='Voucher generation date', readonly="True")
    voucher_delivery_date = fields.Date(string='Voucher delivery date')
    # source invoice data
    invoice_id = fields.Many2one('account.move', string="Invoice")
    invoice_number = fields.Char(string='Invoice Number')
    invoice_ctrl_num = fields.Char(string='Invoice control number')
    company_id = fields.Many2one('res.company', string="Company", default=lambda self: self.env.company)
    # applied retentions
    retention_line_ids = fields.One2many('vat.retention.invoice.line', 'retention_id', string='Retention')
    # totals
    amount_untaxed = fields.Float(string='Importe Base', help='This concept is tax base')
    vat_retentioned = fields.Float(string='VAT retentioned')
    # accounting data
    # journal_id = fields.Many2one('account.journal', string='Journal')
    currency_id = fields.Many2one('res.currency', string='Currency')
    account_id = fields.Many2one('account.account', string='Account')
    manual=fields.Boolean(default=True)
    line_ids = fields.One2many('account.move.line', 'retention_id', string='Invoice lines',
        copy=True, readonly=True,
        states={'draft': [('readonly', False)]})
    type = fields.Selection(selection=[
        ('out_invoice', 'Customer Invoice'),
        ('in_invoice','Supplier Invoince'),
        ('in_refund','Suplier Refund'),
        ('out_refund','Customer Refund'),
        ('in_receipt','Nota Debito cliente'),
        ('out_receipt','Nota Debito proveedor'),
        ], string="Type invoice", store=True, default=_type)
    # other information
    state = fields.Selection(selection=[
        ('draft', 'Draft'),
        ('posted', 'Posted'),
        # ('done', 'Done'),
        ('cancel', 'Cancelled')
        ], string='Status', readonly=True, copy=False, tracking=True,
        default='draft')
    is_supplier = fields.Boolean(string='Supplier')
    is_customer = fields.Boolean(string='Customer')
    description = fields.Char(string="Description", help="Description about this voucher.")
    @api.onchange('partner_id')
    def _rif(self):
        """Mirror the partner's VAT number into the voucher's RIF field."""
        if self.partner_id:
            _logger.info("\n\n\n RIF \n\n\n")
            self.rif = self.partner_id.vat
        else:
            self.rif = ''
    def action_cancel(self):
        """Cancel the voucher; the source invoice must be cancelled first."""
        if self.invoice_id.state == 'cancel':
            self.write({'state': 'cancel'})
        else:
            raise ValidationError("Debe cancelar primero la factura")
    #@api.model
    def cargar_fact(self):
        """Load the selected internal invoice into this voucher: copy its
        header data and build one retention line per taxed move line.

        Raises UserError when no invoice is selected, the partner is not a
        retention agent, or the invoice already has a voucher.
        """
        if not self.invoice_id.id:
            raise UserError(_(' Debe Seleccionar una Factura Interna'))
        if self.invoice_id.id:
            map_id = self.env['account.move'].search([('id','=',self.invoice_id.id)],order="id asc")
            #raise UserError(_(' map_id:%s')%map_id)
            #self.rif=map_id.name # careful: this is the key part
            if not map_id.partner_id.ret_agent:
                raise UserError(_(' La empresa %s no esta configurada como agente de retencion iva')%map_id.partner_id.name)
            else:
                if map_id.vat_ret_id.id:
                    raise UserError(_(' Esta Factura ya tiene asignado un comprobante de retencion'))
                if not map_id.vat_ret_id:
                    acum_iva=0
                    acum_mon_ret=0
                    retention = self.env['vat.retention']
                    # Copy invoice header data onto the voucher.
                    self.rif=map_id.rif
                    self.partner_id=map_id.partner_id.id
                    self.accouting_date=datetime.now()
                    self.voucher_delivery_date=datetime.now()
                    self.invoice_number=map_id.invoice_number
                    # NOTE(review): trailing comma makes this a 1-tuple, not
                    # an id — likely unintended; verify.
                    self.move_id= self.invoice_id.id,
                    self.journal_id=self.invoice_id.journal_id.id
                    self.type=self.invoice_id.type
                    self.invoice_ctrl_num=self.invoice_id.invoice_ctrl_number
                    self.manual=False
                    lista_movline = self.env['account.move.line'].search([('move_id','=',self.invoice_id.id)])
                    for det_mov_line in lista_movline:
                        # VAT per line = total with tax minus untaxed subtotal.
                        importe_base=det_mov_line.price_subtotal
                        monto_total=det_mov_line.price_total
                        monto_iva=(monto_total-importe_base)
                        acum_iva=acum_iva+monto_iva
                        monto_retenido=(monto_iva*map_id.partner_id.vat_retention_rate/100)
                        acum_mon_ret=acum_mon_ret+monto_retenido
                        ret_lines = self.env['vat.retention.invoice.line']
                        values = {
                            'name': self.invoice_id.name,
                            'invoice_id': self.invoice_id.id,
                            'move_id': self.invoice_id.id,
                            'invoice_number': map_id.invoice_number,
                            'amount_untaxed': importe_base,
                            'retention_amount':monto_retenido,
                            'amount_vat_ret':monto_iva,
                            'retention_rate':map_id.partner_id.vat_retention_rate,
                            'retention_id':self.id,
                            'tax_id':det_mov_line.tax_ids.id,
                        }
                        # Only lines that actually carry VAT become retention lines.
                        if monto_iva!=0:
                            ret_line = ret_lines.create(values)
                    self.amount_untaxed=acum_iva
                    self.vat_retentioned=acum_mon_ret
                    map_id.write({
                        'vat_ret_id':self.id,
                    })
    def action_posted(self):
        """Post the voucher: create and post the retention journal entry."""
        #raise UserError(_('ID MOVE = %s')%self)
        if not self.voucher_delivery_date:
            raise ValidationError("Debe establecer una fecha de entrega")
        self.state = 'posted'
        nombre_ret_iva = self.get_name()
        id_move=self.registro_movimiento_retencion(nombre_ret_iva)
        idv_move=id_move.id
        valor=self.registro_movimiento_linea_retencion(idv_move,nombre_ret_iva)
        moves= self.env['account.move'].search([('id','=',idv_move)])
        moves.filtered(lambda move: move.journal_id.post_at != 'bank_rec').post()
        ##self.concilio_saldo_pendiente()
    def action_draft(self):
        """Debug hook for the 'set to draft' action (state change disabled)."""
        #self.state = 'draft'
        for item in self:
            _logger.info("\n\n\n\n self %s \n\n\n\n", type(self))
            _logger.info("\n\n\n self %s \n\n\n", self)
    # @api.onchange('partner_id')
    def get_address_partner(self):
        """Return the partner's printable address: streets + city/state."""
        location = ''
        streets = ''
        if self.partner_id:
            location = self._get_state_and_city()
            streets = self._get_streets()
        return (streets + " " + location)
    def _get_state_and_city(self):
        """Return '<city> Edo. <state>' for the partner (empty parts skipped)."""
        state = ''
        city = ''
        if self.partner_id.state_id:
            state = "Edo." + " " + str(self.partner_id.state_id.name or '')
            _logger.info("\n\n\n state %s \n\n\n", state)
        if self.partner_id.city:
            city = str(self.partner_id.city or '')
            # _logger.info("\n\n\n city %s\n\n\n", city)
        result = city + " " + state
        _logger.info("\n\n\n result %s \n\n\n", result)
        return result
    def _get_streets(self):
        """Return '<street> <street2>' for the partner (empty parts skipped)."""
        street2 = ''
        av = ''
        if self.partner_id.street:
            av = str(self.partner_id.street or '')
        if self.partner_id.street2:
            street2 = str(self.partner_id.street2 or '')
        result = av + " " + street2
        return result
    def get_company_address(self):
        """Return the company's printable address: streets + city/state."""
        location = ''
        streets = ''
        if self.company_id:
            streets = self._get_company_street()
            location = self._get_company_state_city()
            _logger.info("\n\n\n street %s location %s\n\n\n", streets, location)
        return (streets + " " + location)
    def _get_company_street(self):
        """Return '<street> <street2>' for the company (empty parts skipped)."""
        street2 = ''
        av = ''
        if self.company_id.street:
            av = str(self.company_id.street or '')
        if self.company_id.street2:
            street2 = str(self.company_id.street2 or '')
        result = av + " " + street2
        return result
    def _get_company_state_city(self):
        """Return '<city> Edo. <state>' for the company (empty parts skipped)."""
        state = ''
        city = ''
        if self.company_id.state_id:
            state = "Edo." + " " + str(self.company_id.state_id.name or '')
            _logger.info("\n\n\n state %s \n\n\n", state)
        if self.company_id.city:
            city = str(self.company_id.city or '')
            _logger.info("\n\n\n city %s\n\n\n", city)
        result = city + " " + state
        _logger.info("\n\n\n result %s \n\n\n", result)
        return result
    #def unlink(self):
        """Throw an exception if the retention voucher is not in cancel state."""
        #for voucher in self:
            #raise ValidationError(_("No se pueden eliminar comprobantes"))
    @api.model
    def create(self, vals):
        """Assign the correlative voucher number on creation (sequence for
        supplier-side documents, '00000000' otherwise)."""
        partners=vals['type']
        #partners=vals['partners']
        #del vals['partners']
        if vals.get('name', 'New') == 'New':
            _logger.info("\n\n\n vals.get.tpye %s \n\n\n", vals.get('type', 'in_invoice'))
            if partners=='in_invoice' or partners=='in_refund' or partners=='in_receipt':
                vals['name'] = self.env['ir.sequence'].next_by_code('purchase.vat.retention.voucher.number') or '/'
                _logger.info("\n\n\n vals[name] %s \n\n\n",vals['name'])
            else:
                vals['name']= '00000000'
        return super().create(vals)
    def conv_div_extranjera(self,valor):
        """Convert `valor` from the invoice currency into the company
        currency using the latest rate on or before the invoice date; a
        same-currency invoice returns `valor` unchanged."""
        self.invoice_id.currency_id.id
        fecha_contable_doc=self.invoice_id.date
        monto_factura=self.invoice_id.amount_total
        valor_aux=0
        #raise UserError(_('moneda compañia: %s')%self.company_id.currency_id.id)
        if self.invoice_id.currency_id.id!=self.company_id.currency_id.id:
            tasa= self.env['res.currency.rate'].search([('currency_id','=',self.invoice_id.currency_id.id),('name','<=',self.invoice_id.date)],order="name asc")
            # Keep the most recent rate not later than the accounting date.
            for det_tasa in tasa:
                if fecha_contable_doc>=det_tasa.name:
                    valor_aux=det_tasa.rate
            rate=round(1/valor_aux,2) # LANTA
            #rate=round(valor_aux,2) # ODOO SH
            resultado=valor/rate
        else:
            resultado=valor
        return resultado
    def registro_movimiento_retencion(self,consecutivo_asiento):
        """Create (unposted) the account.move header for this retention and
        return it. Journal and rate come from the partner or, for supplier
        documents, from the company configuration (confg_ret_proveedores)."""
        #raise UserError(_('darrell = %s')%self.partner_id.vat_retention_rate)
        name = consecutivo_asiento
        signed_amount_total=0
        amont_totall=self.vat_retentioned #self.conv_div_extranjera(self.vat_retentioned)
        #amount_itf = round(float(total_monto) * float((igtf_porcentage / 100.00)),2)
        if self.type=="in_invoice" or self.type=="in_receipt":
            signed_amount_total=amont_totall
        if self.type=="out_invoice" or self.type=="out_receipt":
            signed_amount_total=(-1*amont_totall)
        if self.type=="out_invoice" or self.type=="out_refund" or self.type=="out_receipt":
            id_journal=self.partner_id.ret_jrl_id.id
            rate_valor=self.partner_id.vat_retention_rate
        if self.type=="in_invoice" or self.type=="in_refund" or self.type=="in_receipt":
            if self.env.company.confg_ret_proveedores=="c":#loca14
                id_journal=self.env.company.partner_id.ret_jrl_id.id#loca14
                rate_valor=self.env.company.partner_id.vat_retention_rate#loca14
            if self.env.company.confg_ret_proveedores=="p":#loca14
                id_journal=self.partner_id.ret_jrl_id.id
                rate_valor=self.partner_id.vat_retention_rate
        #raise UserError(_('papa = %s')%signed_amount_total)
        value = {
            'name': name,
            'date': self.move_id.date,# done
            #'amount_total':self.vat_retentioned,# done
            'partner_id': self.partner_id.id, # done
            'journal_id':id_journal,
            'ref': "Retención del %s %% IVA de la Factura %s" % (rate_valor,self.move_id.name),
            #'amount_total':self.vat_retentioned,# done
            #'amount_total_signed':signed_amount_total,# done
            'type': "entry",# this field is what allows changing and storing values
            'vat_ret_id': self.id,
            'company_id':self.env.company.id,#loca14
            #'currency_id':self.invoice_id.currency_id.id,
            }
        #raise UserError(_('value= %s')%value)
        move_obj = self.env['account.move']
        move_id = move_obj.create(value)
        #raise UserError(_('move_id= %s')%move_id)
        return move_id
    def registro_movimiento_linea_retencion(self,id_movv,consecutivo_asiento):
        """Create the two balanced move lines (credit + debit) of the
        retention entry `id_movv`, choosing the retention / receivable /
        payable accounts according to the document type."""
        #raise UserError(_('ID MOVE = %s')%id_movv)
        name = consecutivo_asiento
        valores = self.vat_retentioned #self.conv_div_extranjera(self.vat_retentioned) # TODO validate this condition
        #raise UserError(_('valores = %s')%valores)
        cero = 0.0
        if self.type=="out_invoice" or self.type=="out_refund" or self.type=="out_receipt":
            cuenta_ret_cliente=self.partner_id.account_ret_receivable_id.id# customer retention account
            cuenta_ret_proveedor=self.partner_id.account_ret_payable_id.id# supplier retention account
            cuenta_clien_cobrar=self.partner_id.property_account_receivable_id.id
            cuenta_prove_pagar = self.partner_id.property_account_payable_id.id
            rate_valor=self.partner_id.vat_retention_rate
        if self.type=="in_invoice" or self.type=="in_refund" or self.type=="in_receipt":
            if self.env.company.confg_ret_proveedores=="c":#loca14
                cuenta_ret_cliente=self.env.company.partner_id.account_ret_receivable_id.id# loca14 customer retention account
                cuenta_ret_proveedor=self.env.company.partner_id.account_ret_payable_id.id# loca14 supplier retention account
                cuenta_clien_cobrar=self.env.company.partner_id.property_account_receivable_id.id #loca14
                cuenta_prove_pagar = self.env.company.partner_id.property_account_payable_id.id #loca14
                rate_valor=self.env.company.partner_id.vat_retention_rate #loca14
            if self.env.company.confg_ret_proveedores=="p": #loca14
                cuenta_ret_cliente=self.partner_id.account_ret_receivable_id.id# customer retention account
                cuenta_ret_proveedor=self.partner_id.account_ret_payable_id.id# supplier retention account
                cuenta_clien_cobrar=self.partner_id.property_account_receivable_id.id
                cuenta_prove_pagar = self.partner_id.property_account_payable_id.id
                rate_valor=self.partner_id.vat_retention_rate
        tipo_empresa=self.move_id.type
        #raise UserError(_('papa = %s')%tipo_empresa)
        if tipo_empresa=="in_invoice" or tipo_empresa=="in_receipt":# partner acts as a supplier here
            cuenta_haber=cuenta_ret_proveedor
            cuenta_debe=cuenta_prove_pagar
        if tipo_empresa=="in_refund":
            cuenta_haber=cuenta_prove_pagar
            cuenta_debe=cuenta_ret_proveedor
        if tipo_empresa=="out_invoice" or tipo_empresa=="out_receipt":# partner acts as a customer here
            cuenta_haber=cuenta_clien_cobrar
            cuenta_debe=cuenta_ret_cliente
        if tipo_empresa=="out_refund":
            cuenta_haber=cuenta_ret_cliente
            cuenta_debe=cuenta_clien_cobrar
        balances=cero-valores
        value = {
            'name': name,
            'ref' : "Retención del %s %% IVA de la Factura %s" % (rate_valor,self.move_id.name),
            'move_id': int(id_movv),
            'date': self.move_id.date,
            'partner_id': self.partner_id.id,
            'account_id': cuenta_haber,
            #'currency_id':self.invoice_id.currency_id.id,
            #'amount_currency': 0.0,
            #'date_maturity': False,
            'credit': valores,
            'debit': 0.0, # must be zero here: if debit also carried a value, account_move would pick it up
            'balance':-valores, # negative sign
            'price_unit':balances,
            'price_subtotal':balances,
            'price_total':balances,
            }
        move_line_obj = self.env['account.move.line']
        move_line_id1 = move_line_obj.create(value)
        # Reuse the same dict for the balancing debit line.
        balances=valores-cero
        value['account_id'] = cuenta_debe
        value['credit'] = 0.0 # zero here
        value['debit'] = valores
        value['balance'] = valores
        value['price_unit'] = balances
        value['price_subtotal'] = balances
        value['price_total'] = balances
        move_line_id2 = move_line_obj.create(value)
    def concilio_saldo_pendiente(self):
        """Partially reconcile the retention entry's receivable/payable
        lines (debit vs credit) for this voucher."""
        id_retention=self.id
        tipo_empresa=self.move_id.type
        if tipo_empresa=="in_invoice" or tipo_empresa=="out_refund":# partner acts as a supplier here
            type_internal="payable"
        if tipo_empresa=="out_invoice" or tipo_empresa=="in_refund":# partner acts as a customer here
            type_internal="receivable"
        busca_movimientos = self.env['account.move'].search([('vat_ret_id','=',id_retention)])
        for det_movimientos in busca_movimientos:
            busca_line_mov = self.env['account.move.line'].search([('move_id','=',det_movimientos.id),('account_internal_type','=',type_internal)])
            # A zero credit marks the debit line and vice versa.
            if busca_line_mov.credit==0:
                id_move_debit=busca_line_mov.id
                monto_debit=busca_line_mov.debit
            if busca_line_mov.debit==0:
                id_move_credit=busca_line_mov.id
                monto_credit=busca_line_mov.credit
        if tipo_empresa=="in_invoice" or tipo_empresa=="out_refund":
            monto=monto_debit
        if tipo_empresa=="out_invoice" or tipo_empresa=="in_refund":
            monto=monto_credit
        value = {
            'debit_move_id':id_move_debit,
            'credit_move_id':id_move_credit,
            'amount':monto,
            'max_date':self.accouting_date,
            }
        self.env['account.partial.reconcile'].create(value)
        #raise UserError(_('concilia = %s')%busca_movimientos)
    def get_name(self):
        '''Return the next accounting-entry name; if the
        'l10n_ve_cuenta_retencion_iva' sequence does not exist yet,
        create it first.'''
        self.ensure_one()
        SEQUENCE_CODE = 'l10n_ve_cuenta_retencion_iva'
        company_id = 1
        IrSequence = self.env['ir.sequence'].with_context(force_company=1)
        name = IrSequence.next_by_code(SEQUENCE_CODE)
        # if no sequence exists yet for this company, create one
        if not name:
            IrSequence.sudo().create({
                'prefix': 'RET_IVA/',
                'name': 'Localización Venezolana Retenciones IVA %s' % 1,
                'code': SEQUENCE_CODE,
                'implementation': 'no_gap',
                'padding': 8,
                'number_increment': 1,
                'company_id': self.env.company.id,#loca14
                })
            name = IrSequence.next_by_code(SEQUENCE_CODE)
        return name
| [
"inmldrsolucionestecnologicas@gmail.com"
] | inmldrsolucionestecnologicas@gmail.com |
7c901dd586db3c2163b73b8f6fe6fb98eb5601eb | 10b3d1ce02eaa4908dc16ca378ddfb1955b2d625 | /MV3D_TF_release/lib/datasets/kitti_mv3d.py | 89c9798b9fcbd959dd7851e2ed8b0745a38e18fc | [
"MIT",
"BSD-3-Clause"
] | permissive | ZiningWang/Sparse_Pooling | 7281aa0d974849eac8c48faa5ba08519b091ef6e | f46882832d0e2fed5ab4a0af15cead44fd3c6faa | refs/heads/master | 2023-05-26T08:47:16.232822 | 2023-05-20T08:39:11 | 2023-05-20T08:39:11 | 141,640,800 | 56 | 21 | null | null | null | null | UTF-8 | Python | false | false | 40,261 | py | # WZN: Note here we unify all LIDAR points to camera frame!!!
__author__ = 'yuxiang' # derived from honda.py by fyang
import datasets
import datasets.kitti_mv3d
import os
import time
import PIL
import datasets.imdb
import numpy as np
from matplotlib import pyplot as plt
import scipy.sparse
from utils.cython_bbox import bbox_overlaps
from utils.boxes_grid import get_boxes_grid
import subprocess
import pickle
from fast_rcnn.config import cfg
import math
from rpn_msr.generate_anchors import generate_anchors_bv
from utils.transform import camera_to_lidar_cnr, lidar_to_corners_single, computeCorners3D, lidar_3d_to_bv, lidar_cnr_to_3d,bv_anchor_to_lidar,lidar_cnr_to_camera_bv,lidar_cnr_to_bv_cnr
class kitti_mv3d(datasets.imdb):
    def __init__(self, image_set, kitti_path=None,object_name='cars'):
        """Initialise the KITTI imdb wrapper.

        Args:
            image_set: split name (e.g. 'train'/'test'); selects
                ImageSets/<image_set>.txt and the testing/training folders.
            kitti_path: KITTI root directory; defaults to
                <ROOT_DIR>/data/KITTI (see _get_default_path).
            object_name: detection subset, 'cars' or 'peds' (see set_object).
        """
        datasets.imdb.__init__(self, image_set)
        self._image_set = image_set
        # self._kitti_path = '$Faster-RCNN_TF/data/KITTI'
        self._kitti_path = self._get_default_path() if kitti_path is None \
                            else kitti_path
        # self._data_path = '$Faster-RCNN_TF/data/KITTI/object'
        self._data_path = os.path.join(self._kitti_path, 'object')
        self._set_label_dir()
        self.set_object(object_name)
        '''
        if object_name=='cars':
            #for cars
            self._classes = ('__background__', 'Car', 'Van', 'Truck', 'Tram')#, 'Pedestrian', 'Cyclist')
            self._class_to_ind = dict(zip(self.classes, [0,1,1,1,1]))
        elif object_name=='peds':
            #for peds and cyclists
            #self.num_classes = 3 #0 for background, 1 ped, 2 for cyc, 3 for non-interested region
            self._classes = ('__background__', 'Pedestrian')
            self._class_to_ind = dict(zip(self.classes, [0,1]))
        else:
            assert False, 'invalid training object'
        '''
        self._image_ext = '.png'
        self._lidar_ext = '.npy'
        self._lidar_pc_ext = '.npy'
        self._subset = object_name
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        self._roidb_handler = self.gt_roidb
        self.config = {'top_k': 100000}
        # statistics for computing recall
        # self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int)
        # self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int)
        # self._num_boxes_proposal = 0
        assert os.path.exists(self._kitti_path), \
                'KITTI path does not exist: {}'.format(self._kitti_path)
        assert os.path.exists(self._data_path), \
                'Path does not exist: {}'.format(self._data_path)
def set_object(self,object_name):
if object_name=='cars':
#for cars
self._classes = ('__background__', 'Car', 'Van', 'Truck', 'Tram')#, 'Pedestrian', 'Cyclist')
self.classes_write = ('Car','Pedestrian')
self._class_to_ind = dict(zip(self.classes, [0,1,1,1,1]))
elif object_name=='peds':
#for peds and cyclists
#self.num_classes = 3 #0 for background, 1 ped, 2 for cyc, -1 for non-interested region, -2 for person_sitting (because thet have bv_boxes)
self._classes = ('__background__', 'Pedestrian','Person_sitting') #,'DontCare'
self.classes_write = ('Car', 'Pedestrian')
self._class_to_ind = dict(zip(self.classes, [0,1,1])) # I think treating them as 1 makes more positives, that's good #,-1
else:
assert False, 'invalid training object'
self._subset = object_name
self._roidb_handler = self.gt_roidb
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self.image_index[i])
def lidar_pc_path_at(self, i):
if self._image_set == 'test':
prefix = 'testing/lidar_pc' #for voxel
else:
prefix = 'training/lidar_pc' #for voxel
# lidar_bv_path = '$Faster-RCNN_TF/data/KITTI/object/training/lidar_bv/000000.npy'
lidar_path = os.path.join(self._data_path, prefix, self.image_index[i] + self._lidar_pc_ext)
assert os.path.exists(lidar_path), \
'Path does not exist: {}'.format(lidar_path)
return lidar_path
def lidar_path_at(self, i):
"""
Return the absolute path to lidar i in the lidar sequence.
"""
return self.lidar_path_from_index(self.image_index[i])
def calib_at(self, i):
"""
Return the calib sequence.
"""
index = self.image_index[i]
calib_ori = self._load_kitti_calib(index)
calib = np.zeros((4, 12))
calib[0,:] = calib_ori['P2'].reshape(12)
calib[1,:] = calib_ori['P3'].reshape(12)
calib[2,:9] = calib_ori['R0'].reshape(9)
calib[3,:] = calib_ori['Tr_velo2cam'].reshape(12)
return calib
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
# set the prefix
if self._image_set == 'test':
prefix = 'testing/image_2'
else:
prefix = 'training/image_2'
# image_path = '$Faster-RCNN_TF/data/KITTI/object/training/image_2/000000.png'
image_path = os.path.join(self._data_path, prefix, index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def lidar_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
# set the prefix
if self._image_set == 'test':
prefix = 'testing/lidar_bv' #for MV3D
else:
prefix = 'training/lidar_bv' #for MV3D
# lidar_bv_path = '$Faster-RCNN_TF/data/KITTI/object/training/lidar_bv/000000.npy'
lidar_bv_path = os.path.join(self._data_path, prefix, index + self._lidar_ext)
assert os.path.exists(lidar_bv_path), \
'Path does not exist: {}'.format(lidar_bv_path)
return lidar_bv_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# image_set_file = '$Faster-RCNN_TF/data/KITTI/ImageSets/train.txt'
image_set_file = os.path.join(self._kitti_path, 'ImageSets',self._image_set + '.txt')
self.list_dir = image_set_file
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
#WZN: return lines without '\n'
image_index = [x.rstrip('\n') for x in f.readlines()]
print ('image sets length: ', len(image_index))
return image_index
    def _get_default_path(self):
        """
        Return the default path where KITTI is expected to be installed.
        """
        # NOTE(review): relies on datasets.ROOT_DIR being provided by the enclosing package.
        return os.path.join(datasets.ROOT_DIR, 'data', 'KITTI')
    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        WZN: first time read Kitti labels, and save a cache
        """
        # Cache name encodes dataset name and subset so different splits do not collide.
        cache_file = os.path.join(self.cache_path, self.name +'_' +self._subset + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            print ('{} gt roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        # Cache miss: parse every label file once, then persist the result.
        gt_roidb = [self._load_kitti_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
        print ('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb
    def _load_kitti_calib(self, index):
        """
        load projection matrix

        Parses <data_path>/<split>/calib/<index>.txt and returns the matrices
        used by this class: P2/P3 (left/right color camera projections, 3x4),
        R0 (rectifying rotation, 3x3) and Tr_velo2cam (lidar->camera, 3x4).
        The KITTI calib file layout is positional, hence the fixed line indices.
        """
        if self._image_set == 'test':
            prefix = 'testing/calib'
        else:
            prefix = 'training/calib'
        calib_dir = os.path.join(self._data_path, prefix, index + '.txt')
        #          P0 = np.zeros(12, dtype=np.float32)
        #          P1 = np.zeros(12, dtype=np.float32)
        #          P2 = np.zeros(12, dtype=np.float32)
        #          P3 = np.zeros(12, dtype=np.float32)
        #          R0 = np.zeros(9, dtype=np.float32)
        #          Tr_velo_to_cam = np.zeros(12, dtype=np.float32)
        #          Tr_imu_to_velo = np.zeros(12, dtype=np.float32)
        #         j = 0
        with open(calib_dir) as fi:
            lines = fi.readlines()
        # assert(len(lines) == 8)
        # obj = lines[0].strip().split(' ')[1:]
        # P0 = np.array(obj, dtype=np.float32)
        # obj = lines[1].strip().split(' ')[1:]
        # P1 = np.array(obj, dtype=np.float32)
        # Lines 2..5 are P2, P3, R0_rect, Tr_velo_to_cam; the leading token on
        # each line is the field name and is dropped by the [1:] slice.
        obj = lines[2].strip().split(' ')[1:]
        P2 = np.array(obj, dtype=np.float32)
        obj = lines[3].strip().split(' ')[1:]
        P3 = np.array(obj, dtype=np.float32)
        obj = lines[4].strip().split(' ')[1:]
        R0 = np.array(obj, dtype=np.float32)
        obj = lines[5].strip().split(' ')[1:]
        Tr_velo_to_cam = np.array(obj, dtype=np.float32)
        # obj = lines[6].strip().split(' ')[1:]
        # P0 = np.array(obj, dtype=np.float32)
        return {'P2' : P2.reshape(3,4),
                'P3' : P3.reshape(3,4),
                'R0' : R0.reshape(3,3),
                'Tr_velo2cam' : Tr_velo_to_cam.reshape(3, 4)}
    def _set_label_dir(self):
        # Ground-truth labels always live under the training split (the test split has none).
        self.gt_dir = os.path.join(self._data_path, 'training/label_2')
    def _load_kitti_annotation(self, index):
        """
        Load image and bounding boxes info from txt file in the KITTI
        format.
        WZN: The non-interested area (Dontcare) is just ignored (treated same as background)

        Returns one annotation dict per sample with per-object arrays:
        image boxes, camera-frame 3D boxes and corners, class ids, a one-hot
        sparse overlap matrix, and a per-object difficulty level.
        """
        if self._image_set == 'test':
            # Test split has no labels: return an empty annotation record.
            return {'ry' : np.array([]),
                    'lwh' : np.array([]),
                    'boxes' : np.array([]), #xy box in image
                    #'boxes_bv' : boxes_bv, #xy box in bird view
                    'boxes_3D_cam' : np.array([]), #[xyz_center, lwh] in 3D, cam frame
                    #'boxes_3D' : boxes3D_lidar, #[xyz_center, lwh] in 3D, absolute
                    'boxes3D_cam_corners' : np.array([]), #8 corners of box in 3D, cam frame
                    #'boxes_corners' : boxes3D_corners, #8 corners of box in 3D
                    #'boxes_bv_corners' : boxes_bv_corners, #4 corners of box in bird view
                    'gt_classes': np.array([]), #classes
                    'gt_overlaps' : np.array([]), #default 1, changed later
                    'xyz' : np.array([]),
                    'alphas' :np.array([]),
                    'diff_level': np.array([]),
                    'flipped' : False}
        else:
            # filename = '$Faster-RCNN_TF/data/KITTI/object/training/label_2/000000.txt'
            filename = os.path.join(self.gt_dir, index + '.txt')
            # print("Loading: ", filename)
            # calib
            calib = self._load_kitti_calib(index)
            # Combined rectification + lidar->camera transform.
            Tr = np.dot(calib['R0'],calib['Tr_velo2cam'])
            # print 'Loading: {}'.format(filename)
            with open(filename, 'r') as f:
                lines = f.readlines()
            num_objs = len(lines)
            # Pre-allocate one row per label line; arrays are shrunk down to the
            # kept objects via resize() after the loop.
            translation = np.zeros((num_objs, 3), dtype=np.float32)
            rys = np.zeros((num_objs), dtype=np.float32)
            lwh = np.zeros((num_objs, 3), dtype=np.float32)
            boxes = np.zeros((num_objs, 4), dtype=np.float32)
            boxes_bv = np.zeros((num_objs, 4), dtype=np.float32)
            boxes3D = np.zeros((num_objs, 6), dtype=np.float32)
            boxes3D_lidar = np.zeros((num_objs, 6), dtype=np.float32)
            boxes3D_cam_cnr = np.zeros((num_objs, 24), dtype=np.float32)
            boxes3D_corners = np.zeros((num_objs, 24), dtype=np.float32)
            alphas = np.zeros((num_objs), dtype=np.float32)
            gt_classes = np.zeros((num_objs), dtype=np.int32)
            overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
            # new difficulty level for in training evaluation
            diff_level = np.zeros((num_objs), dtype=np.int32)
            # print(boxes3D.shape)
            # Load object bounding boxes into a data frame.
            ix = -1
            for line in lines:
                obj = line.strip().split(' ')
                try:
                    #WZN.strip() removes white spaces
                    # Unknown class names (e.g. DontCare) raise KeyError and are skipped.
                    cls = self._class_to_ind[obj[0].strip()]
                    # print cls
                except:
                    continue
                # ignore objects with undetermined difficult level
                level = self._get_obj_level(obj)
                if level > 3:
                    continue
                ix += 1
                # 0-based coordinates
                alpha = float(obj[3])
                x1 = float(obj[4])
                y1 = float(obj[5])
                x2 = float(obj[6])
                y2 = float(obj[7])
                h = float(obj[8])
                w = float(obj[9])
                l = float(obj[10])
                tx = float(obj[11])
                ty = float(obj[12])
                tz = float(obj[13])
                ry = float(obj[14])
                diff_level[ix]=level
                # Person_sitting is marked -1 so evaluation can exclude it.
                if obj[0].strip() == 'Person_sitting':
                    diff_level[ix]=-1
                rys[ix] = ry
                lwh[ix, :] = [l, w, h]
                alphas[ix] = alpha
                translation[ix, :] = [tx, ty, tz]
                boxes[ix, :] = [x1, y1, x2, y2]
                boxes3D[ix, :] = [tx, ty, tz, l, w, h]
                # convert boxes3D cam to 8 corners(cam)
                boxes3D_cam_cnr_single = computeCorners3D(boxes3D[ix, :], ry)
                boxes3D_cam_cnr[ix, :] = boxes3D_cam_cnr_single.reshape(24)
                # convert 8 corners(cam) to 8 corners(lidar)
                boxes3D_corners[ix, :] = camera_to_lidar_cnr(boxes3D_cam_cnr_single, Tr)
                # convert 8 corners(cam) to lidar boxes3D, note this is not ivertible because we LOSE ry!
                boxes3D_lidar[ix, :] = lidar_cnr_to_3d(boxes3D_corners[ix, :], lwh[ix,:])
                # convert 8 corners(lidar) to lidar bird view
                boxes_bv[ix, :] = lidar_3d_to_bv(boxes3D_lidar[ix, :])
                # boxes3D_corners[ix, :] = lidar_to_corners_single(boxes3D_lidar[ix, :])
                gt_classes[ix] = cls
                overlaps[ix, cls] = 1.0
            # Shrink every buffer to the ix+1 objects actually kept.
            rys.resize(ix+1)
            lwh.resize(ix+1, 3)
            translation.resize(ix+1, 3)
            alphas.resize(ix+1)
            boxes.resize(ix+1, 4)
            boxes_bv.resize(ix+1, 4)
            boxes3D.resize(ix+1, 6)
            boxes3D_lidar.resize(ix+1, 6)
            boxes3D_cam_cnr.resize(ix+1, 24)
            boxes3D_corners.resize(ix+1, 24)
            boxes_bv_corners = lidar_cnr_to_bv_cnr(boxes3D_corners)
            gt_classes.resize(ix+1)
            # print(self.num_classes)
            overlaps.resize(ix+1, self.num_classes)
            diff_level.resize(ix+1)
            # if index == '000142':
            #     print(index)
            #     print(overlaps)
            overlaps = scipy.sparse.csr_matrix(overlaps)
            # if index == '000142':
            #     print(overlaps)
            #if ix>=0:
            #    print index
            return {'ry' : rys,
                    'lwh' : lwh,
                    'boxes' : boxes, #xy box in image
                    #'boxes_bv' : boxes_bv, #xy box in bird view
                    'boxes_3D_cam' : boxes3D, #[xyz_center, lwh] in 3D, cam frame
                    #'boxes_3D' : boxes3D_lidar, #[xyz_center, lwh] in 3D, absolute
                    'boxes3D_cam_corners' : boxes3D_cam_cnr, #8 corners of box in 3D, cam frame
                    #'boxes_corners' : boxes3D_corners, #8 corners of box in 3D
                    #'boxes_bv_corners' : boxes_bv_corners, #4 corners of box in bird view
                    'gt_classes': gt_classes, #classes
                    'gt_overlaps' : overlaps, #default 1, changed later
                    'xyz' : translation,
                    'alphas' :alphas,
                    'diff_level': diff_level,
                    'flipped' : False}
def _get_obj_level(self, obj):
height = float(obj[7]) - float(obj[5]) + 1
trucation = float(obj[1])
occlusion = float(obj[2])
if height >= 40 and trucation <= 0.15 and occlusion <= 0:
return 1
elif height >= 25 and trucation <= 0.3 and occlusion <= 1:
return 2
#WZN: changed from <=2 to <2
elif height >= 25 and trucation <= 0.5 and occlusion < 2:
return 3
else:
return 4
def _write_kitti_results_file(self, all_boxes, all_boxes3D):
# use_salt = self.config['use_salt']
# comp_id = ''
# if use_salt:
# comp_id += '{}'.format(os.getpid())
#WZN: only write 2D detection result.
path = os.path.join(datasets.ROOT_DIR, 'kitti/results', 'kitti_' + self._subset + '_' + self._image_set + '_' \
+ '-' + time.strftime('%m-%d-%H-%M-%S',time.localtime(time.time())), 'data')
if os.path.exists(path):
pass
else:
os.makedirs(path)
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(path, index + '.txt')
with open(filename, 'wt') as f:
for cls_ind, cls in enumerate(self.classes):
if cls=='__background__' or cls=='DontCare' or cls=='Person_sitting':
continue
dets = all_boxes[cls_ind][im_ind]
# dets3D = all_boxes3D[cls_ind][im_ind]
# alphas = all_alphas[cls_ind][im_ind]
if dets == []:
continue
# the KITTI server expects 0-based indices
for k in range(dets.shape[0]):
# TODO
# alpha = dets3D[k, 0] - np.arctan2(dets3D[k, 4], dets3D[k, 6])
alpha = 0
# WZN: .lower() changes letters to lower case.
f.write('{:s} -1 -1 {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} -1 -1 -1 -1 -1 -1 -1 -1\n' \
.format(cls.lower(), alpha, \
dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3]))
return path
    def _write_kitti_results_bv_file(self, all_2d_boxes, all_ry, all_bv_boxes, calibs, all_scores,result_path=None):
        """Write bird-view detections as KITTI-format txt files, one per image.

        Each lidar bird-view anchor is lifted to a 3D lidar box, converted to
        camera-frame bird-view coordinates with the per-image calibration, and
        written together with its 2D image box and score. Returns the output
        directory.
        """
        # use_salt = self.config['use_salt']
        # comp_id = ''
        # if use_salt:
        #     comp_id += '{}'.format(os.getpid())
        #WZN: only write 2D detection result.
        if result_path is None:
            result_path = 'kitti/results'
        else:
            result_path = 'kitti/results/'+result_path
        # Time-stamped directory so repeated runs do not overwrite each other.
        path = os.path.join(datasets.ROOT_DIR, result_path, 'kitti_' + self._subset + '_' + self._image_set + '_' \
                 + '-' + time.strftime('%m-%d-%H-%M-%S',time.localtime(time.time())), 'data')
        if os.path.exists(path):
            pass
        else:
            os.makedirs(path)
        print_debug=0
        for im_ind, index in enumerate(self.image_index):
            filename = os.path.join(path, index + '.txt')
            with open(filename, 'wt') as f:
                for cls_ind, cls_name in enumerate(self.classes_write):
                    if cls_name == '__background__':
                        continue
                    dets2d = all_2d_boxes[cls_ind][im_ind] # should be [x1,y1,x2,y2]
                    if dets2d is None:
                        continue
                    #print im_ind, len(all_2d_boxes[cls_ind])
                    rys = all_ry[cls_ind][im_ind]
                    calib = calibs[im_ind]
                    scores = all_scores[cls_ind][im_ind].reshape([-1])
                    # Unpack the (4, 12) packed calib produced by calib_at().
                    R0 = calib[2,:9].reshape((3,3))
                    Tr_velo2cam = calib[3,:].reshape((3,4))
                    #print R0, Tr_velo2cam
                    Tr = np.dot(R0,Tr_velo2cam)
                    detslidar = bv_anchor_to_lidar(all_bv_boxes[cls_ind][im_ind]) # should be [x,y,z,l,w,h] in lidar
                    dets_bv_cam = np.zeros((detslidar.shape[0],4))
                    ry_bv = np.zeros(detslidar.shape[0])
                    for iry, ry in enumerate(rys):
                        detscorner = lidar_to_corners_single(detslidar[iry,:],ry) # should be corners in lidar
                        dets_bv_cam[iry,:],ry_bv[iry] = lidar_cnr_to_camera_bv(detscorner, Tr)
                    # the KITTI server expects 0-based indices
                    alpha = 0
                    k=0
                    #if print_debug==0:
                    #    print cls_name.lower(), alpha, dets2d[k, 0], dets2d[k, 1], dets2d[k, 2], dets2d[k, 3],dets_bv_cam[k,3],dets_bv_cam[k,2],dets_bv_cam[k,0],dets_bv_cam[k,1],ry_bv[k], scores[k]
                    #    print scores.shape,ry_bv.shape
                    #    print_debug=1
                    for k in range(dets2d.shape[0]):
                        # WZN: .lower() changes letters to lower case.
                        f.write('{:s} -1 -1 {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} -1000 {:.2f} {:.2f} {:.2f} -1000 {:.2f} {:.2f} {:.3f}\n' \
                                .format(cls_name.lower(), alpha, \
                                dets2d[k, 0], dets2d[k, 1], dets2d[k, 2], dets2d[k, 3],\
                                dets_bv_cam[k,3],dets_bv_cam[k,2],dets_bv_cam[k,0],dets_bv_cam[k,1],\
                                ry_bv[k], scores[k]*1000)) # WZN: *1000 is used in MSCNN, does not change result but makes it more readble
        return path
    def _write_kitti_results_bv_cnr_file(self, all_2d_boxes, all_ry, all_3d_cnrs, calibs, all_scores,result_path=None):
        """Like _write_kitti_results_bv_file, but the 3D detections arrive as
        lidar-frame corner sets (all_3d_cnrs) instead of bird-view anchors, so
        no anchor-to-lidar conversion is needed. Returns the output directory."""
        # use_salt = self.config['use_salt']
        # comp_id = ''
        # if use_salt:
        #     comp_id += '{}'.format(os.getpid())
        #WZN: only write 2D detection result.
        if result_path is None:
            result_path = 'kitti/results'
        else:
            result_path = 'kitti/results/'+result_path
        # Time-stamped directory so repeated runs do not overwrite each other.
        path = os.path.join(datasets.ROOT_DIR, result_path, 'kitti_' + self._subset + '_' + self._image_set + '_' \
                 + '-' + time.strftime('%m-%d-%H-%M-%S',time.localtime(time.time())), 'data')
        if os.path.exists(path):
            pass
        else:
            os.makedirs(path)
        print_debug=0
        for im_ind, index in enumerate(self.image_index):
            filename = os.path.join(path, index + '.txt')
            with open(filename, 'wt') as f:
                for cls_ind, cls_name in enumerate(self.classes_write):
                    if cls_name == '__background__':
                        continue
                    dets2d = all_2d_boxes[cls_ind][im_ind] # should be [x1,y1,x2,y2]
                    if dets2d is None:
                        continue
                    #print im_ind, len(all_2d_boxes[cls_ind])
                    rys = all_ry[cls_ind][im_ind]
                    calib = calibs[im_ind]
                    scores = all_scores[cls_ind][im_ind].reshape([-1])
                    # Unpack the (4, 12) packed calib produced by calib_at().
                    R0 = calib[2,:9].reshape((3,3))
                    Tr_velo2cam = calib[3,:].reshape((3,4))
                    #print R0, Tr_velo2cam
                    Tr = np.dot(R0,Tr_velo2cam)
                    #detslidar = bv_anchor_to_lidar(all_bv_boxes[cls_ind][im_ind]) # should be [x,y,z,l,w,h] in lidar
                    detscorners = all_3d_cnrs[cls_ind][im_ind]
                    dets_bv_cam = np.zeros((detscorners.shape[0],4))
                    ry_bv = np.zeros(detscorners.shape[0])
                    for iry, ry in enumerate(rys):
                        #detscorner = lidar_to_corners_single(detslidar[iry,:],ry) # should be corners in lidar
                        dets_bv_cam[iry,:],ry_bv[iry] = lidar_cnr_to_camera_bv(detscorners[iry,:], Tr)
                    # the KITTI server expects 0-based indices
                    alpha = 0
                    k=0
                    #if print_debug==0:
                    #    print cls_name.lower(), alpha, dets2d[k, 0], dets2d[k, 1], dets2d[k, 2], dets2d[k, 3],dets_bv_cam[k,3],dets_bv_cam[k,2],dets_bv_cam[k,0],dets_bv_cam[k,1],ry_bv[k], scores[k]
                    #    print scores.shape,ry_bv.shape
                    #    print_debug=1
                    for k in range(dets2d.shape[0]):
                        # WZN: .lower() changes letters to lower case.
                        f.write('{:s} -1 -1 {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} -1000 {:.2f} {:.2f} {:.2f} -1000 {:.2f} {:.2f} {:.3f}\n' \
                                .format(cls_name.lower(), alpha, \
                                dets2d[k, 0], dets2d[k, 1], dets2d[k, 2], dets2d[k, 3],\
                                dets_bv_cam[k,3],dets_bv_cam[k,2],dets_bv_cam[k,0],dets_bv_cam[k,1],\
                                ry_bv[k], scores[k]*1000)) # WZN: *1000 is used in MSCNN, does not change result but makes it more readble
        return path
    def _write_kitti_results_voxel_file(self, all_ry, all_3d_bbox, all_scores,result_path=None):
        """Write voxel-pipeline detections in KITTI format.

        Boxes are already in the camera frame, so no lidar->camera conversion
        is performed; the 2D image box is written as a dummy (0, 0, 100, 100).
        Returns the output directory.
        """
        #WZN: only write 2D detection result. difference is here the corners are already in camera frame
        if result_path is None:
            result_path = 'kitti/results'
        else:
            result_path = 'kitti/results/'+result_path
        # Time-stamped directory so repeated runs do not overwrite each other.
        path = os.path.join(datasets.ROOT_DIR, result_path, 'kitti_' + self._subset + '_' + self._image_set + '_' \
                 + '-' + time.strftime('%m-%d-%H-%M-%S',time.localtime(time.time())), 'data')
        if os.path.exists(path):
            pass
        else:
            os.makedirs(path)
        print_debug=0
        for im_ind, index in enumerate(self.image_index):
            filename = os.path.join(path, index + '.txt')
            with open(filename, 'wt') as f:
                for cls_ind, cls_name in enumerate(self.classes_write):
                    if cls_name == '__background__':
                        continue
                    rys = all_ry[cls_ind][im_ind]
                    if rys is None:
                        continue
                    dets_3d_bbox = all_3d_bbox[cls_ind][im_ind]
                    scores = all_scores[cls_ind][im_ind].reshape([-1])
                    # NOTE(review): picks columns (0, 2, 3, 4) as the bird view;
                    # assumes rows are laid out [x, y, z, l, w, h] — confirm upstream.
                    dets_bv_cam = dets_3d_bbox[:,[0,2,3,4]]
                    #dets2d =
                    ry_bv = rys
                    # the KITTI server expects 0-based indices
                    alpha = 0
                    k=0
                    #if print_debug==0:
                    #    print cls_name.lower(), alpha, dets2d[k, 0], dets2d[k, 1], dets2d[k, 2], dets2d[k, 3],dets_bv_cam[k,3],dets_bv_cam[k,2],dets_bv_cam[k,0],dets_bv_cam[k,1],ry_bv[k], scores[k]
                    #    print scores.shape,ry_bv.shape
                    #    print_debug=1
                    for k in range(ry_bv.shape[0]):
                        # WZN: .lower() changes letters to lower case.
                        f.write('{:s} -1 -1 {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} -1000 {:.2f} {:.2f} {:.2f} -1000 {:.2f} {:.2f} {:.3f}\n' \
                                .format(cls_name.lower(), alpha, \
                                0,0,100,100,\
                                dets_bv_cam[k,3],dets_bv_cam[k,2],dets_bv_cam[k,0],dets_bv_cam[k,1],\
                                ry_bv[k], scores[k]*1000)) # WZN: *1000 is used in MSCNN, does not change result but makes it more readble  #dets2d[k, 0], dets2d[k, 1], dets2d[k, 2], dets2d[k, 3],\
        return path
def _write_corners_results_file(self, all_boxes, all_boxes3D):
# use_salt = self.config['use_salt']
# comp_id = ''
# if use_salt:
# comp_id += '{}'.format(os.getpid())
#WZN: looks like this is still not usable
path = os.path.join(datasets.ROOT_DIR, 'kitti/results_cnr', 'kitti_' + self._subset + '_' + self._image_set + '_' \
+ '-' + time.strftime('%m-%d-%H-%M-%S',time.localtime(time.time())), 'data')
if os.path.exists(path):
pass
else:
os.makedirs(path)
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(path, index + '.npy')
with open(filename, 'wt') as f:
for cls_ind, cls in enumerate(self.classes_write):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
dets3D = all_boxes3D[cls_ind][im_ind]
# alphas = all_alphas[cls_ind][im_ind]
if dets == []:
continue
# the KITTI server expects 0-based indices
for k in range(dets.shape[0]):
obj = np.hstack((dets[k], dets3D[k, 1:]))
# print obj.shape
np.save(filename, obj)
# # TODO
# alpha = dets3D[k, 0] - np.arctan2(dets3D[k, 4], dets3D[k, 6])
# f.write('{:s} -1 -1 {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.3f}\n' \
# .format(cls.lower(), alpha, \
# dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], \
# dets3D[k, 2], dets3D[k, 3], dets3D[k, 1], \
# dets3D[k, 4], dets3D[k, 5], dets3D[k, 6], dets3D[k, 0], dets[k, 4]))
print ('Done')
# return path
    def _do_eval(self, path, output_dir='output'):
        #WZN: do 2D evaluation
        # Runs the compiled KITTI C++ evaluator on the parent of `path`;
        # output_dir is currently unused.
        cmd = os.path.join(datasets.ROOT_DIR, 'kitti/eval/cpp/evaluate_object {}'.format(os.path.dirname(path)))
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)
    def _do_eval_bv(self, path, output_dir='output'):
        #WZN: do 2D evaluation
        # Runs the compiled bird-view evaluator with (gt_dir, results_dir, split list);
        # output_dir is currently unused.
        cmd = os.path.join(datasets.ROOT_DIR, 'kitti/eval/cpp/evaluate_bv {} {} {}'.format(self.gt_dir,os.path.dirname(path),self.list_dir))
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)
    def evaluate_detections(self, all_boxes, all_boxes3D, output_dir):
        #WZN: call write result and 2D evaluation, no more fancy things
        # Only writes the KITTI-format result files; the C++ evaluation call is disabled.
        self._write_kitti_results_file(all_boxes, all_boxes3D)
        # path = self._write_kitti_results_file(all_boxes, all_boxes3D)
        # if self._image_set != 'test':
        #     self._do_eval(path)
    # multiple threshold to get PR-curve
    def _do_validation_bv(self,boxes_bv,gt_blob,scores=None,thres=0.5,ignore=0.05,DEBUG=False):
        """PR-curve statistics for bird-view boxes over a sweep of score thresholds.

        Returns (recalls, precisions, gt_nums, pos_nums), each of shape
        (n_thresholds, 3) with one column per KITTI difficulty level
        (easy / moderate / hard). `thres` is the IoU for a true positive,
        `ignore` drops detections below that score before matching.
        NOTE(review): matching is looser than the official C++ eval (see inline note).
        """
        diff_level = gt_blob['diff_level']
        #ignored_height = gt_blob['ignored_height'] #same as cpp for eval (40,25,25 in pixels, but here transformed)
        #the processed bv_boxes, first we only do eval here because polygon intersection is not easy
        #diff_level is the difficulty of ground truth in KITTI, should be the same as gt_box. {-1,0,1,2}, -1 can be ignored
        positive_ind = gt_blob['gt_boxes_bv'][:,4]>0
        diff_level = diff_level[positive_ind]
        #print diff_level.T
        gt_bv = gt_blob['gt_boxes_bv'][positive_ind,0:4]
        bbox_bv = boxes_bv[:,0:4]
        #filter low scores
        assert not(scores is None), 'no score to produce PR-curve'
        scores = np.reshape(scores,[-1])
        bbox_bv = bbox_bv[scores>ignore,:]
        scores = scores[scores>ignore]
        #print scores.shape, scores.size , gt_bv.shape
        ##sort so that we can accumulately calculate
        #ind_sort = np.argsort(scores)
        # Compute all detection-vs-gt overlaps once; each threshold below just masks rows.
        if scores.size>0 and gt_bv.shape[0]>0:
            overlaps_all = bbox_overlaps(
                np.ascontiguousarray(bbox_bv, dtype=np.float),
                np.ascontiguousarray(gt_bv, dtype=np.float))
        else:
            overlaps_all = np.zeros([scores.size,gt_bv.shape[0]])
        t_score_range = np.arange(0.04,0.87,0.02)
        nt = t_score_range.shape[0]
        recalls = np.zeros((nt,3))
        precisions = np.zeros((nt,3))
        gt_nums = np.zeros((nt,3))
        pos_nums = np.zeros((nt,3))
        for diff in [1,2,3]:
            idiff = diff-1
            # Difficulty levels are cumulative: "moderate" includes "easy" gt, etc.
            ind_diff = np.logical_and(diff_level>0,diff_level<=diff)
            for it in range(nt):
                t_score = t_score_range[it]
                ind_score = scores>t_score
                scores_above = scores[ind_score]
                overlaps = overlaps_all[ind_score,:]
                if scores_above.shape[0]==0:
                    # No detections above this score threshold.
                    tp = 0
                    fp = 0
                    if gt_bv[ind_diff,:].shape[0]>0:
                        fn = np.sum(ind_diff)
                        #return 0.0,0.0,gt_bv.shape[0],0
                        #recall=0.0; precision=0.0; gt_num=gt_bv.shape[0]; pos_num=0
                    else:
                        fn = 0
                        #return 0.0,0.0,0,0
                        #recall=0.0; precision=0.0; gt_num=0; pos_num=0
                elif gt_bv.shape[0]==0:
                    # No ground truth at all: everything kept is a false positive.
                    tp = 0
                    fn = 0
                    fp = bbox_bv.shape[0]
                else:
                    # NOTE this is looser than actual eval!!
                    argmax_overlaps = overlaps.argmax(axis=1)
                    max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps]
                    ind1 = max_overlaps<thres #if <0.5, definitely false positive
                    #ind2 = a #if >2 positive for one gt, it's fp but now ignore that because we have very low NMS thre
                    fp = np.sum(ind1)
                    if gt_bv[ind_diff,:].shape[0]==0:
                        tp = 0
                        fn = 0
                        #return 0.0,0.0,0,fp
                        #recall=0.0; precision=0.0; gt_num=0; pos_num=fp
                    else:
                        #argmax_overlaps = overlaps.argmax(axis=1)
                        gt_argmax_overlaps = overlaps[:,ind_diff].argmax(axis=0)
                        gt_max_overlaps = overlaps[:,ind_diff][gt_argmax_overlaps,
                                                    np.arange(overlaps[:,ind_diff].shape[1])]
                        if DEBUG:
                            #print 'prop_max_overlaps:',overlaps[np.arange(overlaps.shape[0]), argmax_overlaps]
                            print ('gt_max_overlaps:', gt_max_overlaps)
                            print (gt_max_overlaps>=thres)
                            print (np.sum(gt_max_overlaps>=thres))
                        tp = np.sum(gt_max_overlaps>=thres)
                        fn = np.sum(gt_max_overlaps<thres)
                gt_num = tp+fn
                pos_num = tp+fp
                # Empty denominators are defined as perfect recall/precision.
                if gt_num==0:
                    recall = 1
                else:
                    recall = float(tp)/gt_num
                if pos_num==0:
                    precision = 1
                else:
                    precision = float(tp)/pos_num
                recalls[it,idiff] = recall
                precisions[it,idiff] = precision
                gt_nums[it,idiff] = gt_num
                pos_nums[it,idiff] = pos_num
        ##the unprocessed 3d_corners project to bv
        #gt_cnr = gt_blob['gt_boxes_corners']
        return recalls,precisions,gt_nums,pos_nums
def _calc_AP(self,recalls,precisions,plot_file=None):
legends = ['Easy','Moderate','Hard']
if len(recalls.shape)==1:
ind_sort = np.argsort(recalls)
recalls = recalls[ind_sort]
precisions = precisions[ind_sort]
delta_recalls = recalls-np.hstack((0,recalls[0:-1]))
AP = np.sum(delta_recalls*precisions)
if not(plot_file is None):
plt.plot(recall,precision)
plt.xlabel('recall')
plt.ylabel('precision')
plt.savefig(plot_file)
plt.close()
else:
AP = np.zeros(recalls.shape[1])
for j in range(recalls.shape[1]):
ind_sort = np.argsort(recalls[:,j])
recalls_j = recalls[ind_sort,j]
precisions_j = precisions[ind_sort,j]
delta_recalls = recalls_j-np.hstack((0,recalls_j[0:-1]))
AP[j] = np.sum(delta_recalls*precisions_j)
if not(plot_file is None):
plt.plot(np.hstack((0,recalls_j,recalls_j[-1])),np.hstack((precisions_j[0],precisions_j,0)),label=legends[j])
#plt.hold(True)
plt.xlabel('recall')
plt.xlim((0.0,1.0))
plt.ylabel('precision')
plt.ylim((0.0,1.0))
plt.legend()
plt.savefig(plot_file)
plt.close()
return AP
''' one threshold
def _do_validation_bv(self,boxes_bv,gt_blob,scores=None,thres=0.5,ignore=0.2,DEBUG=False):
#the processed bv_boxes, first we only do eval here because polygon intersection is not easy
positive_ind = gt_blob['gt_boxes_bv'][:,4]>0
gt_bv = gt_blob['gt_boxes_bv'][positive_ind,0:4]
bbox_bv = boxes_bv[:,0:4]
#filter low scores
if scores != None:
bbox_bv = bbox_bv[scores>ignore,:]
if bbox_bv.shape[0]==0:
tp = 0
fp = 0
if gt_bv.shape[0]>0:
return 0.0,0.0,gt_bv.shape[0],0
else:
return 0.0,0.0,0,0
elif gt_bv.shape[0]==0:
tp = 0
fp = bbox_bv.shape[0]
fn = 0
return 0.0,0.0,0,fp
else:
overlaps = bbox_overlaps(
np.ascontiguousarray(bbox_bv, dtype=np.float),
np.ascontiguousarray(gt_bv, dtype=np.float))
argmax_overlaps = overlaps.argmax(axis=1)
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps,
np.arange(overlaps.shape[1])]
if DEBUG:
print 'prop_max_overlaps:',overlaps[np.arange(overlaps.shape[0]), argmax_overlaps]
print 'gt_max_overlaps:', gt_max_overlaps
print gt_max_overlaps>=thres
print np.sum(gt_max_overlaps>=thres)
tp = np.sum(gt_max_overlaps>=thres)
fn = np.sum(gt_max_overlaps<thres)
fp = bbox_bv.shape[0]-tp
gt_num = tp+fn
pos_num = tp+fp
recall = float(tp)/gt_num
precision = float(tp)/pos_num
#the unprocessed 3d_corners project to bv
gt_cnr = gt_blob['gt_boxes_corners']
return recall,precision,gt_num,pos_num
'''
# Quick manual smoke test: build the train split roidb and drop into IPython.
if __name__ == '__main__':
    d = datasets.kitti_mv3d('train')
    res = d.roidb
    from IPython import embed; embed()
| [
"kiwoo.shin@berkeley.edu"
] | kiwoo.shin@berkeley.edu |
5139804e41100a6589ddaa48f989cb0aab6176c5 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_309/ch28_2020_03_24_22_36_16_011013.py | 793207e861aac9f5ffad8b0007ec0a9447743a06 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | exp = 0
# Geometric-series exercise: conta holds 1/2**exp for exp = 0..99.
soma = 1
while exp < 100:
    conta = 1/2**exp
    # NOTE(review): the accumulation is commented out, so soma stays at 1 and
    # conta is unused — presumably this should read `soma += conta`; confirm
    # against the exercise statement before enabling.
    #soma += conta
    exp += 1
print (soma)
| [
"you@example.com"
] | you@example.com |
36cf07b7a0737f747f982d2420f144ed52f70a03 | 6fabb792f6c1bfceb6df4ba2573f012f65b3a8a0 | /lib/python3.6/_weakrefset.py | ef17e0499c65b54f765c7193480e7488b9b2a0e5 | [] | no_license | yash0311/hotel | 420a682e290a4921811caa0fa955af41a52457d0 | d1840a0636a513e9f744f6e187624b0df1af74c3 | refs/heads/master | 2020-03-21T13:42:13.144694 | 2018-06-25T16:23:26 | 2018-06-25T16:23:26 | 138,620,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | /home/yash0311/anaconda3/lib/python3.6/_weakrefset.py | [
"yprajapati0311@gmail.com"
] | yprajapati0311@gmail.com |
b01429d6519103eadc9dcdee1d1f4258382aa881 | 5d7c30cb46435ffbbf16980e5ab2732711ac6f71 | /scripts/dedup_training_data.py | 8018a87d2c8f77354b7ce2c8f5cfc11536f585c5 | [
"CC-BY-SA-3.0",
"Apache-2.0",
"MIT"
] | permissive | mhagiwara/nanigonet | d5b25e64312376a62d6db08435649736eac17745 | e6b5db34a263aeb17c5647768d401837de378f6f | refs/heads/master | 2023-05-24T09:21:16.215974 | 2020-06-19T04:29:01 | 2020-06-19T04:29:01 | 211,418,641 | 68 | 9 | MIT | 2023-05-22T22:34:54 | 2019-09-27T23:54:45 | Python | UTF-8 | Python | false | false | 2,087 | py | from nanigonet.language_info import LanguageInfo
import os
import random
import sys
from pathlib import Path
TRAIN_DIR = Path('data/train')
def get_num_lines_from_rank(rank):
    """Map a language popularity rank to a per-source line budget.

    Falsy rank -> 2000; ranks 1-10 -> 6000; ranks 11-20 -> 3000; anything
    beyond 20 yields None (callers then keep every line).
    """
    if not rank:
        return 2000
    numeric_rank = int(rank)
    if numeric_rank <= 10:
        return 6000
    if numeric_rank <= 20:
        return 3000
    return None
def get_deduped_lines(file_path, num_lines=1000):
    """Return up to num_lines unique, non-empty lines (each <= 1024 chars)
    from file_path, in random order (num_lines=None keeps all of them)."""
    with open(file_path) as handle:
        unique = {stripped
                  for stripped in (raw.strip() for raw in handle)
                  if stripped and len(stripped) <= 1024}
    shuffled = list(unique)
    random.shuffle(shuffled)
    return shuffled[:num_lines]
def main():
    """Build data/train/<lang>/combined.txt for every known language.

    For each language, up to num_lines deduplicated lines are sampled from
    each available source file (tatoeba, w2c, github) and concatenated.
    """
    random.seed(1)  # deterministic sampling across runs
    for info in LanguageInfo.values():
        # num_lines may be None (rank > 20), in which case no truncation happens.
        num_lines = get_num_lines_from_rank(info['rank'])
        print(f"Creating training data for {info['id']}. num_lines={num_lines}", file=sys.stderr)
        # Human languages ('h') use the bare id; other types get a 'p-' prefix.
        if info['type'] == 'h':
            target_dir = TRAIN_DIR / info['id']
        else:
            target_dir = TRAIN_DIR / f"p-{info['id']}"
        if not os.path.exists(target_dir):
            print(f"Directory for {info['id']} does not exist. Skipping.", file=sys.stderr)
            continue
        all_lines = []
        tatoeba_path = target_dir / 'tatoeba.txt'
        if os.path.exists(tatoeba_path):
            new_lines = get_deduped_lines(tatoeba_path, num_lines=num_lines)
            all_lines.extend(new_lines)
        w2c_path = target_dir / 'w2c.txt'
        if os.path.exists(w2c_path):
            new_lines = get_deduped_lines(w2c_path, num_lines=num_lines)
            all_lines.extend(new_lines)
        github_path = target_dir / 'github.small.txt'
        if os.path.exists(github_path):
            new_lines = get_deduped_lines(github_path, num_lines=num_lines)
            all_lines.extend(new_lines)
        with open(target_dir / 'combined.txt', mode='w') as f:
            for line in all_lines:
                f.write(line)
                f.write('\n')
# Script entry point.
if __name__ == '__main__':
    main()
| [
"hagisan@gmail.com"
] | hagisan@gmail.com |
ce4b054d55f0ee2456443948b49fb82e169bc81e | 1971a8177b2cf1990c1bedce797a40184de491e5 | /ui_process.py | 9304845f520129f2287051704b9cd03abe65499c | [] | no_license | shivprasadk97/Webpage-generator | 0bb60c0e928c52d48106540a0513bb070d148694 | 07917128a8fd3ef90c82e368285a8c4249d86407 | refs/heads/master | 2022-08-15T17:44:45.301149 | 2020-05-25T14:29:01 | 2020-05-25T14:29:01 | 266,798,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,483 | py | #!C:\Python\python
import cgi,cgitb
# CGI handler: reads the site-builder form, serializes it into company.txt
# (pipe/### delimited records, one page per line) and prints a success page.
f = open("company.txt","w")
form = cgi.FieldStorage()
# Fetching all info
# NOTE(review): getvalue() returns None for missing fields; str() turns that
# into the literal string "None", which later checks rely on (see fb/gplus).
company_name = str(form.getvalue("company_name"))
home_para_body = str(form.getvalue("home_para_body"))
footer = str(form.getvalue("footer"))
about_para = str(form.getvalue("about_para"))
# services
service1 = str(form.getvalue("service_1"))
service1_desc = str(form.getvalue("service_1_desc"))
service2 = str(form.getvalue("service_2"))
service2_desc = str(form.getvalue("service_2_desc"))
service3 = str(form.getvalue("service_3"))
service3_desc = str(form.getvalue("service_3_desc"))
# services end
# contact us
street_name = str(form.getvalue("street_name"))
city_name = str(form.getvalue("city_name"))
state_name = str(form.getvalue("state_name"))
pin_code = str(form.getvalue("pin_code"))
address = str(form.getvalue("address"))
phone= str(form.getvalue("phone"))
mail = str(form.getvalue("mail"))
# social media
fb = str(form.getvalue("facebook_link"))
twitter = str(form.getvalue("twitter_link"))
gplus = str(form.getvalue("google_link"))
#End of fetching all info
# NOTE(review): form values are written verbatim; '|' or '###' inside a field
# would corrupt the record format — consider escaping before trusting input.
#writing home
f.write("HOME###")
f.write("TITLE|"+company_name+"###")
f.write("PARA|WHAT WE DO!!!|"+home_para_body+"###")
f.write("FOOTER|"+footer+"###\n")
#writing home end
#writing about page
f.write("ABOUT###")
f.write("PARA|ABOUT US|"+about_para+"###\n")
#writing about page end
# services page
f.write("SERVICES###")
f.write("SERVICE1|"+service1+"|"+service1_desc+"###")
f.write("SERVICE2|"+service2+"|"+service2_desc+"###")
f.write("SERVICE3|"+service3+"|"+service3_desc+"###\n")
#writing contact us page
f.write("CONTACT###")
f.write("ADDRESS|"+street_name+"|"+city_name+"|"+state_name+"|"+pin_code+"###")
f.write("PHONE|"+phone+"###")
f.write("MAIL|"+mail+"###\n")
# contact us page ends
# social media
# Optional links: skipped when the form field was absent (stringified "None").
f.write("SOCIAL###")
if(fb!="None"):
    f.write("FB|"+fb+"###")
if(gplus!="None"):
    f.write("GPLUS|"+gplus+"###")
f.write("TWITTER|"+twitter+"###")
f.close()
# writing contact us page end
# CGI response: content-type header, blank separator line, then the body.
print("Content-type: text/html")
print()
full_html= '''<!DOCTYPE html>
<html>
  <head>
    <!--Import Google Icon Font-->
      <link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
      <link href="https://fonts.googleapis.com/css?family=Do+Hyeon|Gugi" rel="stylesheet">
      <!--Import materialize.css-->
      <!-- Compiled and minified CSS -->
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/1.0.0-beta/css/materialize.min.css">
    <!-- Style -->
    <style>
    body{
      font-family: 'Gugi', cursive;
      font-size:30px;
    }
    h1,h2,h3,h4{
      font-size:50px;
      font-family: 'Do Hyeon', sans-serif;
    }
    h1{
      font-size: 75px;
    }
    </style>
    <title>Success</title>
      <!--Let browser know website is optimized for mobile-->
      <meta name="viewport" content="width=device-width, initial-scale=1.0"/>
  </head>
  <body class="black">
    <div class="container">
      <h1 class="indigo-text text-darken-1 center-align"> Hurray<span class="blue-text text-darken-2">!!!! </h1>
      <h2 class="purple-text center-align">
        Your File is ready and saved in <span class="red-text">"company.txt"</span>
      </h2>
      <div class="center-align">
        <a class="btn waves-effect waves-light btn-large" href="http://localhost/cgi-bin/FS/page_view.py" target="_blank">Click here to view it
        </a>
      </div>
    </div>
    <hr>
    <hr>
    <div class=" container center-align">
      <h2 class="white-text">Click the following button to start processing it into html files</h2>
      <a class="waves-effect waves-light btn-large blue" href="http://localhost/cgi-bin/FS/process_page.py" target="_blank">Process it</a>
    </div>
    </div>
    <br>
    <hr>
    <!--JavaScript at end of body for optimized loading-->
      <!-- Compiled and minified JavaScript -->
    <script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/1.0.0-beta/js/materialize.min.js"></script>
  </body>
  </html>
'''
print(full_html)
# <a class="waves-effect waves-light btn-large red" href="http://localhost/cgi-bin/FS/editor.py" target="_blank">CLICK HERE TO EDIT IT</a>
"noreply@github.com"
] | shivprasadk97.noreply@github.com |
598c8438b98186597ca8f5924df30968ff7176fb | 62e40e9384bbe29d2942b04a6f39c534e82e8e2d | /projects/views.py | dc820eff750571ff5cb5cdbd42b20a5cb13cfaaa | [] | no_license | regnald205/Terry_Project | f421784e7a1032451086e29231a2d0c9378bd12d | 279d641d7a6a304854c6efec328386306718d4c0 | refs/heads/master | 2020-09-11T17:19:19.942886 | 2019-11-16T17:42:13 | 2019-11-16T17:42:13 | 222,079,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | from django.shortcuts import render,get_object_or_404
from .models import Projects
from django.core.paginator import EmptyPage,PageNotAnInteger,Paginator
from .choices import prices_choices,bedroom_choices
# Create your views here.
def index(request):
#if you want to order it by date
#FILTER function is used to specify which data may be displayed
projects=Projects.objects.order_by('list_date').filter(is_published=True)
paginator=Paginator(projects,6)#Paginator function it carry two values one is from models define and other is page number
page=request.GET.get('page')
paged_projects=paginator.get_page(page)
context={
'projects': paged_projects
}
return render(request,'projects/projects.html',context)
def search(request):
queryset_list=Projects.objects.order_by('-list_date')
#for keywords search
if 'keywords' in request.GET:
keywords=request.GET['keywords']
#make sure keywords is not an empty string
if keywords:
queryset_list=queryset_list.filter(description__icontains=keywords)
#search for city
if 'city' in request.GET:
city=request.GET['city']
if city:
queryset_list=queryset_list.filter(city__iexact=city)#"i" for case sensitive
#search for bedrooms
if 'bedrooms' in request.GET:
bedrooms=request.GET['bedrooms']
if bedrooms:
queryset_list=queryset_list.filter(bedrooms__lte=bedrooms)
#search for price
if 'price' in request.GET:
price=request.GET['price']
if price:
queryset_list=queryset_list.filter(price__iexact=price)#"i" for case sensitive
context={
'bedroom_choices': bedroom_choices,
'prices_choices': prices_choices,
'projects': queryset_list,
'values': request.GET
}
return render(request,'projects/search.html',context)
def project(request,project_id):
project = get_object_or_404(Projects,pk=project_id)
context = {
'project':project
}
return render(request,'projects/project.html',context)
| [
"terryabraham84@gmail.com"
] | terryabraham84@gmail.com |
96de1d2dae6890d9e2a24035c4ed2c25de752016 | aa059a2f4cd520f0052355a4dee15fd58af6daab | /adventure.py | 786e0878f37d4053654b9e08f5ed58fe8639bb29 | [] | no_license | NielMc/adventure | 635fa31a2972283d8df181f25c021c928dc60a8b | 4357548878640e70e9a36660d862522f295b044b | refs/heads/master | 2021-01-12T14:27:35.223932 | 2016-10-05T15:33:14 | 2016-10-05T15:33:14 | 70,068,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | from data import locations
directions = {
'west': (-1, 0),
'east': (1, 0),
'north': (0, -1),
'south': (0, 1),
}
position = (0, 0)
while True:
location = locations[position]
print 'you are at the %s' % location
valid_directions = {}
for k, v in directions.iteritems():
possible_position = (position[0] + v[0], position[1] + v[1])
possible_location = locations.get(possible_position)
if possible_location:
print 'to the %s is a %s' % (k, possible_location)
valid_directions[k] = possible_position
direction = raw_input('which direction do you want to go?\n')
newPosition = valid_directions.get(direction)
if newPosition:
newPosition == position
else:
print "This is not a valid direction" | [
"nielmce@gmail.com"
] | nielmce@gmail.com |
93e6c18ec2e6248b110d4e7f3e5bbcd4a8b891d9 | 20975c996939d2981f1e44061f49a579dfa71e3e | /client/network.py | a1e545d4d1be100a509e747ba739e01cde1b2b0b | [] | no_license | x0152/ping_pong | 5c7b215da4ce47d2a9595228f09fd03ae13a9c53 | ec8b72809707cec1312f768b2a25b65eb065296f | refs/heads/master | 2020-05-17T15:40:58.270428 | 2019-08-07T19:50:53 | 2019-08-07T19:50:53 | 183,797,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | import requests
import json
class Network:
    """Thin HTTP client for the pong game server at 127.0.0.1:8080."""

    def __init__(self):
        # Session key handed out by the server on registration; empty until then.
        self.key = ""

    def registration(self):
        """Register this client with the server.

        Returns (settings, ok): on HTTP 200, `settings` is the JSON payload
        (its "Key" entry is remembered in self.key) and ok is True; on any
        other status, an empty dict and False are returned.
        """
        r = requests.post("http://127.0.0.1:8080/registration")
        if r.status_code == 200:
            setting = r.json()
            self.key = setting["Key"]
            return setting, True
        # BUG FIX: the original `return setting, False` referenced a local
        # that is never bound on a non-200 response, raising NameError.
        return {}, False

    def send_request(self, mouse_pos_x, mouse_pos_y):
        """Report the mouse position; returns (payload, ok) like registration()."""
        resp = requests.post(
            "http://127.0.0.1:8080/handle",
            data={"x": mouse_pos_x, "y": mouse_pos_y, "key": self.key},
        )
        if resp.status_code == 200:
            return resp.json(), True
        return [], False
| [
"sunkencityr.yeh@gmail.com"
] | sunkencityr.yeh@gmail.com |
4b5d02ec60cd10a15ec51fbd3a7661aada867c08 | 68b4b70cbcfc4d489eccdf9a9b173a4702452dce | /date&time.py | 460afac7a4739c9faa6c579ac2ac91727d66a960 | [] | no_license | Monsteryogi/Python | 2083fb7225ee4baf2bf7fed779a8c832c3633ffd | b9db013ba95824cc7c40f33b12f0edc4ecf82086 | refs/heads/master | 2021-04-09T11:22:38.991921 | 2020-02-06T09:38:41 | 2020-02-06T09:38:41 | 125,476,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | #date and time in various format
# Display the current date and time in two different layouts.
from datetime import datetime

now = datetime.now()

print("dd/mm/yyyy and hh/mm/ss")
print(f"Date: {now.day}/{now.month}/{now.year}")
print(f"Time: {now.hour}:{now.minute}:{now.second}")

print("\nyyyy-dd-mm and hh/mm/ss")
print(f"Date: {now.year}-{now.day}-{now.month}")
print(f"Time: {now.hour}:{now.minute}:{now.second}")
| [
"noreply@github.com"
] | Monsteryogi.noreply@github.com |
54c5a5c4de2870c5d31c35e9ca40f07e6ce8acbf | f07d244215051c9c9a22fbabe204e514908a07b4 | /Mugen/characters/p2/example/example char.py | 83ba33ff95be62404f082f6f078d12906e5986f2 | [
"MIT"
] | permissive | KeithCissell/BUGEN-Fighter-Game | a1fad1550ebb07f00c3389937afd6db2bee5d581 | 7551d5786b87bfacdebbaecb76cdf3fcdfb66ae4 | refs/heads/master | 2020-03-27T16:55:12.304122 | 2018-10-02T22:16:47 | 2018-10-02T22:16:47 | 146,815,738 | 2 | 0 | MIT | 2018-10-02T22:16:48 | 2018-08-30T23:04:39 | Python | UTF-8 | Python | false | false | 1,062 | py | import arcade
class example(arcade.Sprite):
    """Template character sprite: subclass hooks below are all stubs to fill in."""
    def __init__(self):
        """ Initialize our character variables """
        # initialize the char here:
        # load the sprites into spritelists,
        # e.g. self.player_sprite = arcade.Sprite("sprites/character.png", SPRITE_SCALING_PLAYER)
        # load character sounds
        # load character effects
    def update(self):
        """
        Description: This function updates the char for the game canvas.
        Called once per frame; currently a stub.
        """
        pass
    def draw(self):
        """
        Description: This function draws the character. Stub.
        """
        pass
    def move(self):
        """
        Description: This function gets called whenever a directional button was pushed. Stub.
        """
        pass
    def action(self):
        """
        Description: This function is called whenever an action button was pressed. Stub.
        """
        pass
    def collision(self):
        """ Detect collisions with other sprites. Stub. """
        pass
| [
"thydnguyen@gmail.com"
] | thydnguyen@gmail.com |
7f291f249ebd483bdcedea8fc41556a26d0160d0 | 1e4a9bee001ac1acff398e9c6354ad5703712d2f | /StudyPortal/test.py | 79516ab6fcbe60a531c92c45cd62ada721d610f0 | [] | no_license | currysunxu/SeleneUI | 47fd9cf367ddaab07d6d2863d6ad515b2985f83f | 828ed27a139c5b04bc3543b26890f2149c93cfc3 | refs/heads/main | 2023-07-16T22:24:54.851317 | 2021-08-24T08:25:17 | 2021-08-24T08:25:17 | 397,515,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | from ptest.decorator import TestClass, Test
from selene import by, config
import time
from StudyPortal.pages.LoginPage import LoginPage
from StudyPortal.pages.PtReviewPage import PtReviewPage
from StudyPortal.pages.HomePage import HomePage
@TestClass()
class test():
    """UI flow tests for the PT-review page of the study portal."""

    # Each data_provider row is a (status, desc) pair that ptest unpacks
    # into the test method's arguments.
    @Test(data_provider=[("Take Test","You can take the test now."),("Continue","You can continue your test."),("View Result","")])
    def test_whole_flow(self, status, desc):
        """Log in, open PT review for book D and check the pre-test text.

        BUG FIX: the original signature took only `self`, so ptest could not
        deliver the two-element data_provider rows as arguments.
        """
        print("hello world")
        login = LoginPage()
        login.openWebSite()
        login.loginPortal()
        home = HomePage()
        home.open_ptreview()
        pt_review = PtReviewPage()
        pt_review.switch_specific_book("D")
        pt_review.check_before_test_text()
        time.sleep(3)

    @Test(data_provider=[("Take Test","You can take the test now."),("Continue","You can continue your test."),("View Result","")])
    def test_whole_flow1(self, status, desc):
        """Smoke-check that each provided (status, desc) row reaches the test."""
        print(status)
        print(desc)
| [
"curry.sun@ef.com"
] | curry.sun@ef.com |
125e57cfd05bc521eb93190033bb529a1457555c | 510f228c70df9d2c4b83cf3853422ceac95d4ae0 | /sasc.py | 77cc934ed81090df944aec224d0467705ff70582 | [] | no_license | rodrigowue/SASC | dc3084adbf27559252c10e17943d07cb8f051bfb | 737d86a918470cc2b4b259949ab3bfc46fa51542 | refs/heads/master | 2021-06-26T21:44:32.981325 | 2020-12-03T18:09:52 | 2020-12-03T18:09:52 | 188,244,136 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,774 | py | import re
import sys
import networkx as nx
import matplotlib.pyplot as plt
def print_gmelogo():
    """Print the GME group's ASCII-art banner to stdout, one row per line."""
    banner = (
        "============================================================",
        " .:-. `..--.`` . . ........",
        " . .--/+++` -++/---:++/. // -+` /+//////",
        " `:. `-/++:. `/+: .:` .++: -++- /+. ",
        " /- +++- :+: /+/+- -+/++ /+:-----",
        " `/: /++/ /+- .+++++/ `+/ :+- `++`:+. /+-.....",
        "-++:.` `+++- .++` :+: -+- /+-+/` .+/ /+. ",
        "++:-. `/+++` .++-.` ``-/+: /+` `/++` ++` /+.`````",
        "/++:` `.:/++++: `-:/+/+//-` `/: `/. -/. :///////",
        " -/++/+++++/-` ",
        " ./++:.` ",
        "============================================================",
    )
    for row in banner:
        print(row)
def is_empty(any_structure):
    """Return True when *any_structure* is falsy (empty container, "", 0, None).

    Idiom fix: collapses the original if/else truthiness test into a single
    boolean expression with identical results.
    """
    return not any_structure
class Transistor:
    """One MOS device parsed from a SPICE netlist line.

    Holds the four terminal nets (source/gate/drain/bulk), the device
    flavour ('pch'/'nch'), the W/L sizing, the finger count, and a
    series-stack factor that the path analysis later raises from its
    default of 1.
    """

    def __init__(self, name, source, gate, drain, bulk, ttype, wsize, fingers, lsize):
        self.name = name
        self.source = source
        self.gate = gate
        self.drain = drain
        self.bulk = bulk
        self.ttype = ttype
        self.wsize = wsize
        self.fingers = fingers
        self.lsize = lsize
        # Largest number of devices in series on any path through this one.
        self.stack = 1

    # ------------------------------------------------------------------
    # Plain accessors; names kept exactly as before for existing callers.
    # ------------------------------------------------------------------
    def get_name(self):
        return self.name

    def set_name(self, name):
        self.name = name

    def get_source(self):
        return self.source

    def set_source(self, source):
        self.source = source

    def get_gate(self):
        return self.gate

    def set_gate(self, gate):
        self.gate = gate

    def get_drain(self):
        return self.drain

    def set_drain(self, drain):
        self.drain = drain

    def get_bulk(self):
        return self.bulk

    def set_bulk(self, bulk):
        self.bulk = bulk

    def get_ttype(self):
        return self.ttype

    def set_ttype(self, ttype):
        self.ttype = ttype

    def get_wsize(self):
        return self.wsize

    def set_wsize(self, wsize):
        self.wsize = wsize

    def get_fingers(self):
        return self.fingers

    def set_fingers(self, fingers):
        self.fingers = fingers

    def get_lsize(self):
        return self.lsize

    def set_lsize(self, lsize):
        self.lsize = lsize

    def get_stack(self):
        return self.stack

    def set_stack(self, stack):
        self.stack = stack
def main():
print("============================================================");
print("SPICE PARSER AND AUTOMATIC STACK CALCULATOR");
print("============================================================");
print("By: Rodrigo N. Wuerdig");
print("Contact: rnwuerdig@inf.ufrgs.br");
print_gmelogo();
#===========================================================================
#Fetch Args
#===========================================================================
if (sys.argv[1]==None):
print("WARNING: ARG1 SHOULD CONTAIN WP/WN RATIO");
wpwn_ratio=2.0;
else:
wpwn_ratio=float(sys.argv[1]);
if (sys.argv[2]==None):
print("ERROR: ARG2 SHOULD CONTAIN SPICE FILE");
return -1;
else:
file = sys.argv[2];
#===========================================================================
#Open Spice File
#===========================================================================
f=open(file,"r");
ntransistors=[]; #Start List for N Transistors
ptransistors=[]; #Start List for P Transistors
inputs=[]; #Start List for Inputs
outputs=[]; #Start List for Output Nodes
for line in f:
newline = line.rstrip('\n');
#Check if the line starts with *.pininfo
if "*.pininfo" in newline.lower():
#Fetches Outputs from the pininfo line
outpins = re.findall('[a-zA-Z0-9]*:[Oo]', newline);
#Fetches Inputs from the pininfo line
inpins = re.findall('[a-zA-Z0-9]*:[Ii]', newline);
#Fetches Vdd pin from the pininfo line
vddpin = str(re.search('[a-zA-Z0-9]*:[Pp]', newline)[0]);
#Fetches Gnd pin from the pininfo line
gndpin = str(re.search('[a-zA-Z0-9]*:[Gg]', newline)[0]);
#Check if its missing output pins
if is_empty(outpins):
print("pattern not found outputs");
else:
for out in outpins:
print("Output Pins:",out);
outputs.append(out.replace(':O',''));
#Check if its missing output pins
if is_empty(inpins):
print("pattern not found outputs");
else:
for in_pin in inpins:
print("input Pins:",in_pin);
inputs.append(in_pin.replace(':O',''));
#Check if its missing vdd pins
if is_empty(vddpin):
print("pattern not found outputs");
return -3;
else:
print("Circuit Supply Pin:", vddpin);
vddpin=vddpin.replace(':P','');
#Check if its missing gnd pins
if is_empty(gndpin):
print("pattern not found outputs");
return -3;
else:
print("Circuit Ground Pin:", gndpin);
gndpin=gndpin.replace(':G','');
#===========================================================================
#Transistor Lines
elif ("pch" in newline.lower()) or ("nch" in newline.lower()):
print("\n=========================")
name = newline.split()[0];
print("Name:",name);
source = newline.split()[1];
print("Source:",source);
gate = newline.split()[2];
print("Gate:",gate);
drain = newline.split()[3];
print("Drain:",drain);
bulk = newline.split()[4];
print("Bulk:",bulk);
ttype = newline.split()[5];
print("Type:",ttype);
wsize = re.findall('[Ww]=[0-9Ee]*.[0-9Ee]*[\-+0-9]*', newline);
if is_empty(wsize):
print("pattern not found W size")
else:
wsize = wsize[0].replace('w=','');
wsize = wsize.replace('W=','');
wsize = float(wsize);
print("W Size:",wsize);
lsize = re.findall('[Ll]=[0-9Ee]*.[0-9Ee]*[\-+0-9]*', newline);
if is_empty(lsize):
print("pattern not found L Size")
else:
lsize = lsize[0].replace('l=','')
lsize = lsize.replace('L=','')
lsize = float(lsize);
print("L Size:",lsize);
fingers = re.findall('nf=[0-9]*', newline.lower());
if is_empty(fingers):
print("pattern not found: Number of Fingers")
fingers=1;
else:
fingers = fingers[0].replace('nf=','')
fingers = fingers.replace('NF=','')
fingers = int(fingers);
print("Fingers:",fingers);
if (ttype.lower()=="pch"):
mos = Transistor(name,source,gate,drain,bulk,ttype,wsize,fingers,lsize);
ptransistors.append(mos);
elif (ttype.lower()=="nch"):
mos = Transistor(name,source,gate,drain,bulk,ttype,wsize,fingers,lsize);
ntransistors.append(mos);
f.close();
#===========================================================================
#Prints Number of Fetched Transistors
#===========================================================================
print("\n\n============================================================");
print("The Circuit Contains:");
print("PMOS TRANSISTORS", len(ptransistors));
print("NMOS TRANSISTORS", len(ntransistors));
print("\n\n============================================================");
#===========================================================================
#Creates Networkx Node Graph and Include Nodes
#===========================================================================
G=nx.Graph(); #Creates an graph called G
color_map=[]; #list that will define node colors
node_size=[]; #list that will define node sizes
#-----------------------------------------
#Searches Nodes and Color them
#-----------------------------------------
G.add_node(vddpin); #create vdd node
color_map.append('green');
node_size.append(2000);
G.add_node(gndpin); #create gnd node
color_map.append('green');
node_size.append(2000);
for outpin in outputs:
G.add_node(outpin);
color_map.append('magenta');
node_size.append(1000)
for n in ptransistors:
G.add_node(n.get_name());
color_map.append('red');
node_size.append(500);
for n in ntransistors:
G.add_node(n.get_name());
color_map.append('blue');
node_size.append(500);
for n in ptransistors:
G.add_edge(n.get_name(),n.get_source());
color_map.append('yellow');
node_size.append(100);
G.add_edge(n.get_name(),n.get_drain());
color_map.append('yellow');
node_size.append(100);
for n in ntransistors:
G.add_edge(n.get_name(),n.get_source());
color_map.append('yellow');
node_size.append(100);
G.add_edge(n.get_name(),n.get_drain());
color_map.append('yellow');
node_size.append(100);
#===========================================================================
#Fetches Common Nodes
#===========================================================================
common_nodes=[];
for n in ntransistors:
for p in ptransistors:
if (n.get_drain()==p.get_drain()):
common_nodes.append(n.get_drain());
elif (n.get_drain()==p.get_source()):
common_nodes.append(n.get_drain());
elif (n.get_source()==p.get_drain()):
common_nodes.append(n.get_source());
elif (n.get_source()==p.get_source()):
common_nodes.append(n.get_source());
common_nodes = list(dict.fromkeys(common_nodes));
#===========================================================================
#Searches Euler Paths from COMMON_NODE to VDD
#===========================================================================
for common_node in common_nodes:
print("PATH FROM",common_node ,"TO",vddpin);
print("============================================================");
for path in nx.all_simple_paths(G, source=common_node, target=vddpin):
nodes_path_p=[];
stack=0;
if not(gndpin) in path:
print("Full Path:", path);
for node in ptransistors:
if node.get_name() in path:
stack=stack+1;
nodes_path_p.append(node);
for node in nodes_path_p:
if node.get_stack()<stack:
node.set_stack(stack);
print("Stack Size =", stack);
print("============================================================");
#===========================================================================
#Searches Euler Paths from COMMON_NODE to VSS
#===========================================================================
for common_node in common_nodes:
print("PATH FROM",common_node ,"TO",gndpin);
print("============================================================");
for path in nx.all_simple_paths(G, source=common_node, target=gndpin):
nodes_path_n=[];
stack=0;
if not(vddpin) in path:
print("Full Path:", path);
for node in ntransistors:
if node.get_name() in path:
stack=stack+1;
nodes_path_n.append(node);
for node in nodes_path_n:
if node.get_stack()<stack:
node.set_stack(stack);
print("Stack Size =", stack);
print("============================================================");
#===========================================================================
#Drawn Plot
#===========================================================================
print("============================================================");
nx.draw(G,node_size=node_size,node_color = color_map,with_labels=True);
#===========================================================================
#Print Calculed Stack Size for Each Transistor
#===========================================================================
for node in ptransistors:
sizew=node.get_wsize()*node.get_stack()*float(wpwn_ratio);
print("Node:",node.get_name(),"StackFactor:",node.get_stack(),"Calculated Size:", sizew," Original Size:", node.get_wsize());
for node in ntransistors:
sizew=node.get_wsize()*node.get_stack();
print("Node:",node.get_name(),"StackFactor:",node.get_stack(),"Calculated Size:", sizew," Original Size:", node.get_wsize());
plt.show();
#===========================================================================
#Write File
#===========================================================================
file = sys.argv[2];
in_file =open(file,"r");
file2 = "out_"+file;
out_file = open(file2,"w");
for line in in_file:
found=0;
for node in ptransistors:
if node.get_name() in line:
sizew=node.get_wsize()*node.get_stack()*float(wpwn_ratio);
out_file.write(node.get_name()+" "+node.get_source() +" " + node.get_gate() +" " + node.get_drain() + " " + node.get_bulk() + " " + node.get_ttype() + " W=" + str(sizew) + " NF="+str(node.get_fingers()) + " L="+str(node.get_lsize())+"\n");
found=1;
for node in ntransistors:
if node.get_name() in line:
sizew=node.get_wsize()*node.get_stack();
out_file.write(node.get_name()+" "+node.get_source() +" " + node.get_gate() +" " + node.get_drain() + " " + node.get_bulk() + " " + node.get_ttype() + " W=" + str(sizew) + " NF="+str(node.get_fingers()) + " L="+str(node.get_lsize())+"\n");
found=1;
if found !=1:
out_file.write(line);
in_file.close();
out_file.close();
return 0;
if __name__ == "__main__":
main();
| [
"rodrigowuerdig@gmail.com"
] | rodrigowuerdig@gmail.com |
7a0a3a277b02addb1e326d10fb728c20339483e7 | d9a11615b57624a47e4719222ffd346eedbbabc1 | /tests/test_flow.py | cbb43664378e920dfe878bdfd884a44676142e9b | [] | no_license | mattjegan/pyzerem | 79461659521bf98551d8b54e74861a0609db29e3 | d3fe9fb54454b14747cc1d238961a93b854aee46 | refs/heads/master | 2021-04-28T21:12:13.909647 | 2018-02-19T11:13:54 | 2018-02-19T11:13:54 | 121,944,907 | 0 | 0 | null | 2018-02-18T11:19:44 | 2018-02-18T11:19:44 | null | UTF-8 | Python | false | false | 1,568 | py |
from zerem import Flow, Slot, process
class TestFlow(object):
def test_slots_register(self):
"""
Tests that slot is added to the flows available slots
"""
class MyFlow(Flow):
slot = Slot()
m = MyFlow()
assert getattr(m, '__flow_available') == {
'slot': 0,
}
def test_processes_register(self):
"""
Test that the process is added to the flows processes/watchers
"""
class MyFlow(Flow):
@process
def step1(self):
pass
m = MyFlow()
assert getattr(m, '__flow_watchers') == [
(['self'], m.step1),
]
def test_setattr_triggers_methods(self):
"""
Tests that setting a slot triggers appropriate processes
"""
class MyFlow(Flow):
slot = Slot()
triggered = False
@process
def step1(self, slot):
self.triggered = True
m = MyFlow()
m.slot = 'test_value'
assert m.triggered is True
def test_setattr_does_not_trigger_when_wrong_args(self):
"""
Tests that setting a slot does not trigger processes it shouldn't
"""
class MyFlow(Flow):
slot = Slot()
triggered = False
@process
def step1(self, slot, nonexistant):
self.triggered = True
m = MyFlow()
m.slot = 'test_value'
assert m.triggered is False
| [
"matthewj.egan@hotmail.com"
] | matthewj.egan@hotmail.com |
fb2af1434ff50ec233c879992cdde0e67e0d64d4 | 618ab096e66fc4319a8745c78716503cec5e5e0e | /setup.py | 490607a39d495b128dacdb3f09588258b34a67ae | [] | no_license | lemoce/caps-lock | f198c9d9683b55fbdfce142970b9820b16205968 | a737cb88fec08b4beee606741aaeecf9ebd93b61 | refs/heads/master | 2021-01-20T15:38:52.999398 | 2015-03-03T23:09:07 | 2015-03-03T23:09:07 | 31,622,524 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | from setuptools import setup
# Trove classifiers describing the applet (see https://pypi.org/classifiers/).
classifiers=['Development Status :: 4 - Beta',
             'Environment :: X11 Applications :: GTK',
             'Intended Audience :: End Users/Desktop',
             'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
             'Operating System :: POSIX',
             'Programming Language :: Python',
             'Topic :: Desktop Environment :: Window Managers :: Applets']

setup(name='caps-lock',
      version='1.0',
      description='Keyboard Status Applet',
      author='Leandro Cerencio',
      author_email='cerencio@yahoo.com.br',
      url='https://www.github.com/lemoce/caps-lock',
      # BUG FIX: `classifiers` was defined above but never passed to setup(),
      # so the metadata silently never reached the package.
      classifiers=classifiers,
      packages=['caps_lock'],
      package_dir={'caps_lock':'src/caps_lock'},
      package_data={'caps_lock':['data/*.svg']},
      entry_points={'console_scripts': ['keystatusapplet=caps_lock.module:main']})
| [
"cerencio@yahoo.com.br"
] | cerencio@yahoo.com.br |
dae32be8f653d6bdd9542d422f29540b2448f64c | a4ecadebcfddf8896548e8d87ec8db2c0854fa05 | /manage.py | 41a59c6269e0c595c43601c84d8ddb0ab2318f57 | [] | no_license | abdulhafeez1432/SeedStarsDjangoApp | 93e1f8e7bcc430f1a964a506d736ae2ab333a9b8 | 98d36fdcd42312d07bcebfade31aa2b27d7cbbbe | refs/heads/master | 2021-08-24T00:28:35.982733 | 2017-12-07T08:13:25 | 2017-12-07T08:13:25 | 113,409,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this project's
    # settings module, then dispatch the command-line arguments to Django.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SeedStarsProject.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"noreply@github.com"
] | abdulhafeez1432.noreply@github.com |
595e7b3b489fcd6cca7917c59f28d4eacd84001a | cd1adc009411ab2cee41b43a765ef7c7774dc986 | /polls/views.py | d4dbabf9f182a1043447d097a9d5403cf8bb9f1c | [] | no_license | kokoros/understandTA | 56920339714a37c28650a628934109098fbe8603 | f80e81ff82ccb7645c446285c49197ac3a7d860e | refs/heads/master | 2020-04-27T16:33:56.847539 | 2019-05-14T03:16:57 | 2019-05-14T03:16:57 | 174,486,035 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,625 | py | from django.shortcuts import render, render_to_response, redirect
from django.http import HttpResponse
from polls.models import Pets, Collect, Order
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import time
from django.views.decorators.csrf import csrf_exempt
# uname = 'test'
# 初始化默认显示前100条
def pets(request):
    """Render the pet catalogue: first 100 pets, paginated 8 per page."""
    queryset = Pets.objects.all()[:100]
    paginator = Paginator(queryset, 8)
    requested_page = request.GET.get('page')
    try:
        page_obj = paginator.page(requested_page)
    except PageNotAnInteger:
        # Missing or non-numeric ?page= parameter -> show the first page.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Page number past the end -> clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'polls/pet.html', context={'posts': page_obj})
#商品添加到购物车
def goodadd(request):
#判断用户是否登录
if not request.session.get('is_login', None):
return redirect("/login")
request.encoding = 'utf-8'
if 'collect' in request.GET: # 判断是否有添加到购物车请求
pid = request.GET['collect']
collectone = Collect.objects.filter(pid=pid, cenable=0)
getpetone = Pets.objects.filter(id=pid)
cprice = getpetone[0].pprice
if collectone.count() < 1: # 如果该购车车没有该商品就添加
# 1.插入数据
cuser = request.session['user_name'] # 此处替换保留的用户名
cname = getpetone[0].pname
ctype = getpetone[0].ptype
cpath = getpetone[0].ppath
cdes = getpetone[0].pdescribe
ctime = time.strftime("%Y-%m-%d %X") # 加入购物车时间
# cnumber为商品数量
col = Collect(pid=pid, uname=cuser, cname=cname, ctype=ctype, ctime=ctime, cnumber=1, cdescribe=cdes,
cprice=cprice, callprice=cprice, ctypeid=0, cpath=cpath)
col.save()
else: # 购物车数量增加
cnumold = collectone[0].cnumber
allprice = collectone[0].callprice
newallprice = cprice + allprice
cnumnew = cnumold + 1
coladdnum = Collect.objects.get(pid=pid, cenable=0)
coladdnum.cnumber = cnumnew
coladdnum.callprice = newallprice
coladdnum.save()
pnumold = getpetone[0].pnumber
pnumnew = pnumold + 1
petnew = Pets.objects.get(id=pid)
petnew.pnumber = pnumnew
petnew.save()
uname = request.session['user_name']
posts=Collect.objects.all().filter(uname=uname,cenable=0)[:6]
data={'posts':posts}
return render(request,'polls/collect.html',context=data)
# 根据名称模糊查询
def showbyname(request):
request.encoding = 'utf-8'
if 'query' in request.GET:
pname = request.GET['query']
# 根据名称模糊获取
print('shwobyname:', pname)
pet = Pets.objects.filter(pname__contains='%s' % pname)
page = request.GET.get('page')
pagin = Paginator(pet, 8)
try:
posts = pagin.page(page)
except PageNotAnInteger:
posts = pagin.page(1)
except EmptyPage:
posts = pagin.page(pagin.num_pages)
data = {'posts': posts}
return render(request, 'polls/pet.html', context=data)
# 购物车模糊查询
def goodbyname(request):
request.encoding = 'utf-8'
uname = request.session['user_name']
if 'good' in request.GET:
cname = request.GET['good']
posts = Collect.objects.filter(cname__contains=cname, uname=uname, cenable=0)
data = {'posts': posts}
return render(request, 'polls/collect.html', context=data)
# 购物车默认加载,增加或者减少数据,动态生成价格
def collect(request):
if not request.session.get('is_login', None):
return redirect("/login")
request.encoding = 'utf-8'
#用户名
username = request.session['user_name']
posts = Collect.objects.all().filter(uname=username, cenable=0)[:6]
print('ind')
return render(request, 'polls/collect.html', {'posts': posts})
# 购物车到订单
def getgood(request):
    """Show the order-confirmation page for one cart row (id from ?jid=)."""
    cart_row_id = request.GET['jid']
    print('jid:', cart_row_id)  # debug trace kept from the original
    matching_rows = Collect.objects.filter(id=cart_row_id)
    return render(request, 'polls/order.html', context={'posts': matching_rows})
# 显示订单数据
def orderlist(request):
    """List the logged-in user's active orders (oenable=0 means not deleted)."""
    current_user = request.session['user_name']
    active_orders = Order.objects.filter(uname=current_user, oenable=0)
    return render(request, 'polls/orderlist.html', context={'posts': active_orders})
# 提交订单方案1取消 ostatue:-1表示已经删除的订单;0表示提交未支付;1表示已经支付待发货;2已发货未收货;3确认收货交易完成||oenable:0为有效数据,-1为无效数据
# 使用备用字段显示订单状态:odesc
def orderput(request):
uname = request.session['user_name'] # 用户名
request.encoding = 'utf-8'
if 'address' in request.GET and 'phone' in request.GET and 'uname' in request.GET:
add = request.GET['address']
pho = request.GET['phone']
ouser = request.GET['uname']
desc = request.GET['desc']
ocid = request.GET['oid']
# 根据collect_id获取信息
start=ocid.index('=')
ocid=ocid[start+1:]
print('ocid:',ocid,pho)
col=Collect.objects.filter(id=ocid)
print('col',col)
oname=col[0].cname
oprice=col[0].callprice
onum=col[0].cnumber
opath=col[0].cpath
otime=time.strftime("%Y-%m-%d %X")
adord = Order(cid=ocid, uname=uname, oname=oname, oprice=oprice, onum=onum, oaddress=add, ouser=ouser,
ophone=pho, otime=otime, oenable=0, ostatue=0, opath=opath, odesc='未支付', ohandle1='支付',
ohandle2='删除', ohandle3='')
adord.save()
coll = Collect.objects.get(id=ocid)
coll.cenable = -2 # 购物车到订单状态调整
coll.save()
posts = Order.objects.all().filter(uname=uname, oenable=0)[:6]
data = {'posts': posts}
return render(request, 'polls/orderlist.html', context=data)
# 查询订单
def orderquery(request):
request.encoding = 'utf-8'
uname = request.session['user_name']
if 'ordername' in request.GET:
cname = request.GET['ordername']
posts = Order.objects.filter(oname__contains=cname, uname=uname, oenable=0)
data = {'posts': posts}
return render(request, 'polls/orderlist.html', context=data)
# 删除订单
def orderdelete(request):
if 'odid' in request.GET: # 更新状态不做删除
did = request.GET['odid']
ordd = Order.objects.get(id=did)
ordd.oenable = -1
ordd.ostatue = -1
ordd.save()
uname = request.session['user_name']
posts = Order.objects.all().filter(uname=uname, oenable=0)[:6]
data = {'posts': posts}
return render(request, 'polls/orderlist.html', context=data)
# 支付跳转
def orderpay(request):
    """Render the standalone payment form page."""
    return render(request, 'polls/pay.html')
# Payment result handler; the demo payment password is 666666.
# NOTE(review): a hard-coded plaintext password is acceptable only for a demo —
# replace with a real payment/auth flow before shipping.
@csrf_exempt
def payresult(request):
    # Expected payment password for this demo shop.
    pawss = '666666'
    if request.POST:
        paw = request.POST['paw']
        # print(paw)
        olid = request.POST['olid']
        # `olid` arrives as "...=<order id>"; keep only the part after '='.
        start = olid.index('=')
        olid = olid[start + 1:]
        print(olid)
        orde = Order.objects.get(id=olid)
        uname = request.session['user_name']
        if paw == pawss:
            # Correct password: mark the order as awaiting shipment
            # ('待发货') and expose only the confirm-receipt action ('确认收货').
            orde.odesc='待发货'
            orde.ohandle1=''
            orde.ohandle2=''
            orde.ohandle3='确认收货'
            orde.save()
            posts = Order.objects.filter(uname=uname, oenable=0)
            data = {'posts': posts}
            return render(request, 'polls/orderlist.html', context=data)
        else:
            # Wrong password: leave the order in the awaiting-payment
            # state ('待支付') and re-render the order list.
            orde.odesc = '待支付'
            orde.save()
            posts = Order.objects.filter(uname=uname, oenable=0)
            data = {'posts': posts}
            return render(request, 'polls/orderlist.html', context=data)
# 显示商品详情
def petdetail(request):
    """Show the detail page for one pet (id taken from ?detailid=)."""
    pet_id = request.GET['detailid']
    matching_pets = Pets.objects.filter(id=pet_id)
    print(matching_pets)  # debug trace kept from the original
    return render(request, 'polls/petdetail.html', context={'posts': matching_pets})
# 商品详情直接下单 不经过购物车,数量无法调整,需要是登录状态
def deorders(request):
#判断用户是否登录
if not request.session.get('is_login', None):
return redirect("/login")
jid=request.GET['jid']
print('jid:',jid)
posts=Pets.objects.filter(id=jid)
data={'posts':posts}
return render(request,'polls/orderde.html',context=data)
# 从详情直接确认订单,需要登录状态
def orderputde(request):
#判断用户是否登录
if not request.session.get('is_login', None):
return redirect("/login")
uname=request.session['user_name'] #用户名
request.encoding='utf-8'
if 'address' in request.GET and 'phone' in request.GET and 'uname' in request.GET:
add=request.GET['address']
pho=request.GET['phone']
ouser=request.GET['uname']
desc=request.GET['desc']
ocid=request.GET['oid']
#根据collect_id获取信息
start=ocid.index('=')
ocid=ocid[start+1:]
print('ocid:',ocid,pho)
col=Pets.objects.filter(id=ocid)
print('col',col)
oname=col[0].pname
oprice=col[0].pprice
onum=1
opath=col[0].ppath
otime=time.strftime("%Y-%m-%d %X")
adord=Order(cid=ocid,uname=uname,oname=oname,oprice=oprice,onum=onum,oaddress=add,ouser=ouser,ophone=pho,otime=otime,oenable=0,ostatue=1,opath=opath,odesc='未支付',ohandle1='支付',ohandle2='删除',ohandle3='')
adord.save()
posts=Order.objects.all().filter(uname=uname,oenable=0)[:6]
data={'posts':posts}
return render(request,'polls/orderlist.html',context=data)
# 商品详情到购物车
def degoodadd(request):
request.encoding='utf-8'
#判断用户是否登录
if not request.session.get('is_login', None):
return redirect("/login")
if 'collect' in request.GET:#判断是否有添加到购物车请求
pid=request.GET['collect']
collectone=Collect.objects.filter(pid=pid,cenable=0) #
print('collect',collectone)
getpetone=Pets.objects.filter(id=pid)
cprice=getpetone[0].pprice
if collectone.count()<1:#如果该购车车没有该商品就添加
#1.插入数据
cuser=request.session['user_name'] #此处替换保留的用户名
cname=getpetone[0].pname
ctype=getpetone[0].ptype
cpath=getpetone[0].ppath
cdes=getpetone[0].pdescribe
ctime=time.strftime("%Y-%m-%d %X") #加入购车车时间
#cnumber为商品数量
col=Collect(pid=pid,uname=cuser,cname=cname,ctype=ctype,ctime=ctime,cnumber=1,cdescribe=cdes,cprice=cprice,callprice=cprice,ctypeid=0,cpath=cpath)
col.save()
else: #购物车数量增加
cnumold=collectone[0].cnumber
allprice=collectone[0].callprice
newallprice=cprice+allprice
cnumnew=cnumold+1
coladdnum=Collect.objects.get(pid=pid,cenable=0)
coladdnum.cnumber=cnumnew
coladdnum.callprice=newallprice
coladdnum.save()
pnumold=getpetone[0].pnumber
pnumnew=pnumold+1
petnew=Pets.objects.get(id=pid)
petnew.pnumber=pnumnew
petnew.save()
uname = request.session['user_name']
posts=Collect.objects.all().filter(uname=uname,cenable=0)[:6]
data={'posts':posts}
return render(request,'polls/collect.html',context=data)
def colles(request):
request.encoding='utf-8'
#判断用户是否登录
if not request.session.get('is_login', None):
return redirect("/login")
if 'sid' in request.GET: #减少数量
sid=request.GET['sid']
print('sid')
coll=Collect.objects.filter(id=sid)
oldnum=coll[0].cnumber
price=coll[0].cprice
oldallprice=coll[0].callprice
print('oldnum:',oldnum)
if oldnum>1: #数量改变同时金额也发生变化
newnum=oldnum-1
allprice=oldallprice-price
snum=Collect.objects.get(id=sid)
snum.callprice=allprice
snum.cnumber=newnum
snum.save()
username=request.session['user_name']
posts=Collect.objects.all().filter(uname=username,cenable=0)[:6]
print('ind')
return render(request,'polls/collect.html',{'posts':posts})
def coladd(request):
request.encoding='utf-8'
#判断用户是否登录
if not request.session.get('is_login', None):
return redirect("/login")
if 'aid' in request.GET:
aid=request.GET['aid']
print('sid')
coll=Collect.objects.filter(id=aid)
oldnum=coll[0].cnumber
price=coll[0].cprice
oldallprice=coll[0].callprice
print('oldnum:',oldnum)
if oldnum<10000: #假设最大库存1000 金额发生变化
newnum=oldnum+1
allprice=oldallprice+price
anum=Collect.objects.get(id=aid)
anum.cnumber=newnum
anum.callprice=allprice
anum.save()
username=request.session['user_name']
posts=Collect.objects.all().filter(uname=username,cenable=0)[:6]
print('ind')
return render(request,'polls/collect.html',{'posts':posts})
def coldel(request):
request.encoding='utf-8'
#判断用户是否登录
if not request.session.get('is_login', None):
return redirect("/login")
if 'did' in request.GET:
did=request.GET['did']
col=Collect.objects.get(id=did)
col.cenable=-1
col.save()
username=request.session['user_name']
posts=Collect.objects.all().filter(uname=username,cenable=0)[:6]
print('ind')
return render(request,'polls/collect.html',{'posts':posts})
# 支付后
def oresult(request):
if 'oqid' in request.GET: #更新状态不做删除
did=request.GET['oqid']
ordd=Order.objects.get(id=did)
ordd.ohandle1=''
ordd.ohandle2='关闭订单'
ordd.ohandle3=''
ordd.odesc='已经收货'
ordd.save()
username = request.session['user_name']
posts=Order.objects.all().filter(uname=username,oenable=0)[:6]
data={'posts':posts}
return render(request,'polls/orderlist.html',context=data)
| [
"korosue7@icloud.com"
] | korosue7@icloud.com |
afed2374a90d5f5c0bbdad2edc9694d5aacf12f2 | 29a4ac750e4784fd61292c43a5b50c13c13c0a74 | /recite/apps.py | 8c8ddc50b83a7aadbeb82ff8fbd24f939d2760d7 | [] | no_license | catfish18674282560/myproject | ff6bb6afddb907725c39e37a87ba1ac3b8aaa388 | ad11b7ff310602116f845294fe2ba07816a3cb56 | refs/heads/master | 2021-07-16T09:02:03.464461 | 2021-01-18T02:17:47 | 2021-01-18T02:17:47 | 217,272,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from django.apps import AppConfig
class ReciteConfig(AppConfig):
name = 'recite'
verbose_name = "单词本"
| [
"catfish1921@outlook.com"
] | catfish1921@outlook.com |
afb9463a2e3ec2f6584dbe2d00849104e04ba7ac | 0e8d70ba0b7229614736e279fecd74118f45761a | /class21_15day/test_case111/read_excel.py | 94cc0f5ffdfce2c9a1ab336d7a4a6604987b1f32 | [] | no_license | Amyli0601/python | 41709d43aaf1270157fb0f598c273b906ea659a2 | eaf09ef2ca0fdaa5a5a001aa0d2c35c1ffdef321 | refs/heads/master | 2020-08-08T15:18:33.915520 | 2019-10-09T08:18:07 | 2019-10-09T08:18:07 | 213,856,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,227 | py | """
============================
Author:柠檬班-木森
Time:2019/8/20
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
import openpyxl
class CaseData(object):
"""测试用数据类"""
def __init__(self, zip_obj):
# 变量zip对象
for i in zip_obj:
# 将表头作为属性,值作为属性值
setattr(self, i[0], i[1])
# [('表头',值),(),()]
# 定义一个类专门用例读取excel中的数据
class ReadExcel(object):
"""读取excel中的用例数据"""
def __init__(self, file_name, sheet_name):
"""
:param file_name: excel文件名
:param sheet_name: sheet表单名
"""
self.file_name = file_name
self.sheet_name = sheet_name
def open(self):
"""打开工作薄和表单"""
# # 打开文件,返回一个工作薄对象
self.wb = openpyxl.load_workbook(self.file_name)
# 通过工作薄,选择表单对象
self.sh = self.wb[self.sheet_name]
def read_data(self):
"""读取所有用例数据"""
# 打开文件和表单
self.open()
# 按行获取所有的表格对象,每一行的内容放在一个元祖中,以列表的形式返回
rows = list(self.sh.rows)
# 创建一个列表cases,存放所有的用例数据
cases = []
# 获取表头
titles1 = [r.value for r in rows[0]]
# 遍历其他的数据行,和表头进行打包,转换为字典,放到cases这个列表中
for row in rows[1:]:
# 获取该行数据
data = [r.value for r in row]
# 和表头进行打包,转换为字典
case = dict(zip(titles1, data))
cases.append(case)
# 将读取出来的数据进行返回
return cases
def read_data_obj(self):
# 打开工作簿
self.open()
# 创建一个空的列表,用例存放所有的用例数据
cases = []
# 读取表单中的数据
rows = list(self.sh.rows)
# 读取表头
print(rows[0])
# 获取表头
titles = [r.value for r in rows[0]]
# 遍历其他的数据行,和表头进行打包,转换为字典,放到cases这个列表中
for row in rows[1:]:
# 获取该行数据
data = [r.value for r in row]
zip_obj = zip(titles, data)
# 将每一条用例的数据,存储为一个对象
# 通过Case这个类来创建一个对象,参数,zip_obj
case_data = CaseData(zip_obj)
cases.append(case_data)
# 将包含所有用例的列表cases进从返回
return cases
def write_data(self, row, column, value):
"""
:param row: 写入的行
:param column: 写入的列
:param value: 写入的内容
:return:
"""
# 打开文件
self.open()
# 按照传入的行、列 内容进行写入
self.sh.cell(row=row, column=column, value=value)
# 保存
self.wb.save(self.file_name)
if __name__ == '__main__':
pass
| [
"panzhi@junbaob2b.com"
] | panzhi@junbaob2b.com |
dcaab1775a4fbf8221d2d1e5a24b257c3fd703a0 | ef2dfd512a6fe6a33490a899fc7d358d5df0270a | /dbuseractions.py | 6294c6c67e6b2403816524ec5669d201601be020 | [] | no_license | FilLav/librarysite | a402cc0e7efd46d21fbe423f396a2c01df227a7b | 5c875d604085a592e0127023094210db9ccbade1 | refs/heads/master | 2020-04-30T22:14:07.041015 | 2019-03-25T12:42:36 | 2019-03-25T12:42:36 | 177,113,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import sqlite3
conn = sqlite3.connect('libdb.db')
c = conn.cursor()
# search for books - quicksearch
def quicksearch(title):
comparison_string = f'%{quicksearchbox}%'
results = c.execute("""SELECT title, author, imgsrc from books WHERE title LIKE ?""", comparison_string)
# borrow a book
c.execute("""INSERT INTO loans (book_id, borrower_id, borrowed_on, borrowing_duration, is_returned)
VALUES (?,?,?,?,0);
UPDATE books
SET available = 0 WHERE id = ;""",
(, , , , ))
# edit own details
c.execute("""UPDATE users
SET first_name = ?, last_name = ? WHERE id = """,
(, )) | [
"filipp.lavrentiev@softwire.com"
] | filipp.lavrentiev@softwire.com |
36acfadb97d22726051cbbc8af95810d9b8534c1 | 28b0c983ac33c6b62c4755d8daf561819ecda2c9 | /yaproject/vcard/templatetags/edit_link.py | ddff3f4fc5b6c661941f171437083fac0514ce5b | [] | no_license | yatsarevsky/tsarev42cctest | 9682cc5aa4f2bd3cb3ce0399aa23eb1e6b6c5cb0 | 20a2f8c507fdaf58d9f73fcb58c912d41f13dcc4 | refs/heads/master | 2021-01-02T09:14:34.368609 | 2013-02-14T15:51:10 | 2013-02-14T15:51:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from django import template
from django.core.urlresolvers import reverse
register = template.Library()
class EditLinkNode(template.Node):
def __init__(self, target):
self.target = template.Variable(target)
def render(self, context):
obj = self.target.resolve(context)
pattern = 'admin:%s_%s_change' % (obj._meta.app_label,
obj._meta.module_name)
return '<a href="%s">Edit</a>' % reverse(pattern, args=[obj.id])
def get_edit_link(parser, token):
try:
tag_name, target = token.split_contents()
except ValueError:
err = "%r tag requires only one arguments" % token.contents.split()[0]
raise template.TemplateSyntaxError(err)
return EditLinkNode(target)
register.tag('get_edit_link', get_edit_link)
| [
"yatsarevsky@gmail.com"
] | yatsarevsky@gmail.com |
a7c8d7bddd7e6040690e8f4603b1523914061ddc | 113f8ae533a75e9f2fdc1728661af0f19c8460a6 | /template_advanced_demos/venv/bin/django-admin | d4b372e021a8bb698113792a0a8c99472edaa67e | [] | no_license | PeterM358/Python-web-2021 | cf08beaa3330495afc53e640f4a2aaf0429049e9 | a3b7e1d1be0cc85675aaff646917d4f5b7f97b00 | refs/heads/master | 2023-07-09T15:09:08.868548 | 2021-07-24T13:49:22 | 2021-07-24T13:49:22 | 382,328,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | #!/Users/petermihailov/django-test/template_advanced_demos/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"estestveno"
] | estestveno | |
e630c1055898d507869b7af0dfb38879c316ef50 | 5ad69af34748b6a6159d2eaaba5a961b4d3ef0f9 | /SyntheticDataset/main.py | 4010220b57cd7c31c10af22220e48b6b6db61061 | [] | no_license | Henryevogt/LOTO | 0119968f33dad9a988ff001988b2a26c367b9cfd | c64d74426459a26576141212861ba0b783aeb290 | refs/heads/master | 2023-01-24T12:14:44.402340 | 2020-12-01T23:48:59 | 2020-12-01T23:48:59 | 307,848,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | import cv2
import numpy
#from vision import Vision
#import matplotlib
tag_cascade = cv2.CascadeClassifier('tags_cascade/cascade.xml')
#cap = cv2.VideoCapture(0)
for i in range(5):
img = cv2.imread('tags_test/tag' + str(i) + '.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
rect = tag_cascade.detectMultiScale(img)
rect2 = tag_cascade.detectMultiScale(gray)
for (x,y,w,h) in rect:
# loop taken from https://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_Image_Object_Detection_Face_Detection_Haar_Cascade_Classifiers.php
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
for (x,y,w,h) in rect2:
# loop taken from https://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_Image_Object_Detection_Face_Detection_Haar_Cascade_Classifiers.php
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('tag', img)
cv2.waitKey(0) # Wait til a button is pressed
cv2.destroyAllWindows() # discard window and move on
| [
"Henryevogt@yahoo.com"
] | Henryevogt@yahoo.com |
d1d0508de70a0ada37a1c3e68468cb649846a73f | 9a423dfb84041a926970e10afad93f15619a34d8 | /backend/google_helpers/utils.py | cc3592f7ee47ae2ca715dbd7623e04aa1cc1fb21 | [] | no_license | Babalwa01/Tilde | 3c2d6295b3d5e8a0cce1331f657ad835688a4db5 | 8eaffeb2c6b78aec4f0d6b5f573106e0a705ae53 | refs/heads/master | 2023-05-28T23:06:49.205259 | 2021-05-18T08:41:14 | 2021-05-18T08:41:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,651 | py | import logging
import pandas as pd
from functools import lru_cache
import re
from timezone_helpers import timestamp_str_to_tz_aware_datetime
from google_helpers.constants import TIMESTAMP_FORMAT, TIMEZONE_NAME
def timestamp_to_datetime(timestamp):
return timestamp_str_to_tz_aware_datetime(
timestamp=timestamp, zone_name=TIMEZONE_NAME, dt_format=TIMESTAMP_FORMAT
)
def fetch_sheet(sheet: str = None, url: str = None):
print(f"Fetching sheet: {sheet} {url}")
service = authorize()
if sheet:
book = service.open(sheet)
elif url:
book = service.open_by_url(url)
logging.info(f"fetched sheet {sheet}")
sheet = book.sheet1 # choose the first sheet
return pd.DataFrame(sheet.get_all_records())
def authorize():
import json
from oauth2client.client import SignedJwtAssertionCredentials
import gspread
import os
# insert name of json service account key
SCOPE = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/drive",
]
SECRETS_FILE = os.getenv("GOOGLE_SHEETS_CREDENTIALS_FILE")
if not SECRETS_FILE:
raise Exception(
"Missing environmental variable: GOOGLE_SHEETS_CREDENTIALS_FILE"
)
# Based on docs here - http://gspread.readthedocs.org/en/latest/oauth2.html
# Load in the secret JSON key in working directory (must be a service account)
json_key = json.load(open(SECRETS_FILE))
# Authenticate using the signed key
credentials = SignedJwtAssertionCredentials(
json_key["client_email"], json_key["private_key"], SCOPE
)
ret = gspread.authorize(credentials)
return ret
# def date_from_args(date): # Not tz aware
# if type(date) is datetime.datetime:
# return date.date()
# for dt_format in [
# "%m/%d/%Y %H:%M:%S",
# "%m/%d/%Y %H:%M",
# "%m/%d/%Y",
# "%d/%m/%Y",
# "%d/%m/%Y %H:%M",
# "%d/%m/%Y %H:%M:%S",
# "%Y/%m/%d %H:%M:%S",
# ]:
# try:
# return datetime.datetime.strptime(date, dt_format).date()
# except ValueError:
# pass
# raise Exception(f"date '{date}' not allowed")
# def timestamp_to_date(timestamp): # Not tz aware
# return timestamp_to_datetime(timestamp).date()
def clean_project_url_part(df, source_col, dest_col):
def mapper(row):
found = re.match(".*(projects/.*$)", str(row[source_col]))
if found:
return found.groups()[0]
return ""
df[dest_col] = df.apply(mapper, axis=1)
df = df[df[source_col].str.contains("projects/")]
return df
| [
"sheena.oconnell@gmail.com"
] | sheena.oconnell@gmail.com |
e4df547aff3a3fe3d625a6c012478ce4a1959bc3 | 183e5563514c1f13bd1982358db089e02b4d7eeb | /graphmodels/vis/views.py | b70b420a33c185964e76318707d91eed598d2363 | [] | no_license | jiangfeng1124/bigdata | fe2019ed8b6c1d365b871056e60369985c79a892 | 16a02fe33f7fedbbdb9f050dc12b31609b019f08 | refs/heads/master | 2020-04-24T12:32:12.299328 | 2013-01-31T09:31:38 | 2013-01-31T09:31:38 | 7,175,556 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,531 | py | # Create your views here.
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from forms import UploadVisForm
from models import Vis
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.db.models import Q
import os
import datetime
import simplejson
import pickle
@login_required
def vis_view(request):
user = request.user
vis_list = Vis.objects.filter(Q(owner__id=user.id) | Q(access="public")).order_by('-id')
return render_to_response("vis/view.html", RequestContext(request, {'user': user, 'vis_list': vis_list}))
def vis_info(request, offset):
user = request.user
try:
vis_id = int(offset)
except ValueError:
raise Http404()
vis = Vis.objects.filter(Q(id=vis_id))
if len(vis) == 0:
vis_list = Vis.objects.filter(Q(owner__id=user.id) | Q(access="public")).order_by('-id')
notice = "Couldn't find Task with ID=" + str(vis_id)
return render_to_response('vis/view.html', RequestContext(request, {'user': user, 'vis_list': vis_list, 'notice': notice}))
vis = vis[0]
if vis.progress != "visualized":
return render_to_response("vis/info.html", RequestContext(request, {'vis': vis, 'status': "0"}))
vis_res_path = os.path.join(settings.USR_VIS_ROOT)
f_graph_json = open(os.path.join(vis_res_path, str(vis_id), "graph.json"))
graph_json_data = simplejson.load(f_graph_json)
graph_json_data = simplejson.dumps(graph_json_data)
f_icov_json = open(os.path.join(vis_res_path, str(vis_id), "icov.json"))
icov_json_data = simplejson.load(f_icov_json)
icov_json_data = simplejson.dumps(icov_json_data)
f_degree = open(os.path.join(vis_res_path, str(vis_id), "graph.degree"))
degree_tbl = pickle.load(f_degree)
result_graph = os.path.join("/vresult", str(vis_id), "graph.png")
circos_png = os.path.join("/vresult", str(vis_id), "circos.png")
circos_svg = os.path.join("/vresult", str(vis_id), "circos.svg")
return render_to_response('vis/info.html', RequestContext(request, {'vis': vis, 'status': "1", 'result_graph': result_graph, 'circos_png': circos_png, 'circos_svg': circos_svg, 'graph_json_data': graph_json_data, 'degree_tbl': degree_tbl, 'icov_json_data': icov_json_data}))
@login_required
def vis_upload(request):
user = request.user
if request.method == 'POST':
if user.is_authenticated():
file = request.FILES.get('filename', '')
file_name = file.name
dest_dir = os.path.join(settings.USR_VDATASET_ROOT, user.username)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
full_path = os.path.join(dest_dir, file_name)
rel_path = os.path.join(user.username, file_name)
destination = open(full_path, "wb+")
for chunk in file.chunks():
destination.write(chunk)
destination.close()
description = request.POST['description']
access = request.POST['access']
tbl_separator = {"tab":'\t', "space":' ', "comma":',', "semicolon":';'}
sep_str = request.POST['sep']
sep = tbl_separator[sep_str]
header = request.POST['header']
if header == 'yes':
header = True;
elif header == 'no':
header = False;
## a simple check
dim = 0
for line in open(full_path):
dim += 1
if header == True:
dim -= 1 # exclude the header line
create_date = datetime.datetime.now()
new_vis = Vis(owner = user, data_path = rel_path, data_name = file_name, data_dim = dim, data_description = description, create_date = create_date, progress = "waiting", access = access, data_sep = sep_str, data_header = header)
new_vis.save()
new_vis_id = new_vis.id
new_vis.result_dir = str(new_vis_id)
new_vis.save()
notice = "Congratulations! Your dataset has been successfully uploaded."
# return render_to_response('dataset/success.html', RequestContext(request, {'dataset': new_dataset, 'notice': notice}))
return HttpResponseRedirect('/viss/%s/' % new_vis.id)
else:
notice = "You must be logged in to upload datasets"
form = UploadVisForm()
return render_to_response('vis/upload.html', RequestContext(request, {'form': form, 'notice': notice}))
else:
form = UploadVisForm()
return render_to_response('vis/upload.html', RequestContext(request, {'form': form}))
| [
"jiangfeng1124@gmail.com"
] | jiangfeng1124@gmail.com |
25f36707799253f370eeb2ff989176d7430e52ac | 0c84cc9a2c06594e01835a617a7d5866f9db68a4 | /importing-example/example_2/use_animals.py | a55375cf73a3a4e3806d644da20f0e1ba7b9f72f | [] | no_license | 01-Jacky/Python-Things | a508ac4161c0f836fb793bd07e8c69ff0f3d6e1d | 5153a27cdf9dc17ec3344c2774674c7f92156cf6 | refs/heads/master | 2021-03-19T16:59:50.000741 | 2018-04-04T23:48:46 | 2018-04-04T23:48:46 | 100,906,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | # Import classes from your brand new package
from package.Animals import Mammals
from package.Animals import Birds
# Create an object of Mammals class & call a method of it
myMammal = Mammals()
myMammal.printMembers()
# Create an object of Birds class & call a method of it
myBird = Birds()
myBird.printMembers() | [
"hklee310@gmail.com"
] | hklee310@gmail.com |
69adac80e2cb020bfabb0f39be9d2f6f3469bd4b | cc0eb66fe99e9e72c2c1b37e68df9307aed33b71 | /window_develop.py | bd0937c7c4c730ba526199a56af4840a615fdbaa | [] | no_license | deltacluse/flying_attack | 0404f89f5b1d786cbdcee4d2353b08da7ed46a42 | 266b26d7bbcfa7bc3d9799b5003a4e8c32be109b | refs/heads/master | 2021-05-01T18:52:58.600448 | 2018-05-23T04:01:10 | 2018-05-23T04:01:10 | 121,010,474 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | import tkinter as tk
class Develop:
def __init__(self):
self.root = tk.Tk()
self.set_window()
self.create_frame()
self.create_widgets()
self.root.mainloop()
# 창 설정
def set_window(self):
self.window_width = 300
self.window_height = 500
window_x = 150
window_y = 130
window_background = 'white'
self.root.title('Develop & Version')
self.root.geometry('{0}x{1}+{2}+{3}'
.format(self.window_width, self.window_height, window_x, window_y))
self.root.configure(bg=window_background)
self.root.resizable(False, False)
# 프레임 생성
def create_frame(self):
self.frame_developer = tk.Frame(self.root,
width=self.window_width,
height=(self.window_height * 0.2))
self.frame_developer.grid(row=0, column=0)
self.frame_version = tk.Frame(self.root,
width=self.window_width,
height=(self.window_height * 0.8))
self.frame_version.grid(row=1, column=0)
# 위젯 생성
def create_widgets(self):
self.widget_developer()
self.widget_version()
# 개발자 소개
def widget_developer(self):
pass
# 버전 소개
def widget_version(self):
pass
| [
"deltacluse@naver.com"
] | deltacluse@naver.com |
f1bdd8f735b1b4dec5647d07584d880d785855a7 | 1f8f13aefb7faf3c4e98a31f8ba3654b480cfd17 | /find_paths.py | 508c31d24e462a2aa413a40cea7d2b0b13e86a97 | [] | no_license | jcass8695/Interview-Prep | d5b25919127a8b8fd6685186c615d9ce2ad3cf03 | 0811484462e07d67739a49224e8537b52106582b | refs/heads/master | 2021-10-29T09:21:59.084708 | 2018-01-23T17:35:31 | 2018-01-23T17:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | from pprint import pprint
from random import randrange
def main():
memo = {}
grid = [[' ' for i in range(10)] for j in range(10)]
for _ in range(30):
rand_row = randrange(len(grid))
rand_col = randrange(len(grid[0]))
grid[rand_row][rand_col] = 'X'
pprint(grid)
print(find_num_paths(grid, 0, 0, memo))
pprint(grid)
def find_num_paths(grid, row, col, memo):
if row == 9 and col == 9:
return 1
if row > 9 or col > 9:
return 0
if grid[row][col] == 'X':
return 0
if (row, col) in memo.keys():
return memo[(row, col)]
grid[row][col] = '0'
return find_num_paths(grid, row + 1, col, memo) + find_num_paths(grid, row, col + 1, memo)
if __name__ == '__main__':
main()
| [
"j.cassidy45@gmail.com"
] | j.cassidy45@gmail.com |
f1d4083a96c834d44d9d23e60dddbdf027aa4950 | ef43f1c6531bc38dc78f58c16a6a3fecee5c1dc2 | /yelp_dataset_challenge_academic_dataset/test.py | e0c45b629a6aec68a4d2652ad8861bb29d9d4874 | [] | no_license | asutreja/YelpDataChallengeProject | 851c8a60b9a8c3f8d1014341d963f3dd5bccd25b | 961e1ed04815bc48caf764f6b76ff1a2fbe2850b | refs/heads/master | 2021-01-20T20:56:39.471129 | 2015-06-08T20:54:04 | 2015-06-08T20:54:04 | 36,901,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,110 | py | import csv
import json
import urllib2
import multiprocessing as mp
from geopy.geocoders import Nominatim
import multiprocessing as mp
check_in_business_id_for_late_night = dict()
check_in_business_id_for_regular = dict()
user_id_set = set() # total 366716 unique user ids in this set
cities = set() # each unique city is stored here
# business_id mapped to # of late night checkins (10 pm to 2 am)
german_business_late_night = dict()
uk_business_late_night = dict()
us_business_late_night = dict()
canada_business_late_night = dict()
# business_id mapped to # of regular checkins (6 pm to 10 pm)
german_business_regular = dict()
uk_business_regular = dict()
us_business_regular = dict()
canada_business_regular = dict()
# number of businesses stays open very late in each country (closing between 11 pm to 2 am)
german_late_count = 0
uk_late_count = 0
us_late_count = 0
canada_late_count = 0
#writeFile = open('cities', 'w')
# in business csv:
# latitude = 10th column
# longitude = 74th column
# business ID = 16th colomn
# cities = 61st column
# state = 39th column
# state in UK = EDH, SCB, KHL, ELN, HAM, MLN, FIF, XGL, NTH,
# state in German = BW, RP, NW,
# state in Canada = ON, QC,
# state in USA = WA, WI, NC, PA, NV, CA, IL, AZ, MA, MN, SC, OR
# takes latitude and longitude and returns country name
# def lookup(lat, lon):
# data = json.load(urllib2.urlopen('http://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&sensor=false' % ( str(row[10]), str(row[74]) ) ) )
# for result in data['results']:
# for component in result['address_components']:
# if 'country' in component['types']:
# return component['long_name']
# return None
# def findCountry(newMap):
# global business_id_set
# global num
# coordPair = business_id_set[newMap]
# geolocator = Nominatim()
# print num
# num +=1
# location = geolocator.reverse(coordPair)
# if(location.address):
# business_id_set[newMap] = location.raw['address']['country']
# words to look for: Restaurants, Food, Fast Food, Cafes
# hours.Monday.close = 75th column
# hours.Tuesday.close = 77th column
# hours.Wednesday.close = 58th column
# hours.Thursday.close = 47th column
# hours.Friday.close = 41st column
# hours.Saturday.close = 78th column
# hours.Sunday.close = 86th column
def getBusinessId():
global canada_business_late_night, us_business_late_night, uk_business_late_night, german_business_late_night
global german_business_regular, uk_business_regular, us_business_regular, canada_business_regular
#count = 0
new_category_set = set()
with open('yelp_academic_dataset_business.csv', 'rU') as f:
reader = csv.reader(f)
next(reader, None) # skipping the header
for row in reader:
# code to find the column index in csv file
# for each in row:
# if each == 'hours.Sunday.close':
# print each
# print count
# count+=1
# break
temp_business_id = row[16]
temp_state = row[39]
temp_category = row[9]
if temp_category.find('Restaurants') != -1 or temp_category.find('Fast Food') != -1 or temp_category.find('Cafes') != -1 or temp_category.find('Food') != -1:
if(temp_state == 'ON' or temp_state == 'QC'):
canada_business_late_night[temp_business_id] = 0
canada_business_regular[temp_business_id] = 0
elif(temp_state == 'BW' or temp_state == 'RP' or temp_state == 'NW'):
german_business_late_night[temp_business_id] = 0
german_business_regular[temp_business_id] = 0
elif(temp_state == 'EDH' or temp_state == 'SCB' or temp_state == 'KHL' or temp_state == 'ELN' or temp_state == 'HAM' or temp_state == 'MLN' or temp_state == 'FIF' or temp_state == 'XGL' or temp_state == 'NTH'):
uk_business_late_night[temp_business_id] = 0
uk_business_regular[temp_business_id] = 0
else:
us_business_late_night[temp_business_id] = 0
us_business_regular[temp_business_id] = 0
#lonLatPair = str(row[10]) + "," + str(row[74])
#pairs.append(lonLatPair)
#country = findCountry(lonLatPair)
#country = lookup( str(row[10]), str(row[74]) )
#if(country == 'United States of America'):
# us_business.add(row[16])
#elif (country == 'United Kingdom'):
# uk_business.add(row[16])
# elif (country == 'Deutschland'):
# german_business.add(row[16])
# elif(country == 'Canada'):
# canada_business.add(row[16])
#cities.add(row[61])
# check-in for 'checkin_info.0-0': 152 column
# check-in for 'checkin_info.0-1': 151 column
# check-in for 'checkin_info.0-2': 154 column
# check-in for 'checkin_info.0-3': 153 column
# check-in for 'checkin_info.0-4': 149 column
# check-in for 'checkin_info.0-5': 148 column
# check-in for 'checkin_info.0-6': 150 column
# check-in for 'checkin_info.1-0': 55 column
# check-in for 'checkin_info.1-1': 32 column
# check-in for 'checkin_info.1-2': 57 column
# check-in for 'checkin_info.1-3': 58 column
# check-in for 'checkin_info.1-4': 59 column
# check-in for 'checkin_info.1-5': 60 column
# check-in for 'checkin_info.1-6': 61 column
# check-in for 'checkin_info.22-0': 36 column
# check-in for 'checkin_info.22-1': 37 column
# check-in for 'checkin_info.22-2': 38 column
# check-in for 'checkin_info.22-3': 39 column
# check-in for 'checkin_info.22-4': 33 column
# check-in for 'checkin_info.22-5': 34 column
# check-in for 'checkin_info.22-6': 35 column
# check-in for 'checkin_info.23-0': 141 column
# check-in for 'checkin_info.23-1': 140 column
# check-in for 'checkin_info.23-2': 143 column
# check-in for 'checkin_info.23-3': 142 column
# check-in for 'checkin_info.23-4': 145 column
# check-in for 'checkin_info.23-5': 144 column
# check-in for 'checkin_info.23-6': 146 column
def getCheckInCountsForLateNight():
global check_in_business_id
# business_id is 14th column in checkin.csv
with open('yelp_academic_dataset_checkin.csv', 'rU') as f:
reader = csv.reader(f)
next(reader, None)
for row in reader:
total_checkins = 0
temp_business_id = row[14]
# counts 12 am to 1 am checkins
check0_0 = row[152]
check0_1 = row[151]
check0_2 = row[154]
check0_3 = row[153]
check0_4 = row[149]
check0_5 = row[148]
check0_6 = row[150]
if(check0_0 != ''):
total_checkins += int(check0_0)
if(check0_1 != ''):
total_checkins += int(check0_1)
if(check0_2 != ''):
total_checkins += int(check0_2)
if(check0_3 != ''):
total_checkins += int(check0_3)
if(check0_4 != ''):
total_checkins += int(check0_4)
if(check0_5 != ''):
total_checkins += int(check0_5)
if(check0_6 != ''):
total_checkins += int(check0_6)
# counts 1 am to 2 am checkins
check1_0 = row[55]
check1_1 = row[32]
check1_2 = row[57]
check1_3 = row[58]
check1_4 = row[59]
check1_5 = row[60]
check1_6 = row[61]
if(check1_0 != ''):
total_checkins += int(check1_0)
if(check1_1 != ''):
total_checkins += int(check1_1)
if(check1_2 != ''):
total_checkins += int(check1_2)
if(check1_3 != ''):
total_checkins += int(check1_3)
if(check1_4 != ''):
total_checkins += int(check1_4)
if(check1_5 != ''):
total_checkins += int(check1_5)
if(check1_6 != ''):
total_checkins += int(check1_6)
# counts 10 pm to 11 pm checkins
check22_0 = row[36]
check22_1 = row[37]
check22_2 = row[38]
check22_3 = row[39]
check22_4 = row[33]
check22_5 = row[34]
check22_6 = row[35]
if(check22_0 != ''):
total_checkins += int(check22_0)
if(check22_1 != ''):
total_checkins += int(check22_1)
if(check22_2 != ''):
total_checkins += int(check22_2)
if(check22_3 != ''):
total_checkins += int(check22_3)
if(check22_4 != ''):
total_checkins += int(check22_4)
if(check22_5 != ''):
total_checkins += int(check22_5)
if(check22_6 != ''):
total_checkins += int(check22_6)
# counts 11 pm to 12 am checkins
check23_0 = row[141]
check23_1 = row[140]
check23_2 = row[143]
check23_3 = row[142]
check23_4 = row[145]
check23_5 = row[144]
check23_6 = row[146]
if(check23_0 != ''):
total_checkins += int(check23_0)
if(check23_1 != ''):
total_checkins += int(check23_1)
if(check23_2 != ''):
total_checkins += int(check23_2)
if(check23_3 != ''):
total_checkins += int(check23_3)
if(check23_4 != ''):
total_checkins += int(check23_4)
if(check23_5 != ''):
total_checkins += int(check23_5)
if(check23_6 != ''):
total_checkins += int(check23_6)
check_in_business_id_for_late_night[temp_business_id] = total_checkins
def getCheckInCountsForRegular():
count = 0
with open('yelp_academic_dataset_checkin.csv', 'rU') as f:
reader = csv.reader(f)
next(reader, None)
for row in reader:
temp_business_id = row[14]
total_checkins = 0
# column for 6 pm to 7 pm
check18_0 = row[169]
check18_1 = row[168]
check18_2 = row[167]
check18_3 = row[166]
check18_4 = row[165]
check18_5 = row[164]
check18_6 = row[163]
if(check18_0 != ''):
total_checkins += int(check18_0)
if(check18_1 != ''):
total_checkins += int(check18_1)
if(check18_2 != ''):
total_checkins += int(check18_2)
if(check18_3 != ''):
total_checkins += int(check18_3)
if(check18_4 != ''):
total_checkins += int(check18_4)
if(check18_5 != ''):
total_checkins += int(check18_5)
if(check18_6 != ''):
total_checkins += int(check18_6)
# column for 7 pm to 8 pm
check19_0 = row[76]
check19_1 = row[77]
check19_2 = row[74]
check19_3 = row[75]
check19_4 = row[79]
check19_5 = row[80]
check19_6 = row[78]
if(check19_0 != ''):
total_checkins += int(check19_0)
if(check19_1 != ''):
total_checkins += int(check19_1)
if(check19_2 != ''):
total_checkins += int(check19_2)
if(check19_3 != ''):
total_checkins += int(check19_3)
if(check19_4 != ''):
total_checkins += int(check19_4)
if(check19_5 != ''):
total_checkins += int(check19_5)
if(check19_6 != ''):
total_checkins += int(check19_6)
# column for 8 pm to 9 pm
check20_0 = row[9]
check20_1 = row[10]
check20_2 = row[7]
check20_3 = row[8]
check20_4 = row[12]
check20_5 = row[13]
check20_6 = row[11]
if(check20_0 != ''):
total_checkins += int(check20_0)
if(check20_1 != ''):
total_checkins += int(check20_1)
if(check20_2 != ''):
total_checkins += int(check20_2)
if(check20_3 != ''):
total_checkins += int(check20_3)
if(check20_4 != ''):
total_checkins += int(check20_4)
if(check20_5 != ''):
total_checkins += int(check20_5)
if(check20_6 != ''):
total_checkins += int(check20_6)
# column for 9 pm to 10 pm
check21_0 = row[101]
check21_1 = row[100]
check21_2 = row[99]
check21_3 = row[98]
check21_4 = row[97]
check21_5 = row[96]
check21_6 = row[95]
if(check21_0 != ''):
total_checkins += int(check21_0)
if(check21_1 != ''):
total_checkins += int(check21_1)
if(check21_2 != ''):
total_checkins += int(check21_2)
if(check21_3 != ''):
total_checkins += int(check21_3)
if(check21_4 != ''):
total_checkins += int(check21_4)
if(check21_5 != ''):
total_checkins += int(check21_5)
if(check21_6 != ''):
total_checkins += int(check21_6)
check_in_business_id_for_regular[temp_business_id] = total_checkins
def numberOfBusinessesStaysOpenLate():
    """Count, per country, the known late-night businesses that close between
    11 pm and 2 am on every weekday (Mon-Fri).

    Reads yelp_academic_dataset_business.csv and increments the per-country
    *_late_count globals for every matching business id.
    """
    global canada_business_late_night, us_business_late_night, uk_business_late_night, german_business_late_night
    global german_late_count, uk_late_count, us_late_count, canada_late_count
    # closing times that count as "late" (11 pm through 2 am)
    late_closings = ('23:00', '0:00', '1:00', '2:00')
    # csv column indices holding the Mon, Tue, Wed, Thu, Fri closing times
    closing_columns = (75, 77, 58, 47, 41)
    with open('yelp_academic_dataset_business.csv', 'rU') as f:
        reader = csv.reader(f)
        next(reader, None)  # skipping the header
        for row in reader:
            temp_business_id = row[16]
            # the business qualifies only if every weekday closing is late
            if all(row[col] in late_closings for col in closing_columns):
                if temp_business_id in us_business_late_night:
                    us_late_count += 1
                elif temp_business_id in uk_business_late_night:
                    uk_late_count += 1
                elif temp_business_id in german_business_late_night:
                    german_late_count += 1
                elif temp_business_id in canada_business_late_night:
                    canada_late_count += 1
def main():
global canada_business_late_night, german_business_late_night, us_business_late_night, uk_business_late_night
global check_in_business_id_for_late_night, check_in_business_id_for_regular
global us_business_regular, uk_business_regular, german_business_regular, canada_business_regular
global german_late_count, uk_late_count, us_late_count, canada_late_count
us_count = 0
german_count = 0
uk_count = 0
canada_count = 0
getBusinessId()
getCheckInCountsForLateNight()
getCheckInCountsForRegular()
# assigning total late night check-ins based on business id's
for i in check_in_business_id_for_late_night.keys():
if(us_business_late_night.has_key(i)):
us_business_late_night[i] = check_in_business_id_for_late_night[i]
elif(uk_business_late_night.has_key(i)):
uk_business_late_night[i] = check_in_business_id_for_late_night[i]
elif(canada_business_late_night.has_key(i)):
canada_business_late_night[i] = check_in_business_id_for_late_night[i]
elif(german_business_late_night.has_key(i)):
german_business_late_night[i] = check_in_business_id_for_late_night[i]
# adding total late night check-ins
for i in us_business_late_night.keys():
us_count += us_business_late_night[i]
for i in german_business_late_night.keys():
german_count += german_business_late_night[i]
for i in canada_business_late_night.keys():
canada_count += canada_business_late_night[i]
for i in uk_business_late_night.keys():
uk_count += uk_business_late_night[i]
# pool = mp.Pool(processes=5)
# pool.map( findCountry, business_id_set )
#business_id_set[ business_id_set.keys()[0] ] = "Hello"
#print business_id_set
print
print "****************************"
print "Number of Restaurants in dataset by countries: "
print "****************************"
print "UK -> %d" % len(uk_business_late_night)
print "US -> %d" % len(us_business_late_night)
print "Canada -> %d" % len(canada_business_late_night)
print "Germany -> %d" % len(german_business_late_night)
print
print
print "****************************"
print "Number of Late Night check-ins (10 pm to 2 am) by countries:"
print "****************************"
print "UK -> %d" % uk_count
print "US -> %d" % us_count
print "Canada -> %d" % canada_count
print "German -> %d" % german_count
print
print
us_count = 0
german_count = 0
uk_count = 0
canada_count = 0
# assigning total regular check-ins based on business id's
for i in check_in_business_id_for_regular.keys():
if(us_business_regular.has_key(i)):
us_business_regular[i] = check_in_business_id_for_regular[i]
elif(uk_business_regular.has_key(i)):
uk_business_regular[i] = check_in_business_id_for_regular[i]
elif(canada_business_regular.has_key(i)):
canada_business_regular[i] = check_in_business_id_for_regular[i]
elif(german_business_regular.has_key(i)):
german_business_regular[i] = check_in_business_id_for_regular[i]
# adding total late night check-ins
for i in us_business_regular.keys():
us_count += us_business_regular[i]
for i in german_business_regular.keys():
german_count += german_business_regular[i]
for i in canada_business_regular.keys():
canada_count += canada_business_regular[i]
for i in uk_business_regular.keys():
uk_count += uk_business_regular[i]
print "****************************"
print "Number of Normal time check-ins (6 pm to 10 pm) by countries: "
print "****************************"
print "UK -> %d" % uk_count
print "US -> %d" % us_count
print "Canada -> %d" % canada_count
print "German -> %d" % german_count
print
print
numberOfBusinessesStaysOpenLate()
print "****************************"
print "Number of Restaurants stays open late (closes between 11 pm and 2 am) by countries: "
print "****************************"
print "UK -> %d" % uk_late_count
print "US -> %d" % us_late_count
print "Canada -> %d" % canada_late_count
print "German -> %d" % german_late_count
print
print
print "****************************"
main()
# with open('yelp_academic_dataset_user.csv', 'rU') as f:
# reader = csv.reader(f)
# for row in reader:
# user_id_set.add(row[16])
#for x in cities:
# writeFile.write(str(x) + "\n")
#writeFile.close()
#print business_id_set
# print "number of businesses in Canada: %d" % len(canada_business)
# print "****************************"
# print "number of businesses in UK: %d" % len(uk_business)
# print "****************************"
# print "number of businesses in Germany: %d" % len(german_business)
# print "****************************"
# print "number of businesses in US: %d" % len(us_business)
# print "****************************"
#print user_id_set | [
"asutreja@gmail.com"
] | asutreja@gmail.com |
d14bb95cdbd1c21c0a6b928b4d2cbb20b627f70f | 2c4013bf53e367f3bcb2ef3d6b0cf17bb76cd748 | /models/Settings.py | 750906a75058ea94768f00a5a71c4c0476706189 | [] | no_license | rexyan/Wedding | 26192a974cbadcd3332d0658323d4d9534ac3b01 | e09067af97561994471384ce204d5c6bc2c03f2a | refs/heads/master | 2021-03-30T20:33:37.899646 | 2018-07-27T14:29:51 | 2018-07-27T14:29:51 | 124,541,698 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | # --*--coding:utf8--*--
from Base import *
import datetime
# 创建单表
# single-table declarative model
class Setting(Base):
    """Site-wide configuration record (one row holds the site settings)."""
    __tablename__ = 'setting'  # table name
    SettingID = Column(Integer, primary_key=True, autoincrement=True)
    WebStatus = Column(Boolean, default=True)  # site on/off switch
    Logo = Column(String(100))  # site logo path/url
    WebKeywords = Column(String(100))  # SEO keywords
    Webdescription = Column(String(100))  # site description
    LoginBanner = Column(String(100))  # banner shown on the registration page

    def __repr__(self):
        # fix: the old format string "%s-%s" expected two values but was given
        # only one, which raised TypeError whenever repr() was taken
        return "%s" % (self.SettingID,)

    def to_json(self):
        # NOTE(review): LoginBanner is not serialized here — confirm intentional
        return {
            'SettingID': self.SettingID,
            'WebStatus': self.WebStatus,
            'Logo': self.Logo,
            'WebKeywords': self.WebKeywords,
            'Webdescription': self.Webdescription,
        }
def init_db():
    # create every table declared on Base's metadata (including 'setting')
    Base.metadata.create_all(engine)
def drop_db():
    # drop every table declared on Base's metadata — destructive, no confirmation
    Base.metadata.drop_all(engine)
def main(arg):
    """Create (arg == 1) or drop (arg == 0) the database schema.

    Any other value is silently ignored.
    """
    if arg == 0:
        drop_db()
    elif arg == 1:
        init_db()
if __name__ == '__main__':
    # build the tables when run as a script
    Session = sessionmaker(bind=engine)
    session = Session()  # NOTE(review): session is created but never used — confirm
    main(1)
| [
"1572402228@qq.com"
] | 1572402228@qq.com |
ea6d92b489cd7d208a3b647ed79d296abbc43af3 | b2c364f5242c1a95ba6f7548131b62e0c333bebb | /python/Sudoku Solver.py | 25146179a3efde770f9de517f050368de079640f | [
"MIT"
] | permissive | luosch/leetcode | dc6e0cfbe90f8e003533b0d804c04449efbdf22f | 4d030d0b9d1958666abc3af33fc99fc73e1c9b2a | refs/heads/master | 2021-01-15T15:49:18.842847 | 2016-11-03T08:59:34 | 2016-11-03T08:59:34 | 41,963,573 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | class Solution(object):
def solveSudoku(self, board):
def isValid(row, col):
v = board[row][col];
board[row][col] = 'K'
for i in range(9):
if board[i][col] == v:
return False
if board[row][i] == v:
return False
for i in range(3):
for j in range(3):
if board[row // 3 * 3 + i][col // 3 * 3 + j] == v:
return False
board[row][col] = v
return True
def dfs(row, col):
if row >= 9:
return True
nextRow = row + 1 if col >= 8 else row
nextCol = col + 1 if col <= 7 else 0
if board[row][col] == '.':
for k in '123456789':
board[row][col] = k
if isValid(row, col) and dfs(nextRow, nextCol):
return True
board[row][col] = '.'
return False
else:
return dfs(nextRow, nextCol)
dfs(0, 0)
| [
"me@lsich.com"
] | me@lsich.com |
0c6ddfa760002446e87d77184a860cdc741d2a36 | 1e9aa29eff7426ca5daa91172bf3eb8a4fce197a | /Django/bigdatamatica/manage.py | 16e6257568e557aba3254a1b9c1736878cfc9c88 | [] | no_license | bigdatamatic/Analytics | 8d7ba42d03bfd8cbaba3098d0bda013b8bde3f05 | c60afe873fc54783a2a04786cfa6de6772cba97c | refs/heads/master | 2023-04-30T19:56:47.853201 | 2019-09-18T05:39:52 | 2019-09-18T05:39:52 | 177,970,701 | 0 | 6 | null | 2023-04-21T20:31:30 | 2019-03-27T10:26:01 | Jupyter Notebook | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    # point Django at this project's settings module before importing anything
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bigdatamatica.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # hand the raw command line to Django's management dispatcher
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"tanya2911dixit@gmail.com"
] | tanya2911dixit@gmail.com |
ce370ccbf928822cc6b71d9384369bbb1ba2af0d | 5bdc4f88b0825593e5fd52477112fb8ff9cb9d6b | /sparse/util/util.py | aef978b3429068c0199c91b899a6bbef8493ba95 | [
"MIT"
] | permissive | ruiatelsevier/sparse-hyper | 5a59bf201dde74f3410d7371bc93e5b99b3c6425 | b38f537b149a9940b46bb90c82e0f8b1552c471e | refs/heads/master | 2020-07-30T23:38:45.476081 | 2019-09-23T12:28:57 | 2019-09-23T12:28:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,968 | py | import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Wedge, Polygon, Ellipse, Rectangle
from matplotlib.collections import PatchCollection
from matplotlib.axes import Axes
import os, errno, random, time, string, sys
import torch
from torch import nn
from torch import FloatTensor, LongTensor
from torch.autograd import Variable
from torch.utils.data import sampler, dataloader
from collections.abc import Iterable
import torchvision
from collections import OrderedDict
import subprocess
import numpy as np
import math
from enum import Enum
tics = []  # stack of start times pushed by tic() and popped by toc()
DEBUG = False
class Bias(Enum):
    """
    How the bias term of a sparse layer is represented.
    """
    # No bias is used.
    NONE = 1
    # The bias is returned as a single dense tensor of floats.
    DENSE = 2
    # The bias is returned in sparse format, in the same way as the weight matrix is.
    SPARSE = 3
def kl_loss(zmean, zlsig):
    """
    KL-divergence term of a VAE: one value per batch instance.

    :param zmean: (b, l) batch of latent means
    :param zlsig: (b, l) batch of latent log-variance vectors
    :return: (b,) vector of KL terms
    """
    b, l = zmean.size()
    per_dim = zlsig.exp() - zlsig + zmean.pow(2) - 1
    kl = 0.5 * per_dim.sum(dim=1)
    assert kl.size() == (b,)
    return kl
def kl_batch(batch):
    """
    Computes the KL divergence between the standard normal MVN and a diagonal
    MVN fitted to the batch.

    :param batch: (b, d) tensor of samples
    :return: scalar KL divergence
    """
    b, d = batch.size()
    mean = batch.mean(dim=0, keepdim=True)
    batch = batch - mean

    # per-dimension (diagonal) sample variance. The old version computed this
    # with bmm over raw .view()s of the centered batch, which reinterprets the
    # row-major memory layout rather than transposing, mixing values across
    # dimensions whenever b != d.
    diacov = batch.pow(2).sum(dim=0) / (b - 1)
    logvar = torch.log(diacov)

    return -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())
def vae_sample(zmean, zlsig, eps=None):
    """Reparametrized sample from a diagonal Gaussian with the given mean and
    log-variance; a fixed eps may be supplied for deterministic sampling."""
    b, l = zmean.size()
    if eps is None:
        dv = 'cuda' if zmean.is_cuda else 'cpu'
        eps = Variable(torch.randn(b, l, device=dv))
    return zmean + (zlsig * 0.5).exp() * eps
def tic():
    # push the current wall-clock time; paired with toc()
    tics.append(time.time())
def toc():
    # pop the most recent tic() and return the elapsed seconds,
    # or None when there is no matching tic()
    if len(tics)==0:
        return None
    else:
        return time.time()-tics.pop()
def norm(x):
    """
    Normalize a tensor to unit L2 norm (treating the first dim as batch dim).

    :param x: tensor of shape (b, ...)
    :return: x with each batch instance scaled to unit norm
    """
    b = x.size()[0]

    n = torch.norm(x.view(b, -1), p=2, dim=1)
    # give n one singleton dim per remaining dim of x so division broadcasts
    while len(n.size()) < len(x.size()):
        n = n.unsqueeze(1)
    # (removed a stray `n.expand_as(x)` whose result was discarded — it was a no-op)
    return x / n
def makedirs(directory):
    """
    Ensure that all directories in the given path exist.

    Existing directories are tolerated; any other OSError is re-raised.
    """
    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno == errno.EEXIST:
            return
        raise
def sample(collection, k, required):
    """
    Sample, without replacement, k elements from 'collection', ensuring that 'required' are always contained in the
    sample (but never twice).

    currently only works if collection and required contain only unique elements

    :param k:
    :param collection:
    :param required:
    :return:
    """
    if(k + len(required) > len(collection)):
        # use rejection sampling: start from the full collection and delete
        # random non-required elements until only k remain
        sample = list(collection)
        while len(sample) > k:
            ri = random.choice(range(len(sample)))
            if sample[ri] not in required:
                del(sample[ri])
        return sample
    else:
        required = set(required)
        # oversample by |required| so that, after removing any required
        # elements that happened to be drawn, enough non-required ones remain
        sample0 = set(random.sample(collection, k + len(required)))
        sample = list(sample0 - required)
        # trim the surplus, then append the required elements exactly once
        while len(sample) > k - len(required):
            ri = random.choice(range(len(sample)))
            del(sample[ri])
        sample.extend(required)
        return sample
#
# if __name__ == '__main__':
#
# print('.')
# print(sample(range(6), 5, [0, 1, 2]))
# print('.')
# print(sample(range(100), 6, [0, 1, 2]))
# print(sample(range(100), 6, [0, 1, 2]))
# print(sample(range(100), 6, [0, 1, 2]))
# print('.')
def sparsemult(use_cuda):
    # dispatch to the autograd Function matching the target device
    return SparseMultGPU.apply if use_cuda else SparseMultCPU.apply
class SparseMultCPU(torch.autograd.Function):
    """
    Sparse matrix multiplication with gradients over the value-vector

    Does not work with batch dim.
    """
    @staticmethod
    def forward(ctx, indices, values, size, vector):
        # print(type(size), size, list(size), intlist(size))
        # print(indices.size(), values.size(), torch.Size(intlist(size)))
        # build the sparse matrix and multiply it with the dense vector
        matrix = torch.sparse.FloatTensor(indices, values, torch.Size(intlist(size)))
        ctx.indices, ctx.matrix, ctx.vector = indices, matrix, vector
        return torch.mm(matrix, vector.unsqueeze(1))
    @staticmethod
    def backward(ctx, grad_output):
        grad_output = grad_output.data
        # -- this will break recursive autograd, but it's the only way to get grad over sparse matrices
        i_ixs = ctx.indices[0,:]
        j_ixs = ctx.indices[1,:]
        # gradient of values[n] for entry n at (i_n, j_n) is
        # grad_output[i_n] * vector[j_n]
        output_select = grad_output.view(-1)[i_ixs]
        vector_select = ctx.vector.view(-1)[j_ixs]
        grad_values = output_select * vector_select
        # gradient of the dense vector: transpose-multiply
        grad_vector = torch.mm(ctx.matrix.t(), grad_output).t()
        return None, Variable(grad_values), None, Variable(grad_vector)
class SparseMultGPU(torch.autograd.Function):
    """
    Sparse matrix multiplication with gradients over the value-vector

    Does not work with batch dim. GPU twin of SparseMultCPU.
    """
    @staticmethod
    def forward(ctx, indices, values, size, vector):
        # print(type(size), size, list(size), intlist(size))
        # build the sparse matrix on the GPU and multiply with the dense vector
        matrix = torch.cuda.sparse.FloatTensor(indices, values, torch.Size(intlist(size)))
        ctx.indices, ctx.matrix, ctx.vector = indices, matrix, vector
        return torch.mm(matrix, vector.unsqueeze(1))
    @staticmethod
    def backward(ctx, grad_output):
        grad_output = grad_output.data
        # -- this will break recursive autograd, but it's the only way to get grad over sparse matrices
        i_ixs = ctx.indices[0,:]
        j_ixs = ctx.indices[1,:]
        # gradient of values[n] for entry n at (i_n, j_n) is
        # grad_output[i_n] * vector[j_n]
        output_select = grad_output.view(-1)[i_ixs]
        vector_select = ctx.vector.view(-1)[j_ixs]
        grad_values = output_select * vector_select
        # gradient of the dense vector: transpose-multiply
        grad_vector = torch.mm(ctx.matrix.t(), grad_output).t()
        return None, Variable(grad_values), None, Variable(grad_vector)
def nvidia_smi():
    # capture the output of the system's nvidia-smi tool as bytes
    # (raises CalledProcessError/OSError when the tool is absent or fails)
    command = 'nvidia-smi'
    return subprocess.check_output(command, shell=True)
def orth_loss(batch_size, x_size, model, use_cuda):
    """
    Penalty measuring how much the model distorts pairwise inner products:
    draws two random unit-norm batches, and compares their inner products
    before and after applying the model.

    :param batch_size: number of random instances per batch
    :param x_size: shape of a single input (without batch dim)
    :param model: module mapping inputs to outputs
    :param use_cuda: move the random inputs to the GPU
    :return: (loss, x1o, x2o) — the scalar loss and the two random inputs
    """
    x_size = (batch_size,) + x_size
    x1o, x2o = torch.randn(x_size), torch.randn(x_size)
    # normalize to unit tensors
    x1o, x2o = norm(x1o), norm(x2o)
    if use_cuda:
        x1o, x2o = x1o.cuda(), x2o.cuda()
    x1o, x2o = Variable(x1o), Variable(x2o)
    y1 = model(x1o)
    y2 = model(x2o)
    # flatten everything so bmm computes one inner product per instance
    x1 = x1o.view(batch_size, 1, -1)
    x2 = x2o.view(batch_size, 1, -1)
    y1 = y1.view(batch_size, 1, -1)
    y2 = y2.view(batch_size, 1, -1)
    # NOTE(review): debug print left in — consider removing
    print('x1 v y1', x1[0, :], y1[0, ])
    xnorm = torch.bmm(x1, x2.transpose(1, 2))
    ynorm = torch.bmm(y1, y2.transpose(1, 2))
    # mean squared difference between input and output inner products
    loss = torch.sum(torch.pow((xnorm - ynorm), 2)) / batch_size
    return loss, x1o, x2o
def bmultinomial(mat, num_samples=1, replacement=False):
    """
    Take multinomial samples from a batch of matrices with multinomial parameters on the
    rows

    :param mat: (batches, rows, columns) tensor of row-wise probabilities
    :param num_samples: samples to draw per row
    :param replacement: sample with replacement
    :return: (samples reshaped to (batches, rows, num_samples), flat samples)
    """
    batches, rows, columns = mat.size()
    # collapse batch and row dims: multinomial samples per row of a 2-d matrix
    mat = mat.view(1, -1, columns).squeeze(0)
    sample = torch.multinomial(mat, num_samples, replacement)
    return sample.view(batches, rows, num_samples), sample
def bsoftmax(input):
    """
    Softmax over the last dimension of a (b, r, c) batch of matrices.

    :param input: (b, r, c) tensor
    :return: tensor of the same shape with rows summing to one
    """
    b, r, c = input.size()
    input = input.view(1, -1, c)
    # dim=1 was previously implicit; implicit dim choice is deprecated in
    # modern PyTorch, so make the (identical) choice explicit
    input = nn.functional.softmax(input.squeeze(0), dim=1).unsqueeze(0)
    return input.view(b, r, c)
def contains_nan(input):
    """Return True if any tensor in the (possibly nested) input contains a NaN.

    Non-tensor iterables are searched recursively; tensors are tested via the
    NaN != NaN property.
    """
    if isinstance(input, torch.Tensor) or not isinstance(input, Iterable):
        return bool((input != input).sum() > 0)
    for element in input:
        if contains_nan(element):
            return True
    return False
#
# if __name__ == '__main__':
#
#
# i = torch.LongTensor([[0, 16, 1],
# [2, 0, 2]])
# v = torch.FloatTensor([1, 1, 1])
#
# matrix = torch.sparse.FloatTensor(i, v, torch.Size((16, 16)))
def od(lst):
    """Wrap a list in an OrderedDict keyed by the stringified position."""
    return OrderedDict((str(i), elem) for i, elem in enumerate(lst))
class Lambda(nn.Module):
    """Wrap an arbitrary function as an nn.Module."""
    def __init__(self, lambd):
        super(Lambda, self).__init__()
        self.lambd = lambd
    def forward(self, x):
        return self.lambd(x)
class Debug(nn.Module):
    """Call a function for its side effect (e.g. printing) and pass the
    input through unchanged."""
    def __init__(self, lambd):
        super(Debug, self).__init__()
        self.lambd = lambd
    def forward(self, x):
        self.lambd(x)
        return x
class Flatten(nn.Module):
    """Flatten every dimension except the batch dimension."""
    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
def flatten(input):
    """Flatten every dimension of the tensor except the batch dimension."""
    batch = input.size(0)
    return input.view(batch, -1)
class NoActivation(nn.Module):
    """Identity module: returns its input unchanged (placeholder activation)."""
    def forward(self, input):
        return input
def prod(tuple):
    """Return the product of the elements of an iterable (1 for empty input)."""
    product = 1
    for factor in tuple:
        product = product * factor
    return product
def add_noise(input, std=0.1):
    """
    Return input plus zero-mean Gaussian noise with the given std.

    Not in-place, despite the old comment: the input tensor is left untouched.
    randn_like handles device and dtype, replacing the legacy
    torch.cuda.FloatTensor construction.

    :param input: float tensor
    :param std: standard deviation of the noise
    :return: noisy copy of the input
    """
    noise = torch.randn_like(input) * std
    return input + noise
def corrupt_(input, prop=0.3):
    """
    In-place: zeroes out (in expectation) a proportion `prop` of the entries.

    :param input: float tensor, modified in place
    :param prop: expected proportion of entries to zero
    :return: None (mutates input)
    """
    FT = torch.cuda.FloatTensor if input.is_cuda else torch.FloatTensor
    mask = FT(input.size())
    mask.uniform_()
    # entries with uniform value <= prop become 0 after sub+ceil, the rest 1
    mask.sub_(prop).ceil_()
    input.mul_(mask)
    # (removed an unused `t0 = time.time()` left over from profiling)
def rstring(n):
    """Random string of n characters drawn from uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(n))
def count_params(model):
    """
    Total number of scalar parameters in the model.

    Uses Tensor.numel(), which equals the product of the size dims, instead
    of depending on the module-level prod() helper.
    """
    total = 0
    for tensor in model.parameters():
        total += tensor.numel()
    return total
def logit(x):
    """
    Inverse of the sigmoid function: log(x / (1 - x)).

    :param x: python float or torch tensor with values strictly in (0, 1)
    :return: float for float input, tensor for tensor input
    """
    odds = x / (1 - x)
    if type(x) == float:
        return math.log(odds)
    return torch.log(odds)
def inv(i, mx=28):
    """
    Inverse of the sigmoid-based scaling function.

    :param i: index value in [0, mx-1]
    :param mx: Max value. Should broadcast
    :return: logit of the rescaled value
    """
    # map i from [0, mx-1] into a range strictly inside (0, 1) so that the
    # logit below stays finite at the endpoints
    sc = (i/(mx-1)) * 0.9999 + 0.00005
    return logit(sc)
def sigmoid(x):
    """Scalar logistic function 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + math.exp(-x))
class ChunkSampler(sampler.Sampler):
    """Samples elements sequentially from some offset, using a fixed permutation

    initial source: https://github.com/pytorch/vision/issues/168

    Arguments:
        start: offset into the shuffled index list where selection starts
        num: # of desired datapoints
        total: size of the dataset being indexed
        seed: seed for the fixed permutation
    """
    def __init__(self, start, num, total, seed = 0):
        self.start = start
        self.num = num

        self.random = random.Random(seed)

        order = list(range(total))
        self.random.shuffle(order)
        self.l = order

    def __iter__(self):
        chunk = self.l[self.start:self.start + self.num]
        return iter(chunk)

    def __len__(self):
        return self.num
def bmult(width, height, num_indices, batchsize, use_cuda):
    """
    Builds a (batchsize, num_indices, 2) LongTensor whose entry for batch
    instance b is the pair (b*height, b*width), repeated num_indices times.

    NOTE(review): presumably used to offset (row, col) index tuples when
    unrolling a batch of matrices along the diagonal — confirm against callers.

    :param width:
    :param height:
    :param num_indices:
    :param batchsize:
    :param use_cuda:
    :return: (batchsize, num_indices, 2) tensor of per-batch offsets
    """
    bmult = torch.cuda.LongTensor([height, width]) if use_cuda else LongTensor([height, width])
    m = torch.cuda.LongTensor(range(batchsize)) if use_cuda else LongTensor(range(batchsize))
    bmult = bmult.unsqueeze(0).unsqueeze(0)
    m = m.unsqueeze(1).unsqueeze(1)
    bmult = bmult.expand(batchsize, num_indices, 2)
    m = m.expand(batchsize, num_indices, 2)
    return m * bmult
def intlist(tensor):
    """
    Convert a tensor (1-dimensional after squeezing) to a list of ints.

    Plain lists are returned unchanged; float tensors are truncated toward
    zero by int().

    :param tensor: list or squeezable-to-1d tensor
    :return: list of ints
    """
    if type(tensor) is list:
        return tensor

    tensor = tensor.squeeze()
    assert len(tensor.size()) == 1

    # comprehension replaces the old preallocate-and-index loop
    return [int(v) for v in tensor]
def totensor(dataset, batch_size=512, shuffle=True, maxclass=None):
    """
    Takes a dataset and loads the whole thing into a tensor
    :param dataset: torch dataset yielding (input, label) pairs
    :param batch_size: loader batch size (does not affect the result shape)
    :param shuffle: shuffle the loader (the result follows loader order)
    :param maxclass: if given, keep only instances with label <= maxclass
    :return: tensor of size (len(dataset), ...) holding all loaded inputs
    """
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=2)
    index = 0
    for i, batch in enumerate(loader):
        batch, labels = batch[0], batch[1]
        if maxclass is not None:
            # NOTE(review): filtering shrinks batches but the result stays
            # sized len(dataset), leaving a zero-padded tail — confirm intended
            batch = batch[labels <= maxclass]
        if i == 0:
            # allocate the full result once the per-instance shape is known
            size = list(batch.size())
            size[0] = len(dataset)
            result = torch.zeros(*size)
        result[index:index+batch.size(0)] = batch
        index += batch.size(0)
    return result
class Reshape(nn.Module):
    """Reshape inputs to (batch,) + shape, keeping the batch dim intact."""
    def __init__(self, shape):
        super().__init__()
        self.shape = shape

    def forward(self, input):
        batch = input.size(0)
        return input.view((batch,) + self.shape)
def normalize(indices, values, size, row=True, cuda=None, epsilon=0.00000001):
    """
    Row or column normalizes a sparse matrix, defined by the given indices and values. Expects a batch dimension.

    :param indices: (b, k, 2) LongTensor of index tuples
    :param values: length-k vector of values
    :param size: dimensions of the matrix
    :param row: If true, we normalize the rows, otherwise the columns
    :return: The normalized values (the indices stay the same)
    """
    if cuda is None:
        cuda = indices.is_cuda
    dv = 'cuda' if cuda else 'cpu'
    spm = sparsemult(cuda)
    b, k, r = indices.size()
    assert r == 2
    # unroll the batch dimension
    # (think if this as putting all the matrices in the batch along the diagonal of one huge matrix)
    ran = torch.arange(b, device=dv).unsqueeze(1).expand(b, 2)
    ran = ran * torch.tensor(size, device=dv).unsqueeze(0).expand(b, 2)
    offset = ran.unsqueeze(1).expand(b, k, 2).contiguous().view(-1, 2)
    indices = indices.view(-1, 2)
    indices = indices + offset
    values = values.view(-1)
    # multiplying the (possibly transposed) matrix with a ones vector
    # produces the row (resp. column) sums
    if row:
        ones = torch.ones((b*size[1],), device=dv)
    else:
        ones = torch.ones((b*size[0],), device=dv)
        # transpose the matrix
        indices = torch.cat([indices[:, 1:2], indices[:, 0:1]], dim=1)
    sums = spm(indices.t(), values, torch.tensor(size, device=dv)*b, ones) # row/column sums
    # select the sums corresponding to each index
    div = torch.index_select(sums, 0, indices[:, 0]).squeeze() + epsilon
    return (values/div).view(b, k)
# if __name__ == "__main__":
# tind = torch.tensor([[[0, 0],[0, 1], [4, 4], [4, 3]], [[0, 1],[1, 0],[0, 2], [2, 0]]])
# tv = torch.tensor([[0.5, 0.5, 0.4, 0.6], [0.5, 1, 0.5, 1]])
#
# print(normalize(tind, tv, (5, 5)))
# print(normalize(tind, tv, (5, 5), row=False))
def duplicates(tuples):
    """
    Takes a tensor of integer tuples, and for each tuple that occurs multiple times marks all but one of the occurences
    as duplicate.

    :param tuples: A (batch, k, r)-tensor of containing a batch of k r-dimensional integer tuples
    :return: A size (batch, k) byte tensor. When used as a mask, this masks out all duplicates.
    """
    dv = 'cuda' if tuples.is_cuda else 'cpu'
    b, k, r = tuples.size()
    # collapse each tuple to a single unique integer, then find repeats by
    # sorting and comparing neighbours
    unique = nunique(tuples)
    sorted, sort_idx = torch.sort(unique, dim=1)
    _, unsort_idx = torch.sort(sort_idx, dim=1)
    mask = sorted[:, 1:] == sorted[:, :-1]
    # the first occurrence in sorted order is never marked as a duplicate
    mask = torch.cat([torch.zeros(b, 1, dtype=torch.bool, device=dv), mask], dim=1) # changed type unit to bool
    # undo the sort so the mask lines up with the original tuple order
    return torch.gather(mask, 1, unsort_idx)
def nduplicates(tuples):
    """
    Takes a tensor of integer tuples, and for each tuple that occurs multiple times marks all
    but one of the occurrences as duplicate.

    :param tuples: A (..., k, r)-tensor of containing a batch of k r-dimensional integer tuples
    :return: A size (..., k) byte tensor. When used as a mask, this masks out all duplicates.
    """
    # flatten any leading dims into one batch dim, defer to duplicates(),
    # then restore the original leading shape
    init, k, r = tuples.size()[:-2], tuples.size()[-2], tuples.size()[-1]
    tuples = tuples.view(-1, k, r)
    mask = duplicates(tuples)
    return mask.view(*init, k)
def scatter_imgs(latents, images, size=None, ax=None, color=None, alpha=1.0):
    """
    Scatter-plot a batch of images at their 2-d latent coordinates.

    :param latents: (n, 2+) array; the first two columns are plot coordinates
    :param images: (n, h, w, c) array of images to draw at those points
    :param size: width of each thumbnail in data units (auto-chosen if None)
    :param ax: axes to draw on (a fresh 16x16 figure is created if None)
    :param color: RGBA tint applied to the (inverted) images and the markers
    :param alpha: image transparency
    :return: (ax, size) for chaining further calls onto the same axes
    """
    assert(latents.shape[0] == images.shape[0])
    if ax is None:
        fig = plt.figure(figsize=(16, 16))
        ax = fig.add_subplot(111)
        ax.set_xlim(0, 1e-7)
        ax.set_ylim(0, 1e-7)
    if color is None:
        color = np.asarray([0.0, 0.0, 0.0, 1.0])
    else:
        color = np.asarray(color)
    # print(color)
    # grow the axes limits to cover all the points being added
    xmn, ymn = np.min(latents, axis=0)
    xmx, ymx = np.max(latents, axis=0)
    oxmn, oxmx = ax.get_xlim()
    oymn, oymx = ax.get_ylim()
    ax.set_xlim(min(oxmn, xmn), max(oxmx, xmx))
    ax.set_ylim(min(oymn, ymn), max(oymx, ymx))
    # print(ax.get_xlim(), ax.get_ylim())
    if size is None:
        # heuristic: half the spacing of a uniform grid of n points
        size = (xmx - xmn)/np.sqrt(latents.shape[0])
        size *= 0.5
    n, h, w, c = images.shape
    aspect = h/w
    # tint: scale the image by the complement of the color, then invert
    images = images * (1.0 - color[:3])
    images = 1.0 - images
    for i in range(n):
        x, y = latents[i, 0:2]
        im = images[i, :]
        ax.imshow(im, extent=(x, x + size, y, y + size*aspect), alpha=alpha)
    ax.scatter(latents[:, 0], latents[:, 1], linewidth=0, s=2, color=color)
    return ax, size
def linmoid(x, inf_in, up):
    """
    Squeeze the given input into the range (0, up). All points are translated linearly, except those above and below the
    inflection points (on the input range), which are squeezed through a sigmoid function.

    :param x: input tensor
    :param inf_in: (low, high) inflection points on the input range
    :param up: upper bound of the output range
    :return: transformed tensor
    """
    ilow = x < inf_in[0]
    ihigh = x > inf_in[1]
    # linear transform
    s = (up - 1)/(inf_in[1] - inf_in[0])
    y = x * s + 0.5 - inf_in[0] * s
    # sigmoid tails, scaled so their slope matches the linear part at the
    # inflection points
    scale = s * 4
    y[ilow] = torch.sigmoid((x[ilow] - inf_in[0])*scale)
    y[ihigh] = torch.sigmoid((x[ihigh] - inf_in[1])*scale) - 0.5 + (up - 0.5)
    return y
# if __name__ == "__main__":
# x = torch.linspace(-0.5, 1.5, 1000)
# y = linmoid(x, inf_in=(0.25, 0.75), up=3)
#
# plt.scatter(x.numpy(), y.numpy(), s=2)
# plt.ylim([0, 3])
#
# clean()
# plt.savefig('test_linmoid.png')
def split(offset, depth):
    """
    Given binary up/down choices per position, compute the permutation that
    routes each element of a bucketed sequence into the lower or upper half
    of its bucket (one routing step of a butterfly/sorting network).

    :param offset: (b, n, s) tensor of 0/1 choices (0 = down, 1 = up)
    :param depth: current depth; the input is treated as 2**depth buckets
    :return: (b, n, s) LongTensor of target indices
    """
    dv = 'cuda' if offset.is_cuda else 'cpu'
    b, n, s = offset.size()
    bn = b*n
    offset = offset.view(bn, s)
    numbuckets = 2 ** depth # number of buckets in the input
    bsize = s // numbuckets # size of the output buckets
    lo = torch.arange(numbuckets, device=dv, dtype=torch.long) * bsize # minimum index of each downbucket
    lo = lo[None, :, None].expand(bn, numbuckets, bsize).contiguous().view(bn, -1)
    hi = torch.arange(numbuckets, device=dv, dtype=torch.long) * bsize + bsize//2 # minimum index of each upbucket
    hi = hi[None, :, None].expand(bn, numbuckets, bsize).contiguous().view(bn, -1)
    upchoices = offset.long()
    downchoices = 1 - upchoices
    # running counts of up/down choices within each bucket give each element
    # its rank inside its target half
    numupchoices = upchoices.view(bn, numbuckets, bsize).cumsum(dim=2).view(bn, -1)
    numdownchoices = downchoices.view(bn, numbuckets, bsize).cumsum(dim=2).view(bn, -1)
    result = torch.zeros(bn, s, dtype=torch.long, device=dv)
    # print(result.dtype, upchoices.dtype, hi.dtype, numupchoices.dtype)
    result = result + upchoices * (hi + numupchoices - 1)
    result = result + downchoices * (lo + numdownchoices - 1)
    # If offset is not arranged correctly (equal numbers of ups and downs per bucket)
    # we get a non-permutation. This is fine, but we must clamp the result to make sure the
    # indices are still legal
    result = result.clamp(0, s-1)
    return result.view(b, n, s)
def sample_offsets(batch, num, size, depth, cuda=False):
    """
    Sample random up/down offset patterns with equal numbers of ups and downs
    per bucket (so that split() yields a proper permutation).

    :param batch: batch size
    :param num: patterns per batch instance
    :param size: sequence length
    :param depth: the sequence is treated as 2**depth buckets
    :param cuda: place the result on the GPU
    :return: (batch, num, size) uint8 tensor of 0/1 offsets
    """
    dv = 'cuda' if cuda else 'cpu'
    numbuckets = 2 ** depth # number of buckets in the input
    bsize = size // numbuckets # size of the input buckets
    # start from a balanced [0,1] pattern repeated bsize//2 times per bucket
    ordered = torch.tensor([0,1], dtype=torch.uint8, device=dv)[None, None, None, :, None].expand(batch, num, numbuckets, 2, bsize // 2)
    ordered = ordered.contiguous().view(batch, num, numbuckets, bsize)
    # shuffle the buckets
    ordered = ordered.view(batch * num * numbuckets, bsize)
    ordered = shuffle_rows(ordered)
    ordered = ordered.view(batch, num, numbuckets, bsize)
    return ordered.contiguous().view(batch, num, -1)
# lazily-built cache of random permutations used by shuffle_rows(),
# keyed by row length
shufflecache = {}
cache_size = 500_000
def shuffle_rows(x):
    """
    Shuffle each row of a (r, c) tensor independently, using a precomputed
    cache of cache_size random permutations per row length.
    """
    r, c = x.size()
    if c not in shufflecache:
        # first call for this row length: fill the permutation cache
        cached = torch.zeros(cache_size, c, dtype=torch.long, device='cpu')
        for i in range(cache_size):
            cached[i, :] = torch.randperm(c)
        shufflecache[c] = cached
    cache = shufflecache[c]
    rows = random.sample(range(cache_size), k=r)
    sample = cache[rows, :]
    if x.is_cuda:
        sample = sample.cuda()
    out = x.gather(dim=1, index=sample)
    # NOTE(review): gather of a cuda tensor is already on the gpu, so this
    # second .cuda() is a no-op
    if x.is_cuda:
        out = out.cuda()
    return out
# def bunique(tuples):
# """
# Like unique/2, but for batched tuples.
#
# :param tuples: A (b, k, d) tensor of a batch of (k, d) matrices containing d dimensional integer tuples
# :return: A (b, k, d, 1) tensor
# """
#
# b, k, d = tuples.size()
# tuples = tuples.view(b * k, d)
#
# un = unique(tuples)
#
# return un.view(b, k)
def nunique(tuples):
    """
    :param tuples: A (..., d) tensor containing d dimensional integer tuples
    :return: A (...) tensor containing a unique single integer for every integer tuple
        (note: the trailing singleton dim of unique() is folded away)
    """
    # flatten the leading dims, map each tuple to one integer, restore shape
    init, d = tuples.size()[:-1], tuples.size()[-1]
    tuples = tuples.view(-1, d)
    un = unique(tuples)
    return un.view(*init)
def unique(tuples):
    """
    Takes a (b, s)-matrix and returns a (b, 1)-matrix with a unique integer for each row.

    Uses the cantor tuple function: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function

    :param tuples: A matrix of size (b, s)
    :return: A matrix of size (b, 1).
    """
    b, s = tuples.size()

    if s == 1:
        return tuples
    if s == 2:
        k1, k2 = tuples[:, 0], tuples[:, 1]
        # floor division keeps the result an integer tensor under modern
        # PyTorch, where `/` on int tensors returns floats; the product
        # (k1+k2)(k1+k2+1) is always even, so nothing is lost
        res = ((k1 + k2) * (k1 + k2 + 1)) // 2 + k2
        return res[:, None]

    # pair off the tail recursively, then pair the head with the result
    sub = unique(tuples[:, 1:])
    res = torch.cat([tuples[:, 0:1], sub], dim=1)
    return unique(res)
def xent(out, tgt):
    """
    Binary cross-entropy, implemented manually so that gradients flow to
    both arguments. Inputs are clamped to [0, 1]; a small epsilon keeps the
    logs finite at the endpoints.
    """
    assert out.size() == tgt.size()

    out, tgt = out.clamp(0, 1), tgt.clamp(0, 1)

    positive = tgt * (out + 1e-10).log()
    negative = (1.0 - tgt) * (1.0 - out + 1e-10).log()

    return -positive - negative
if __name__ == '__main__':
    # ad-hoc smoke test for split(): route one 4-wide offset pattern at depth 0
    #
    # size = 8
    # offset = torch.tensor([1, 1, 0, 1, 1, 0, 0, 0]).byte()
    # offset = torch.tensor([[0, 0, 1, 0, 1, 1, 1, 0], [0, 1, 0, 1, 0, 1, 1, 0]]).byte()
    offset = torch.tensor([[0, 1, 1, 0]]).byte()
    indices = split(offset[:, None, :], 0)
    print(indices)
#
# # print(sample_offsets(3, 4, 16, 3))
# #
# # print(unique(torch.tensor( [[1,2,3,4],[4,3,2,1],[1,2,3,4]] )))
# #
# #
# indices = torch.tensor([[[0, 0], [1, 1]], [[0, 1], [1, 0]]])
# values = torch.tensor([[1.0, 1.0], [1.0, 1.0]])
# inputs = torch.tensor([[[1.0, 2.0, 2.0, 2.0, 2.0], [3.0, 4.0, 2.0, 2.0, 2.0]], [[1.0, 2.0, 2.0, 2.0, 2.0], [3.0, 4.0, 4.0, 4.0, 4.0]]])
#
# print(inputs.size())
#
# print(batchmm(indices, values, (2,2), inputs))
def wrapmod(x, mod):
    """
    fmod with negative inputs shifted up by one period, so results land in
    (0, mod]. Note: an exactly-zero remainder of a negative input maps to
    mod rather than 0 (follows from adding mod after fmod).
    """
    below_zero = x < 0.0
    wrapped = x.fmod(mod)
    wrapped[below_zero] = wrapped[below_zero] + mod
    return wrapped
def interpolation_grid(size=(10, 10)):
    """
    Returns an (h, v, 4) grid, where each point produces a weighted combination of the
    four corner points. Taking the convex combination of tensors using these factors, will
    result in a linear interpolation grid.

    Corner points are enumerated in clockwise fashion, starting top left.

    :param size: h, v
    :return:
    """
    h, v = size
    a, c = torch.meshgrid((torch.linspace(0, 1, h), torch.linspace(0, 1, v)))
    a, c = a[:, :, None], c[:, :, None]
    na, nc = 1.0 - a, 1.0 - c

    # weights for corners (top-left, top-right, bottom-right, bottom-left)
    return torch.cat([na * nc, na * c, a * c, a * nc], dim=2)
def unsqueezen(input, n):
    """
    Prepend n singleton dimensions to the start of the tensor's shape.

    :param input: tensor
    :param n: number of leading singleton dims to add
    :return: view of input with shape (1,)*n + input.shape
    """
    for _ in range(n):
        input = input[None]
    return input
class CConv2d(nn.Module):
    """
    Implementation of the CoordConv layer from https://arxiv.org/abs/1807.03247

    A regular Conv2d preceded by two extra input channels carrying the
    normalized (h, w) coordinates of each pixel.
    """
    def __init__(self, in_channels, out_channels, kernel_size, res=None, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super().__init__()

        # layer to which we'll defer the actual convolution
        self.master = nn.Conv2d(in_channels + 2, out_channels, kernel_size, stride, padding, dilation, groups, bias)

        if res is None:
            self.coords = None
        else:
            # fix: register_buffer takes (name, tensor); the arguments were
            # reversed, which raised a TypeError for any non-None res
            self.register_buffer('coords', coordinates(res))

    def forward(self, x):
        b, c, h, w = x.size()
        cuda = x.is_cuda

        # get the coordinate channels (precomputed when res was given)
        if self.coords is None:
            coords = coordinates(x.size()[-2:], cuda)
        else:
            coords = self.coords

        bcoords = coords[None, :, :, :].expand(b, 2, h, w)

        x = torch.cat([bcoords, x], dim=1)

        return self.master(x)
def coordinates(res, cuda=False):
    """
    Compute the two coordinate channels for a given resolution: channel 0
    holds the row position and channel 1 the column position, each scaled
    to [0, 1].

    :param res: (h, w) resolution
    :param cuda: place the result on the GPU when True
    :return: float tensor of shape (2, h, w)
    """
    device = 'cuda' if cuda else 'cpu'
    h, w = res
    row_pos = torch.arange(h, device=device, dtype=torch.float) / (h - 1)
    col_pos = torch.arange(w, device=device, dtype=torch.float) / (w - 1)
    row_map = row_pos[None, :, None].expand(1, h, w)
    col_map = col_pos[None, None, :].expand(1, h, w)
    return torch.cat([row_map, col_map], dim=0).to(torch.float)
def d(tensor=None):
    """
    Returns a device string either for the best available device,
    or for the device corresponding to the argument.

    :param tensor: optional tensor whose device should be reported
    :return: 'cuda' or 'cpu'
    """
    use_cuda = torch.cuda.is_available() if tensor is None else tensor.is_cuda
    return 'cuda' if use_cuda else 'cpu'
def here(subpath=None):
    """
    :return: the absolute path in which the package resides (the directory
        containing the 'sparse' dir), optionally extended with `subpath`.
    """
    parts = [os.path.dirname(__file__), '../..']
    if subpath is not None:
        parts.append(subpath)
    return os.path.abspath(os.path.join(*parts))
def flip(x):
    """
    Fold matrix indices onto or below the diagonal: any pair (i, j) with
    i < j is swapped to (j, i). Indices may be continuous or discrete and
    can be negative. The last dimension must be 2; all preceding dimensions
    are treated as batch dimensions.

    :param x: the batch of index pairs to flip
    :return: tensor of the same shape with every pair ordered first >= second
    """
    assert x.size(-1) == 2

    batch_dims = x.size()[:-1]
    flat = x.view(-1, 2)

    swap = flat[:, 0] < flat[:, 1]
    result = flat.clone()
    # reverse the two columns only on the rows that lie above the diagonal
    result[swap] = flat[swap].flip(-1)

    return result.view(*(batch_dims + (2,)))
"git@peterbloem.nl"
] | git@peterbloem.nl |
131185faeaed07f53122ddc91c140c97e2f749e0 | 59c29eda5bc0aa95ffd902bc8f2d1ff2fa936344 | /manage.py | c77b033db307bbdcb193578141b28e18bafeb460 | [] | no_license | jhashuva/aliphotography | d1f24e960c61722af2b2a17090c2c66dfde97b1c | f7815f2cd15674cc948ccdc556875f67b9228641 | refs/heads/master | 2020-05-16T03:43:49.505893 | 2019-04-25T12:51:29 | 2019-04-25T12:51:29 | 178,548,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility for administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aliphotography.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"joshua537.nit@gmail.com"
] | joshua537.nit@gmail.com |
d3f360ddaafa4fd2662763c282ca04e6e5931be0 | cefdc52acb3460a1cb5bcb081962899e3bdce2bd | /nomadgram/users/models.py | 8b3c9715c5f9202944d38dcc0e792651a4d333a5 | [
"MIT"
] | permissive | jungyongho/jyhgram | b0a66b04f623dd79944bbd12721729f170eabd62 | cd473b39bcbc041bfaa80e58154cc5f19ecbe8b9 | refs/heads/master | 2020-03-23T04:10:43.563896 | 2018-07-25T09:01:25 | 2018-07-25T09:01:25 | 141,068,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.db import models
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser with profile fields."""
    # Choice values are stored in the DB; labels are shown in forms/admin.
    GENDER_CHOICES = (
        ('male','Male'),
        ('female','Female'),
        ('not-specified', 'Not specified')
    )

    # First Name and Last Name do not cover name patterns
    # around the globe, so a single free-form name field is used instead.
    name = models.CharField(_("Name of User"), blank=True, max_length=255)
    website = models.URLField(null=True)
    bio = models.TextField(null=True)
    phone = models.CharField(max_length=140, null=True)
    gender = models.CharField(max_length=80, choices=GENDER_CHOICES, null=True)

    def get_absolute_url(self):
        """Canonical URL of this user's detail page (users:detail by username)."""
        return reverse("users:detail", kwargs={"username": self.username})
| [
"jyh8811@greencar.co.kr"
] | jyh8811@greencar.co.kr |
2f9077c54ad18d52e47052d9996bbb4f02cf8bde | 19a3177f0018b6a4c28be339860223133e26b892 | /setstartingroom.py | 0e993017b5e7c0bc9992cbc6d96de2e33cf8274e | [] | no_license | jddaly2011/adventure | 994474675ffa6c3d5782ae4739c0b917d5681a6a | d9462a9896271dc0553d4fe4eae22b7ea29a3d1c | refs/heads/master | 2021-01-10T08:59:55.133437 | 2016-01-03T03:04:30 | 2016-01-03T03:04:30 | 46,602,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | #!/usr/bin/python
import os
import re
import sys
# Python 2 script: rewrites the starting-room check inside world.py so that
# `if tile_name == ...:` points at the room name given on the command line.
argnum = len(sys.argv)
if argnum < 2:
    print "Usage: replacetext.py startingroom"
    sys.exit(1)
# Regex matched against each line; any existing room comparison is replaced.
oldtext = "if tile_name ==.*:"
newtext = "if tile_name == '{}':".format(sys.argv[1])
# NOTE(review): the loop runs once per extra CLI argument but always rewrites
# the same fixed file, so additional arguments just repeat the work (and each
# pass appends another trailing newline) - a single pass looks intended.
for x in range(1, argnum):
    filename = "world.py"
    f = open(filename, "r")
    currfile = f.read()
    f.close()
    newfile =[]
    currfile = currfile.split("\n")
    # substitute the room name line by line, then write the file back out
    for line in currfile:
        line = re.sub(oldtext, newtext, line)
        newfile.append(line)
    f = open(filename, "w")
    for line in newfile:
        f.write(line)
        f.write("\n")
    f.close()
| [
"jddaly@gmail.com"
] | jddaly@gmail.com |
55fd503f8e7c64733b71f57a6c82e04276d14313 | 4277900926074ee4d30337a1a48771d8f8cb04e0 | /nltk_utils.py | 60ae0d52aa7c68259d36aae41066717ae535be4c | [] | no_license | A-Haythem/PCD | dd21fd859eb71806cf2ee1d4874d7bfaf67a273c | 4335b21fd361106df5a52446982892210c338bcf | refs/heads/main | 2023-03-25T04:45:00.610362 | 2021-03-22T21:21:08 | 2021-03-22T21:21:08 | 350,491,922 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | import nltk
import numpy as np
#nltk.download('punkt')
from nltk.stem.porter import PorterStemmer
stemmer=PorterStemmer()
# Tokenization splits the words of a sentence into a list.
def tokenize(sentence):
    """Split a sentence into word/punctuation tokens using NLTK."""
    return nltk.word_tokenize(sentence)
# Stemming strips word endings such as "ing", "er", etc.
def stem(word):
    """Lower-case the word and reduce it to its Porter stem."""
    return stemmer.stem(word.lower())
# The bag-of-words technique, for example:
"""
phrase = #["hello", "how", "are", "you"]
liste_des_mots = #["hi", "hello", "I", "you", "bye", "thank", "cool"]
bag_of_words = [ 0 , 1 , 0 , 1 , 0 , 0 , 0]
"""
def bag_of_words(tokenized_sentence,words):
    """Return a float32 0/1 vector marking which vocabulary words occur
    (after stemming) in the tokenized sentence."""
    # Apply stemming to every word of the sentence
    sentence_words = [stem(word) for word in tokenized_sentence]
    # Initialise the bag with zeros, one slot per vocabulary word
    bag = np.zeros(len(words), dtype=np.float32)
    for i, mot in enumerate(words):
        if mot in sentence_words:
            bag[i] = 1
    return bag
| [
"noreply@github.com"
] | A-Haythem.noreply@github.com |
40103f9c50be40567a932b7e317bc3efce5720da | 7fa7e018375fc7667bc9bcc68e17e3d9bdb9287f | /Python/Python-随机车牌号/main.py | 29851bd9f27a18e417bd78f955c3762f3a2c4d0b | [] | no_license | Coder-Liuu/Lesson_design_for_Freshmen | ffb22e7f2fed9d3f8835538543fdd86c248b6383 | 99cc7d24a67802d3af50c123b30ed3412ae7a554 | refs/heads/main | 2023-01-24T13:12:19.612684 | 2020-12-07T07:32:12 | 2020-12-07T07:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | import random
def pureNumber():
    """Print and return a random 5-digit (zero-padded) plate number string."""
    print("Ohhhhhhhhhh!")
    # A 6-digit random number with its leading digit dropped yields a
    # uniformly distributed, zero-padded 5-character digit string.
    plate = str(random.randint(100000,999999))[1:]
    print("这是你随机产生的车牌号!")
    print("------>",plate)
    return plate
def HanziNumber():
    """Print and return a random 5-character plate of uppercase letters/digits.

    Fixes two defects in the original: a 3-digit `plateNumber` string was
    computed and never used, and each character was picked by drawing a
    random index and then calling random.choice on the resulting single-char
    string (a redundant double draw). One random.choice per character is
    equivalent and clearer.
    """
    print("Ohhhhhhhhhh!")
    chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
    plate = ''.join(random.choice(chars) for _ in range(5))
    print("这是你随机产生的车牌号!")
    print("------>",plate)
    return plate
def main():
    """Interactive loop: the user gets three chances to pick a plate number."""
    print("------欢迎来到车牌号挑选公司-----")
    print("接下来你有三次挑选的机会!")
    print("三次不选,你将没有车牌号.")
    for i in range(3):
        print("---------------%d-----------------"%(i+1))
        print("1.纯数字")
        print("2.数字字母相结合")
        # NOTE(review): int() raises ValueError on non-numeric input, and if
        # the first choice is invalid but the user then answers "Yes",
        # car_number is unbound (NameError) - worth guarding.
        choose = int(input("你要想的车牌的类型:"))
        if(choose == 1):
            car_number = pureNumber()
        elif(choose == 2):
            car_number = HanziNumber()
        else:
            print("输入错误,你浪费了一次机会")
        ans = input("你是否想要这个车牌号?(回答Yes 或者 No)")
        if(ans == "Yes"):
            # plate accepted: announce it and end the session
            print("你最终的车牌号是:",car_number)
            print("-----------欢迎下次在来----------")
            return
        print("---------------------------------\n\n\n")
    # all three chances used without accepting a plate
    print("太贪心,三次车牌号你都没有选,你没有车牌号了")
    print("-----------欢迎下次在来----------")
if __name__ == "__main__":
    main()
| [
"2471001205@qq.com"
] | 2471001205@qq.com |
ffa79d54ca7d57287dacbec4dc59b66cbecabd2b | abfdbe69b7e1f775f25bdc62861609fe2d0e5aed | /script/detect_secrets | 46bbed83b40bdb5a75bf766a362b408775d055f8 | [
"MIT"
] | permissive | DiaaDiab/atst | 0dfbdf2c89148774e42ff9c905f52592b5d1d6a9 | 016f47f9794f6e8251c033a618d995840b2dd0d5 | refs/heads/staging | 2022-06-10T22:31:19.403400 | 2020-01-07T14:59:33 | 2020-01-07T14:59:33 | 232,350,856 | 0 | 0 | MIT | 2022-05-13T16:48:58 | 2020-01-07T15:11:37 | null | UTF-8 | Python | false | false | 1,132 | #! .venv/bin/python
import subprocess
import sys
from detect_secrets.pre_commit_hook import main as find_secrets
TRACKED_CHANGES = ["git", "diff", "HEAD", "--name-only"]  # tracked files changed vs HEAD
STAGED_CHANGES = ["git", "diff", "--cached", "--name-only"]  # files staged in the index
UNTRACKED_CHANGES = ["git", "ls-files", "--others", "--exclude-standard"]  # new, non-ignored files
def git_file_list(cmd):
    """Run a git command and return its stdout as a list of decoded names."""
    comproc = subprocess.run(cmd, capture_output=True)
    return [f.decode() for f in comproc.stdout.split()]
def git_staged_files():
    """File names currently staged in the git index."""
    return git_file_list(STAGED_CHANGES)
def git_all_files():
    """All changed tracked files plus untracked (non-ignored) files."""
    return git_file_list(TRACKED_CHANGES) + git_file_list(UNTRACKED_CHANGES)
def main(arg):
    """
    Scan files for secrets against the .secrets.baseline file.

    If `arg` is "staged", this will only check files that have been
    staged to the git index. Otherwise, it will check staged and
    unstaged files. Returns the detect-secrets exit status.
    """
    if arg == "staged":
        files = git_staged_files()
    else:
        files = git_all_files()
    return find_secrets(["--baseline", ".secrets.baseline"] + files)
if __name__ == "__main__":
    arg = sys.argv[1] if len(sys.argv) > 1 else None
    # Bug fix: the original printed the completion message *before* main()
    # had executed, which was misleading. Run the scan first.
    status = main(arg)
    print("Finished scanning for secrets")
    sys.exit(status)
| [
"dan-ctr@friends.dds.mil"
] | dan-ctr@friends.dds.mil | |
043f39369ddb5869a0f589beb403b66748b3f3a0 | ceedf463269728f0257030671917f9fc979c720a | /popula.py | 8a2b370eed09596f677c02302927860324171dcd | [] | no_license | weltonvaz/Zumbis | 4a8bc213b2d7380b0ef4f3672c6a36b45f3f5c0a | da760e9f258c03660a2eae1439190ce36dee716d | refs/heads/master | 2021-01-19T08:33:58.430648 | 2015-04-17T11:59:11 | 2015-04-17T11:59:11 | 32,888,135 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Population growth problem: count the years until country A (smaller but
# faster-growing) overtakes country B in number of inhabitants.
# Developed by Evaldo Junior (InFog)
# http://evaldojunior.com.br/blog
popA, popB = 80000, 200000
cresA, cresB = 0.03, 0.015  # annual growth rates: 3% and 1.5%
anos = 0
while popA < popB:
    popA += popA * cresA
    popB += popB * cresB
    anos += 1
print("Após %i anos o país A ultrapassou o país B em número de habitantes." % anos)
print("País A: %.0f" % popA)
print("País B: %.0f" % popB)
| [
"weltonvaz@gmail.com"
] | weltonvaz@gmail.com |
fa5f288c62929ab06fe3c9079481f9b86c98c7a9 | f5b0e9c66944a7d9488a724ed50b60a49e39d2dd | /flaskblog/models.py | 56c03cf0e842ecd5a250739838eb3e5a71722a48 | [] | no_license | shah-raj/blog | 9c6f986206fe37fbac979d444fd808aff580e8b2 | 152503c1ce0313682fe00c518a5f1c94e0414ea2 | refs/heads/master | 2023-03-07T02:08:48.517202 | 2021-02-20T15:28:54 | 2021-02-20T15:28:54 | 334,650,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | from datetime import datetime
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flaskblog import db, login_manager, app
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a User by the (string) id in the session."""
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Account model: credentials, profile image and authored posts."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # file name under the static profile-pics directory
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    # bcrypt hash (60 chars), never the plaintext password
    password = db.Column(db.String(60), nullable=False)
    posts = db.relationship('Post', backref='author', lazy=True)

    def get_reset_token(self, expires_sec=1800):
        """Return a signed, time-limited (default 30 min) password-reset token."""
        s = Serializer(app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Return the User encoded in a reset token, or None if the token is
        invalid or expired."""
        s = Serializer(app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers the itsdangerous
        # signature/expiry errors and the KeyError for a malformed payload.
        except Exception:
            return None
        return User.query.get(user_id)

    def __repr__(self):
        return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
    """Blog post written by a User (see the `author` backref)."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    # default is the callable itself, evaluated at insert time
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        return f"Post('{self.title}', '{self.date_posted}')"
    def as_dict(self):
        """Minimal dict form (title only) for JSON responses."""
        return {'title': self.title}
| [
"shahraj0299@gmail.com"
] | shahraj0299@gmail.com |
2babff4e1cb90e9c3ec7ae793df1472ec18ce5ab | a8bdd32181e2125e1b1e9cbc02e3f5a37c78593e | /scrape_email.py | d4f6d87cbf5efb9b2e11bd9f6e1fb0806c78f81b | [] | no_license | cyberjon/app-a-day | 4740c610040e6788c728006196cac0f1af1d67d6 | 34d597c8e0267e61e6b9b60d44b38f25a2bed0a7 | refs/heads/master | 2022-03-29T11:57:04.001964 | 2020-01-19T14:11:32 | 2020-01-19T14:11:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | import requests
from bs4 import BeautifulSoup
import csv
import PySimpleGUI as sg
# Minimal GUI: a single URL input with Submit/Cancel buttons.
layout=[
    [sg.Text('URL', size=(15, 1)), sg.InputText('')],
    [sg.Submit(), sg.Cancel()]
]
window = sg.Window('Simple web scraper').Layout(layout)
button, values = window.Read()
url = values[0]
# Fetch and parse the page (no error handling: a bad URL raises here).
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
current_link = ''
# Write every mailto: anchor to email.csv, one address per row.
with open('email.csv', mode='w') as link_file:
    link_writer= csv.writer(link_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    link_writer.writerow(['E-mail'])
    for link in soup.find_all('a'):
        current_link = link.get('href')
        # NOTE(review): anchors without an href yield None here, making the
        # `in` test raise TypeError; the [7:] slice also assumes 'mailto:' is
        # a prefix rather than merely a substring - both worth guarding.
        if 'mailto:' in current_link:
            link_writer.writerow([current_link[7:]])
| [
"mach1982@gmail.com"
] | mach1982@gmail.com |
8eb83ecbc4bf55398903b22e89c732d259984c5b | 221c2fc5082f9e5d637d7d79ad81a079dbb2f2c5 | /OAS/first/views.py | 5c97a1065df6429a5868cea7e0c22abddb44b334 | [] | no_license | KlausMichael0/The-Online-Answer-System | d38c55d77c663e36c06145919a252328063e4187 | f6a5dd819e017e014ca055f10e76a1aa54e72b3f | refs/heads/master | 2022-12-18T08:20:49.452818 | 2019-09-06T14:17:34 | 2019-09-06T14:17:34 | 191,392,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,967 | py | from itertools import chain
from django.contrib.auth import authenticate#认证
from django.shortcuts import render,render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from . import models
from django.contrib import auth #登录/注销模块
from django_comments import models as comment_models #评论模块
from django.contrib.contenttypes.models import ContentType #conten_type为外键引用 增加reply方法时候需要转换
from django.contrib import messages
from django.db.models import Q
def index(request):
    """Front page: all questions plus sidebar data (categories, user, badge)."""
    search_on = 1  # flag read by the template to show the search box
    question_list_1 = models.Question.objects.all()
    category = models.Category.objects.all()
    # The visitor may be anonymous, so the profile lookup must tolerate a
    # missing OAS_User row.
    a = models.OAS_User.objects.filter(id=request.user.id)
    if a.exists():
        user = models.OAS_User.objects.get(id=request.user.id)
    else:
        user = None
    # Unanswered questions addressed to the current user (badge counter).
    count = models.Question.objects.filter(Q(ask_to=request.user.id)&Q(finished=0))
    numbers = len(count)
    # reversed so the most recently created entries come first
    # (assumes default pk ordering - TODO confirm on the Question model)
    question_list = question_list_1[::-1]
    return render(request, 'first/index_main.html', {'search_on':search_on, 'question_list':question_list, 'user_detail':user,'numbers':numbers, 'category_list':category})
def detail(request, question_id):
    """Detail page for a single question, identified by primary key."""
    question = models.Question.objects.get(id=question_id)
    # anonymous visitors have no OAS_User profile row
    a = models.OAS_User.objects.filter(id=request.user.id)
    if a.exists():
        user = models.OAS_User.objects.get(id=request.user.id)
    else:
        user = None
    all_user = models.OAS_User.objects.all()
    category = models.Category.objects.all()
    # badge counter: open questions addressed to the current user
    count = models.Question.objects.filter(Q(ask_to=request.user.id)&Q(finished=0))
    numbers = len(count)
    context = {'question_detail':question, 'user_detail':user,'all_user':all_user, 'category_list':category,'numbers':numbers}
    return render(request, 'first/detail.html',context=context)
def reply(request):
    """Store a comment posted on a question, then return to its detail page."""
    question_id = request.POST.get('Question_id')
    comment = request.POST.get('comment')
    # The FK target can't be passed directly; fetch the ContentType instance
    # and hand it back. (id=9 hard-codes a content type - presumably the
    # Question model; TODO confirm against the django_content_type table.)
    obj = ContentType.objects.get(id=9)
    user = models.OAS_User.objects.get(id=request.user.id)
    # NOTE(review): the instructor flag is smuggled through the comment's
    # ip_address field ('0' = student, '1' = instructor).
    if user.instr == 0:
        ip = '0'
    else: ip = '1'
    comment_models.Comment.objects.create(
        user_name = user.user,
        content_type = obj,
        object_pk = question_id,
        ip_address = ip,
        site_id = 1,
        user = request.user,
        comment = comment,
    )
    return HttpResponseRedirect('/detail/%s' % question_id)
def login_view(request):
    """Render the login form."""
    return render(request, 'login.html')
def logout_view(request):
    """End the current session and go back to the front page."""
    auth.logout(request)
    return HttpResponseRedirect("/")
def Login(request):
    """Authenticate the posted credentials and start a session.

    Redirects to the front page on success, otherwise back to the login page
    with an explanatory flash message.
    """
    username = request.POST.get("username")
    password = request.POST.get("password")
    # Reject blank credentials before touching the database.
    if username == '' or password == '':
        messages.warning(request, '用户名或密码不允许为空')
        return HttpResponseRedirect("/login_view/")
    # The original built the same queryset twice and re-evaluated it for
    # truthiness; a single exists() check is enough.
    if models.User.objects.filter(username=username).exists():
        user = authenticate(username=username, password=password)
        if user:
            auth.login(request, user)
            return HttpResponseRedirect("/")
        messages.warning(request, '用户名或密码错误')
        return HttpResponseRedirect("/login_view/")
    messages.warning(request, '用户不存在!')
    return HttpResponseRedirect("/login_view/")
def issue(request):
    """Render the 'ask a question' form with categories and instructors."""
    user = models.OAS_User.objects.get(id=request.user.id)
    category = models.Category.objects.all()
    # instructors only (instr flag set) populate the ask-to dropdown
    instr= models.OAS_User.objects.filter(instr=1)
    return render(request, 'issue.html',{'category_list':category,'user_detail':user,'user':request.user, 'instr_list':instr})
def submit(request):
    """Validate the posted question form and create a Question record."""
    submit_content = request.POST.get('sub_content')
    question_title = request.POST.get('question_title')
    question_summary = request.POST.get('question_summary')
    instr = request.POST.get('instr', "")
    cate = request.POST.get('cate', "")
    # resolve the selected category / instructor / author rows
    category = models.Category.objects.get(id=cate)
    instructor = models.User.objects.get(id=instr)
    author = models.OAS_User.objects.get(user_id=request.user.id)
    # length limits mirror the model's CharField sizes
    if len(question_title) >= 64:
        messages.warning(request, '标题过长!')
        return HttpResponseRedirect("/issue/")
    elif len(question_summary) >=128:
        messages.warning(request, '概述过长!')
        return HttpResponseRedirect("/issue/")
    else:
        models.Question.objects.create(
            title= question_title,
            summary= question_summary,
            category_id=category.id,
            content=submit_content,
            author=author,
            ask_to_id=instructor.id
        )
        return HttpResponseRedirect("/")
def choice_cate(request):
    """Category overview page: list all categories for browsing."""
    # anonymous visitors have no OAS_User profile row
    a = models.OAS_User.objects.filter(id=request.user.id)
    if a.exists():
        user = models.OAS_User.objects.get(id=request.user.id)
    else:
        user = None
    # badge counter: open questions addressed to the current user
    count = models.Question.objects.filter(Q(ask_to=request.user.id)&Q(finished=0))
    numbers = len(count)
    category = models.Category.objects.all()
    return render_to_response('first/categorys.html',{'numbers':numbers,'user_detail':user, 'user': request.user, 'category_list':category})
def category(request,cate_id):
    """List the questions belonging to one category (newest first)."""
    a = models.OAS_User.objects.filter(id=request.user.id)
    if a.exists():
        user = models.OAS_User.objects.get(id=request.user.id)
    else:
        user = None
    category = models.Category.objects.all()
    ques_list_f = models.Question.objects.filter(category_id=cate_id)
    # the template consumes the list twice, hence two reversed copies
    ques_list_1 = ques_list_f[::-1]
    ques_list_2 = ques_list_f[::-1]
    return render_to_response('first/index.html',{'question_list_1':ques_list_1,'question_list_2':ques_list_2,'user_detail':user, 'user':request.user,'category_list':category})
def my_question(request):
    """List the questions authored by the current user (newest first)."""
    user = models.OAS_User.objects.get(id=request.user.id)
    list_f = models.Question.objects.filter(author_id=request.user.id)
    # NOTE(review): `list` shadows the builtin; both template slots get the
    # same reversed sequence.
    list = list_f[::-1]
    list_1 = list
    list_2 = list
    category = models.Category.objects.all()
    return render_to_response('first/index.html',{'question_list_1':list_1,'question_list_2':list_2,'user_detail':user, 'user':request.user,'category_list':category})
def password(request):
    """Change the logged-in user's password after validating the old one.

    Redirects back to the form with a flash message on any validation
    failure, or to the front page on success.
    """
    user = models.User.objects.get(id=request.user.id)
    password_ori = request.POST.get("password_ori")
    password_1 = request.POST.get('password_1')
    password_2 = request.POST.get('password_2')
    # Security fix: the original debug print() wrote the stored hash and both
    # plaintext new passwords to the console/log - removed.
    if not authenticate(username=user.username, password=password_ori):
        messages.warning(request, '原密码输入错误!')
        return HttpResponseRedirect("/password_view/")
    elif password_1 != password_2:
        messages.warning(request, '两次输入的新密码不一致!')
        return HttpResponseRedirect("/password_view/")
    else:
        # (the original's trailing '输入错误!' branch was unreachable: the
        # equal/unequal checks above already cover every case)
        user.set_password(password_1)
        user.save()
        return HttpResponseRedirect("/")
def password_view(request):
    """Render the change-password form."""
    return render(request,'password.html')
def search(request):
    """Search questions whose title, summary or content contains the key."""
    a = models.OAS_User.objects.filter(id=request.user.id)
    if a.exists():
        user = models.OAS_User.objects.get(id=request.user.id)
    else:
        user = None
    search_key = request.POST.get('search_key')
    search_title = models.Question.objects.filter(title__contains=search_key)
    search_summary = models.Question.objects.filter(summary__contains=search_key)
    search_content = models.Question.objects.filter(content__contains=search_key)
    # chain() yields a single-use iterator, so the template's two passes each
    # need their own chained copy (duplicates across fields are not removed)
    search = chain(search_title,search_summary,search_content)
    search_2 = chain(search_title, search_summary, search_content)
    category = models.Category.objects.all()
    return render_to_response('first/index.html',{'question_list_1': search,'question_list_2': search_2,'user_detail':user, 'user': request.user, 'category_list': category})
def finish(request):
    """Mark the posted question as answered/closed, then return home."""
    question_id = request.POST.get('Question_id')
    # (removed the original's stray debug print of the question id)
    set_finish = models.Question.objects.get(id=question_id)
    set_finish.finished = 1
    set_finish.save()
    return HttpResponseRedirect("/")
def hint(request):
    """Notification page: questions addressed to the current user."""
    user = models.OAS_User.objects.get(id=request.user.id)
    # badge counter: still-open questions addressed to this user
    count = models.Question.objects.filter(Q(ask_to=request.user.id)&Q(finished=0))
    numbers = len(count)
    question = models.Question.objects.filter(ask_to=request.user.id)
    return render_to_response('first/hint.html',{'user':request.user, 'user_detail':user, 'question_list':question,'numbers':numbers})
| [
"2456151485@qq.com"
] | 2456151485@qq.com |
0985f27f468897169ba4b008dc1ea639d162f910 | b1270afb00e7f3326c91d110f9c53fab54f55c43 | /something.py | 4a9de163ab93c2840a700a31fa752102fc6682a3 | [] | no_license | shas-hank7/git-and-github | f51d483c755d8135d6817acfa477070f817c8bba | 0b4285fcd82509191cc90318ba2d52c2da3a7b78 | refs/heads/master | 2023-06-26T15:37:13.127615 | 2021-07-30T20:55:50 | 2021-07-30T20:55:50 | 391,184,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | #asdfsdfasdfa
#dsgsdfgsdfg
| [
"noreply@github.com"
] | shas-hank7.noreply@github.com |
65a01fd1f09658838b02901d836cc99d3fe44dd1 | ed37a985a7411fb3b8f29282a81f1d823f8f4afc | /pascal_triangle/implementations/cython/base.py | 5289433918abcc9fb01106fd869644cc623a41fb | [] | no_license | dmugtasimov/pascal_triangle | 5b310451582f6fc2ddc74f316259c6ec9fc4ec4b | 875deac43300a42560f0433a92e5f1e0475bb754 | refs/heads/master | 2021-06-16T10:55:11.338999 | 2017-04-11T17:20:54 | 2017-04-11T17:20:54 | 35,548,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | from pascal_triangle.implementations.base import PascalTriangleBase
class CyPascalTriangleBase(PascalTriangleBase):
    """Base tagging Pascal-triangle implementations with the Cython label."""
    language = 'Cython'
| [
"dmugtasimov@gmail.com"
] | dmugtasimov@gmail.com |
23b4b02de1b7d96cbc888cd572868d346d16803e | a2c4e411f4872f1c501618ed553ee5c87c3270db | /company/migrations/0002_company_name.py | d52a7bf059fb8de9abd833d6fa190105898f4f74 | [
"MIT"
] | permissive | lamnt-git/bug_bounty | c22f438f0c05c62329eb77ba1ce68c55a54d0b56 | fbb53d266a36c7df68e5f81fbafb0cf34891f764 | refs/heads/master | 2022-12-22T08:09:13.224931 | 2020-01-03T04:49:46 | 2020-01-03T04:49:46 | 231,513,592 | 0 | 0 | NOASSERTION | 2022-12-08T03:22:58 | 2020-01-03T04:44:20 | Python | UTF-8 | Python | false | false | 384 | py | # Generated by Django 2.2.6 on 2019-12-30 03:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the optional `name` CharField to
    # the Company model, following company.0001_initial.
    dependencies = [
        ('company', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='company',
            name='name',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]
| [
"tunglambk123@gmail.com"
] | tunglambk123@gmail.com |
5aabfdaa690e6d5f51e29d29af16c5f7bbebe551 | f9c7969c8649c484f2460fb245a3d5bd6870fa5a | /ch07/exercises/exercise 50.py | 57914cc3e70dcbd399eceb03ac689bf9eefd314c | [] | no_license | Pshypher/tpocup | 78cf97d51259bfea944dc205b9644bb1ae4ab367 | b05b05728713637b1976a8203c2c97dbbfbb6a94 | refs/heads/master | 2022-05-18T13:11:31.417205 | 2020-01-07T13:50:06 | 2020-01-07T13:50:06 | 260,133,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | # Program written to transfer the elements of a list to another
# list whilst rearranging the order in which the elements appear
# Unless stated otherwise, variables are assumed to be of type int
def transform(list1, list2, r1, r2):
    """Append the elements of list1[r1:r2] onto list2 in reverse order.

    Bug fix (documentation): the original docstring claimed the items were
    *removed* from list1, but the code only copies the slice - list1 is
    never modified. Only list2 is mutated in place.

    Returns list2 (the same object, extended).
    """
    list2.extend(reversed(list1[r1:r2]))  # r1 < r2
    return list2
# Test that the function above works as expected
list1 = [1,2,3,4,5,6,7,8,9]
list2 = [100,200]
# Indices 4..6 hold 5, 6, 7; they are appended to list2 in reverse order.
transform(list1, list2, 4, 7)
print(list2) # displays [100,200,7,6,5]
| [
"jimishote@gmail.com"
] | jimishote@gmail.com |
6d2de151f515434cade11730502b2d2cc5d3af93 | abd84815f5ad1396c3fd21d412a21d27027af1e8 | /server/lanms/.ycm_extra_conf.py | 1787e10b2133e0f41c7ff475623b6cf78f8b0957 | [] | no_license | akapust1n/Recognizing-text-regions | f17ed804d349063dc4a00ef76a54cb487e710b8f | 19ce7d41e6de0e5550da71bd0fcbaaed3a81a0a5 | refs/heads/master | 2021-01-21T22:25:41.116083 | 2017-10-31T21:53:16 | 2017-10-31T21:53:16 | 102,157,931 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,651 | py | #!/usr/bin/env python
#
# Copyright (C) 2014 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# make sibling modules importable when ycmd loads this file
sys.path.append(os.path.dirname(__file__))
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
from plumbum.cmd import python_config
# Fallback compilation flags, extended with the Python C-API include flags
# reported by python-config.
flags = [
    '-Wall',
    '-Wextra',
    '-Wnon-virtual-dtor',
    '-Winvalid-pch',
    '-Wno-unused-local-typedefs',
    '-std=c++11',
    '-x', 'c++',
    '-Iinclude',
] + python_config('--cflags').split()
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags.
compilation_database_folder = ''
if os.path.exists(compilation_database_folder):
    database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
    database = None
# source extensions tried when looking up flags for a header file
SOURCE_EXTENSIONS = ['.cpp', '.cxx', '.cc', '.c', '.m', '.mm']
def DirectoryOfThisScript():
    """Absolute path of the directory containing this configuration file."""
    return os.path.dirname(os.path.abspath(__file__))
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """
    Rewrite relative include/sysroot paths in a flag list so they are
    absolute with respect to `working_directory`. Both the separated form
    ('-I', 'path') and the joined form ('-Ipath') are handled. Returns the
    flags unchanged (as a list copy) when no working directory is given.
    """
    if not working_directory:
        return list(flags)

    path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
    absolute_flags = []
    next_is_path = False

    for flag in flags:
        updated = flag

        # previous flag was a bare path option, so this one is its argument
        if next_is_path:
            next_is_path = False
            if not flag.startswith('/'):
                updated = os.path.join(working_directory, flag)

        for prefix in path_flags:
            if flag == prefix:
                next_is_path = True
                break
            if flag.startswith(prefix):
                # joined form: rebase the embedded path
                updated = prefix + os.path.join(working_directory, flag[len(prefix):])
                break

        if updated:
            absolute_flags.append(updated)
    return absolute_flags
def IsHeaderFile(filename):
    """Return True when the file name carries a C/C++ header extension."""
    return os.path.splitext(filename)[1] in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile(filename):
    """Look up compilation flags for `filename` in the compilation database,
    falling back to a sibling source file when given a header. Returns None
    when no suitable entry exists."""
    # The compilation_commands.json file generated by CMake does not have entries
    # for header files. So we do our best by asking the db for flags for a
    # corresponding source file, if any. If one exists, the flags for that file
    # should be good enough.
    if IsHeaderFile(filename):
        basename = os.path.splitext(filename)[0]
        for extension in SOURCE_EXTENSIONS:
            replacement_file = basename + extension
            if os.path.exists(replacement_file):
                compilation_info = database.GetCompilationInfoForFile(
                    replacement_file)
                if compilation_info.compiler_flags_:
                    return compilation_info
        return None
    return database.GetCompilationInfoForFile(filename)
# This is the entry point; this function is called by ycmd to produce flags for
# a file.
def FlagsForFile(filename, **kwargs):
    """ycmd entry point: return the final compiler flags for `filename`,
    taken from the compilation database when available, otherwise from the
    hard-coded `flags` list rebased onto this script's directory."""
    if database:
        # Bear in mind that compilation_info.compiler_flags_ does NOT return a
        # python list, but a "list-like" StringVec object
        compilation_info = GetCompilationInfoForFile(filename)
        if not compilation_info:
            return None
        final_flags = MakeRelativePathsInFlagsAbsolute(
            compilation_info.compiler_flags_,
            compilation_info.compiler_working_dir_)
    else:
        relative_to = DirectoryOfThisScript()
        final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
    return {
        'flags': final_flags,
        'do_cache': True
    }
| [
"mihmih96@gmail.com"
] | mihmih96@gmail.com |
a33e64b3c4f7faf63dafa85ff3f4ab91ded9dfab | 68ec6952dff6472c86eb827424f1b0279a722fec | /Flocking/movie.py | e2bb1a56e4306fcf0691a4ec989f992834747615 | [] | no_license | nicoguaro/Animating-Science | f6601e44162bbe4470f0135b74cebf559a6da7dc | 145184b422897275c8c173e93c51f8fadaf5a2bc | refs/heads/master | 2023-02-28T02:51:01.675773 | 2021-02-03T17:37:03 | 2021-02-03T17:37:03 | 372,103,748 | 1 | 0 | null | 2021-05-30T01:59:27 | 2021-05-30T01:59:27 | null | UTF-8 | Python | false | false | 1,891 | py | import cv2
import os
import shutil
import re
dir_path = './imgs/'
ext = 'png'

# sort images in directory using natural order (img2 before img10)
def sorted_alphanumeric(data):
    """Return `data` sorted so that embedded numbers compare numerically."""
    def alphanum_key(key):
        return [int(part) if part.isdigit() else part.lower()
                for part in re.split('([0-9]+)', key)]
    return sorted(data, key=alphanum_key)
# TODO: scale up low resolution image + gaussian blur
def scaleBlur():
    # Placeholder: intended to upscale low-resolution frames and smooth them
    # with a Gaussian blur before they are stitched into the video.
    pass
# merge images into video
def createVideo(name):
    """Encode all `ext` images in dir_path (natural order) into <name>.mp4
    at 25 fps, previewing each frame in an OpenCV window."""
    output = str(name) + '.mp4'
    # create directory list of images
    images = []
    for f in sorted_alphanumeric(os.listdir(dir_path)):
        if f.endswith(ext):
            images.append(f)
    # Determine the width and height from the first image
    # NOTE(review): raises IndexError when no matching images exist, and
    # cv2.imshow fails on headless systems - both worth guarding.
    image_path = os.path.join(dir_path, images[0])
    frame = cv2.imread(image_path)
    cv2.imshow('video',frame)
    height, width, channels = frame.shape
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Be sure to use lower case
    #fourcc = cv2.VideoWriter_fourcc(*'gifv') # Be sure to use lower case
    out = cv2.VideoWriter(output, fourcc, 25.0, (width, height))
    for image in images:
        image_path = os.path.join(dir_path, image)
        frame = cv2.imread(image_path)
        out.write(frame) # Write out frame to video
        cv2.imshow('video',frame)
        if (cv2.waitKey(1) & 0xFF) == ord('q'): # Hit `q` to exit
            break
    # Release everything if job is finished
    out.release()
    cv2.destroyAllWindows()
    print("...history video saved as {}".format(output))
# delete all files in the dir_path
def delImgs():
    """Remove every file, symlink and subdirectory inside dir_path,
    reporting (but not raising on) individual failures."""
    for filename in os.listdir(dir_path):
        file_path = os.path.join(dir_path, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            # best-effort cleanup: log the failure and keep going
            print('Failed to delete %s. Reason: %s' % (file_path, e))
| [
"quentin.wach@googlemail.com"
] | quentin.wach@googlemail.com |
40f715176e72f85178960ecbc9af9536f74f0d83 | 0eab496b831bd615ae509b43eab0044885ebb820 | /work-in-progress/MariuszFerdyn/working-rzetelnekursy.pl/c-icap/Glasswall-Rebuild-SDK-Evaluation/sdk-wrappers/glasswall.classic.python/archive.manager.sdk.wrapper/ArchiveManagerUnitTests.py | a26e2dfaf6c07c2a7e135a5ddb675f31258b8a5a | [
"Apache-2.0",
"GPL-3.0-only",
"LGPL-3.0-only"
] | permissive | MariuszFerdyn/gp-jira-website | 9923d692205acc487a3821b6827f1bb423ce0d05 | d013b6bef584957d6ff0ae39e72fbd2a6c16168c | refs/heads/main | 2023-01-07T10:30:57.628393 | 2020-11-06T12:22:17 | 2020-11-06T12:22:17 | 307,626,074 | 0 | 0 | Apache-2.0 | 2020-10-27T07:58:13 | 2020-10-27T07:58:12 | null | UTF-8 | Python | false | false | 1,459 | py | import unittest
from GlasswallArchiveManager import ArchiveManager
class TestArchiveManagerMethods(unittest.TestCase):
def setUp(self):
self.gw = ArchiveManager("./test_artefacts/glasswall.archive.manager.dll")
with open("./test_artefacts/test_data/test_file_001.zip", "rb") as f:
self.input_file = f.read()
with open("./test_artefacts/config.xml", "r") as f:
self.config_file = f.read()
def test_protect_status_success(self):
ret_obj = self.gw.GwFileProtectAndReportArchive(self.input_file, self.config_file)
print('----------------------------------------------------------------------')
print(ret_obj.fileBuffer)
print(ret_obj.reportBuffer)
print('----------------------------------------------------------------------')
self.assertEqual(ret_obj.returnStatus, 1)
def test_analysis_status_success(self):
ret_obj = self.gw.GwFileAnalysisArchive(self.input_file, self.config_file)
print('----------------------------------------------------------------------')
print(ret_obj.fileBuffer)
print(ret_obj.reportBuffer)
print('----------------------------------------------------------------------')
self.assertEqual(ret_obj.returnStatus, 1)
suite = unittest.TestLoader().loadTestsFromTestCase(TestArchiveManagerMethods)
unittest.TextTestRunner(verbosity=2).run(suite) | [
"mf@fast-sms.net"
] | mf@fast-sms.net |
6a71ee61962bf5aaad4affa272e4d5ea139738fa | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_106/run_cfg.py | 9695dd7ce6e979864c86ead25607ebeee3e6d533 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_572.root',
'/store/cmst3/user/cmgtools/CMG/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_573.root',
'/store/cmst3/user/cmgtools/CMG/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_574.root',
'/store/cmst3/user/cmgtools/CMG/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_575.root',
'/store/cmst3/user/cmgtools/CMG/DY4JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_576.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
468fa827c9c1875109b7374b21cc2c8296f1961c | a87366f8d9621b0164afb6ce97a0e564134bc688 | /job/filters.py | aa1e849e445f67856997adc61c1c429f7e280643 | [] | no_license | werdani/django-job-board | 15a45e0d24867fbf8f830ba2dac6edf12ecc5703 | 7abe00a15e3f833b44bea3ed44b7c8a0c987a40d | refs/heads/master | 2022-12-28T11:29:07.884980 | 2020-10-07T21:34:27 | 2020-10-07T21:34:27 | 293,180,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | import django_filters
from .models import job
class JobFilter(django_filters.FilterSet):
title = django_filters.CharFilter(lookup_expr='icontains')
description = django_filters.CharFilter(lookup_expr='icontains')
class Meta:
model = job
fields ='__all__'
exclude=['owner','published_at','image','salary','vacancy','slug'] | [
"https://github.com/werdani/django-job-board.git"
] | https://github.com/werdani/django-job-board.git |
81e9dd9678453c9c93ce7e2ce976e2ea805e3873 | ce12451daf62f257669967ee04005efd26395f96 | /param.py | 4d5f789dc4d4440f8c1c9995e4d2e28a081b1bcb | [] | no_license | kinect59/pose2image | 74c7c637c5fac55c18480c0bb5641742af477d8e | 596be75c43e95d270915d3510af01f5ae39a6b34 | refs/heads/master | 2020-03-18T19:31:50.460539 | 2018-05-21T15:43:40 | 2018-05-21T15:43:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | def getGeneralParams():
param = {}
dn = 1
param['IMG_HEIGHT'] = 256/dn
param['IMG_WIDTH'] = 256/dn
param['obj_scale_factor'] = 1.14/dn
param['scale_max'] = 1.05
param['scale_min'] = 0.90
param['max_rotate_degree'] = 5
param['max_sat_factor'] = 0.05
param['max_px_shift'] = 10
param['posemap_downsample'] = 2
param['sigma_joint'] = 7/4.0
param['n_joints'] = 14
param['test_interval'] = 500
param['model_save_interval'] = 5000
param['project_dir'] = '/afs/csail.mit.edu/u/b/balakg/pose/pose2image'
param['batch_size'] = 6
param['seq_len'] = 2
return param
'''
def getDatasetParams(dataset):
param = {}
if(dataset == 'golfswinghd'):
param['n_test_vids'] = 13
param['vid_pth'] = '../../datasets/golfswinghd/frames/'
param['info_pth'] = '../../datasets/golfswinghd/info/'
param['img_sfx'] = '.jpg'
param['test_vids'] = None
if(dataset == 'weightlifting'):
param['n_test_vids'] = 6
param['vid_pth'] = '../../datasets/weightlifting/videos/Men/'
param['info_pth'] = '../../datasets/weightlifting/videoinfo/'
param['img_sfx'] = '.png'
param['test_vids'] = [1,7,18,29,33,57]
if(dataset == 'workout'):
param['vid_pth'] = '../../datasets/workout-warp/frames'
param['info_pth'] = '../../datasets/workout-warp/videoinfo'
param['img_sfx'] = '.jpg'
param['n_test_vids'] = 3
param['test_vids'] = [28,16,36] #9,24
if(dataset == 'tennis'):
param['vid_pth'] = '../../datasets/tennis-warp/frames'
param['info_pth'] = '../../datasets/tennis-warp/videoinfo'
param['img_sfx'] = '.jpg'
param['n_test_vids'] = 2
param['test_vids'] = [44,25] #22, 45
if(dataset == 'test-aux'):
param['vid_pth'] = '../../datasets/warp-test-aux/frames'
param['info_pth'] = '../../datasets/warp-test-aux/videoinfo'
param['img_sfx'] = '.png'
param['n_test_vids'] = 9
param['test_vids'] = None
if(dataset == 'other'):
param['vid_pth'] = '../../datasets/posewarp/train/frames'
param['info_pth'] = '../../datasets/posewarp/train/info'
param['img_sfx'] = '.png'
param['n_test_vids'] = 15
param['test_vids'] = None
return param
'''
| [
"balakg@thousandeyes.csail.mit.edu"
] | balakg@thousandeyes.csail.mit.edu |
d29617a2500b1d0b6f4f92ca725dbe8dc6d4e245 | c00e228c510eed17f373d8347af157b680f4e39e | /Project_not_edited/rbm.py | 4cccc63aa14222ca1f5f8d46e0cc9ff6db7c9ffc | [
"MIT"
] | permissive | Nailin96/NetworkedLife | 4775e71288a4eeff1287293d6ba7048157875a9e | c0f505a75a7638736a175b8bf058fcde486bcf4c | refs/heads/master | 2022-09-16T23:31:16.852130 | 2020-06-02T11:13:26 | 2020-06-02T11:13:26 | 268,779,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,980 | py | import numpy as np
import projectLib as lib
# set highest rating
K = 5
def softmax(x):
# Numerically stable softmax function
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
def ratingsPerMovie(training):
movies = [x[0] for x in training]
u_movies = np.unique(movies).tolist()
return np.array([[i, movie, len([x for x in training if x[0] == movie])] for i, movie in enumerate(u_movies)])
def getV(ratingsForUser):
# ratingsForUser is obtained from the ratings for user library
# you should return a binary matrix ret of size m x K, where m is the number of movies
# that the user has seen. ret[i][k] = 1 if the user
# has rated movie ratingsForUser[i, 0] with k stars
# otherwise it is 0
ret = np.zeros((len(ratingsForUser), K))
for i in range(len(ratingsForUser)):
ret[i, ratingsForUser[i, 1]-1] = 1.0
return ret
def getInitialWeights(m, F, K):
# m is the number of visible units
# F is the number of hidden units
# K is the highest rating (fixed to 5 here)
return np.random.normal(0, 0.1, (m, F, K))
def sig(x):
### TO IMPLEMENT ###
# x is a real vector of size n
# ret should be a vector of size n where ret_i = sigmoid(x_i)
return None
def visibleToHiddenVec(v, w):
### TO IMPLEMENT ###
# v is a matrix of size m x 5. Each row is a binary vector representing a rating
# OR a probability distribution over the rating
# w is a list of matrices of size m x F x 5
# ret should be a vector of size F
return None
def hiddenToVisible(h, w):
### TO IMPLEMENT ###
# h is a binary vector of size F
# w is an array of size m x F x 5
# ret should be a matrix of size m x 5, where m
# is the number of movies the user has seen.
# Remember that we do not reconstruct movies that the user
# has not rated! (where reconstructing means getting a distribution
# over possible ratings).
# We only do so when we predict the rating a user would have given to a movie.
return None
def probProduct(v, p):
# v is a matrix of size m x 5
# p is a vector of size F, activation of the hidden units
# returns the gradient for visible input v and hidden activations p
ret = np.zeros((v.shape[0], p.size, v.shape[1]))
for i in range(v.shape[0]):
for j in range(p.size):
for k in range(v.shape[1]):
ret[i, j, k] = v[i, k] * p[j]
return ret
def sample(p):
# p is a vector of real numbers between 0 and 1
# ret is a vector of same size as p, where ret_i = Ber(p_i)
# In other word we sample from a Bernouilli distribution with
# parameter p_i to obtain ret_i
samples = np.random.random(p.size)
return np.array(samples <= p, dtype=int)
def getPredictedDistribution(v, w, wq):
### TO IMPLEMENT ###
# This function returns a distribution over the ratings for movie q, if user data is v
# v is the dataset of the user we are predicting the movie for
# It is a m x 5 matrix, where m is the number of movies in the
# dataset of this user.
# w is the weights array for the current user, of size m x F x 5
# wq is the weight matrix of size F x 5 for movie q
# If W is the whole weights array, then wq = W[q, :, :]
# You will need to perform the same steps done in the learning/unlearning:
# - Propagate the user input to the hidden units
# - Sample the state of the hidden units
# - Backpropagate these hidden states to obtain
# the distribution over the movie whose associated weights are wq
# ret is a vector of size 5
return None
def predictRatingMax(ratingDistribution):
### TO IMPLEMENT ###
# ratingDistribution is a probability distribution over possible ratings
# It is obtained from the getPredictedDistribution function
# This function is one of three you are to implement
# that returns a rating from the distribution
# We decide here that the predicted rating will be the one with the highest probability
return None
def predictRatingMean(ratingDistribution):
### TO IMPLEMENT ###
# ratingDistribution is a probability distribution over possible ratings
# It is obtained from the getPredictedDistribution function
# This function is one of three you are to implement
# that returns a rating from the distribution
# We decide here that the predicted rating will be the expectation over ratingDistribution
return None
def predictRatingExp(ratingDistribution):
### TO IMPLEMENT ###
# ratingDistribution is a probability distribution over possible ratings
# It is obtained from the getPredictedDistribution function
# This function is one of three you are to implement
# that returns a rating from the distribution
# We decide here that the predicted rating will be the expectation over
# the softmax applied to ratingDistribution
return None
def predictMovieForUser(q, user, W, training, predictType="exp"):
# movie is movie idx
# user is user ID
# type can be "max" or "exp"
ratingsForUser = lib.getRatingsForUser(user, training)
v = getV(ratingsForUser)
ratingDistribution = getPredictedDistribution(v, W[ratingsForUser[:, 0], :, :], W[q, :, :])
if predictType == "max":
return predictRatingMax(ratingDistribution)
elif predictType == "mean":
return predictRatingMean(ratingDistribution)
else:
return predictRatingExp(ratingDistribution)
def predict(movies, users, W, training, predictType="exp"):
# given a list of movies and users, predict the rating for each (movie, user) pair
# used to compute RMSE
return [predictMovieForUser(movie, user, W, training, predictType=predictType) for (movie, user) in zip(movies, users)]
def predictForUser(user, W, training, predictType="exp"):
### TO IMPLEMENT
# given a user ID, predicts all movie ratings for the user
return None
| [
"zhaonailin@hotmail.com"
] | zhaonailin@hotmail.com |
b87a366aa13eb66f436dea7f35c3c7983665479d | ccbdaa1840fafb5ac539770e9e8715a119d8d3d6 | /E56/lc407.py | e0b4b59bc10bbe2af8bc86dce006930222a38d39 | [] | no_license | adslchen/leetcode | bcbd67a4a5b0d5964f40f4b999a34ed7cc087275 | 0699107eb39c51c0ec77c59748ce21deefd6765a | refs/heads/master | 2020-03-22T00:04:02.497148 | 2019-06-27T06:01:39 | 2019-06-27T06:01:39 | 139,222,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,104 | py | class Solution:
def trapRainWater(self, heightMap):
"""
:type heightMap: List[List[int]]
:rtype: int
"""
if not heightMap or not heightMap[0] : return 0
import heapq
m, n = len(heightMap), len(heightMap[0])
heap = []
ans = 0
visit = [[ 0 for i in range(n)] for j in range(m)]
for i in range(m):
for j in range(n):
if i == 0 or j == 0 or i == m-1 or j == n-1:
visit[i][j] = 1
heapq.heappush(heap, (heightMap[i][j], i, j))
maxh = 0
while heap:
height, i, j = heapq.heappop(heap)
maxh = max(heightMap[i][j], maxh)
for x, y in ((i+1,j),(i-1,j),(i,j+1),(i,j-1)):
if x < m and x >= 0 and y < n and y >= 0 and visit[x][y] == 0:
ans += max(0, maxh - heightMap[x][y])
#ans += max(0, height - heightMap[x][y])
heapq.heappush(heap, (heightMap[x][y], x, y))
visit[x][y] = 1
return ans
| [
"adsl_chen@outlook.com"
] | adsl_chen@outlook.com |
1a0e532b26b8e1f4e25a0bdf0c0d61114323d61c | e7b7cc34f77c71e61aa0fa05bcc62f54fc2fc0e1 | /String/test_q014_longest_common_prefix.py | 5c11b0ca85d14ca6dca237e3305afcd9f12663cf | [] | no_license | sevenhe716/LeetCode | 41d2ef18f5cb317858c9b69d00bcccb743cbdf48 | 4a1747b6497305f3821612d9c358a6795b1690da | refs/heads/master | 2020-03-16T16:12:27.461172 | 2019-04-22T13:27:54 | 2019-04-22T13:27:54 | 130,221,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import unittest
from String.q014_longest_common_prefix import SolutionF
class TestLongestCommonPrefix(unittest.TestCase):
"""Test q014_longest_common_prefix.py"""
def test_longest_common_prefix(self):
s = SolutionF()
self.assertEqual('fl', s.longestCommonPrefix(["flower", "flow", "flight"]))
self.assertEqual('', s.longestCommonPrefix(["flower", "flow", ""]))
self.assertEqual('f', s.longestCommonPrefix(["flower", "flow", "f"]))
self.assertEqual('', s.longestCommonPrefix(["dog", "racecar", "car"]))
self.assertEqual('', s.longestCommonPrefix([]))
if __name__ == '__main__':
unittest.main()
| [
"429134862@qq.com"
] | 429134862@qq.com |
6ee5824a7f0a33926ee6524a24a32398e7a7a209 | e82a5480b960abc154025168a27742149ae74de3 | /Leetcode/Dynamic Programming/Medium/1043_partition_array_for_maximum_sum.py | ea7a058f3689100fc3140e54fb2a74d56b88cb62 | [] | no_license | harshsodi/DSA | 8e700f0284f5f3c5559a7e385b82e0a6c96d3363 | 18f82f9b17a287abe3f318118691b62607e61ff9 | refs/heads/master | 2021-07-07T23:42:50.750471 | 2020-09-11T03:16:41 | 2020-09-11T03:16:41 | 186,679,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | # Runtime: 448 ms, faster than 41.60% of Python online submissions for Partition Array for Maximum Sum.
# Memory Usage: 11.8 MB, less than 100.00% of Python online submissions for Partition Array for Maximum Sum.
class Solution(object):
def maxSumAfterPartitioning(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
n = len(A)
dp = [0 for _ in A]
dp[0] = A[0]
for i in range(1, n):
cmax = A[i]
for j in range(0, K):
c = i - j
if c < 0:
break
if c == 0:
prev = 0
else:
prev = dp[c-1]
cmax = max(cmax, A[c])
dp[i] = max(dp[i], cmax * (j+1) + prev)
return dp[n-1] | [
"harshsodi@gmail.com"
] | harshsodi@gmail.com |
e32715504d6211e057a4d673a42b4886fa9869cc | 286426bea3295f16aa1c6cd86216e70e7aaf851f | /venv/bin/jupyter-contrib-nbextension | c7d39b226aee6d29f68218f6d3881b7a683ad063 | [] | no_license | Herudaio/adventures-with-web-scraping | 3d15c9d7416087fa9b3200deedcf1afeae6229ac | cc0de30c370641ffd8a98d5a43c9a1ec5b633796 | refs/heads/master | 2023-02-23T15:20:44.755925 | 2022-03-08T08:21:32 | 2022-03-08T08:21:32 | 163,673,712 | 0 | 0 | null | 2023-02-10T23:09:40 | 2018-12-31T14:06:03 | Python | UTF-8 | Python | false | false | 289 | #!/Users/mycek/Workspace/ShadowTribe/PyLight3/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_contrib_nbextensions.application import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"marcin.mycek@estimote.com"
] | marcin.mycek@estimote.com | |
ee9ca8c66460b4c22349d81cb03741bd0b4fa18f | afd3545bbd00558ffa3e89bf342e2af2f696aab5 | /twitter/bin/rst2html.py | 3e056ab231741ad00b97da06d25d90a5a1fd090a | [] | no_license | zyntop2014/websitedeploy | 68159a5d70990d5a253b8df8b0e6c388afffd79b | 7b6d6a4b726d5ec5f9379342dba65fbccabedadf | refs/heads/master | 2021-01-12T00:49:33.549323 | 2017-03-07T02:53:43 | 2017-03-07T02:53:43 | 78,303,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | #!/home/yanan/Desktop/CloudComputing/websitedeploy/websitedeploy/twitter/bin/python2
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| [
"zyntop2014@gmail.com"
] | zyntop2014@gmail.com |
e70ca7837e1d1cf457f3d698f32cbaabcd7861d8 | 0a6c4618b246ade13c931fdafb7de3f8541563cf | /data vision/5.3.2.py | 12ee3f3556cafcc0b94b81156063cef08815366c | [] | no_license | 18237221520/Python | 882e64159b1297735d4d716c62ee3dd296fcc65a | 8950d55138582a01a1d8b354197e2bfbb4859c82 | refs/heads/main | 2023-01-19T05:59:31.826478 | 2020-11-24T03:23:37 | 2020-11-24T03:23:37 | 312,451,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
plt.rcParams[ 'font.sans-serif']=[ 'Microsoft YaHei']
plt.rcParams[ 'axes.unicode_minus']=False
titanic=pd.read_csv( 'birth-rate.csv')
titanic.dropna(subset=[ '2008'],inplace=True)
plt.style.use('ggplot')
plt.hist(titanic['2008'],bins=10,color='steelblue',edgecolor='k',label='直方图')
plt.tick_params(top='off',right='off')
plt.legend()
plt.show() | [
"noreply@github.com"
] | 18237221520.noreply@github.com |
34249ea22f9600ba5e6b368f1ec766bbc6af30ee | eecb2d5b087932287af911d114c321c9dc1019da | /LiXZ/CMD/cydata2.py | dbbdf72e63cc0741e3be653728879647dec685e7 | [] | no_license | dingxu6207/xingtuan | dd8799c3045b900504aa689558e2df5738d40052 | 6dc8aa747f4a7a4f9d981d55f58e5cdfa83aab4f | refs/heads/master | 2023-01-24T10:20:33.187771 | 2020-12-02T16:54:26 | 2020-12-02T16:54:26 | 298,020,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,128 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 23 09:32:52 2020
@author: dingxu
"""
import numpy as np
from sklearn.cluster import KMeans,DBSCAN,AgglomerativeClustering
import pandas as pd
import matplotlib.pyplot as plt
#np.random.seed(7)
data = np.loadtxt('cydata2.txt')
X = np.copy(data[:,0:5])
#zsx = 1.0*(X-X.mean())/(X.std())
hang,lie = X.shape
for i in range(0,hang):
X[i:,] = (X[i:,]-np.min(X[i:,]))/(np.max(X[i:,]) - np.min(X[i:,]))
zsx = X
model = KMeans(n_clusters = 2)
#model = AgglomerativeClustering(n_clusters = 2)
model.fit(zsx)
#print(model.labels_)
m = model.labels_
predict_labels = model.predict(zsx)
r1 = pd.Series(model.labels_).value_counts()
r2 = pd.DataFrame(model.cluster_centers_)
print(r1)
print(r2)
Gmag = data[:,5]
BPRP = data[:,6]-data[:,7]
plt.figure(0)
plt.plot(BPRP, Gmag, '.')
plt.xlabel('BP-RP',fontsize=14)
plt.ylabel('Gmag',fontsize=14)
ax = plt.gca()
ax.yaxis.set_ticks_position('left') #将y轴的位置设置在右边
ax.invert_yaxis() #y轴反向
datalable = np.column_stack((data ,predict_labels))
cydata = datalable[datalable[:,8]==1]
cyGmag = cydata[:,5]
cyBPRP = cydata[:,6]-cydata[:,7]
plt.figure(1)
plt.scatter(cyBPRP, cyGmag, marker='o', color='lightcoral',s=10.0)
plt.xlabel('BP-RP',fontsize=14)
plt.ylabel('Gmag',fontsize=14)
ax = plt.gca()
ax.yaxis.set_ticks_position('left') #将y轴的位置设置在右边
ax.invert_yaxis() #y轴反向
np.savetxt('cydata3.txt', cydata[:,0:8])
waidata = datalable[datalable[:,8]==0]
waiGmag = waidata[:,5]
waiBPRP = waidata[:,6]-waidata[:,7]
plt.figure(2)
plt.scatter(waiBPRP, waiGmag, marker='o', color='grey',s=10.0)
plt.xlabel('BP-RP',fontsize=14)
plt.ylabel('Gmag',fontsize=14)
ax = plt.gca()
ax.yaxis.set_ticks_position('left') #将y轴的位置设置在右边
ax.invert_yaxis() #y轴反向
plt.figure(3)
plt.scatter(waidata[:,3], waidata[:,4], marker='o', color='grey',s=10.0)
plt.scatter(cydata[:,3], cydata[:,4], marker='o', color='lightcoral',s=10.0)
plt.xlabel('pmRA',fontsize=14)
plt.ylabel('pmDEC',fontsize=14)
#plt.figure(4)
#plt.plot(cydata[:,3], cydata[:,4], '.', color='lightcoral') | [
"32772701+dingxu6207@users.noreply.github.com"
] | 32772701+dingxu6207@users.noreply.github.com |
aa61b6f217999900d05d63f4e05380ad613f4f7e | d2510f3b301ece62d601cf913d57e05fdf52e3a9 | /showclock.py | 717b1707a94e9ab230ba2e0f6584e515cebe556e | [] | no_license | chiajoukuo/Annoying-Clock | 9c699833f1e8eb8ec54ac46856d5dd38018034a2 | dcf291a0ff9e9e18453c97d35b3f3ca77bda19fb | refs/heads/master | 2020-05-30T11:36:37.131103 | 2019-06-28T09:10:38 | 2019-06-28T09:10:38 | 189,708,677 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | import sys
import time
import smbus2
sys.modules['smbus'] = smbus2
from RPLCD.i2c import CharLCD
lcd = CharLCD('PCF8574', address=0x27, port=1, backlight_enabled=True)
try:
print('按下 Ctrl-C 可停止程式')
lcd.clear()
while True:
lcd.cursor_pos = (0, 0)
lcd.write_string("Date: {}".format(time.strftime("%Y/%m/%d")))
lcd.cursor_pos = (1, 0)
lcd.write_string("Time: {}".format(time.strftime("%H:%M:%S")))
time.sleep(1)
except KeyboardInterrupt:
print('關閉程式')
finally:
lcd.clear() | [
"1998isabel@gmail.com"
] | 1998isabel@gmail.com |
9207799d5df5c20ea30bbbe2881bd3107f549667 | bcb8874cf2984f76eb452a16e8cba4250034425b | /utils/metrics.py | 57bcd74ed7ce439d1b49cc671767d0b968cdef55 | [] | no_license | borbavanessa/stacking_ensemble | 976d65069a57427bbd5a736947bd06c7ef6e7102 | e9c1b7ed5b583a7d7cf455a2fdfa8d7d6118cd13 | refs/heads/master | 2023-01-01T11:51:48.539031 | 2020-10-14T04:45:08 | 2020-10-14T04:45:08 | 287,350,501 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,708 | py | import numpy as np
import pandas as pd
import utils.definition_network as dn
import sklearn
import pandas.io.json as pd_json
# import matplotlib.pyplot as plt
from keras import backend as K
from sklearn.metrics import confusion_matrix
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import hamming_loss
class Metrics:
def __init__(self, _normalize=False):
self.normalize = _normalize
#
# Metric calculation functions compatible with the keras format. Used for the training step of the algorithm
#
def recall_m(self, y, y_hat):
# possible_positives = all that are true
true_positives = K.sum(K.round(K.clip(y * y_hat, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(self, y, y_hat):
true_positives = K.sum(K.round(K.clip(y * y_hat, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_hat, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(self, y, y_hat):
precision = self.precision_m(y, y_hat)
recall = self.recall_m(y, y_hat)
return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
def confusion_matrix_binary_classifier(self, y, y_hat, labels, binary_function=dn.TypePredictionLabel.BINARY):
# format to use with binary_crossentropy for single label single class
if binary_function == dn.TypePredictionLabel.BINARY:
mat_confusion = confusion_matrix(y, np.round(y_hat))
else:
mat_confusion = confusion_matrix(y.argmax(axis=1), np.round(y_hat).argmax(axis=1))
return "\nMatriz Confusão:\n\t\t\t\t"+ str(labels[0]) + "(pred)\t"+ str(labels[1]) + "(pred)\n"+\
str(labels[0]) + "(true)\t\t\t"+ str(mat_confusion[0][0]) + "\t\t" + str(mat_confusion[0][1])+ "\n"+\
str(labels[1]) + "(true)\t\t"+ str(mat_confusion[1][0]) + "\t\t" + str(mat_confusion[1][1])
#Function metris for multi-class classifier
def custom_print_metrics(self, labels, p, r, f, s, report_dict):
# print format dict metrics
cm_txt = ''
for label in report_dict:
cm_txt += str(label) + '\n' + str(report_dict[label]['confusion_matrix']) + '\n'
rows = zip(labels, p, r, f, s)
headers = ["precision", "recall", "f1-score", "support"]
head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers)
report = head_fmt.format('', *headers, width=20)
report += '\n\n'
row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n'
for row in rows:
report += row_fmt.format(*row, width=20, digits=2)
report += '\n'
report = cm_txt + '\n' + report
return report
def calc_confusion_matrix_by_label(self, y, y_hat):
report_dict = dict()
n_samples = y.__len__()
labels = ['control', 'anxiety', 'depression', 'anxiety,depression']
p = []
r = []
f = []
s = []
for label in labels:
if label == 'control':
y_new = [( (y[i][0] == 1) and (y[i][1] == 0) and (y[i][2] == 0) ).astype('int') for i in range(n_samples)]
y_hat_new = [( (y_hat[i][0] == 1) and (y_hat[i][1] == 0) and (y_hat[i][2] == 0) ).astype('int') for i in range(n_samples)]
elif label == 'anxiety':
y_new = [( (y[i][0] == 0) and (y[i][1] == 1) and (y[i][2] == 0) ).astype('int') for i in range(n_samples)]
y_hat_new = [( (y_hat[i][0] == 0) and (y_hat[i][1] == 1) and (y_hat[i][2] == 0) ).astype('int') for i in range(n_samples)]
elif label == 'depression':
y_new = [( (y[i][0] == 0) and (y[i][1] == 0) and (y[i][2] == 1) ).astype('int') for i in range(n_samples)]
y_hat_new = [( (y_hat[i][0] == 0) and (y_hat[i][1] == 0) and (y_hat[i][2] == 1) ).astype('int') for i in range(n_samples)]
else: #'comorbidity'
y_new = [( (y[i][0] == 0) and (y[i][1] == 1) and (y[i][2] == 1) ).astype('int') for i in range(n_samples)]
y_hat_new = [( (y_hat[i][0] == 0) and (y_hat[i][1] == 1) and (y_hat[i][2] == 1) ).astype('int') for i in range(n_samples)]
p.append(sklearn.metrics.precision_score(y_new, y_hat_new))
r.append(sklearn.metrics.recall_score(y_new, y_hat_new))
f.append(sklearn.metrics.f1_score(y_new, y_hat_new))
s.append(np.sum(y_new))
report_dict.update({label: {'confusion_matrix': confusion_matrix(y_new, y_hat_new),
'precision': p[-1],
'recall': r[-1],
'f1-score': f[-1],
'support': s[-1]}})
report = self.custom_print_metrics(labels, p, r, f, s, report_dict)
return report, report_dict
def exact_match_ratio(self, y, y_hat):
n_samples = y.__len__()
mr = (1 / n_samples) * np.sum([(((y[i][0] == y_hat[i][0]) and
(y[i][1] == y_hat[i][1]) and
(y[i][2] == y_hat[i][2]))).astype('float32') for i in range(n_samples)],
axis=0)
return mr
def correct_prediction_by_label(self, y, y_hat):
n_samples = y.__len__()
cpl = (1 / n_samples) * np.sum([(y[i] == y_hat[i]).astype('float32') for i in range(n_samples)], axis=0)
return cpl
def calc_metrics_multilabel(self, y, y_hat, labels, type_prediction_label, sample_wise=False):
if type_prediction_label in [dn.TypePredictionLabel.MULTI_LABEL_CATEGORICAL, dn.TypePredictionLabel.SINGLE_LABEL_CATEGORICAL]:
y_hat = (y_hat > 0.5).astype('float32')
y = (y > 0.5).astype('float32')
else: # self.pp_data.type_prediction_label== 'dn.TypePredictionLabel.SINGLE_LABEL_CATEGORICAL'
y_hat = np.argmax(y_hat, axis=1)
y = np.argmax(y, axis=1)
label_index = [index for index in range(len(labels))]
rep_ml_dict = classification_report(y, y_hat, labels=label_index, target_names=labels, zero_division=0, output_dict=True)
rep_sl, rep_sl_dict = self.calc_confusion_matrix_by_label(y, y_hat)
final_metrics = dict()
final_metrics.update({'Exact Match Ratio': self.exact_match_ratio(y, y_hat),
'Correct Prediction per Label': self.correct_prediction_by_label(y, y_hat),
'Hamming Loss': hamming_loss(y, y_hat),
'Multi-label Confusion Matrix': multilabel_confusion_matrix(y, y_hat, samplewise=sample_wise),
'Multi-label Report': classification_report(y, y_hat, labels=label_index, target_names=labels, zero_division=0),
'Single-label Report': rep_sl,
'Multi-label Report Dict': rep_ml_dict,
'Single-label Report Dict': rep_sl_dict
})
return final_metrics
def save_predict_results(self, file_name, type_prediction_label, y_pred, y_test):
if type_prediction_label in [dn.TypePredictionLabel.MULTI_LABEL_CATEGORICAL, dn.TypePredictionLabel.SINGLE_LABEL_CATEGORICAL]:
predict_values = []
for index in range(len(y_test)):
predict_values.append([y_test[index][0], y_test[index][1], y_test[index][2],
y_pred[index][0], y_pred[index][1], y_pred[index][2]])
pd_predict = pd.DataFrame(predict_values, columns=['y_control', 'y_anxiety', 'y_depression',
'yhat_control', 'yhat_anxiety', 'yhat_depression'])
elif type_prediction_label == dn.TypePredictionLabel.BINARY_CATEGORICAL:
predict_values = []
for index in range(len(y_test)):
predict_values.append([y_test[index][0], y_test[index][1],
y_pred[index][0], y_pred[index][1]])
pd_predict = pd.DataFrame(predict_values,
columns=['y_control', 'y_disorder', 'yhat_control', 'yhat_disorder'])
else: #type_prediction_label == dn.TypePredictionLabel.BINARY
labels = np.array([y_test, [res[0] for res in y_pred]])
pd_predict = pd.DataFrame(labels.transpose(), columns=['y_test', 'y_pred'])
pd_predict.to_csv(dn.PATH_PROJECT + file_name + '.csv', index=False, sep=';')
def computes_metrics_by_iteration(self, path_test, name_test, test_ids):
for test_id in test_ids:
print('TEST %s metrics by iteraction' % str(test_id))
metrics_df = pd.read_pickle(dn.PATH_PROJECT + path_test + 'test_' + str(test_id) + '/' +
name_test + str(test_id) + '_metrics.df')
metrics = metrics_df.columns[3:len(metrics_df.columns)]
iteractions = metrics_df.iteraction.unique()
models = metrics_df.model.unique()
list_report_metrics = []
for iteraction in iteractions:
for model in models:
results = dict()
for metric in metrics:
rdf = metrics_df[(metrics_df.iteraction == iteraction) & (metrics_df.model == model)] \
.agg({metric: ['min', 'max', 'mean']}).T
results.update({metric: {'min': str(rdf['min'][0]),
'max': str(rdf['max'][0]),
'mean': str(rdf['mean'][0])}})
list_report_metrics.append(dict({'test': 'test_' + str(test_id),
'iteraction': str(iteraction),
'model': model,
'metric': results}))
data_pd = pd_json.json_normalize(list_report_metrics)
data_pd.to_csv(dn.PATH_PROJECT + path_test + 'test_' + str(test_id) + '/' + \
name_test + str(test_id) + '_metrics_by_iteration.csv')
def computes_metrics_by_stage(self, path_test, name_test, stage, test_ids):
for test_id in test_ids:
print('TEST %s - stage %s' % (str(test_id), stage))
metrics_df = pd.read_pickle(dn.PATH_PROJECT + path_test + 'test_' + str(test_id) + '/' +
name_test + str(test_id) + '_metrics.df')
metrics = metrics_df.columns[3:len(metrics_df.columns)]
models = metrics_df.model.unique()
list_report_metrics = []
for model in models:
results = dict()
for metric in metrics:
rdf = metrics_df[(metrics_df['iteraction'].str.contains(stage)) & \
(metrics_df.model == model)].agg({metric: ['min', 'max', 'mean']}).T
results.update({metric: {'min': str(rdf['min'][0]),
'max': str(rdf['max'][0]),
'mean': str(rdf['mean'][0])}})
list_report_metrics.append(dict({'test': 'test_' + str(test_id),
'iteraction': stage,
'model': model,
'metric': results}))
data_pd = pd_json.json_normalize(list_report_metrics)
data_pd.to_csv(dn.PATH_PROJECT + path_test + 'test_' + str(test_id) + '/' + \
name_test + str(test_id) + '_' + stage + '.csv')
| [
"vanessa.borba@gmail.com"
] | vanessa.borba@gmail.com |
bcbe4e83dec0fe91a1870110287f8df495d3f9c4 | 737c0920b33fddb3fc7b6ff7287f06faaf9958bb | /models/temp/common_spec_2.py | e47cda732a65a69386b22619f5cf0ec7033e294e | [] | no_license | Willamjie/CCWH-ACB | aa51b412adccf0078bc2f575dd47e22cd2daa689 | e15176c9d74c1b9232d72d79114f0bf6aa0d315e | refs/heads/main | 2023-02-25T14:30:57.389888 | 2021-02-02T14:08:06 | 2021-02-02T14:08:06 | 335,209,023 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,308 | py | # This file contains modules common to various models
from utils.utils import *
from models.DConv import DOConv2d
def autopad(k, p=None): # kernel, padding
# Pad to 'same'
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
def DWConv(c1, c2, k=1, s=1, act=True):
# Depthwise convolution
return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
class Conv(nn.Module):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Conv, self).__init__()
self.conv = DOConv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = nn.LeakyReLU(0.1, inplace=True) if act else nn.Identity()
def forward(self, x):
return self.act(self.bn(self.conv(x)))
def fuseforward(self, x):
return self.act(self.conv(x))
class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
super(Bottleneck, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_, c2, 3, 1, g=g)
self.add = shortcut and c1 == c2
def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(BottleneckCSP, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, c1, c2, k=(5, 9, 13)):
super(SPP, self).__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
def forward(self, x):
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Focus, self).__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension
def forward(self, x):
return torch.cat(x, self.d)
class Flatten(nn.Module):
# Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions
@staticmethod
def forward(x):
return x.view(x.size(0), -1)
class Classify(nn.Module):
# Classification head, i.e. x(b,c1,20,20) to x(b,c2)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
super(Classify, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) # to x(b,c2,1,1)
self.flat = Flatten()
def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
return self.flat(self.conv(z)) # flatten to x(b,c2)
| [
"you@example.com"
] | you@example.com |
f0b00d5555c750a0f48de3058e20b1490cb7004e | 9c084b281372066ce84c5dd927014d0e982d5211 | /grado/settings.py | 7c9a90c40ef8d04551b67e14a009fe5f1fc5cf79 | [] | no_license | Josephe23/examen | effec4b26f6f9468e0408e258808004f8a4c8665 | aee649b381d61526445f1bae3bdd587e539626d9 | refs/heads/master | 2021-08-26T05:32:34.022321 | 2017-11-21T18:53:16 | 2017-11-21T18:53:16 | 111,588,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,169 | py | """
Django settings for grado project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4!v2y@j-bdz!(kj(pw0234#%mt!)xxx!_9&!2#o99fs7+oud19'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'profesor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'grado.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'grado.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'es-gt'
TIME_ZONE = 'America/Guatemala'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
| [
"josep19961@hotmail.com"
] | josep19961@hotmail.com |
2efc75247312dda6a4b3a75b13341709c8291fe0 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startPyquil304.py | bbb13e55f149de899519a4a930fcac157e7752b1 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | # qubit number=4
# total number=13
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += CNOT(0,2) # number=7
prog += X(2) # number=8
prog += CNOT(0,2) # number=9
prog += CNOT(3,1) # number=10
prog += H(3) # number=4
prog += Y(3) # number=5
prog += X(0) # number=11
prog += X(0) # number=12
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil304.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
a550932f8d183ac76d5220081668a12122d70e06 | 02a0ca20e32dc337f93948430f92b9434514f702 | /9_async_worker/binding.gyp | a848308869fc85113f584c585e96043ff88a0b7f | [] | no_license | SageWu/nan-example | 82c7aabfeba5ce522e4728dbe7ccb6d79761f3df | a9f2b379aceace7bd54bcf16b5ef58614fb63784 | refs/heads/master | 2023-04-01T12:06:30.028808 | 2021-04-08T14:31:41 | 2021-04-08T14:31:41 | 353,727,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | gyp | {
"targets": [
{
"target_name": "example",
"include_dirs": [
"<!(node -e \"require('nan')\")"
],
"sources": [
"main.cc",
"async.cc"
]
}
]
} | [
"sq13592829287@gmail.com"
] | sq13592829287@gmail.com |
cf985955974009bb0773bcb38ad316642f8c6e2b | c6610462c0f3a1742f14ea0abc97cd03cf6fa980 | /src/carNumber/lgb_compare.py | a640912555ecfba0082193775ecd84e9946401c9 | [] | no_license | Johnfan888/bda | ffeeeffe66387edb0fd6897a4f397fbe66adf82f | 8be66c098a94d02afe85d35db584cf1228ae84b7 | refs/heads/master | 2020-06-27T22:35:23.994668 | 2019-01-25T07:58:47 | 2019-01-25T07:58:47 | 97,075,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 15 20:12:23 2018
对比原始特征,以下哪个模型较好
@author: dell-1
LGB:867125.460656,609.462835443
XGB:891010.924815,628.413774858
RF:996211.340255,694.672363636
"""
from lightgbm.sklearn import LGBMRegressor
from xgboost.sklearn import XGBRegressor
from sklearn import ensemble
from sklearn.metrics import mean_squared_error,mean_absolute_error
import pandas as pd
data = pd.read_csv('original_train.csv')
test = pd.read_csv('original_test.csv')
X_train = data.drop(['count1'],axis=1)
X_test = test.drop(['count1'],axis=1)
y_train = data['count1']
y_test = test['count1']
model = ensemble.RandomForestRegressor().fit(X_train,y_train)
y_pre = model.predict(X_test)
print(mean_squared_error(y_pre,y_test))
print(mean_absolute_error(y_pre,y_test)) | [
"johnfan888@hotmail.com"
] | johnfan888@hotmail.com |
34519ec97bf854308441ce069fbad1237557ffd7 | 5f00c455e734b6586e9ad2a2d62d8c513191aa86 | /modules/testowy | f9d165adb06ed51107567791644e418ce90a9721 | [] | no_license | grom3k/ansible_playground | 2474c2962259dd8706a1bebe7d04ce57e141c1ba | 92a26527eb6be57fdc3270c019f36533d1de1f10 | refs/heads/master | 2021-05-30T07:35:50.751201 | 2016-01-15T00:44:31 | 2016-01-15T00:44:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | #!/usr/bin/env python
print '{cos: ktos}'
| [
"Michal Lasota"
] | Michal Lasota | |
95e1ea6aacd9e0b2bfe38b9c9de93cfd60182a95 | 51108a50ffb48ad154f587c230045bb783f22240 | /bfgame/factories/recipes/base.py | f4d2fbb68a4894ed9e44bdc747ab6a6932072734 | [
"MIT"
] | permissive | ChrisLR/BasicDungeonRL | c90bd0866c457557cccbad24e14689d5d6db7b00 | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | refs/heads/master | 2021-06-15T13:56:53.888646 | 2019-08-05T16:33:57 | 2019-08-05T16:33:57 | 104,269,987 | 3 | 0 | MIT | 2019-08-05T16:28:23 | 2017-09-20T21:35:19 | Python | UTF-8 | Python | false | false | 161 | py | class Recipe(object):
name = ""
base_object_type = None
depends_on = []
@staticmethod
def build_components(object_type, game):
pass
| [
"arzhul@gmail.com"
] | arzhul@gmail.com |
780714cc15c4e6ede65c4752381ca308e7ef4819 | d76bf926973a53d62ca7c7b89483a2e24b0e2517 | /app/living_space.py | a64cefb9f0b52d5076d22d11fa00ff154fbc017b | [
"MIT"
] | permissive | rozenborg/checkpoint-1 | 577ea1cd959959130f670afd8928447af1cb3686 | 9a6e5d0fcf69e0c6aa6c1338d233c6a1878286d9 | refs/heads/master | 2020-12-11T01:46:13.667168 | 2016-09-21T10:27:50 | 2016-09-21T10:27:50 | 68,804,631 | 0 | 0 | null | 2016-09-21T10:44:52 | 2016-09-21T10:06:16 | Python | UTF-8 | Python | false | false | 117 | py | from app.room import Room
class LivingSpace(Room):
"""A room that is suitable as a living space
Attributes:
"""
| [
"writerosenberg@gmail.com"
] | writerosenberg@gmail.com |
f59f64cd89c6c126f3ef41cb687408531d5c2689 | 1fe908c4c3361a06a465274d2992799d39278bf7 | /src/fetch_control/scripts/opencv_node/hsv_thresh.py | 5b4ba6587411e04f3537d32033811ba7cf17ae72 | [] | no_license | masatoshichang/Humanoid-Robotics-Project-Dangerous-Goods-Handling-Robot | b5eb7bfacc98443861b69c1943f953603956a580 | 6e2f4e66fec758a347b01ec88d54d7f046716e27 | refs/heads/master | 2021-01-19T18:44:40.333549 | 2017-05-05T23:17:02 | 2017-05-05T23:17:02 | 88,378,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py | import cv2
import numpy as np
class HSVThresh(object):
trackbar_initialized = False
@staticmethod
def hsv_trackbar():
if HSVThresh.trackbar_initialized:
return
cv2.namedWindow('hsv', cv2.WINDOW_NORMAL)
cv2.createTrackbar('H_l', 'hsv', 0, 255, lambda x: None)
cv2.createTrackbar('S_l', 'hsv', 0, 255, lambda x: None)
cv2.createTrackbar('V_l', 'hsv', 0, 255, lambda x: None)
cv2.createTrackbar('H_h', 'hsv', 0, 255, lambda x: None)
cv2.createTrackbar('S_h', 'hsv', 0, 255, lambda x: None)
cv2.createTrackbar('V_h', 'hsv', 0, 255, lambda x: None)
trackbar_initialized = True
@staticmethod
def hsv_thresh(image):
hl = cv2.getTrackbarPos('H_l', 'hsv')
sl = cv2.getTrackbarPos('S_l', 'hsv')
vl = cv2.getTrackbarPos('V_l', 'hsv')
hh = cv2.getTrackbarPos('H_h', 'hsv')
sh = cv2.getTrackbarPos('S_h', 'hsv')
vh = cv2.getTrackbarPos('V_h', 'hsv')
lower = np.array([hl, sl, vl], np.uint8)
upper = np.array([hh, sh, vh], np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
"""
lower = np.array([1, 0, 20])
upper = np.array([100, 250, 200])
"""
mask = cv2.inRange(hsv, lower, upper)
return mask
@staticmethod
def hsv_thresh_green(image):
lower = np.array([39, 139, 108])
upper = np.array([75, 255, 255])
lower = np.array(lower, np.uint8)
upper = np.array(upper, np.uint8)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)
return mask
| [
"masatoshi.chang@gmail.com"
] | masatoshi.chang@gmail.com |
07984cf67fd08e2d1a9428bef02872c5408361b8 | ea93bdd0f3b96c5a3288abaeb10c1c281c68e5fb | /POS_61070068/report/urls.py | f7f948dd70077df18adbcdf9c71adcde73ab261d | [] | no_license | TheMhee/POS_Pa-mai-dai-a-rai-reoy | 371464a361ce4d9e1e6aea8ab13dbc4ae007edb0 | 4336b71d2747030f296d26c61825081ebd97be4f | refs/heads/master | 2021-03-01T16:16:51.017091 | 2020-03-11T16:43:58 | 2020-03-11T16:43:58 | 245,797,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | """POS_61070068 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
path('search/<int:day>/<int:month>/<int:year>/<int:week>', views.report_search, name='report_search'),
path('search/<int:day>/<int:month>/<int:year>/', views.report_search_day, name='report_search_day'),
path('search/<int:month>/<int:year>/', views.report_search_month, name='report_search_month'),
path('search/<int:year>/', views.report_search_year, name='report_search_year'),
path('search/<int:year>/<int:week>', views.report_search_week, name='report_search_week'),
path('<str:report_type>/', views.report, name='report_list'),
]
| [
"thanakrit.mh@gmail.com"
] | thanakrit.mh@gmail.com |
134af246bdd1e2592743b5a4148e6517054581ef | b062dbf70f9d37d66c53639ae9a9ed57c2b6a89d | /pidx-api/entity/Receipt-sub.py | dbfc24f16f8424c4847f8efe598ff557ba34f113 | [] | no_license | takashi-osako/open-pidx | 2a3b49967e155a7c4b90fbae5d0bf66961767fc1 | e20a015b9c17870a99b19a8dbbec07e80691805d | refs/heads/master | 2021-01-25T03:48:35.656746 | 2013-04-01T02:00:59 | 2013-04-01T02:00:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75,597 | py | #!/usr/bin/env python
#
# Generated Tue Mar 19 21:38:41 2013 by generateDS.py version 2.9a.
#
import sys
import Receipt as supermod
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Data representation classes
#
class ReceiptSub(supermod.Receipt):
def __init__(self, version=None, ReceiptProperties=None, ReceiptDetails=None, ReceiptTotals=None):
super(ReceiptSub, self).__init__(version, ReceiptProperties, ReceiptDetails, ReceiptTotals, )
supermod.Receipt.subclass = ReceiptSub
# end class ReceiptSub
class ReceiptDetailsSub(supermod.ReceiptDetails):
def __init__(self, ReceiptLineItems=None):
super(ReceiptDetailsSub, self).__init__(ReceiptLineItems, )
supermod.ReceiptDetails.subclass = ReceiptDetailsSub
# end class ReceiptDetailsSub
class ReceiptPropertiesSub(supermod.ReceiptProperties):
def __init__(self, ReceiptDate=None, PurchaseOrderReferenceInformation=None, PartnerInformation=None, ReferenceNumber=None, LanguageCode=None, DateTimeRange=None, DocumentReference=None, CustomerSpecificInformation=None, Comment=None):
super(ReceiptPropertiesSub, self).__init__(ReceiptDate, PurchaseOrderReferenceInformation, PartnerInformation, ReferenceNumber, LanguageCode, DateTimeRange, DocumentReference, CustomerSpecificInformation, Comment, )
supermod.ReceiptProperties.subclass = ReceiptPropertiesSub
# end class ReceiptPropertiesSub
class ReceiptTotalsSub(supermod.ReceiptTotals):
def __init__(self, TotalLineItems=None):
super(ReceiptTotalsSub, self).__init__(TotalLineItems, )
supermod.ReceiptTotals.subclass = ReceiptTotalsSub
# end class ReceiptTotalsSub
class AccountInformationSub(supermod.AccountInformation):
def __init__(self, AccountHolderName=None, AccountNumber=None, FinancialInstitution=None, FinancialInstitutionDFINumber=None, CreditCardNumber=None, CreditCardType=None, ExpireDate=None):
super(AccountInformationSub, self).__init__(AccountHolderName, AccountNumber, FinancialInstitution, FinancialInstitutionDFINumber, CreditCardNumber, CreditCardType, ExpireDate, )
supermod.AccountInformation.subclass = AccountInformationSub
# end class AccountInformationSub
class AddressInformationSub(supermod.AddressInformation):
def __init__(self, POBoxNumber=None, BuildingHouseNumber=None, StreetName=None, AddressLine=None, CityName=None, StateProvince=None, PostalCode=None, PostalCountry=None, CountyName=None, LocationIdentifier=None, StreetDirection=None, RegionName=None, LocationCode=None, Comment=None):
super(AddressInformationSub, self).__init__(POBoxNumber, BuildingHouseNumber, StreetName, AddressLine, CityName, StateProvince, PostalCode, PostalCountry, CountyName, LocationIdentifier, StreetDirection, RegionName, LocationCode, Comment, )
supermod.AddressInformation.subclass = AddressInformationSub
# end class AddressInformationSub
class AdditiveTypeIdentifierSub(supermod.AdditiveTypeIdentifier):
def __init__(self, identifierIndicator=None, valueOf_=None):
super(AdditiveTypeIdentifierSub, self).__init__(identifierIndicator, valueOf_, )
supermod.AdditiveTypeIdentifier.subclass = AdditiveTypeIdentifierSub
# end class AdditiveTypeIdentifierSub
class AllowanceOrChargeSub(supermod.AllowanceOrCharge):
def __init__(self, allowanceOrChargeIndicator=None, AllowanceOrChargeTotalAmount=None, AllowanceOrChargeRate=None, AllowanceOrChargePercent=None, AllowanceOrChargeNumber=None, AllowanceOrChargeTypeCode=None, MethodOfHandlingCode=None, AllowanceOrChargeDescription=None, AllowanceOrChargeQuantity=None):
super(AllowanceOrChargeSub, self).__init__(allowanceOrChargeIndicator, AllowanceOrChargeTotalAmount, AllowanceOrChargeRate, AllowanceOrChargePercent, AllowanceOrChargeNumber, AllowanceOrChargeTypeCode, MethodOfHandlingCode, AllowanceOrChargeDescription, AllowanceOrChargeQuantity, )
supermod.AllowanceOrCharge.subclass = AllowanceOrChargeSub
# end class AllowanceOrChargeSub
class AllowanceOrChargePercentSub(supermod.AllowanceOrChargePercent):
def __init__(self, percentIndicator=None, valueOf_=None):
super(AllowanceOrChargePercentSub, self).__init__(percentIndicator, valueOf_, )
supermod.AllowanceOrChargePercent.subclass = AllowanceOrChargePercentSub
# end class AllowanceOrChargePercentSub
class AllowanceOrChargeRateSub(supermod.AllowanceOrChargeRate):
def __init__(self, MonetaryAmount=None, UnitOfMeasureCode=None, CurrencyCode=None):
super(AllowanceOrChargeRateSub, self).__init__(MonetaryAmount, UnitOfMeasureCode, CurrencyCode, )
supermod.AllowanceOrChargeRate.subclass = AllowanceOrChargeRateSub
# end class AllowanceOrChargeRateSub
class AlternativeCommunicationMethodSub(supermod.AlternativeCommunicationMethod):
def __init__(self, communicationMethodType=None, definitionOfOther=None, alternativeCommunicationMethodIndicator=None, valueOf_=None):
super(AlternativeCommunicationMethodSub, self).__init__(communicationMethodType, definitionOfOther, alternativeCommunicationMethodIndicator, valueOf_, )
supermod.AlternativeCommunicationMethod.subclass = AlternativeCommunicationMethodSub
# end class AlternativeCommunicationMethodSub
class AmbientTemperatureSub(supermod.AmbientTemperature):
def __init__(self, Temperature=None, UnitOfMeasureCode=None):
super(AmbientTemperatureSub, self).__init__(Temperature, UnitOfMeasureCode, )
supermod.AmbientTemperature.subclass = AmbientTemperatureSub
# end class AmbientTemperatureSub
class AttachmentSub(supermod.Attachment):
def __init__(self, AttachmentPurposeCode=None, FileName=None, AttachmentTitle=None, AttachmentDescription=None, FileType=None, AttachmentLocation=None):
super(AttachmentSub, self).__init__(AttachmentPurposeCode, FileName, AttachmentTitle, AttachmentDescription, FileType, AttachmentLocation, )
supermod.Attachment.subclass = AttachmentSub
# end class AttachmentSub
class AveragePressureSub(supermod.AveragePressure):
def __init__(self, Pressure=None, UnitOfMeasureCode=None):
super(AveragePressureSub, self).__init__(Pressure, UnitOfMeasureCode, )
supermod.AveragePressure.subclass = AveragePressureSub
# end class AveragePressureSub
class AverageTemperatureSub(supermod.AverageTemperature):
def __init__(self, Temperature=None, UnitOfMeasureCode=None):
super(AverageTemperatureSub, self).__init__(Temperature, UnitOfMeasureCode, )
supermod.AverageTemperature.subclass = AverageTemperatureSub
# end class AverageTemperatureSub
class BatchNumberSub(supermod.BatchNumber):
def __init__(self, batchNumberCreator=None, definitionOfOther=None, valueOf_=None):
super(BatchNumberSub, self).__init__(batchNumberCreator, definitionOfOther, valueOf_, )
supermod.BatchNumber.subclass = BatchNumberSub
# end class BatchNumberSub
class BlockSub(supermod.Block):
def __init__(self, BlockName=None, OCSGNumber=None, StateLease=None):
super(BlockSub, self).__init__(BlockName, OCSGNumber, StateLease, )
supermod.Block.subclass = BlockSub
# end class BlockSub
class BuyersCurrencySub(supermod.BuyersCurrency):
def __init__(self, CurrencyCode=None):
super(BuyersCurrencySub, self).__init__(CurrencyCode, )
supermod.BuyersCurrency.subclass = BuyersCurrencySub
# end class BuyersCurrencySub
class ChangeOrderInformationSub(supermod.ChangeOrderInformation):
def __init__(self, OrderChangeNumber=None, OrderChangeDate=None, PurchaseOrderTypeCode=None, SalesOrderNumber=None, SequenceNumber=None):
super(ChangeOrderInformationSub, self).__init__(OrderChangeNumber, OrderChangeDate, PurchaseOrderTypeCode, SalesOrderNumber, SequenceNumber, )
supermod.ChangeOrderInformation.subclass = ChangeOrderInformationSub
# end class ChangeOrderInformationSub
class CommodityCodeSub(supermod.CommodityCode):
def __init__(self, agencyIndicator=None, valueOf_=None):
super(CommodityCodeSub, self).__init__(agencyIndicator, valueOf_, )
supermod.CommodityCode.subclass = CommodityCodeSub
# end class CommodityCodeSub
class ContactIdentifierSub(supermod.ContactIdentifier):
def __init__(self, contactIdentifierIndicator=None, valueOf_=None):
super(ContactIdentifierSub, self).__init__(contactIdentifierIndicator, valueOf_, )
supermod.ContactIdentifier.subclass = ContactIdentifierSub
# end class ContactIdentifierSub
class ContactInformationSub(supermod.ContactInformation):
def __init__(self, contactInformationIndicator=None, ContactIdentifier=None, ContactName=None, ContactDescription=None, Telephone=None, EmailAddress=None, AlternativeCommunicationMethod=None, LanguageCode=None):
super(ContactInformationSub, self).__init__(contactInformationIndicator, ContactIdentifier, ContactName, ContactDescription, Telephone, EmailAddress, AlternativeCommunicationMethod, LanguageCode, )
supermod.ContactInformation.subclass = ContactInformationSub
# end class ContactInformationSub
class ConveyanceInformationSub(supermod.ConveyanceInformation):
def __init__(self, ConveyanceIdentifier=None, VoyageTripNumber=None, VoyageTripDateTimeRange=None, EstimatedDateTimeOfArrival=None):
super(ConveyanceInformationSub, self).__init__(ConveyanceIdentifier, VoyageTripNumber, VoyageTripDateTimeRange, EstimatedDateTimeOfArrival, )
supermod.ConveyanceInformation.subclass = ConveyanceInformationSub
# end class ConveyanceInformationSub
# Generated pass-through subclass stubs (generateDS-style ``?Sub`` bindings,
# presumably emitted by generateDS — TODO confirm against the generator run).
# Each class forwards its constructor arguments unchanged to the matching
# supermod base type and installs itself on ``supermod.<Type>.subclass``.
class CountryOfFinalDestinationSub(supermod.CountryOfFinalDestination):
    """Pass-through subclass hook for supermod.CountryOfFinalDestination."""
    def __init__(self, CountryCode=None):
        super(CountryOfFinalDestinationSub, self).__init__(CountryCode, )
supermod.CountryOfFinalDestination.subclass = CountryOfFinalDestinationSub
# end class CountryOfFinalDestinationSub
class CountryOfOriginSub(supermod.CountryOfOrigin):
    """Pass-through subclass hook for supermod.CountryOfOrigin."""
    def __init__(self, CountryCode=None):
        super(CountryOfOriginSub, self).__init__(CountryCode, )
supermod.CountryOfOrigin.subclass = CountryOfOriginSub
# end class CountryOfOriginSub
class CreditCardInformationSub(supermod.CreditCardInformation):
    """Pass-through subclass hook for supermod.CreditCardInformation."""
    def __init__(self, CreditCardNumber=None, CardHolderName=None, CreditCardType=None, CreditCardExpirationDate=None, CardAuthorizationCode=None, CardReferenceNumber=None):
        super(CreditCardInformationSub, self).__init__(CreditCardNumber, CardHolderName, CreditCardType, CreditCardExpirationDate, CardAuthorizationCode, CardReferenceNumber, )
supermod.CreditCardInformation.subclass = CreditCardInformationSub
# end class CreditCardInformationSub
class CurrencyRatesSub(supermod.CurrencyRates):
    """Pass-through subclass hook for supermod.CurrencyRates."""
    def __init__(self, SellersCurrency=None, BuyersCurrency=None, ExchangeRate=None):
        super(CurrencyRatesSub, self).__init__(SellersCurrency, BuyersCurrency, ExchangeRate, )
supermod.CurrencyRates.subclass = CurrencyRatesSub
# end class CurrencyRatesSub
class CustodyLocationIdentifierSub(supermod.CustodyLocationIdentifier):
    """Pass-through subclass hook for supermod.CustodyLocationIdentifier."""
    def __init__(self, identifierIndicator=None, valueOf_=None):
        super(CustodyLocationIdentifierSub, self).__init__(identifierIndicator, valueOf_, )
supermod.CustodyLocationIdentifier.subclass = CustodyLocationIdentifierSub
# end class CustodyLocationIdentifierSub
class CustodyLocationInformationSub(supermod.CustodyLocationInformation):
    """Pass-through subclass hook for supermod.CustodyLocationInformation."""
    def __init__(self, CustodyLocationIdentifier=None, CustodyLocationDescription=None, AddressInformation=None, GeographicalInformation=None):
        super(CustodyLocationInformationSub, self).__init__(CustodyLocationIdentifier, CustodyLocationDescription, AddressInformation, GeographicalInformation, )
supermod.CustodyLocationInformation.subclass = CustodyLocationInformationSub
# end class CustodyLocationInformationSub
class CustodyTicketInformationSub(supermod.CustodyTicketInformation):
    """Pass-through subclass hook for supermod.CustodyTicketInformation."""
    def __init__(self, custodyTicketType=None, thirdPartyTicketIndicator=None, CustodyTicketNumber=None, CustodyTicketDateTime=None, CustodyTransferStartDateTime=None, CustodyTransferStopDateTime=None, RevisionNumber=None, CustodySupercedeTicketNumber=None, ConnectingCarrierTicketNumber=None):
        super(CustodyTicketInformationSub, self).__init__(custodyTicketType, thirdPartyTicketIndicator, CustodyTicketNumber, CustodyTicketDateTime, CustodyTransferStartDateTime, CustodyTransferStopDateTime, RevisionNumber, CustodySupercedeTicketNumber, ConnectingCarrierTicketNumber, )
supermod.CustodyTicketInformation.subclass = CustodyTicketInformationSub
# end class CustodyTicketInformationSub
class CustodyTicketTimeLogSub(supermod.CustodyTicketTimeLog):
    """Pass-through subclass hook for supermod.CustodyTicketTimeLog."""
    def __init__(self, CustodyTicketTimeLogEntry=None):
        super(CustodyTicketTimeLogSub, self).__init__(CustodyTicketTimeLogEntry, )
supermod.CustodyTicketTimeLog.subclass = CustodyTicketTimeLogSub
# end class CustodyTicketTimeLogSub
class CustodyTicketTimeLogEntrySub(supermod.CustodyTicketTimeLogEntry):
    """Pass-through subclass hook for supermod.CustodyTicketTimeLogEntry."""
    def __init__(self, TimeLogDateTime=None, TimeLogActivity=None):
        super(CustodyTicketTimeLogEntrySub, self).__init__(TimeLogDateTime, TimeLogActivity, )
supermod.CustodyTicketTimeLogEntry.subclass = CustodyTicketTimeLogEntrySub
# end class CustodyTicketTimeLogEntrySub
class CustodyTransferInformationSub(supermod.CustodyTransferInformation):
    """Pass-through subclass hook for supermod.CustodyTransferInformation."""
    def __init__(self, FromPartner=None, ToPartner=None):
        super(CustodyTransferInformationSub, self).__init__(FromPartner, ToPartner, )
supermod.CustodyTransferInformation.subclass = CustodyTransferInformationSub
# end class CustodyTransferInformationSub
class CustomerSpecificInformationSub(supermod.CustomerSpecificInformation):
    """Pass-through subclass hook for supermod.CustomerSpecificInformation."""
    def __init__(self, informationType=None, valueOf_=None):
        super(CustomerSpecificInformationSub, self).__init__(informationType, valueOf_, )
supermod.CustomerSpecificInformation.subclass = CustomerSpecificInformationSub
# end class CustomerSpecificInformationSub
class DeferredSub(supermod.Deferred):
    """Pass-through subclass hook for supermod.Deferred."""
    def __init__(self, DeferredAmount=None, DeferredDueDate=None, PercentDeferredPayable=None, Description=None):
        super(DeferredSub, self).__init__(DeferredAmount, DeferredDueDate, PercentDeferredPayable, Description, )
supermod.Deferred.subclass = DeferredSub
# end class DeferredSub
class DeliveryTolerancesSub(supermod.DeliveryTolerances):
    """Pass-through subclass hook for supermod.DeliveryTolerances."""
    def __init__(self, UpperLimit=None, LowerLimit=None):
        super(DeliveryTolerancesSub, self).__init__(UpperLimit, LowerLimit, )
supermod.DeliveryTolerances.subclass = DeliveryTolerancesSub
# end class DeliveryTolerancesSub
class DiscountsSub(supermod.Discounts):
    """Pass-through subclass hook for supermod.Discounts."""
    def __init__(self, DaysDue=None, DiscountsDueDate=None, PercentDiscount=None, DiscountAmount=None, Description=None):
        super(DiscountsSub, self).__init__(DaysDue, DiscountsDueDate, PercentDiscount, DiscountAmount, Description, )
supermod.Discounts.subclass = DiscountsSub
# end class DiscountsSub
class DocumentDeliveryInformationSub(supermod.DocumentDeliveryInformation):
    """Pass-through subclass hook for supermod.DocumentDeliveryInformation."""
    def __init__(self, PartnerInformation=None):
        super(DocumentDeliveryInformationSub, self).__init__(PartnerInformation, )
supermod.DocumentDeliveryInformation.subclass = DocumentDeliveryInformationSub
# end class DocumentDeliveryInformationSub
class DocumentReferenceSub(supermod.DocumentReference):
    """Pass-through subclass hook for supermod.DocumentReference."""
    def __init__(self, referenceType=None, definitionOfOther=None, DocumentIdentifier=None, ReferenceItem=None, ReferenceInformation=None, DocumentDate=None):
        super(DocumentReferenceSub, self).__init__(referenceType, definitionOfOther, DocumentIdentifier, ReferenceItem, ReferenceInformation, DocumentDate, )
supermod.DocumentReference.subclass = DocumentReferenceSub
# end class DocumentReferenceSub
# Generated pass-through subclass stubs: each class forwards its constructor
# arguments unchanged to the matching supermod base type and registers itself
# on ``supermod.<Type>.subclass`` (generateDS-style override hook —
# presumably consumed by supermod's factory code; confirm against supermod).
class EffectiveDatesSub(supermod.EffectiveDates):
    """Pass-through subclass hook for supermod.EffectiveDates."""
    def __init__(self, FromDate=None, ToDate=None):
        super(EffectiveDatesSub, self).__init__(FromDate, ToDate, )
supermod.EffectiveDates.subclass = EffectiveDatesSub
# end class EffectiveDatesSub
class ErrorClassificationSub(supermod.ErrorClassification):
    """Pass-through subclass hook for supermod.ErrorClassification."""
    def __init__(self, GlobalMessageExceptionCode=None):
        super(ErrorClassificationSub, self).__init__(GlobalMessageExceptionCode, )
supermod.ErrorClassification.subclass = ErrorClassificationSub
# end class ErrorClassificationSub
class ErrorDescriptionSub(supermod.ErrorDescription):
    """Pass-through subclass hook for supermod.ErrorDescription."""
    def __init__(self, FreeFormText=None):
        super(ErrorDescriptionSub, self).__init__(FreeFormText, )
supermod.ErrorDescription.subclass = ErrorDescriptionSub
# end class ErrorDescriptionSub
class ExceptionDescriptionSub(supermod.ExceptionDescription):
    """Pass-through subclass hook for supermod.ExceptionDescription."""
    def __init__(self, ErrorClassification=None, ErrorDescription=None, OffendingMessageComponent=None):
        super(ExceptionDescriptionSub, self).__init__(ErrorClassification, ErrorDescription, OffendingMessageComponent, )
supermod.ExceptionDescription.subclass = ExceptionDescriptionSub
# end class ExceptionDescriptionSub
class FieldTicketInformationSub(supermod.FieldTicketInformation):
    """Pass-through subclass hook for supermod.FieldTicketInformation."""
    def __init__(self, FieldTicketNumber=None, FieldTicketDate=None, RevisionNumber=None):
        super(FieldTicketInformationSub, self).__init__(FieldTicketNumber, FieldTicketDate, RevisionNumber, )
supermod.FieldTicketInformation.subclass = FieldTicketInformationSub
# end class FieldTicketInformationSub
class FinalDestinationLocationIdentifierSub(supermod.FinalDestinationLocationIdentifier):
    """Pass-through subclass hook for supermod.FinalDestinationLocationIdentifier."""
    def __init__(self, identifierIndicator=None, valueOf_=None):
        super(FinalDestinationLocationIdentifierSub, self).__init__(identifierIndicator, valueOf_, )
supermod.FinalDestinationLocationIdentifier.subclass = FinalDestinationLocationIdentifierSub
# end class FinalDestinationLocationIdentifierSub
class FinalDestinationLocationInformationSub(supermod.FinalDestinationLocationInformation):
    """Pass-through subclass hook for supermod.FinalDestinationLocationInformation."""
    def __init__(self, FinalDestinationLocationIdentifier=None, FinalDestinationLocationDescription=None, AddressInformation=None, GeographicalInformation=None):
        super(FinalDestinationLocationInformationSub, self).__init__(FinalDestinationLocationIdentifier, FinalDestinationLocationDescription, AddressInformation, GeographicalInformation, )
supermod.FinalDestinationLocationInformation.subclass = FinalDestinationLocationInformationSub
# end class FinalDestinationLocationInformationSub
class FlashTemperatureSub(supermod.FlashTemperature):
    """Pass-through subclass hook for supermod.FlashTemperature."""
    def __init__(self, Temperature=None, UnitOfMeasureCode=None):
        super(FlashTemperatureSub, self).__init__(Temperature, UnitOfMeasureCode, )
supermod.FlashTemperature.subclass = FlashTemperatureSub
# end class FlashTemperatureSub
class FreeFormTextSub(supermod.FreeFormText):
    """Pass-through subclass hook for supermod.FreeFormText."""
    def __init__(self, language=None, valueOf_=None):
        super(FreeFormTextSub, self).__init__(language, valueOf_, )
supermod.FreeFormText.subclass = FreeFormTextSub
# end class FreeFormTextSub
class GaugeReadingMeasureSub(supermod.GaugeReadingMeasure):
    """Pass-through subclass hook for supermod.GaugeReadingMeasure."""
    def __init__(self, Measurement=None, UnitOfMeasureCode=None):
        super(GaugeReadingMeasureSub, self).__init__(Measurement, UnitOfMeasureCode, )
supermod.GaugeReadingMeasure.subclass = GaugeReadingMeasureSub
# end class GaugeReadingMeasureSub
class GeographicalInformationSub(supermod.GeographicalInformation):
    """Pass-through subclass hook for supermod.GeographicalInformation."""
    def __init__(self, FieldName=None, Section=None, Township=None, Range=None, Region=None, Block=None, GPSCoordinates=None):
        super(GeographicalInformationSub, self).__init__(FieldName, Section, Township, Range, Region, Block, GPSCoordinates, )
supermod.GeographicalInformation.subclass = GeographicalInformationSub
# end class GeographicalInformationSub
class GPSCoordinatesSub(supermod.GPSCoordinates):
    """Pass-through subclass hook for supermod.GPSCoordinates."""
    def __init__(self, Latitude=None, Longitude=None):
        super(GPSCoordinatesSub, self).__init__(Latitude, Longitude, )
supermod.GPSCoordinates.subclass = GPSCoordinatesSub
# end class GPSCoordinatesSub
class GravitySub(supermod.Gravity):
    """Pass-through subclass hook for supermod.Gravity."""
    def __init__(self, Type=None, valueOf_=None):
        super(GravitySub, self).__init__(Type, valueOf_, )
supermod.Gravity.subclass = GravitySub
# end class GravitySub
class GrossQuantitySub(supermod.GrossQuantity):
    """Pass-through subclass hook for supermod.GrossQuantity."""
    def __init__(self, QuantityUnitOfMeasurement=None, valueOf_=None):
        super(GrossQuantitySub, self).__init__(QuantityUnitOfMeasurement, valueOf_, )
supermod.GrossQuantity.subclass = GrossQuantitySub
# end class GrossQuantitySub
class HazardousMaterialClassCodeSub(supermod.HazardousMaterialClassCode):
    """Pass-through subclass hook for supermod.HazardousMaterialClassCode."""
    def __init__(self, hazardousMaterialIndicator=None, valueOf_=None):
        super(HazardousMaterialClassCodeSub, self).__init__(hazardousMaterialIndicator, valueOf_, )
supermod.HazardousMaterialClassCode.subclass = HazardousMaterialClassCodeSub
# end class HazardousMaterialClassCodeSub
class HazardousMaterialsSub(supermod.HazardousMaterials):
    """Pass-through subclass hook for supermod.HazardousMaterials."""
    def __init__(self, SpecialHandlingCode=None, HazardousMaterialDescription=None, HazardousMaterialClassCode=None):
        super(HazardousMaterialsSub, self).__init__(SpecialHandlingCode, HazardousMaterialDescription, HazardousMaterialClassCode, )
supermod.HazardousMaterials.subclass = HazardousMaterialsSub
# end class HazardousMaterialsSub
# Generated pass-through subclass stubs: each class forwards its constructor
# arguments unchanged to the matching supermod base type and registers itself
# on ``supermod.<Type>.subclass`` (generateDS-style override hook).
class IntermodalServiceSub(supermod.IntermodalService):
    """Pass-through subclass hook for supermod.IntermodalService."""
    def __init__(self, codeListName=None, valueOf_=None):
        super(IntermodalServiceSub, self).__init__(codeListName, valueOf_, )
supermod.IntermodalService.subclass = IntermodalServiceSub
# end class IntermodalServiceSub
class IntrastatSub(supermod.Intrastat):
    """Pass-through subclass hook for supermod.Intrastat."""
    def __init__(self, CommodityCode=None, CommodityDescription=None, TransactionNature=None, SupplementaryUnits=None, CountryOfFinalDestination=None, TransportMethodCode=None):
        super(IntrastatSub, self).__init__(CommodityCode, CommodityDescription, TransactionNature, SupplementaryUnits, CountryOfFinalDestination, TransportMethodCode, )
supermod.Intrastat.subclass = IntrastatSub
# end class IntrastatSub
class InvoiceInformationSub(supermod.InvoiceInformation):
    """Pass-through subclass hook for supermod.InvoiceInformation."""
    def __init__(self, InvoiceNumber=None, InvoiceDate=None, InvoiceTypeCode=None, RevisionNumber=None):
        super(InvoiceInformationSub, self).__init__(InvoiceNumber, InvoiceDate, InvoiceTypeCode, RevisionNumber, )
supermod.InvoiceInformation.subclass = InvoiceInformationSub
# end class InvoiceInformationSub
class JobLocationIdentifierSub(supermod.JobLocationIdentifier):
    """Pass-through subclass hook for supermod.JobLocationIdentifier."""
    def __init__(self, jobLocationIdentifierIndicator=None, valueOf_=None):
        super(JobLocationIdentifierSub, self).__init__(jobLocationIdentifierIndicator, valueOf_, )
supermod.JobLocationIdentifier.subclass = JobLocationIdentifierSub
# end class JobLocationIdentifierSub
class JobLocationInformationSub(supermod.JobLocationInformation):
    """Pass-through subclass hook for supermod.JobLocationInformation."""
    def __init__(self, JobLocationIdentifier=None, JobLocationClassCode=None, JobLocationDescription=None, JobLocationStatus=None, WellInformation=None, JobSiteCategory=None, AddressInformation=None, GeographicalInformation=None):
        super(JobLocationInformationSub, self).__init__(JobLocationIdentifier, JobLocationClassCode, JobLocationDescription, JobLocationStatus, WellInformation, JobSiteCategory, AddressInformation, GeographicalInformation, )
supermod.JobLocationInformation.subclass = JobLocationInformationSub
# end class JobLocationInformationSub
class LetterOfCreditInformationSub(supermod.LetterOfCreditInformation):
    """Pass-through subclass hook for supermod.LetterOfCreditInformation."""
    def __init__(self, DocumentReference=None, AdvisingBank=None, LetterOfCreditAmount=None, PaymentTerms=None, LetterOfCreditQuantity=None, LetterOfCreditVoyageDateTimeRange=None, LetterOfCreditNegotiatedDateTimeRange=None):
        super(LetterOfCreditInformationSub, self).__init__(DocumentReference, AdvisingBank, LetterOfCreditAmount, PaymentTerms, LetterOfCreditQuantity, LetterOfCreditVoyageDateTimeRange, LetterOfCreditNegotiatedDateTimeRange, )
supermod.LetterOfCreditInformation.subclass = LetterOfCreditInformationSub
# end class LetterOfCreditInformationSub
class LineItemAdditiveSub(supermod.LineItemAdditive):
    """Pass-through subclass hook for supermod.LineItemAdditive."""
    def __init__(self, AdditiveTypeIdentifier=None, Quantity=None, UnitOfMeasureCode=None):
        super(LineItemAdditiveSub, self).__init__(AdditiveTypeIdentifier, Quantity, UnitOfMeasureCode, )
supermod.LineItemAdditive.subclass = LineItemAdditiveSub
# end class LineItemAdditiveSub
class LineItemIdentifierSub(supermod.LineItemIdentifier):
    """Pass-through subclass hook for supermod.LineItemIdentifier."""
    def __init__(self, identifierIndicator=None, valueOf_=None):
        super(LineItemIdentifierSub, self).__init__(identifierIndicator, valueOf_, )
supermod.LineItemIdentifier.subclass = LineItemIdentifierSub
# end class LineItemIdentifierSub
class LineItemInformationSub(supermod.LineItemInformation):
    """Pass-through subclass hook for supermod.LineItemInformation."""
    def __init__(self, LineItemIdentifier=None, LineItemName=None, LineItemDescription=None, ManufacturerIdentifier=None):
        super(LineItemInformationSub, self).__init__(LineItemIdentifier, LineItemName, LineItemDescription, ManufacturerIdentifier, )
supermod.LineItemInformation.subclass = LineItemInformationSub
# end class LineItemInformationSub
class LineItemMeasuresSub(supermod.LineItemMeasures):
    """Pass-through subclass hook for supermod.LineItemMeasures."""
    def __init__(self, SampleMeasures=None, VesselMeasures=None, TankMeasures=None, Meter=None, PartnerDefinedMeasure=None):
        super(LineItemMeasuresSub, self).__init__(SampleMeasures, VesselMeasures, TankMeasures, Meter, PartnerDefinedMeasure, )
supermod.LineItemMeasures.subclass = LineItemMeasuresSub
# end class LineItemMeasuresSub
class LineItemPurposeCodeSub(supermod.LineItemPurposeCode):
    """Pass-through subclass hook for supermod.LineItemPurposeCode."""
    def __init__(self, codeListName=None, valueOf_=None):
        super(LineItemPurposeCodeSub, self).__init__(codeListName, valueOf_, )
supermod.LineItemPurposeCode.subclass = LineItemPurposeCodeSub
# end class LineItemPurposeCodeSub
class LocationSub(supermod.Location):
    """Pass-through subclass hook for supermod.Location."""
    def __init__(self, locationIndicator=None, LocationIdentifier=None, LocationName=None, AddressInformation=None, GeographicalInformation=None, LocationDescription=None):
        super(LocationSub, self).__init__(locationIndicator, LocationIdentifier, LocationName, AddressInformation, GeographicalInformation, LocationDescription, )
supermod.Location.subclass = LocationSub
# end class LocationSub
class LocationCodeSub(supermod.LocationCode):
    """Pass-through subclass hook for supermod.LocationCode."""
    def __init__(self, codeListName=None, valueOf_=None):
        super(LocationCodeSub, self).__init__(codeListName, valueOf_, )
supermod.LocationCode.subclass = LocationCodeSub
# end class LocationCodeSub
class LoadTenderInformationSub(supermod.LoadTenderInformation):
    """Pass-through subclass hook for supermod.LoadTenderInformation."""
    def __init__(self, LoadTenderIdentifier=None, LoadTenderIssuedDateTime=None):
        super(LoadTenderInformationSub, self).__init__(LoadTenderIdentifier, LoadTenderIssuedDateTime, )
supermod.LoadTenderInformation.subclass = LoadTenderInformationSub
# end class LoadTenderInformationSub
class ManufacturingIdentificationDetailsSub(supermod.ManufacturingIdentificationDetails):
    """Pass-through subclass hook for supermod.ManufacturingIdentificationDetails."""
    def __init__(self, ManufacturingIdentificationTypeCode=None, ManufacturingIdentifier=None, ParentManufacturingIdentifier=None):
        super(ManufacturingIdentificationDetailsSub, self).__init__(ManufacturingIdentificationTypeCode, ManufacturingIdentifier, ParentManufacturingIdentifier, )
supermod.ManufacturingIdentificationDetails.subclass = ManufacturingIdentificationDetailsSub
# end class ManufacturingIdentificationDetailsSub
class ModeOfTransportationSub(supermod.ModeOfTransportation):
    """Pass-through subclass hook for supermod.ModeOfTransportation."""
    def __init__(self, TransportMethodCode=None, TransportEvent=None, TransportContainer=None, HazardousMaterials=None):
        super(ModeOfTransportationSub, self).__init__(TransportMethodCode, TransportEvent, TransportContainer, HazardousMaterials, )
supermod.ModeOfTransportation.subclass = ModeOfTransportationSub
# end class ModeOfTransportationSub
# Generated pass-through subclass stubs: each class forwards its constructor
# arguments unchanged to the matching supermod base type and registers itself
# on ``supermod.<Type>.subclass`` (generateDS-style override hook).
class MeasurementRangeSub(supermod.MeasurementRange):
    """Pass-through subclass hook for supermod.MeasurementRange."""
    def __init__(self, MinimumMeasurement=None, MaximumMeasurement=None):
        super(MeasurementRangeSub, self).__init__(MinimumMeasurement, MaximumMeasurement, )
supermod.MeasurementRange.subclass = MeasurementRangeSub
# end class MeasurementRangeSub
class MeterSub(supermod.Meter):
    """Pass-through subclass hook for supermod.Meter."""
    def __init__(self, MeterNumber=None, OpenReading=None, CloseReading=None, MeterQuantity=None, MeterFactor=None, MeterDistributionPercent=None, ProverReport=None):
        super(MeterSub, self).__init__(MeterNumber, OpenReading, CloseReading, MeterQuantity, MeterFactor, MeterDistributionPercent, ProverReport, )
supermod.Meter.subclass = MeterSub
# end class MeterSub
class NetQuantitySub(supermod.NetQuantity):
    """Pass-through subclass hook for supermod.NetQuantity."""
    def __init__(self, QuantityUnitOfMeasurement=None, valueOf_=None):
        super(NetQuantitySub, self).__init__(QuantityUnitOfMeasurement, valueOf_, )
supermod.NetQuantity.subclass = NetQuantitySub
# end class NetQuantitySub
class ObservedTemperatureSub(supermod.ObservedTemperature):
    """Pass-through subclass hook for supermod.ObservedTemperature."""
    def __init__(self, Temperature=None, UnitOfMeasureCode=None):
        super(ObservedTemperatureSub, self).__init__(Temperature, UnitOfMeasureCode, )
supermod.ObservedTemperature.subclass = ObservedTemperatureSub
# end class ObservedTemperatureSub
class OffendingMessageComponentSub(supermod.OffendingMessageComponent):
    """Pass-through subclass hook for supermod.OffendingMessageComponent."""
    def __init__(self, GlobalMessageComponentCode=None):
        super(OffendingMessageComponentSub, self).__init__(GlobalMessageComponentCode, )
supermod.OffendingMessageComponent.subclass = OffendingMessageComponentSub
# end class OffendingMessageComponentSub
class OriginLocationIdentifierSub(supermod.OriginLocationIdentifier):
    """Pass-through subclass hook for supermod.OriginLocationIdentifier."""
    def __init__(self, identifierIndicator=None, valueOf_=None):
        super(OriginLocationIdentifierSub, self).__init__(identifierIndicator, valueOf_, )
supermod.OriginLocationIdentifier.subclass = OriginLocationIdentifierSub
# end class OriginLocationIdentifierSub
class OriginLocationInformationSub(supermod.OriginLocationInformation):
    """Pass-through subclass hook for supermod.OriginLocationInformation."""
    def __init__(self, OriginLocationIdentifier=None, OriginLocationDescription=None, AddressInformation=None, GeographicalInformation=None):
        super(OriginLocationInformationSub, self).__init__(OriginLocationIdentifier, OriginLocationDescription, AddressInformation, GeographicalInformation, )
supermod.OriginLocationInformation.subclass = OriginLocationInformationSub
# end class OriginLocationInformationSub
class PackageDetailSub(supermod.PackageDetail):
    """Pass-through subclass hook for supermod.PackageDetail."""
    def __init__(self, LineItemNumber=None, ProductInformation=None, PackageType=None, PackageLevel=None, PackagingQuantity=None, ShippingLabelNumber=None, PackageWeight=None, SubUnitCount=None):
        super(PackageDetailSub, self).__init__(LineItemNumber, ProductInformation, PackageType, PackageLevel, PackagingQuantity, ShippingLabelNumber, PackageWeight, SubUnitCount, )
supermod.PackageDetail.subclass = PackageDetailSub
# end class PackageDetailSub
class PackagingInformationSub(supermod.PackagingInformation):
    """Pass-through subclass hook for supermod.PackagingInformation."""
    def __init__(self, ProductInformation=None, PackagingQuantity=None, PackageType=None, PackageWeight=None, PackagingLabel=None, SpecialInstructions=None):
        super(PackagingInformationSub, self).__init__(ProductInformation, PackagingQuantity, PackageType, PackageWeight, PackagingLabel, SpecialInstructions, )
supermod.PackagingInformation.subclass = PackagingInformationSub
# end class PackagingInformationSub
class PartnerDefinedMeasureSub(supermod.PartnerDefinedMeasure):
    """Pass-through subclass hook for supermod.PartnerDefinedMeasure."""
    def __init__(self, PartnerDefinedMeasureDescription=None, PartnerDefinedMeasureIdentifier=None, Measurement=None, UnitOfMeasureCode=None):
        super(PartnerDefinedMeasureSub, self).__init__(PartnerDefinedMeasureDescription, PartnerDefinedMeasureIdentifier, Measurement, UnitOfMeasureCode, )
supermod.PartnerDefinedMeasure.subclass = PartnerDefinedMeasureSub
# end class PartnerDefinedMeasureSub
class PartnerDefinedMeasureIdentifierSub(supermod.PartnerDefinedMeasureIdentifier):
    """Pass-through subclass hook for supermod.PartnerDefinedMeasureIdentifier."""
    def __init__(self, partnerDefinedMeasureIdentifierIndicator=None, valueOf_=None):
        super(PartnerDefinedMeasureIdentifierSub, self).__init__(partnerDefinedMeasureIdentifierIndicator, valueOf_, )
supermod.PartnerDefinedMeasureIdentifier.subclass = PartnerDefinedMeasureIdentifierSub
# end class PartnerDefinedMeasureIdentifierSub
class PartnerIdentifierSub(supermod.PartnerIdentifier):
    """Pass-through subclass hook for supermod.PartnerIdentifier."""
    def __init__(self, definitionOfOther=None, partnerIdentifierIndicator=None, valueOf_=None):
        super(PartnerIdentifierSub, self).__init__(definitionOfOther, partnerIdentifierIndicator, valueOf_, )
supermod.PartnerIdentifier.subclass = PartnerIdentifierSub
# end class PartnerIdentifierSub
class PartnerInformationSub(supermod.PartnerInformation):
    """Pass-through subclass hook for supermod.PartnerInformation."""
    def __init__(self, partnerRoleIndicator=None, definitionOfOther=None, PartnerIdentifier=None, PartnerName=None, AddressInformation=None, ContactInformation=None, TaxInformation=None, URL=None, AccountInformation=None):
        super(PartnerInformationSub, self).__init__(partnerRoleIndicator, definitionOfOther, PartnerIdentifier, PartnerName, AddressInformation, ContactInformation, TaxInformation, URL, AccountInformation, )
supermod.PartnerInformation.subclass = PartnerInformationSub
# end class PartnerInformationSub
class PaymentTermsSub(supermod.PaymentTerms):
    """Pass-through subclass hook for supermod.PaymentTerms."""
    def __init__(self, PaymentTermsOfSale=None, Description=None, PaymentTermsBasisDateCode=None, PaymentTermsBasisDate=None, TermsNetDays=None, PercentOfInvoicePayable=None, Discounts=None, Deferred=None, Penalty=None):
        super(PaymentTermsSub, self).__init__(PaymentTermsOfSale, Description, PaymentTermsBasisDateCode, PaymentTermsBasisDate, TermsNetDays, PercentOfInvoicePayable, Discounts, Deferred, Penalty, )
supermod.PaymentTerms.subclass = PaymentTermsSub
# end class PaymentTermsSub
class PersonnelInformationSub(supermod.PersonnelInformation):
    """Pass-through subclass hook for supermod.PersonnelInformation."""
    def __init__(self, PersonnelName=None, CompanyName=None, PersonnelIdentifier=None, TimeWorked=None, JobDescription=None, Comment=None):
        super(PersonnelInformationSub, self).__init__(PersonnelName, CompanyName, PersonnelIdentifier, TimeWorked, JobDescription, Comment, )
supermod.PersonnelInformation.subclass = PersonnelInformationSub
# end class PersonnelInformationSub
class PortOfDischargeSub(supermod.PortOfDischarge):
    """Pass-through subclass hook for supermod.PortOfDischarge."""
    def __init__(self, codeListName=None, valueOf_=None):
        super(PortOfDischargeSub, self).__init__(codeListName, valueOf_, )
supermod.PortOfDischarge.subclass = PortOfDischargeSub
# end class PortOfDischargeSub
# Generated pass-through subclass stubs: each class forwards its constructor
# arguments unchanged to the matching supermod base type and registers itself
# on ``supermod.<Type>.subclass`` (generateDS-style override hook).
class PostalCountrySub(supermod.PostalCountry):
    """Pass-through subclass hook for supermod.PostalCountry."""
    def __init__(self, CountryCode=None):
        super(PostalCountrySub, self).__init__(CountryCode, )
supermod.PostalCountry.subclass = PostalCountrySub
# end class PostalCountrySub
class PortOfLoadingSub(supermod.PortOfLoading):
    """Pass-through subclass hook for supermod.PortOfLoading."""
    def __init__(self, codeListName=None, valueOf_=None):
        super(PortOfLoadingSub, self).__init__(codeListName, valueOf_, )
supermod.PortOfLoading.subclass = PortOfLoadingSub
# end class PortOfLoadingSub
class PriceBasisSub(supermod.PriceBasis):
    """Pass-through subclass hook for supermod.PriceBasis."""
    def __init__(self, Measurement=None, MeasurementRange=None, UnitOfMeasureCode=None, BasisDescription=None):
        super(PriceBasisSub, self).__init__(Measurement, MeasurementRange, UnitOfMeasureCode, BasisDescription, )
supermod.PriceBasis.subclass = PriceBasisSub
# end class PriceBasisSub
class PricingSub(supermod.Pricing):
    """Pass-through subclass hook for supermod.Pricing."""
    def __init__(self, UnitPrice=None, PriceBasis=None, PriceReason=None):
        super(PricingSub, self).__init__(UnitPrice, PriceBasis, PriceReason, )
supermod.Pricing.subclass = PricingSub
# end class PricingSub
class ProductSubLineItemsSub(supermod.ProductSubLineItems):
    """Pass-through subclass hook for supermod.ProductSubLineItems."""
    def __init__(self, SubLineItemNumber=None, ManufacturingIdentificationDetails=None, GrossVolume=None, GrossWeight=None, NetWeight=None):
        super(ProductSubLineItemsSub, self).__init__(SubLineItemNumber, ManufacturingIdentificationDetails, GrossVolume, GrossWeight, NetWeight, )
supermod.ProductSubLineItems.subclass = ProductSubLineItemsSub
# end class ProductSubLineItemsSub
class PrimaryCurrencySub(supermod.PrimaryCurrency):
    """Pass-through subclass hook for supermod.PrimaryCurrency."""
    def __init__(self, CurrencyCode=None):
        super(PrimaryCurrencySub, self).__init__(CurrencyCode, )
supermod.PrimaryCurrency.subclass = PrimaryCurrencySub
# end class PrimaryCurrencySub
class ProductIdentifierSub(supermod.ProductIdentifier):
    """Pass-through subclass hook for supermod.ProductIdentifier."""
    def __init__(self, definitionOfOther=None, assigningOrganization=None, valueOf_=None):
        super(ProductIdentifierSub, self).__init__(definitionOfOther, assigningOrganization, valueOf_, )
supermod.ProductIdentifier.subclass = ProductIdentifierSub
# end class ProductIdentifierSub
class ProductInformationSub(supermod.ProductInformation):
    """Pass-through subclass hook for supermod.ProductInformation."""
    def __init__(self, ProductIdentifier=None, ProductDescription=None, ProductGradeDescription=None):
        super(ProductInformationSub, self).__init__(ProductIdentifier, ProductDescription, ProductGradeDescription, )
supermod.ProductInformation.subclass = ProductInformationSub
# end class ProductInformationSub
class PurchaseOrderInformationSub(supermod.PurchaseOrderInformation):
    """Pass-through subclass hook for supermod.PurchaseOrderInformation."""
    def __init__(self, PurchaseOrderNumber=None, PurchaseOrderIssuedDate=None, PurchaseOrderTypeCode=None, ReleaseNumber=None, SalesOrderNumber=None, RevisionNumber=None):
        super(PurchaseOrderInformationSub, self).__init__(PurchaseOrderNumber, PurchaseOrderIssuedDate, PurchaseOrderTypeCode, ReleaseNumber, SalesOrderNumber, RevisionNumber, )
supermod.PurchaseOrderInformation.subclass = PurchaseOrderInformationSub
# end class PurchaseOrderInformationSub
class PurchaseOrderReferenceInformationSub(supermod.PurchaseOrderReferenceInformation):
    """Pass-through subclass hook for supermod.PurchaseOrderReferenceInformation."""
    def __init__(self, OrderNumber=None, OrderDate=None, LineItemNumber=None, ChangeSequenceNumber=None, ReleaseNumber=None):
        super(PurchaseOrderReferenceInformationSub, self).__init__(OrderNumber, OrderDate, LineItemNumber, ChangeSequenceNumber, ReleaseNumber, )
supermod.PurchaseOrderReferenceInformation.subclass = PurchaseOrderReferenceInformationSub
# end class PurchaseOrderReferenceInformationSub
class QuoteRequestInformationSub(supermod.QuoteRequestInformation):
    """Pass-through subclass hook for supermod.QuoteRequestInformation."""
    def __init__(self, QuoteRequestNumber=None, QuoteRequestDate=None, RevisionNumber=None):
        super(QuoteRequestInformationSub, self).__init__(QuoteRequestNumber, QuoteRequestDate, RevisionNumber, )
supermod.QuoteRequestInformation.subclass = QuoteRequestInformationSub
# end class QuoteRequestInformationSub
class RateOfExchangeDetailSub(supermod.RateOfExchangeDetail):
    """Pass-through subclass hook for supermod.RateOfExchangeDetail."""
    def __init__(self, ReferenceCurrency=None, TargetCurrency=None, ExchangeRate=None, DateOfRateOfExchange=None):
        super(RateOfExchangeDetailSub, self).__init__(ReferenceCurrency, TargetCurrency, ExchangeRate, DateOfRateOfExchange, )
supermod.RateOfExchangeDetail.subclass = RateOfExchangeDetailSub
# end class RateOfExchangeDetailSub
class ReferenceCurrencySub(supermod.ReferenceCurrency):
    """Pass-through subclass hook for supermod.ReferenceCurrency."""
    def __init__(self, CurrencyCode=None):
        super(ReferenceCurrencySub, self).__init__(CurrencyCode, )
supermod.ReferenceCurrency.subclass = ReferenceCurrencySub
# end class ReferenceCurrencySub
class ReferenceInformationSub(supermod.ReferenceInformation):
    """Pass-through subclass hook for supermod.ReferenceInformation."""
    def __init__(self, referenceInformationIndicator=None, ReferenceNumber=None, Description=None):
        super(ReferenceInformationSub, self).__init__(referenceInformationIndicator, ReferenceNumber, Description, )
supermod.ReferenceInformation.subclass = ReferenceInformationSub
# end class ReferenceInformationSub
class RequestedDocumentSub(supermod.RequestedDocument):
    """Pass-through subclass hook for supermod.RequestedDocument."""
    def __init__(self, DocumentReference=None, DocumentDeliveryInformation=None, NumberOfOriginals=None, NumberOfCopies=None):
        super(RequestedDocumentSub, self).__init__(DocumentReference, DocumentDeliveryInformation, NumberOfOriginals, NumberOfCopies, )
supermod.RequestedDocument.subclass = RequestedDocumentSub
# end class RequestedDocumentSub
class RequestQuoteResponseSub(supermod.RequestQuoteResponse):
    """Pass-through subclass hook for supermod.RequestQuoteResponse."""
    def __init__(self, OpenDate=None, CloseDate=None, ContactInformation=None, ResponseInstructions=None):
        super(RequestQuoteResponseSub, self).__init__(OpenDate, CloseDate, ContactInformation, ResponseInstructions, )
supermod.RequestQuoteResponse.subclass = RequestQuoteResponseSub
# end class RequestQuoteResponseSub
# Generated pass-through subclass stubs: each class forwards its constructor
# arguments unchanged to the matching supermod base type and registers itself
# on ``supermod.<Type>.subclass`` (generateDS-style override hook).
class ResultingOrderTypeSub(supermod.ResultingOrderType):
    """Pass-through subclass hook for supermod.ResultingOrderType."""
    def __init__(self, PurchaseOrderTypeCode=None):
        super(ResultingOrderTypeSub, self).__init__(PurchaseOrderTypeCode, )
supermod.ResultingOrderType.subclass = ResultingOrderTypeSub
# end class ResultingOrderTypeSub
class RigIdentifierSub(supermod.RigIdentifier):
    """Pass-through subclass hook for supermod.RigIdentifier."""
    def __init__(self, rigIdentifierType=None, valueOf_=None):
        super(RigIdentifierSub, self).__init__(rigIdentifierType, valueOf_, )
supermod.RigIdentifier.subclass = RigIdentifierSub
# end class RigIdentifierSub
class SampleMeasuresSub(supermod.SampleMeasures):
    """Pass-through subclass hook for supermod.SampleMeasures."""
    def __init__(self, PartnerDefinedMeasure=None, ObservedDensity=None, ObservedTemperature=None, APIGravity=None, AverageTemperature=None, AveragePressure=None, AverageFlowRate=None, CorrectedGravity=None, CorrectedTemperatureLiquidFactor=None, CorrectedPressureLiquidFactor=None, CorrectedDensity=None, CompositeFactor=None, PercentLoaded=None, FlashTemperature=None, Haze=None, LineItemColor=None, Microseparometer=None, IncrustationFactor=None):
        super(SampleMeasuresSub, self).__init__(PartnerDefinedMeasure, ObservedDensity, ObservedTemperature, APIGravity, AverageTemperature, AveragePressure, AverageFlowRate, CorrectedGravity, CorrectedTemperatureLiquidFactor, CorrectedPressureLiquidFactor, CorrectedDensity, CompositeFactor, PercentLoaded, FlashTemperature, Haze, LineItemColor, Microseparometer, IncrustationFactor, )
supermod.SampleMeasures.subclass = SampleMeasuresSub
# end class SampleMeasuresSub
class SecondCurrencySub(supermod.SecondCurrency):
    """Pass-through subclass hook for supermod.SecondCurrency."""
    def __init__(self, CurrencyCode=None):
        super(SecondCurrencySub, self).__init__(CurrencyCode, )
supermod.SecondCurrency.subclass = SecondCurrencySub
# end class SecondCurrencySub
class SellersCurrencySub(supermod.SellersCurrency):
    """Pass-through subclass hook for supermod.SellersCurrency."""
    def __init__(self, CurrencyCode=None):
        super(SellersCurrencySub, self).__init__(CurrencyCode, )
supermod.SellersCurrency.subclass = SellersCurrencySub
# end class SellersCurrencySub
class ServiceDateTimeSub(supermod.ServiceDateTime):
    """Pass-through subclass hook for supermod.ServiceDateTime."""
    def __init__(self, dateTypeIndicator=None, valueOf_=None):
        super(ServiceDateTimeSub, self).__init__(dateTypeIndicator, valueOf_, )
supermod.ServiceDateTime.subclass = ServiceDateTimeSub
# end class ServiceDateTimeSub
class ShipmentTermsSub(supermod.ShipmentTerms):
    """Pass-through subclass hook for supermod.ShipmentTerms."""
    def __init__(self, ShipmentTermsCode=None, ShipmentTermsLocation=None):
        super(ShipmentTermsSub, self).__init__(ShipmentTermsCode, ShipmentTermsLocation, )
supermod.ShipmentTerms.subclass = ShipmentTermsSub
# end class ShipmentTermsSub
class ShipmentPackagingSub(supermod.ShipmentPackaging):
    """Pass-through subclass hook for supermod.ShipmentPackaging."""
    def __init__(self, PackageDetail=None):
        super(ShipmentPackagingSub, self).__init__(PackageDetail, )
supermod.ShipmentPackaging.subclass = ShipmentPackagingSub
# end class ShipmentPackagingSub
class ShipmentTermsLocationSub(supermod.ShipmentTermsLocation):
    """Pass-through subclass hook for supermod.ShipmentTermsLocation."""
    def __init__(self, codeListName=None, valueOf_=None):
        super(ShipmentTermsLocationSub, self).__init__(codeListName, valueOf_, )
supermod.ShipmentTermsLocation.subclass = ShipmentTermsLocationSub
# end class ShipmentTermsLocationSub
class ShipNoticeEquipmentDetailsSub(supermod.ShipNoticeEquipmentDetails):
    """Pass-through subclass hook for supermod.ShipNoticeEquipmentDetails."""
    def __init__(self, LineItemNumber=None, EquipmentIdentifier=None, CarrierEquipmentCode=None, EquipmentOwnership=None, NumberOfUnits=None, SpecialInstructions=None, Height=None, Width=None, Length=None, NetWeight=None, TareWeight=None, GrossWeight=None, NetVolume=None, GrossVolume=None, SealNumber=None, EquipmentLoadEmptyStatus=None):
        super(ShipNoticeEquipmentDetailsSub, self).__init__(LineItemNumber, EquipmentIdentifier, CarrierEquipmentCode, EquipmentOwnership, NumberOfUnits, SpecialInstructions, Height, Width, Length, NetWeight, TareWeight, GrossWeight, NetVolume, GrossVolume, SealNumber, EquipmentLoadEmptyStatus, )
supermod.ShipNoticeEquipmentDetails.subclass = ShipNoticeEquipmentDetailsSub
# end class ShipNoticeEquipmentDetailsSub
class SpecialInstructionsSub(supermod.SpecialInstructions):
    """Pass-through subclass hook for supermod.SpecialInstructions."""
    def __init__(self, definitionOfOther=None, instructionIndicator=None, valueOf_=None):
        super(SpecialInstructionsSub, self).__init__(definitionOfOther, instructionIndicator, valueOf_, )
supermod.SpecialInstructions.subclass = SpecialInstructionsSub
# end class SpecialInstructionsSub
class TankCloseMeasuresSub(supermod.TankCloseMeasures):
    """Pass-through subclass hook for supermod.TankCloseMeasures."""
    def __init__(self, AdjustmentQuantity=None, AmbientTemperature=None, APIGravity=None, CorrectedQuantity=None, CorrectedGravity=None, TankDateTime=None, GaugeReadingMeasure=None, GaugeQuantity=None, ReadingQuantity=None, TankWaterQuantity=None, TankWaterMeasure=None, TankTemperature=None, AdjustedRoofQuantity=None, CorrectedTemperatureLiquidFactor=None, PartnerDefinedMeasure=None):
        super(TankCloseMeasuresSub, self).__init__(AdjustmentQuantity, AmbientTemperature, APIGravity, CorrectedQuantity, CorrectedGravity, TankDateTime, GaugeReadingMeasure, GaugeQuantity, ReadingQuantity, TankWaterQuantity, TankWaterMeasure, TankTemperature, AdjustedRoofQuantity, CorrectedTemperatureLiquidFactor, PartnerDefinedMeasure, )
supermod.TankCloseMeasures.subclass = TankCloseMeasuresSub
# end class TankCloseMeasuresSub
class TankHeightSub(supermod.TankHeight):
    """Pass-through subclass hook for supermod.TankHeight."""
    def __init__(self, Measurement=None, UnitOfMeasureCode=None):
        super(TankHeightSub, self).__init__(Measurement, UnitOfMeasureCode, )
supermod.TankHeight.subclass = TankHeightSub
# end class TankHeightSub
class TankMeasuresSub(supermod.TankMeasures):
    """Pass-through subclass hook for supermod.TankMeasures."""
    def __init__(self, TankHeight=None, TankCapacityQuantity=None, PartnerDefinedMeasure=None, TankOpenMeasures=None, TankCloseMeasures=None):
        super(TankMeasuresSub, self).__init__(TankHeight, TankCapacityQuantity, PartnerDefinedMeasure, TankOpenMeasures, TankCloseMeasures, )
supermod.TankMeasures.subclass = TankMeasuresSub
# end class TankMeasuresSub
class TankOpenMeasuresSub(supermod.TankOpenMeasures):
def __init__(self, AdjustmentQuantity=None, AmbientTemperature=None, APIGravity=None, CorrectedQuantity=None, CorrectedGravity=None, TankDateTime=None, GaugeReadingMeasure=None, GaugeQuantity=None, ReadingQuantity=None, TankWaterQuantity=None, TankWaterMeasure=None, TankTemperature=None, AdjustedRoofQuantity=None, CorrectedTemperatureLiquidFactor=None, PartnerDefinedMeasure=None):
super(TankOpenMeasuresSub, self).__init__(AdjustmentQuantity, AmbientTemperature, APIGravity, CorrectedQuantity, CorrectedGravity, TankDateTime, GaugeReadingMeasure, GaugeQuantity, ReadingQuantity, TankWaterQuantity, TankWaterMeasure, TankTemperature, AdjustedRoofQuantity, CorrectedTemperatureLiquidFactor, PartnerDefinedMeasure, )
supermod.TankOpenMeasures.subclass = TankOpenMeasuresSub
# end class TankOpenMeasuresSub
class TankTemperatureSub(supermod.TankTemperature):
def __init__(self, Temperature=None, UnitOfMeasureCode=None):
super(TankTemperatureSub, self).__init__(Temperature, UnitOfMeasureCode, )
supermod.TankTemperature.subclass = TankTemperatureSub
# end class TankTemperatureSub
class TankWaterMeasureSub(supermod.TankWaterMeasure):
def __init__(self, Measurement=None, UnitOfMeasureCode=None):
super(TankWaterMeasureSub, self).__init__(Measurement, UnitOfMeasureCode, )
supermod.TankWaterMeasure.subclass = TankWaterMeasureSub
# end class TankWaterMeasureSub
class TargetCurrencySub(supermod.TargetCurrency):
def __init__(self, CurrencyCode=None):
super(TargetCurrencySub, self).__init__(CurrencyCode, )
supermod.TargetCurrency.subclass = TargetCurrencySub
# end class TargetCurrencySub
class TaxCertificateNumberSub(supermod.TaxCertificateNumber):
def __init__(self, taxCertificateType=None, valueOf_=None):
super(TaxCertificateNumberSub, self).__init__(taxCertificateType, valueOf_, )
supermod.TaxCertificateNumber.subclass = TaxCertificateNumberSub
# end class TaxCertificateNumberSub
class TaxIdentifierNumberSub(supermod.TaxIdentifierNumber):
def __init__(self, taxType=None, valueOf_=None):
super(TaxIdentifierNumberSub, self).__init__(taxType, valueOf_, )
supermod.TaxIdentifierNumber.subclass = TaxIdentifierNumberSub
# end class TaxIdentifierNumberSub
class TaxInformationSub(supermod.TaxInformation):
def __init__(self, TaxIdentifierNumber=None, Jurisdiction=None, TaxCertificateNumber=None, TaxBasisAmount=None, TaxRate=None, DateTimeRange=None, TaxCertificateType=None):
super(TaxInformationSub, self).__init__(TaxIdentifierNumber, Jurisdiction, TaxCertificateNumber, TaxBasisAmount, TaxRate, DateTimeRange, TaxCertificateType, )
supermod.TaxInformation.subclass = TaxInformationSub
# end class TaxInformationSub
class TaxLocationSub(supermod.TaxLocation):
def __init__(self, LocationIdentifier=None, LocationName=None, LocationDescription=None, AddressInformation=None):
super(TaxLocationSub, self).__init__(LocationIdentifier, LocationName, LocationDescription, AddressInformation, )
supermod.TaxLocation.subclass = TaxLocationSub
# end class TaxLocationSub
class TelephoneSub(supermod.Telephone):
def __init__(self, telephoneIndicator=None, PhoneNumber=None, PhoneNumberExtension=None, TelecomCountryCode=None, TelecomAreaCode=None):
super(TelephoneSub, self).__init__(telephoneIndicator, PhoneNumber, PhoneNumberExtension, TelecomCountryCode, TelecomAreaCode, )
supermod.Telephone.subclass = TelephoneSub
# end class TelephoneSub
class TemperatureSub(supermod.Temperature):
def __init__(self, TemperatureUnitOfMeasurement=None, valueOf_=None):
super(TemperatureSub, self).__init__(TemperatureUnitOfMeasurement, valueOf_, )
supermod.Temperature.subclass = TemperatureSub
# end class TemperatureSub
class TransportInformationSub(supermod.TransportInformation):
def __init__(self, stageIdentifier=None, ShipmentMethodOfPayment=None, RoutingSequenceCode=None, TransportMethodCode=None, PartnerInformation=None, Location=None, ShipmentTermsCode=None, Routing=None, ServiceLevelCode=None, HazardousMaterials=None, CarrierEquipmentCode=None, TransportName=None, TransportMethod=None, DocumentReference=None):
super(TransportInformationSub, self).__init__(stageIdentifier, ShipmentMethodOfPayment, RoutingSequenceCode, TransportMethodCode, PartnerInformation, Location, ShipmentTermsCode, Routing, ServiceLevelCode, HazardousMaterials, CarrierEquipmentCode, TransportName, TransportMethod, DocumentReference, )
supermod.TransportInformation.subclass = TransportInformationSub
# end class TransportInformationSub
class TransportMethodSub(supermod.TransportMethod):
def __init__(self, codeListName=None, valueOf_=None):
super(TransportMethodSub, self).__init__(codeListName, valueOf_, )
supermod.TransportMethod.subclass = TransportMethodSub
# end class TransportMethodSub
class UnitPriceSub(supermod.UnitPrice):
def __init__(self, MonetaryAmount=None, UnitOfMeasureCode=None, CurrencyCode=None):
super(UnitPriceSub, self).__init__(MonetaryAmount, UnitOfMeasureCode, CurrencyCode, )
supermod.UnitPrice.subclass = UnitPriceSub
# end class UnitPriceSub
class ValidityDatesSub(supermod.ValidityDates):
def __init__(self, FromDate=None, ToDate=None):
super(ValidityDatesSub, self).__init__(FromDate, ToDate, )
supermod.ValidityDates.subclass = ValidityDatesSub
# end class ValidityDatesSub
class VesselMeasuresSub(supermod.VesselMeasures):
def __init__(self, VesselType=None, VesselDescription=None, VesselPurged=None, VaporFactor=None, VesselCapacityQuantity=None, VolumeOutageQuantity=None, PurgedQuantity=None, PartnerDefinedMeasure=None):
super(VesselMeasuresSub, self).__init__(VesselType, VesselDescription, VesselPurged, VaporFactor, VesselCapacityQuantity, VolumeOutageQuantity, PurgedQuantity, PartnerDefinedMeasure, )
supermod.VesselMeasures.subclass = VesselMeasuresSub
# end class VesselMeasuresSub
class WellInformationSub(supermod.WellInformation):
def __init__(self, WellIdentifier=None, WellName=None, WellType=None, WellCategory=None):
super(WellInformationSub, self).__init__(WellIdentifier, WellName, WellType, WellCategory, )
supermod.WellInformation.subclass = WellInformationSub
# end class WellInformationSub
class AdvancedShipNoticeLineItemsTypeSub(supermod.AdvancedShipNoticeLineItemsType):
def __init__(self, LineItemNumber=None, EquipmentDetailsLineNumber=None, RevisionNumber=None, ProductInformation=None, ShippedQuantity=None, OrderQuantity=None, InvoiceQuantity=None, PackagingQuantity=None, CumulativeTotalQuantity=None, LineItemText=None, DocumentReference=None, ShipmentIndicator=None, PartnerInformation=None, DateTimeRange=None, ShipmentTerms=None, ScheduleDateTime=None, ScheduleDateTimeRange=None, SpecialInstructions=None, RequestedDocument=None, Routing=None, CustomerSpecificInformation=None, PercentActive=None, ProductSubLineItems=None, PackagingInformation=None, Comment=None):
super(AdvancedShipNoticeLineItemsTypeSub, self).__init__(LineItemNumber, EquipmentDetailsLineNumber, RevisionNumber, ProductInformation, ShippedQuantity, OrderQuantity, InvoiceQuantity, PackagingQuantity, CumulativeTotalQuantity, LineItemText, DocumentReference, ShipmentIndicator, PartnerInformation, DateTimeRange, ShipmentTerms, ScheduleDateTime, ScheduleDateTimeRange, SpecialInstructions, RequestedDocument, Routing, CustomerSpecificInformation, PercentActive, ProductSubLineItems, PackagingInformation, Comment, )
supermod.AdvancedShipNoticeLineItemsType.subclass = AdvancedShipNoticeLineItemsTypeSub
# end class AdvancedShipNoticeLineItemsTypeSub
class CompanyInformationTypeSub(supermod.CompanyInformationType):
def __init__(self, CompanyCode=None, CompanyFEIN=None, CompanyName=None):
super(CompanyInformationTypeSub, self).__init__(CompanyCode, CompanyFEIN, CompanyName, )
supermod.CompanyInformationType.subclass = CompanyInformationTypeSub
# end class CompanyInformationTypeSub
class CustodyPartnerTypeSub(supermod.CustodyPartnerType):
def __init__(self, PartnerInformation=None, PartnerEntityIdentifier=None, PartnerFacilityIdentifier=None):
super(CustodyPartnerTypeSub, self).__init__(PartnerInformation, PartnerEntityIdentifier, PartnerFacilityIdentifier, )
supermod.CustodyPartnerType.subclass = CustodyPartnerTypeSub
# end class CustodyPartnerTypeSub
class DateTimeRangeTypeSub(supermod.DateTimeRangeType):
def __init__(self, definitionOfOther=None, rangeType=None, FromDateTime=None, ToDateTime=None):
super(DateTimeRangeTypeSub, self).__init__(definitionOfOther, rangeType, FromDateTime, ToDateTime, )
supermod.DateTimeRangeType.subclass = DateTimeRangeTypeSub
# end class DateTimeRangeTypeSub
class DescriptionTypeSub(supermod.DescriptionType):
def __init__(self, Description=None, LanguageCode=None):
super(DescriptionTypeSub, self).__init__(Description, LanguageCode, )
supermod.DescriptionType.subclass = DescriptionTypeSub
# end class DescriptionTypeSub
class ErrorsTypeSub(supermod.ErrorsType):
def __init__(self, Error=None):
super(ErrorsTypeSub, self).__init__(Error, )
supermod.ErrorsType.subclass = ErrorsTypeSub
# end class ErrorsTypeSub
class FreeTextTypeSub(supermod.FreeTextType):
def __init__(self, Language=None, valueOf_=None, extensiontype_=None):
super(FreeTextTypeSub, self).__init__(Language, valueOf_, extensiontype_, )
supermod.FreeTextType.subclass = FreeTextTypeSub
# end class FreeTextTypeSub
class InvoiceResponseReasonTypeSub(supermod.InvoiceResponseReasonType):
def __init__(self, ResponseReasonCode=None, ResponseReasonCodeXPath=None, ResponseReasonComments=None):
super(InvoiceResponseReasonTypeSub, self).__init__(ResponseReasonCode, ResponseReasonCodeXPath, ResponseReasonComments, )
supermod.InvoiceResponseReasonType.subclass = InvoiceResponseReasonTypeSub
# end class InvoiceResponseReasonTypeSub
class LineItemsTypeSub(supermod.LineItemsType):
def __init__(self, LineItemNumber=None, DocumentReference=None, PartnerInformation=None, CustomerSpecificInformation=None, Comment=None, extensiontype_=None):
super(LineItemsTypeSub, self).__init__(LineItemNumber, DocumentReference, PartnerInformation, CustomerSpecificInformation, Comment, extensiontype_, )
supermod.LineItemsType.subclass = LineItemsTypeSub
# end class LineItemsTypeSub
class MeasurementTypeSub(supermod.MeasurementType):
def __init__(self, Measurement=None, UnitOfMeasureCode=None):
super(MeasurementTypeSub, self).__init__(Measurement, UnitOfMeasureCode, )
supermod.MeasurementType.subclass = MeasurementTypeSub
# end class MeasurementTypeSub
class MonetaryTypeSub(supermod.MonetaryType):
def __init__(self, MonetaryAmount=None, CurrencyCode=None, extensiontype_=None):
super(MonetaryTypeSub, self).__init__(MonetaryAmount, CurrencyCode, extensiontype_, )
supermod.MonetaryType.subclass = MonetaryTypeSub
# end class MonetaryTypeSub
class OrderChangeCancelLineItemsTypeSub(supermod.OrderChangeCancelLineItemsType):
def __init__(self, LineItemNumber=None, DocumentReference=None, PartnerInformation=None, CustomerSpecificInformation=None, Comment=None, PurchaseOrderReferenceInformation=None, LineItemRequestedAction=None, AccompanyingSampleCode=None):
super(OrderChangeCancelLineItemsTypeSub, self).__init__(LineItemNumber, DocumentReference, PartnerInformation, CustomerSpecificInformation, Comment, PurchaseOrderReferenceInformation, LineItemRequestedAction, AccompanyingSampleCode, )
supermod.OrderChangeCancelLineItemsType.subclass = OrderChangeCancelLineItemsTypeSub
# end class OrderChangeCancelLineItemsTypeSub
class OrderStatusLineItemsRequestTypeSub(supermod.OrderStatusLineItemsRequestType):
def __init__(self, LineItemNumber=None, DocumentReference=None, PartnerInformation=None, CustomerSpecificInformation=None, Comment=None):
super(OrderStatusLineItemsRequestTypeSub, self).__init__(LineItemNumber, DocumentReference, PartnerInformation, CustomerSpecificInformation, Comment, )
supermod.OrderStatusLineItemsRequestType.subclass = OrderStatusLineItemsRequestTypeSub
# end class OrderStatusLineItemsRequestTypeSub
class OrderLineItemsTypeSub(supermod.OrderLineItemsType):
def __init__(self, LineItemNumber=None, DocumentReference=None, PartnerInformation=None, CustomerSpecificInformation=None, Comment=None, RevisionNumber=None, ProductInformation=None, OrderQuantity=None, PackagingQuantity=None, LineItemPurposeCode=None, EngineeringChangeOrderIdentifier=None, BatchNumber=None, CountryOfOrigin=None, CountryOfFinalDestination=None, DeliveryTolerances=None, ShipmentTerms=None, ScheduleDateTime=None, ScheduleDateTimeRange=None, TransportInformation=None, RequestedDocument=None, Routing=None, SpecialServicesRequest=None, PackListRequirements=None, QuoteIdentifier=None, Label=None, ImportLicenseNeededFlag=None, ImportLicenseAvailableFlag=None, AccompanyingSampleCode=None, JobLocationClassCode=None, extensiontype_=None):
super(OrderLineItemsTypeSub, self).__init__(LineItemNumber, DocumentReference, PartnerInformation, CustomerSpecificInformation, Comment, RevisionNumber, ProductInformation, OrderQuantity, PackagingQuantity, LineItemPurposeCode, EngineeringChangeOrderIdentifier, BatchNumber, CountryOfOrigin, CountryOfFinalDestination, DeliveryTolerances, ShipmentTerms, ScheduleDateTime, ScheduleDateTimeRange, TransportInformation, RequestedDocument, Routing, SpecialServicesRequest, PackListRequirements, QuoteIdentifier, Label, ImportLicenseNeededFlag, ImportLicenseAvailableFlag, AccompanyingSampleCode, JobLocationClassCode, extensiontype_, )
supermod.OrderLineItemsType.subclass = OrderLineItemsTypeSub
# end class OrderLineItemsTypeSub
class PartnerConfirmationStatusTypeSub(supermod.PartnerConfirmationStatusType):
def __init__(self, PartnerInformation=None, ConfirmationDateTime=None, ConfirmationStatusCode=None):
super(PartnerConfirmationStatusTypeSub, self).__init__(PartnerInformation, ConfirmationDateTime, ConfirmationStatusCode, )
supermod.PartnerConfirmationStatusType.subclass = PartnerConfirmationStatusTypeSub
# end class PartnerConfirmationStatusTypeSub
class PartnerEventActionSub(supermod.PartnerEventAction):
def __init__(self, PartnerInformation=None, EventDateTime=None):
super(PartnerEventActionSub, self).__init__(PartnerInformation, EventDateTime, )
supermod.PartnerEventAction.subclass = PartnerEventActionSub
# end class PartnerEventActionSub
class PipelineEventInformationTypeSub(supermod.PipelineEventInformationType):
def __init__(self, PipelineEvent=None, PipelineEventVolumeAffect=None, PipelineEventStatus=None, PipelineEventDescription=None, PipelineCustodyEvent=None, AllowPartnerChange=None, AffectShipperInventory=None):
super(PipelineEventInformationTypeSub, self).__init__(PipelineEvent, PipelineEventVolumeAffect, PipelineEventStatus, PipelineEventDescription, PipelineCustodyEvent, AllowPartnerChange, AffectShipperInventory, )
supermod.PipelineEventInformationType.subclass = PipelineEventInformationTypeSub
# end class PipelineEventInformationTypeSub
class ProductCharacteristicsTypeSub(supermod.ProductCharacteristicsType):
def __init__(self, DyesFlag=None, OXY=None, CetaneOctane=None, FungibleSegregated=None, SulfurContent=None, RVP=None, Additized=None, OxygenatePercent=None, RFGFlag=None):
super(ProductCharacteristicsTypeSub, self).__init__(DyesFlag, OXY, CetaneOctane, FungibleSegregated, SulfurContent, RVP, Additized, OxygenatePercent, RFGFlag, )
supermod.ProductCharacteristicsType.subclass = ProductCharacteristicsTypeSub
# end class ProductCharacteristicsTypeSub
class ProductTypeSub(supermod.ProductType):
def __init__(self, ProductCode=None, ProductName=None, ProductDescription=None, GrossQuantity=None, Temperature=None, Gravity=None, NetQuantity=None, ProductCharacteristics=None):
super(ProductTypeSub, self).__init__(ProductCode, ProductName, ProductDescription, GrossQuantity, Temperature, Gravity, NetQuantity, ProductCharacteristics, )
supermod.ProductType.subclass = ProductTypeSub
# end class ProductTypeSub
class QuantityTypeSub(supermod.QuantityType):
def __init__(self, Quantity=None, UnitOfMeasureCode=None):
super(QuantityTypeSub, self).__init__(Quantity, UnitOfMeasureCode, )
supermod.QuantityType.subclass = QuantityTypeSub
# end class QuantityTypeSub
class ReceiptLineItemsTypeSub(supermod.ReceiptLineItemsType):
def __init__(self, LineItemNumber=None, DocumentReference=None, PartnerInformation=None, CustomerSpecificInformation=None, Comment=None, StorageTankIdentifier=None):
super(ReceiptLineItemsTypeSub, self).__init__(LineItemNumber, DocumentReference, PartnerInformation, CustomerSpecificInformation, Comment, StorageTankIdentifier, )
supermod.ReceiptLineItemsType.subclass = ReceiptLineItemsTypeSub
# end class ReceiptLineItemsTypeSub
class SuccessTypeSub(supermod.SuccessType):
def __init__(self):
super(SuccessTypeSub, self).__init__()
supermod.SuccessType.subclass = SuccessTypeSub
# end class SuccessTypeSub
class TaxTypeSub(supermod.TaxType):
def __init__(self, TaxTypeCode=None, MixedRateIndicator=None, TaxIdentifierNumber=None, TaxExemptCode=None, TaxLocation=None, TaxRate=None, TaxBasisAmount=None, TaxAmount=None, TaxReference=None, DeferredAmount=None, extensiontype_=None):
super(TaxTypeSub, self).__init__(TaxTypeCode, MixedRateIndicator, TaxIdentifierNumber, TaxExemptCode, TaxLocation, TaxRate, TaxBasisAmount, TaxAmount, TaxReference, DeferredAmount, extensiontype_, )
supermod.TaxType.subclass = TaxTypeSub
# end class TaxTypeSub
class TerminalInformationTypeSub(supermod.TerminalInformationType):
def __init__(self, TerminalSPLC=None, TerminalTimeZone=None, TerminalControlNumber=None, TerminalName=None, EPAFacilityID=None, EPAEntityID=None, AddressInformation=None):
super(TerminalInformationTypeSub, self).__init__(TerminalSPLC, TerminalTimeZone, TerminalControlNumber, TerminalName, EPAFacilityID, EPAEntityID, AddressInformation, )
supermod.TerminalInformationType.subclass = TerminalInformationTypeSub
# end class TerminalInformationTypeSub
class WarningsTypeSub(supermod.WarningsType):
def __init__(self, Warning=None):
super(WarningsTypeSub, self).__init__(Warning, )
supermod.WarningsType.subclass = WarningsTypeSub
# end class WarningsTypeSub
class WellIdentifierTypeSub(supermod.WellIdentifierType):
def __init__(self, wellIdentifierIndicator=None, valueOf_=None):
super(WellIdentifierTypeSub, self).__init__(wellIdentifierIndicator, valueOf_, )
supermod.WellIdentifierType.subclass = WellIdentifierTypeSub
# end class WellIdentifierTypeSub
class AdvisingBankTypeSub(supermod.AdvisingBankType):
def __init__(self, PartnerInformation=None):
super(AdvisingBankTypeSub, self).__init__(PartnerInformation, )
supermod.AdvisingBankType.subclass = AdvisingBankTypeSub
# end class AdvisingBankTypeSub
class LetterOfCreditQuantityTypeSub(supermod.LetterOfCreditQuantityType):
def __init__(self, MinimumQuantity=None, ActualQuantity=None, MaximumQuantity=None):
super(LetterOfCreditQuantityTypeSub, self).__init__(MinimumQuantity, ActualQuantity, MaximumQuantity, )
supermod.LetterOfCreditQuantityType.subclass = LetterOfCreditQuantityTypeSub
# end class LetterOfCreditQuantityTypeSub
class OrderResponseLineItemsTypeSub(supermod.OrderResponseLineItemsType):
def __init__(self, LineItemNumber=None, DocumentReference=None, PartnerInformation=None, CustomerSpecificInformation=None, Comment=None, RevisionNumber=None, ProductInformation=None, OrderQuantity=None, PackagingQuantity=None, LineItemPurposeCode=None, EngineeringChangeOrderIdentifier=None, BatchNumber=None, CountryOfOrigin=None, CountryOfFinalDestination=None, DeliveryTolerances=None, ShipmentTerms=None, ScheduleDateTime=None, ScheduleDateTimeRange=None, TransportInformation=None, RequestedDocument=None, Routing=None, SpecialServicesRequest=None, PackListRequirements=None, QuoteIdentifier=None, Label=None, ImportLicenseNeededFlag=None, ImportLicenseAvailableFlag=None, AccompanyingSampleCode=None, JobLocationClassCode=None, StatusCode=None, ResponseReason=None, extensiontype_=None):
super(OrderResponseLineItemsTypeSub, self).__init__(LineItemNumber, DocumentReference, PartnerInformation, CustomerSpecificInformation, Comment, RevisionNumber, ProductInformation, OrderQuantity, PackagingQuantity, LineItemPurposeCode, EngineeringChangeOrderIdentifier, BatchNumber, CountryOfOrigin, CountryOfFinalDestination, DeliveryTolerances, ShipmentTerms, ScheduleDateTime, ScheduleDateTimeRange, TransportInformation, RequestedDocument, Routing, SpecialServicesRequest, PackListRequirements, QuoteIdentifier, Label, ImportLicenseNeededFlag, ImportLicenseAvailableFlag, AccompanyingSampleCode, JobLocationClassCode, StatusCode, ResponseReason, extensiontype_, )
supermod.OrderResponseLineItemsType.subclass = OrderResponseLineItemsTypeSub
# end class OrderResponseLineItemsTypeSub
class ErrorWarningTypeSub(supermod.ErrorWarningType):
def __init__(self, Language=None, Status=None, Code=None, RecordID=None, ShortText=None, Tag=None, Type=None, DocURL=None, valueOf_=None):
super(ErrorWarningTypeSub, self).__init__(Language, Status, Code, RecordID, ShortText, Tag, Type, DocURL, valueOf_, )
supermod.ErrorWarningType.subclass = ErrorWarningTypeSub
# end class ErrorWarningTypeSub
class AdvancedShipmentNoticeLineItemsTypeSub(supermod.AdvancedShipmentNoticeLineItemsType):
def __init__(self, LineItemNumber=None, DocumentReference=None, PartnerInformation=None, CustomerSpecificInformation=None, Comment=None, StorageTankIdentifier=None):
super(AdvancedShipmentNoticeLineItemsTypeSub, self).__init__(LineItemNumber, DocumentReference, PartnerInformation, CustomerSpecificInformation, Comment, StorageTankIdentifier, )
supermod.AdvancedShipmentNoticeLineItemsType.subclass = AdvancedShipmentNoticeLineItemsTypeSub
# end class AdvancedShipmentNoticeLineItemsTypeSub
class TaxSummarySub(supermod.TaxSummary):
def __init__(self, TaxTypeCode=None, MixedRateIndicator=None, TaxIdentifierNumber=None, TaxExemptCode=None, TaxLocation=None, TaxRate=None, TaxBasisAmount=None, TaxAmount=None, TaxReference=None, DeferredAmount=None):
super(TaxSummarySub, self).__init__(TaxTypeCode, MixedRateIndicator, TaxIdentifierNumber, TaxExemptCode, TaxLocation, TaxRate, TaxBasisAmount, TaxAmount, TaxReference, DeferredAmount, )
supermod.TaxSummary.subclass = TaxSummarySub
# end class TaxSummarySub
class SubTotalAmountSub(supermod.SubTotalAmount):
def __init__(self, MonetaryAmount=None, CurrencyCode=None, subTotalIndicator=None):
super(SubTotalAmountSub, self).__init__(MonetaryAmount, CurrencyCode, subTotalIndicator, )
supermod.SubTotalAmount.subclass = SubTotalAmountSub
# end class SubTotalAmountSub
class PriceAmountSub(supermod.PriceAmount):
def __init__(self, MonetaryAmount=None, CurrencyCode=None, definitionOfOther=None, priceType=None):
super(PriceAmountSub, self).__init__(MonetaryAmount, CurrencyCode, definitionOfOther, priceType, )
supermod.PriceAmount.subclass = PriceAmountSub
# end class PriceAmountSub
class OrderStatusResponseLineItemsTypeSub(supermod.OrderStatusResponseLineItemsType):
def __init__(self, LineItemNumber=None, DocumentReference=None, PartnerInformation=None, CustomerSpecificInformation=None, Comment=None, RevisionNumber=None, ProductInformation=None, OrderQuantity=None, PackagingQuantity=None, LineItemPurposeCode=None, EngineeringChangeOrderIdentifier=None, BatchNumber=None, CountryOfOrigin=None, CountryOfFinalDestination=None, DeliveryTolerances=None, ShipmentTerms=None, ScheduleDateTime=None, ScheduleDateTimeRange=None, TransportInformation=None, RequestedDocument=None, Routing=None, SpecialServicesRequest=None, PackListRequirements=None, QuoteIdentifier=None, Label=None, ImportLicenseNeededFlag=None, ImportLicenseAvailableFlag=None, AccompanyingSampleCode=None, JobLocationClassCode=None, StatusCode=None, ResponseReason=None, ProprietaryShipmentTrackingIdentifier=None):
super(OrderStatusResponseLineItemsTypeSub, self).__init__(LineItemNumber, DocumentReference, PartnerInformation, CustomerSpecificInformation, Comment, RevisionNumber, ProductInformation, OrderQuantity, PackagingQuantity, LineItemPurposeCode, EngineeringChangeOrderIdentifier, BatchNumber, CountryOfOrigin, CountryOfFinalDestination, DeliveryTolerances, ShipmentTerms, ScheduleDateTime, ScheduleDateTimeRange, TransportInformation, RequestedDocument, Routing, SpecialServicesRequest, PackListRequirements, QuoteIdentifier, Label, ImportLicenseNeededFlag, ImportLicenseAvailableFlag, AccompanyingSampleCode, JobLocationClassCode, StatusCode, ResponseReason, ProprietaryShipmentTrackingIdentifier, )
supermod.OrderStatusResponseLineItemsType.subclass = OrderStatusResponseLineItemsTypeSub
# end class OrderStatusResponseLineItemsTypeSub
def get_root_tag(node):
    """Map an etree node's tag to ``(tag, generateDS class)``.

    Strips any namespace prefix from ``node.tag`` via ``Tag_pattern_``, then
    resolves the bare tag first through ``GDSClassesMapping`` and, failing
    that, as an attribute of the ``supermod`` module.  Returns ``(tag, None)``
    when the tag is unknown.
    """
    tag = supermod.Tag_pattern_.match(node.tag).groups()[-1]
    # Dict lookup first; fall back to a same-named module attribute.
    # (The original also had a redundant `rootClass = None` that was
    # immediately overwritten — removed.)
    rootClass = supermod.GDSClassesMapping.get(tag)
    if rootClass is None and hasattr(supermod, tag):
        rootClass = getattr(supermod, tag)
    return tag, rootClass
def parse(inFilename):
    """Parse the XML file *inFilename*, echo it to stdout, and return the root object."""
    tree = parsexml_(inFilename)
    node = tree.getroot()
    rootTag, rootClass = get_root_tag(node)
    if rootClass is None:
        # Unrecognised root element: assume a Receipt document.
        rootTag = 'Receipt'
        rootClass = supermod.Receipt
    obj = rootClass.factory()
    obj.build(node)
    tree = None  # drop the DOM so Python can reclaim its memory
    sys.stdout.write('<?xml version="1.0" ?>\n')
    obj.export(sys.stdout, 0, name_=rootTag, namespacedef_='', pretty_print=True)
    return obj
def parseEtree(inFilename):
    """Parse *inFilename*, print it as pretty XML, and return (object, etree element)."""
    tree = parsexml_(inFilename)
    node = tree.getroot()
    rootTag, rootClass = get_root_tag(node)
    if rootClass is None:
        # Unrecognised root element: assume a Receipt document.
        rootTag = 'Receipt'
        rootClass = supermod.Receipt
    obj = rootClass.factory()
    obj.build(node)
    tree = None  # drop the DOM so Python can reclaim its memory
    element = obj.to_etree(None, name_=rootTag)
    content = etree_.tostring(
        element, pretty_print=True, xml_declaration=True, encoding="utf-8")
    # NOTE(review): on Python 3 `content` is bytes and this write would raise;
    # this module appears to target Python 2 (see the print statement in usage()).
    sys.stdout.write(content)
    sys.stdout.write('\n')
    return obj, element
def parseString(inString):
    """Parse XML from the string *inString*, echo it to stdout, and return the root object."""
    from StringIO import StringIO  # Python 2 module (io.StringIO on Python 3)
    tree = parsexml_(StringIO(inString))
    node = tree.getroot()
    rootTag, rootClass = get_root_tag(node)
    if rootClass is None:
        # Unrecognised root element: assume a Receipt document.
        rootTag = 'Receipt'
        rootClass = supermod.Receipt
    obj = rootClass.factory()
    obj.build(node)
    tree = None  # drop the DOM so Python can reclaim its memory
    sys.stdout.write('<?xml version="1.0" ?>\n')
    obj.export(sys.stdout, 0, name_=rootTag, namespacedef_='')
    return obj
def parseLiteral(inFilename):
    """Parse *inFilename* and write the tree to stdout as Python literal code."""
    tree = parsexml_(inFilename)
    node = tree.getroot()
    rootTag, rootClass = get_root_tag(node)
    if rootClass is None:
        # Unrecognised root element: assume a Receipt document.
        rootTag = 'Receipt'
        rootClass = supermod.Receipt
    obj = rootClass.factory()
    obj.build(node)
    tree = None  # drop the DOM so Python can reclaim its memory
    sys.stdout.write('#from Receipt import *\n\n')
    sys.stdout.write('import Receipt as model_\n\n')
    sys.stdout.write('rootObj = model_.Receipt(\n')
    obj.exportLiteral(sys.stdout, 0, name_="Receipt")
    sys.stdout.write(')\n')
    return obj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
    """Print command-line usage and exit with status 1."""
    # Fix: `print USAGE_TEXT` is Python-2-only syntax; the parenthesized
    # call form behaves identically on Python 2 and 3.
    print(USAGE_TEXT)
    sys.exit(1)
def main():
    """Command-line entry point: parse the XML file named by the sole argument."""
    args = sys.argv[1:]
    if len(args) != 1:
        # Wrong number of arguments: print usage and exit(1).
        usage()
    infilename = args[0]
    root = parse(infilename)
if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
| [
"tosako@wgen.net"
] | tosako@wgen.net |
2eba86a026b792054824c56dfc44b4c5a307bc4b | 5f15882db4d0c7d4cd241c4890bf540af5ca2bab | /page/urls.py | d19ced9693f241b9dd767ec1905d6f0a74068af8 | [] | no_license | alexaklex/nviaDjan | b4d7439a1e2fdf88e2a340965cac3da4fe81e675 | caae034947977e08914c8790067886cc3e6156d1 | refs/heads/master | 2020-03-22T21:02:17.307744 | 2018-07-17T08:36:11 | 2018-07-17T08:36:11 | 140,651,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | # -*- coding: utf-8 -*-
from django.shortcuts import render,redirect
from .models import Page
from django.http import HttpResponse
from django.conf.urls import url
from .import views
from articls import views as views_art_fr_detail
# 'app_name' sets the URL namespace for this app: it lets reverse() and
# {% url %} resolve these routes as 'pages:<name>' (e.g. 'pages:detail_about').
app_name = 'pages'
urlpatterns = [
    # url(r'^$', views.articl_list, name='list'),
    url(r'^about/$', views.page_about, name='detail_about'),
    # Fix: the 'detail_contact' route was registered twice; duplicate removed.
    url(r'^contact/$', views.page_contact, name='detail_contact'),
    # Catch-all slug route; keep it last so it cannot shadow the routes above.
    url(r'^(?P<slug>[\w-]+)/$', views_art_fr_detail.sidebar_detail, name='sidebar_detail'),
]
| [
"neon03-uu@yandex.ru"
] | neon03-uu@yandex.ru |
72b08ba56a9dc15abc1781680255be6764db0a93 | 4a0c596fbe076c6cb15a2f040780b5ef8facb20e | /Tuenti2020/3/program.py | e0ee7c9e4f2351b6b507e61bc65776e5c056ab5a | [] | no_license | Ludvins/Programming-Contests | 27f68e1a41601c6d50d6465af2176862980a1974 | ac3686d3a9f89f782bcb42a07deabc089327fc6c | refs/heads/master | 2022-07-06T01:06:44.129616 | 2020-05-19T07:10:41 | 2020-05-19T07:10:41 | 265,170,793 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | import re
# Strip the Gutenberg text down to lowercase Spanish words of 3+ letters,
# writing the cleaned lines to 'tratado'.
# Fix: the output handle was opened with a bare open() and only closed at the
# end, so an exception mid-loop leaked it; both files now use `with`.
with open('pg17013.txt', 'r') as fileinput, open('tratado', 'w') as outfile:
    for line in fileinput:
        line = line.lower()
        # Replace anything that is not a lowercase Spanish letter with a space.
        line = re.sub(r"[^a-zñáéíóúü]", ' ', line)
        # Drop one-letter words, then two-letter words (word-boundary matches).
        line = re.sub(r"\b[a-zñáéíóúü]{1}\b", '', line)
        line = re.sub(r"\b[a-zñáéíóúü]{2}\b", '', line)
        outfile.write(line)
| [
"theludvins@gmail.com"
] | theludvins@gmail.com |
3b72b403646bf3f853538b8554e1a1f6c5d556c5 | 6135c1840a84780f4ad6137ae808ac29b228d49e | /ClientStorage.py | a5cd5f85fcb5d9c5b6650b2447d52d22110fb32a | [
"MIT"
] | permissive | jonasswa/Guess-My-Search | 609d15b9c7524c3abbed9cfe01c94fdc21d28f3c | 2b36183a66bf55b163fc647117ee1b9c8f9cd543 | refs/heads/master | 2021-10-26T11:44:12.401778 | 2019-04-12T09:25:02 | 2019-04-12T09:25:02 | 113,985,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,742 | py | import uuid
from threading import RLock
class User:
    """A connected client, identified by a socket id and a stable unique token."""
    def __init__(self, sid, name = ''):
        '''
        Each user is allowed to spawn nrOfThreadsAllowed threads.
        '''
        self.sid = sid
        self.name = name
        self.nrOfSidUpdates = 0           # how many times the sid has been refreshed
        self.uniqueID = uuid.uuid4().hex  # stable identifier across reconnects
        self.lock = RLock()               # guards the thread counter below
        self.nrOfThreadsSpawned = 0
        self.nrOfThreadsAllowed = 1
        self.gameObject = None
        self.playerObject = None
    def add_Name(self, name):
        """Set the display name."""
        self.name = name
    def update_Sid(self, sid):
        """Store a new socket id (e.g. after a reconnect) and count the update."""
        self.sid = sid
        # Fix: the original had the bare expression `self.nrOfSidUpdates`,
        # which did nothing; the counter is now actually incremented.
        self.nrOfSidUpdates += 1
    def update_Thread_Number(self, increase, verbose = False):
        """Atomically adjust the spawned-thread counter.

        Returns False when an increase would exceed nrOfThreadsAllowed,
        True otherwise (after applying the change).
        """
        with self.lock:
            if self.nrOfThreadsSpawned >= self.nrOfThreadsAllowed and increase:
                if verbose: print('User {} is not allowed to spawn another thread'.format(self.name))
                return False
            elif increase:
                if verbose: print('User {} just spawned a thread'.format(self.name))
                self.nrOfThreadsSpawned += 1
                return True
            else:
                if verbose: print('User {} just removed a thread'.format(self.name))
                self.nrOfThreadsSpawned -= 1
                return True
    def resetUser(self):
        """Clear game state and name, keeping sid and uniqueID intact."""
        self.gameObject=None
        self.playerObject=None
        self.name=''
class Clients:
    """Registry of connected User objects with lookup by name, sid, or uniqueID."""
    def __init__(self):
        self.users = []
        self.nrOfClients = 0
    def add_User(self, sid, name = ''):
        """Create a User, register it, and return it."""
        user = User(sid,name)
        self.users.append(user)
        self.nrOfClients += 1
        return user
    def removeUser(self, name):
        """Remove the first user with the given name, if present.

        Fixes the original, which iterated `range(self.users)` (TypeError on a
        list), decremented a local `nrOfClients` instead of the attribute
        (NameError), and kept iterating after the deletion.
        """
        for i, user in enumerate(self.users):
            if user.name == name:
                del self.users[i]
                self.nrOfClients -= 1
                return
    def find_User_By_uniqueID(self, uniqueID):
        """Return the user with this uniqueID, or None."""
        for u in self.users:
            if u.uniqueID == uniqueID:
                return u
        return None
    def find_User_By_Name(self, name):
        """Return the first user with this name, or None."""
        for u in self.users:
            if u.name == name:
                return u
        return None
    def find_User_By_Sid(self, sid):
        """Return the user with this socket id, or None."""
        for u in self.users:
            if u.sid == sid:
                return u
        return None
    def __str__(self):
        ret = ''
        ret+=('_________________CLIENTS_____________\n')
        for u in self.users:
            ret+=('-------------------------------------\n')
            ret+=('User name: {}\n'.format(u.name))
            ret+=('SID: {}\n'.format(u.sid))
            ret+=('UniqueID: {}\n'.format(u.uniqueID))
            ret+=('Nr. threads: {}\n'.format(u.nrOfThreadsSpawned))
        ret+=('_____________________________________')
        return ret
| [
"jonaswaaler@gmail.com"
] | jonaswaaler@gmail.com |
94912c9ed339cdf676610f0ca9397675dcf1e0ec | f9a8ee37334771f37edda863db08a7dcccc9522f | /AtCoder/Contest/ABC/ZONeエナジー プログラミングコンテスト/abc200E.py | dc144abd33d75de438010a0aa9fffe4eff052492 | [] | no_license | shimmee/competitive-programming | 25b008ee225858b7b208c3f3ca7681e33f6c0190 | 894f0b7d557d6997789af3fcf91fe65a33619080 | refs/heads/master | 2023-06-07T13:07:17.850769 | 2021-07-05T17:20:47 | 2021-07-05T17:20:47 | 331,076,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | # ZONeエナジー プログラミングコンテスト E
# URL:
# Date:
# ---------- Ideas ----------
#
# ------------------- Solution --------------------
#
# ------------------- Answer --------------------
#code:python
# ------------------ Sample Input -------------------
# ----------------- Length of time ------------------
#
# -------------- Editorial / my impression -------------
#
# ----------------- Category ------------------
#AtCoder
#AC_with_editorial #解説AC
#wanna_review #hard復習 #復習したい
| [
"shinmeikeita@gmail.com"
] | shinmeikeita@gmail.com |
95437a2cd2491ee6d9646bbe37d75d3aa1ea0f5c | 68dbc76a69e1994c38013a637f96ba8df448c255 | /practice/medium/2018/AbsolutePermutation.py | 11e946d28135e2576bbd7da71266dbc3443f39af | [] | no_license | mgs95/hackerrank | 61da322a93fa67557d9a2e03fab8d7c26a6fcb63 | 1a22365e21f746e2e5894728d21646edc4ada573 | refs/heads/master | 2020-05-15T04:55:32.132082 | 2019-04-21T14:04:00 | 2019-04-21T14:04:00 | 182,096,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | """
https://www.hackerrank.com/challenges/absolute-permutation/problem
Score: 40/40
Submitted: 2018
"""
# HackerRank "Absolute Permutation": build a permutation P of 1..n with
# |P[i] - i| == k for every position, or print -1 if none exists.
for _ in range(int(input())):
    n, k = map(int, input().split())
    numbers = set(range(1, n+1))  # values not yet placed
    r = []
    end = False  # NOTE(review): unused leftover flag
    # Greedy from position n down to 1: prefer i + k, fall back to i - k.
    for i in range(n, 0, -1):
        a, b = i + k, i - k
        if a in numbers:
            r.append(a)
            numbers.remove(a)
        elif b in numbers:
            r.append(b)
            numbers.remove(b)
        else:
            # Neither candidate is available: no valid permutation.
            print(-1)
            break
    else:
        # Loop completed without break: r holds positions n..1, so reverse.
        print(' '.join(list(map(str, r[::-1]))))
| [
"mariano.gonzalezsalazar@telefonica.com"
] | mariano.gonzalezsalazar@telefonica.com |
8843420f8af40d4f0e03c30593aa1bd0368cafd9 | 530aafd0285ba149c144720d4c036d4a145f2f3e | /Code/Python Code/FactoryMethod Pattern/generalExample.py | 5c9bf5e7ae00512b7e8e2508a80897ce66fedc18 | [] | no_license | HowDoIGitHelp/CMSC23MDNotes | 2d57f74756aeb39638e753f1df1e7a00ace5430d | c31eda34a2682d615de87ec68f8d611cb22cc980 | refs/heads/master | 2023-07-19T21:20:29.489797 | 2021-09-02T15:38:37 | 2021-09-02T15:38:37 | 294,376,674 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | from abc import ABC, abstractmethod
class Product(ABC):
@abstractmethod
def show(self) -> str:
pass
class DefaultProduct(Product):
def show(self) -> str:
print("I'm an instance of default product")
class SpecialProductA(Product):
def show(self) -> str:
print("I'm an instance of special product a")
class SpecialProductB(Product):
def show(self) -> str:
print("I'm an instance of special product b")
class Factory:
def printANewProduct(self):
p = self.newProduct()
p.show()
def newProduct(self) -> Product:
return DefaultProduct()
class SpecialFactoryA(Factory):
def newProduct(self) -> Product:
return SpecialProductA()
class SpecialFactoryB(Factory):
def newProduct(self) -> Product:
return SpecialProductB()
f:Factory = Factory()
f.printANewProduct()
g:Factory = SpecialFactoryA()
g.printANewProduct()
h:Factory = SpecialFactoryB()
h.printANewProduct()
| [
"rrabella@up.edu.ph"
] | rrabella@up.edu.ph |
c723f5fdff701d3e8e5da3916b313407906b7a1e | 377e3a552fb807febc18ce036af77edbce93ca19 | /searching algo/exponential_search.py | 776eff5ca5b6589ddfd1b7d6f43e0a9e8c67c45e | [] | no_license | souravs17031999/100dayscodingchallenge | 940eb9b6d6037be4fc0dd5605f9f808614085bd9 | d05966f3e6875a5ec5a8870b9d2627be570d18d9 | refs/heads/master | 2022-10-29T11:05:46.762554 | 2022-09-28T13:04:32 | 2022-09-28T13:04:32 | 215,993,823 | 44 | 12 | null | 2022-08-18T14:58:50 | 2019-10-18T09:55:03 | Python | UTF-8 | Python | false | false | 1,882 | py | # Program to search the element using exponential search algorithm
# IDEA: logic of the algorithm is to use the fact that if we are able to find the bounds of the answer where it may lie
# and then using binary search algorithm because that range is already ordered, and we just need to check our answer (if it actually exists)
# TIME : 0(lg(i)) where i is the index of the element to be existence (if it is in the list), assuming the list is already sorted (in comparison to binary
# search , it is much faster especially in case if the key is near to the first element)
def binary_search(l, start, end, key):
    """Iterative binary search over l[start..end] (inclusive).

    Returns the index of key within that window, or -1 if absent.
    """
    lo, hi = start, end
    while lo <= hi:
        mid = (lo + hi) // 2
        probe = l[mid]
        if probe == key:
            return mid
        if probe < key:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
# function to implement exponential search
def exponential_search(arr, key):
    """Search sorted ``arr`` for ``key`` via exponential range-finding.

    Doubles an index until arr[i] exceeds key (or the last index is hit),
    then binary-searches the bracketed range arr[i//2 .. i].  O(log i) where
    i is key's position, which beats plain binary search when the key sits
    near the front of the list.

    Returns the index of key, or -1 if absent.
    Fix: removed a leftover debug ``print(i)`` that wrote to stdout on every
    doubling step.
    """
    # Base case: key at the very first position.
    if arr[0] == key:
        return 0
    i = 1
    n = len(arr)
    # Grow the upper bound exponentially while arr[i] is still <= key,
    # keeping i inside the list (i < n - 1 guards against overshooting).
    while i < n - 1 and arr[i] <= key:
        i *= 2
    # The previous bound i//2 satisfied arr[i//2] <= key, so the answer can
    # only lie in [i//2, i]; binary search that ordered window.
    return binary_search(arr, i//2, i, key)
# driver function
# Smoke test: search a small sorted list and report the result.
if __name__ == '__main__':
    arr = [2, 3, 4, 10, 20]
    key = 10
    index = exponential_search(arr, key)
    if index == -1:
        print("element not found !")
    else:
        print(f"element found at : {index}")
| [
"souravs_1999@rediffmail.com"
] | souravs_1999@rediffmail.com |
1345cea1e9e2400bea7ae39e7f02944674c890fd | 10f5de545bb35c153cc8c783049cd09a83c0d7e4 | /persistentdict/const.py | e46cb148ba300431e2275510586a365e425367c6 | [
"MIT"
] | permissive | silkyanteater/persistentdict | a6fbd0e2c373c96267240ab10e3cc7d6c46666de | a56a399ebfa76a7111de475123f773ceb151a4e4 | refs/heads/master | 2022-12-31T13:35:11.537585 | 2020-10-04T10:34:09 | 2020-10-04T10:34:09 | 262,553,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py |
# Name of the YAML file describing the SQLite source (consumed elsewhere).
SQLITE_SOURCE_FILE = 'sqlite_source.yaml'
| [
"cyclopesrufus@gmail.com"
] | cyclopesrufus@gmail.com |
ff4e0d6e7c0b10941ced2e6a74ccfc027bb1206b | b50f8de2f35858f866b8f7d54da2994e5b59a391 | /src/dataload/sources/entrez/entrez_genomic_pos.py | 839f8410cfc258609ad333cca65c85f34f67dca0 | [
"Apache-2.0"
] | permissive | SuLab/mygene.info | 455127c4e0bcae61eb36d76496dfc4139be0f584 | 506d7b1d2a7e4de55bdebba8671dc8a09fc303b2 | refs/heads/master | 2020-06-03T11:27:34.021692 | 2017-06-12T20:58:45 | 2017-06-12T20:58:45 | 54,933,630 | 20 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,148 | py | '''
Populates MICROBE gene entries with genomic position data
Currently updates the 120 microbial taxids that are NCBI Reference Sequences
run get_ref_microbe_taxids function to get an updated file for TAXIDS_FILE
when it's necessary.
'''
import os.path
from biothings.utils.common import (dump, loadobj, get_timestamp)
from utils.dataload import (tab2list, load_start, load_done)
from dataload import get_data_folder
DATA_FOLDER = get_data_folder('entrez')
print('DATA_FOLDER: ' + DATA_FOLDER)  # NOTE(review): prints at import time
# Hub metadata: target collection for documents yielded by load_genedoc.
__metadata__ = {
    '__collection__': 'entrez_genomic_pos',
}
# Pre-built list of reference-microbe taxids (see get_ref_microbe_taxids).
TAXIDS_FILE = os.path.join(DATA_FOLDER, "../ref_microbe_taxids.pyobj")
# NCBI gene2refseq dump parsed by load_genedoc.
DATAFILE = os.path.join(DATA_FOLDER, 'gene/gene2refseq.gz')
def load_genedoc(self):
    """
    Loads gene data from NCBI's refseq2gene.gz file.
    Parses it based on genomic position data and refseq status provided by the
    list of taxids from get_ref_microbe_taxids() as lookup table
    :return:
    """
    # Restrict parsing to the curated set of reference-microbe taxids.
    taxids = loadobj(TAXIDS_FILE)
    taxid_set = set(taxids)
    load_start(DATAFILE)
    def _includefn(ld):
        # Keep only rows whose first column (taxid) is in the lookup set.
        return ld[0] in taxid_set # match taxid from taxid_set
    # Columns pulled from gene2refseq: presumably taxid, GeneID, chromosome
    # accession, start, end and orientation -- TODO confirm against the
    # gene2refseq README.
    cols_included = [0, 1, 7, 9, 10, 11] # 0-based col idx
    gene2genomic_pos_li = tab2list(DATAFILE, cols_included, header=1,
                                   includefn=_includefn)
    count = 0
    last_id = None
    for gene in gene2genomic_pos_li:
        count += 1
        # '+' maps to strand 1, anything else to -1.
        strand = 1 if gene[5] == '+' else -1
        _id = gene[1]
        mgi_dict = {
            '_id': _id,
            'genomic_pos': {
                'start': int(gene[3]),
                'end': int(gene[4]),
                'chr': gene[2],
                'strand': strand
            }
        }
        if _id != last_id:
            # rows with dup _id will be skipped
            # (only consecutive duplicates are detected -- assumes the input
            # is grouped by gene id; TODO confirm)
            yield mgi_dict
        last_id = _id
    load_done('[%d]' % count)
def get_mapping(self):
    """Return the Elasticsearch mapping for the genomic_pos field."""
    genomic_pos = {
        "dynamic": False,
        "type": "nested",
        "properties": {
            "chr": {"type": "string"},
            "start": {"type": "long"},
            "end": {"type": "long"},
            # strand is stored but not indexed/searchable
            "strand": {
                "type": "byte",
                "index": "no",
            },
        },
    }
    return {"genomic_pos": genomic_pos}
def get_ref_microbe_taxids():
    """
    Downloads the latest bacterial genome assembly summary from the NCBI genome
    ftp site and generate a list of taxids of the bacterial reference genomes.
    :return:
    """
    # Local imports keep the network-capable deps out of module import time.
    import urllib.request
    import csv
    urlbase = 'ftp://ftp.ncbi.nlm.nih.gov'
    urlextension = '/genomes/refseq/bacteria/assembly_summary.txt'
    # Blocking FTP download of the whole assembly summary file.
    assembly = urllib.request.urlopen(urlbase + urlextension)
    datareader = csv.reader(assembly.read().decode().splitlines(), delimiter="\t")
    taxid = []
    for row in datareader:
        # Skip comment/header lines (single-column rows starting with '#').
        if len(row) == 1 and row[0].startswith("#"):
            continue
        # Presumably column 4 is the refseq category and column 5 the taxid
        # -- TODO confirm against the assembly_summary.txt header.
        if row[4] in ['reference genome','representative genome']:
            taxid.append(row[5])
    ts = get_timestamp()
    # Side effect: snapshot the list to a timestamped .pyobj file.
    dump(taxid, "ref_microbe_taxids_{}.pyobj".format(ts))
| [
"slelong@scripps.edu"
] | slelong@scripps.edu |
afda7ea8d6078a83cf8e5fa83040bf0667fd332b | 42047ac9266bb2a9912d1fb74d7efc134d70c464 | /manage.py | d74205e87a69ae5293e887c615ec7d2f2035cfec | [] | no_license | Ismailtlem/django-cms-3 | 1139bb39e052736ebef996dc4681f06dcd3e5af6 | cf12680fcfc7e05d607600f8632c43b487322e6a | refs/heads/main | 2023-05-30T23:21:10.958436 | 2021-07-09T22:27:32 | 2021-07-09T22:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(filename='.environment'))
def main():
    """Run administrative tasks."""
    # Default settings module; a pre-set DJANGO_SETTINGS_MODULE wins.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Opt-in cleanup: delete stale .pyc files next to this script before
    # dispatching (skipped when the explicit clean_pyc command is invoked).
    if os.environ.get('CLEAN_PYC') == 'yes' and 'clean_pyc' not in sys.argv:
        # sys.stdout.write('\nCleaning .pyc files...')
        proj, _ = os.path.split(__file__)
        # NOTE(review): shells out via os.system with the script's directory
        # interpolated into the command; a path containing a single quote
        # would break it -- fine for trusted checkouts, worth confirming.
        cmd = "find '{d}' -name '*.pyc' -delete".format(d=proj or '.')
        os.system(cmd)
        # sys.stdout.write('done\n\n')
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"mark.walker@realbuzz.com"
] | mark.walker@realbuzz.com |
0061991773a0ac089e54b264741a33de2c891e11 | ed316f531f3dcbbf1f393b33e5c8087cc65f08c5 | /games/claw/game.py | ac31809fb845e9ce83dd1ac86dfa1d7b6fe046de | [
"MIT"
] | permissive | IvarK/surrortg-sdk | 5aa094dfa56237eed910f3c6acbda50d2aa57567 | c68940357c330c2ef8a8f92265f776b038d227a3 | refs/heads/main | 2023-04-11T09:35:25.298239 | 2021-04-20T13:56:54 | 2021-04-20T13:56:54 | 313,255,818 | 0 | 2 | MIT | 2021-04-20T13:56:55 | 2020-11-16T09:48:30 | Python | UTF-8 | Python | false | false | 5,484 | py | import logging
import asyncio
import pigpio
from surrortg import Game
from surrortg.inputs import Directions
from games.claw.claw_joystick import ClawJoystick
from games.claw.claw_button import ClawButton
from games.claw.claw_toy_sensor import ClawToySensor
from games.claw.claw_internal_toy_sensor import ClawInternalToySensor
from games.claw.config import (
ABSOLUTE_GAME_MAX_TIME,
TOY_WAIT_TIME,
USE_INTERNAL_IR_SENSOR,
JOYSTICK_DISABLE_PIN,
STOP_TIME_BEFORE_BTN_PRESS,
AUTOMATIC_MOVE_TIME,
WAIT_TIME_AFTER_SENSOR_BLOCKED,
BLOCKED_SENSOR_PING_TIME,
)
class ClawGame(Game):
    """surrortg Game driving a physical claw machine through pigpio GPIO."""
    async def on_init(self):
        """One-time hardware setup: GPIO, joystick, button, toy sensor."""
        # connect to pigpio daemon
        self.pi = pigpio.pi()
        if not self.pi.connected:
            raise RuntimeError("Could not connect to pigpio")
        # init joystick splitter, enable physical joystick by default
        self.pi.set_mode(JOYSTICK_DISABLE_PIN, pigpio.OUTPUT)
        self.pi.write(JOYSTICK_DISABLE_PIN, 0)
        # init claw machine parts
        self.joystick = ClawJoystick(self.pi)
        self.button = ClawButton(
            pi=self.pi,
            pre_press_action=self.pre_button_press,
            post_press_action=self.post_button_press,
        )
        self.toy_sensor = (
            ClawInternalToySensor(io=self.io, pi=self.pi)
            if USE_INTERNAL_IR_SENSOR
            else ClawToySensor(self.io)
        )
        # init claw machine state variables
        self.ready_for_next_game = False
        self.button_pressed = False
        self.io.register_inputs(
            {"joystick_main": self.joystick, "button_main": self.button,}
        )
    async def on_prepare(self):
        """Bring the machine to a known-good state before a round starts."""
        await self.joystick.reset()
        # make sure the prize sensor is not blocked
        if self.toy_sensor.get_sensor_state():
            logging.warning(
                "TOY SENSOR BLOCKED, PLEASE REMOVE BLOCKING OBJECTS"
            )
            # only continue game after the blocking objects have been removed
            while True:
                await asyncio.sleep(BLOCKED_SENSOR_PING_TIME)
                if not self.toy_sensor.get_sensor_state():
                    logging.info(
                        f"Toy sensor not stuck anymore, will continue "
                        f"game in {WAIT_TIME_AFTER_SENSOR_BLOCKED} seconds"
                    )
                    await asyncio.sleep(WAIT_TIME_AFTER_SENSOR_BLOCKED)
                    break
        # make sure the state is correct before approving game start
        if not self.ready_for_next_game:
            logging.info("Forcing the ClawMachine ready state, please wait...")
            await self.enable_button()
            await self.button.on()
            await asyncio.sleep(TOY_WAIT_TIME)
            self.ready_for_next_game = True
            logging.info("...ClawMachine ready")
    async def on_pre_game(self):
        """Hand control to the remote player: lock out the cabinet stick."""
        # disable the physical joystick
        self.pi.write(JOYSTICK_DISABLE_PIN, 1)
        await self.enable_button()
        self.io.send_pre_game_ready()
    async def on_start(self):
        """Run the playing phase until the GE cancels us (the normal path)."""
        await self.joystick.reset()
        logging.info("Playing started")
        # set a flag for checking that the game has been finished
        # will be set back to True only if finish_game gets to the end
        self.ready_for_next_game = False
        # this flag makes sure that the button will always be pressed
        # by the player or the GE
        self.button_pressed = False
        # play game until player pushes button or time is up and GE moves to
        # on finish. This game section should never finish by itself
        try:
            await asyncio.sleep(ABSOLUTE_GAME_MAX_TIME)
            logging.warning(
                "ABSOLUTE_GAME_MAX_TIME passed, this should never happen"
            )
            self.io.disable_inputs()
            await self.joystick.reset()
            self.io.send_playing_ended()
        except asyncio.CancelledError:
            logging.info("GE ended playing")
    async def on_finish(self):
        """Drop the claw if needed, score the toy, give the cabinet back."""
        await self.joystick.reset()
        # push the button if not done by the user
        if not self.button_pressed:
            await self.button.on()
        # wait for toy and send result
        await self.toy_sensor.wait_for_toy(TOY_WAIT_TIME)
        # enable physical joystick
        self.pi.write(JOYSTICK_DISABLE_PIN, 0)
        # set flag that game was played until the end so time consuming
        # preparations are not needed in prepare_game
        self.ready_for_next_game = True
    async def enable_button(self):
        """Wiggle the claw so the machine's own timer arms the drop button."""
        # move and stop to start game in the machine timer, because the
        # drop claw button can't be used before moving.
        # 'ur' + 'dl' forces the claw to move regardless of the current
        # position
        for direction in [
            Directions.TOP_RIGHT,
            Directions.MIDDLE,
            Directions.BOTTOM_LEFT,
            Directions.MIDDLE,
        ]:
            self.joystick.move(direction)
            await asyncio.sleep(AUTOMATIC_MOVE_TIME)
    async def pre_button_press(self):
        """Freeze player input and settle the claw before the drop."""
        self.io.disable_inputs()
        await self.joystick.reset()
        await asyncio.sleep(STOP_TIME_BEFORE_BTN_PRESS)
    async def post_button_press(self):
        """Record the press and tell the GE the playing phase is over."""
        self.button_pressed = True
        logging.info("sending playingEnded")
        self.io.send_playing_ended()
    async def on_exit(self, reason, exception):
        """Release the pigpio connection on shutdown."""
        self.pi.stop()
# Instantiate the game and hand control to the surrortg game loop.
ClawGame().run()
| [
"severi@surrogate.tv"
] | severi@surrogate.tv |
7b0f49dc9cb49a930e7f9c4a423880ca89dddec8 | b276b6fb3b0fd07360370ef66208900ad3f78e83 | /home/migrations/0008_league.py | f84686fdd4e68df71650c1305983ce748d2e9a03 | [] | no_license | willshiel/animated-guac | 8f2f9041f7b8efefff9ee2eb1a12a203436c2e58 | 0231c6bfb6bbd65034a7429a0c04c9451451d0aa | refs/heads/master | 2021-01-02T22:33:31.203599 | 2017-10-15T20:55:04 | 2017-10-15T20:55:04 | 99,336,919 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-07-09 19:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.7 (see file header): creates the League model.
    initial = True
    dependencies = [
        # Ordered after the migration that deleted the old Team model.
        ('home', '0007_delete_team'),
    ]
    operations = [
        migrations.CreateModel(
            name='League',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
    ]
| [
"ws248531@muhlenberg.edu"
] | ws248531@muhlenberg.edu |
83a85e711944fda90a525aa8dab51c901f7e2932 | 0662250ca269e8f8f768e31aa3ea43679c74cc10 | /Demo/cPickleOperation.py | e9e4c427fa2b575c920893c89cf1eb238c0ca421 | [] | no_license | HolyQuar/MyRepository | 1b3860ef9af6b985ac3028c325fef7d0e974011e | c6d26a049013a8e8f08e3e69ac61488926463744 | refs/heads/master | 2022-11-12T17:02:33.336145 | 2020-07-04T14:25:46 | 2020-07-04T14:25:46 | 277,091,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | """
--序列化对象使用pickle包
--wb-二进制写出
import cPickle as pickle
d = {url='index.html',title='主题',content='主页'}
pickle.dumps(d)
"""
#encoding=utf-8
import pickle
# Serialize a dict to disk with pickle ('wb' = binary write mode).
d = dict(url='index.html',title='主题',content='主页')
# Bug fix: the write handle was opened without a context manager and only
# closed on the happy path; 'with' guarantees close() even if dump() raises.
with open('D:/Pycharm/Workspace/123/test1.txt','wb') as f:
    pickle.dump(d,f)
import struct  # NOTE(review): unused here -- kept in case other code relies on it
# Read the pickled bytes back line by line and decode them for display.
# NOTE(review): decoding a pickle stream as utf-8 text is lossy by design
# here ('ignore'); pickle.load(f) would round-trip the dict properly.
with open('D:/Pycharm/Workspace/123/test1.txt', 'rb') as f:
    for line in f.readlines():
        data = line.strip()
        print(data.decode('utf8','ignore'))
| [
"2365856524@qq.com"
] | 2365856524@qq.com |
234f352f49e84f52200b7e6ecd0fc1de6e0360d7 | 52728b8b20e62081891735056070f749ea379fb2 | /db_repository/versions/005_migration.py | c35d5fa664af63ee1eac2b5859ef6cb31231e487 | [] | no_license | lazarusvc/aulavara | 399fbf65da7a4a41d6e7391077d04734b8f74ca4 | c62ba0832b5d7ce398f257556f37bc5453a30153 | refs/heads/master | 2023-04-28T13:23:48.835482 | 2015-06-18T23:30:57 | 2015-06-18T23:30:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()   # schema as it exists before this migration
post_meta = MetaData()  # schema as it should exist afterwards
# Old table: dropped by upgrade(), recreated (empty) by downgrade().
images = Table('images', pre_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('file', String),
)
# Target table: upgrade() adds its 'file' column.
projects = Table('projects', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('title', String(length=20)),
    Column('description', String(length=1000)),
    Column('button_list_title', String(length=30)),
    Column('button_list_url', String(length=30)),
    Column('tags', String(length=20)),
    Column('file', Unicode(length=1000)),
)
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # Drop the legacy images table and move file storage onto projects.
    pre_meta.tables['images'].drop()
    post_meta.tables['projects'].columns['file'].create()
def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # NOTE(review): recreates the images table structure only -- any rows
    # dropped by upgrade() are not restored.
    pre_meta.tables['images'].create()
    post_meta.tables['projects'].columns['file'].drop()
| [
"austin.lazarus@gmail.com"
] | austin.lazarus@gmail.com |
00659b690810bf55cfdcc41b775bfe8aa70039fb | 20a6e0291ed4d235879eaafc3daa6b109dc867e0 | /313A_Ilya_And_Bank_Account.py | 0d770b2237f7454fea230e93658affcd608f4ef2 | [] | no_license | Nisarg6892/CodeForces-Codes | fab41a9e548e9531e2b275117189ec4e5b8db837 | a08efc8532575508f626359a0a60d3168f4b088e | refs/heads/master | 2021-01-10T15:06:41.501564 | 2016-01-07T07:51:49 | 2016-01-07T07:51:49 | 49,189,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | n = raw_input()
if int(n) >= 0 :
print n
else :
minimum = min(int(n[-1]), int(n[-2]))
finalString = n[:len(n)-2]+str(minimum)
if finalString == '-0' :
finalString = '0'
print finalString | [
"shah.nisarg6892@gmail.com"
] | shah.nisarg6892@gmail.com |
013c601190b3f5d244d6e79b1b907c16f3341d3c | 939acfc68eaee0346f5b969fcc9c8e3aa6bbf9a8 | /mmdet/ops/nms/setup.py | a2fb3b56522f701c7d910e853e0d82c85742bbf8 | [] | no_license | ch-ho00/FCOS_obb | 5a4cc873b77aec8a8035d4c6b4191cbc03e75033 | 8655dddebe86c9132b4bfee734e74d76a27bf914 | refs/heads/master | 2022-10-18T05:01:17.130046 | 2020-06-10T07:26:04 | 2020-06-10T07:26:04 | 270,529,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | import os.path as osp
from setuptools import setup, Extension
import numpy as np
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# Shared build options for the Cython extension: numpy headers, C++, and
# per-compiler flag sets keyed by 'cc' / 'nvcc' (consumed by the customized
# _compile installed by customize_compiler_for_nvcc below).
ext_args = dict(
    include_dirs=[np.get_include()],
    language='c++',
    extra_compile_args={
        'cc': ['-Wno-unused-function', '-Wno-write-strings'],
        'nvcc': ['-c', '--compiler-options', '-fPIC'],
    },
)
extensions = [
    Extension('soft_nms_cpu', ['src/soft_nms_cpu.pyx'], **ext_args),
]
def customize_compiler_for_nvcc(self):
    """inject deep into distutils to customize how the dispatch
    to cc/nvcc works.
    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kindof like a wierd functional
    subclassing going on."""
    # tell the compiler it can processes .cu
    self.src_extensions.append('.cu')
    # save references to the default compiler_so and _comple methods
    default_compiler_so = self.compiler_so
    super = self._compile  # NOTE(review): deliberately shadows the builtin 'super'
    # now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if osp.splitext(src)[1] == '.cu':
            # use the cuda for .cu files
            self.set_executable('compiler_so', 'nvcc')
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['cc']
        # delegate to the saved original _compile with the chosen flags
        super(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so
    # inject our redefined _compile method into the class
    self._compile = _compile
class custom_build_ext(build_ext):
    # build_ext subclass that patches the compiler so .cu sources are
    # routed to nvcc (see customize_compiler_for_nvcc above).
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
# Build the Cython soft-NMS extension with the nvcc-aware build_ext.
setup(
    name='soft_nms',
    cmdclass={'build_ext': custom_build_ext},
    ext_modules=cythonize(extensions),
)
# Build the PyTorch CUDA/CPU NMS extensions with torch's BuildExtension.
# NOTE(review): calling setup() twice in one setup.py is unusual -- both
# run on every invocation; confirm this is intentional.
setup(
    name='nms_cuda',
    ext_modules=[
        CUDAExtension('nms_cuda', [
            'src/nms_cuda.cpp',
            'src/nms_kernel.cu',
        ]),
        CUDAExtension('nms_cpu', [
            'src/nms_cpu.cpp',
        ]),
    ],
    cmdclass={'build_ext': BuildExtension})
| [
"psbhchan@gmail.com"
] | psbhchan@gmail.com |
0ae246e21eb23160ee3be8dc5060109d11903209 | 26f862c5f17fd97beb38be35b4b5937673929c9b | /swagger_client/models/system_object.py | f2f7c5ffd1642cfd9026a3adcb69acada10676a8 | [] | no_license | m-wendt/swagger-client | bf146841fa4e7eb6add01c09822eb01d89defa5e | 2db96983a900dbb1f5d32c5e66d190e5c0d9b3dc | refs/heads/master | 2020-11-25T22:06:23.487954 | 2019-12-18T15:56:21 | 2019-12-18T15:56:21 | 228,865,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,441 | py | # coding: utf-8
"""
Save.TV API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SystemObject(object):
    """Swagger-generated empty base model.

    Originally emitted by swagger-codegen; hand edits here are limited to
    documentation and replacing ``six.iteritems`` with the builtin
    ``dict.items()`` (identical behavior, removes the six dependency from
    this class).
    """

    # swagger_types: attribute name -> attribute type (empty for this model)
    swagger_types = {
    }

    # attribute_map: attribute name -> JSON key in the API definition
    attribute_map = {
    }

    def __init__(self):  # noqa: E501
        """SystemObject - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # swagger_types is empty here, but the generated walk is kept so
        # subclasses that declare attributes serialize correctly.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(SystemObject, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SystemObject):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"martin_wendt@internet-mail.org"
] | martin_wendt@internet-mail.org |
a845b6d5d7e9d14b9513c2835dd08585f69dfc8b | a4d2ae314c00e37d89dbad5a8d69077163fa0eb7 | /implementation/detection/technique_T1156_bashrc.py | 8c3c2c486be0f88b0ad90517badb8cbc79ac1913 | [] | no_license | ccaballe/cti | 8e99c355115ba7026147611ce3f5849356fe5465 | 93c45f1561ae3d3e3d75271e8391d9ff74d87493 | refs/heads/master | 2020-03-23T00:33:59.844234 | 2018-09-01T22:09:35 | 2018-09-01T22:09:35 | 140,870,643 | 0 | 0 | null | 2018-07-13T16:38:22 | 2018-07-13T16:38:22 | null | UTF-8 | Python | false | false | 1,140 | py | # Python 2
from implementation.data_sources.FileMonitoring import FileMonitoring
from implementation.data_sources.ProcessCommandLineParameters import ProcessCommandLineParameters
from implementation.data_sources.ProcessMonitoring import ProcessMonitoring
from implementation.data_sources.ProcessUseOfNetwork import ProcessUseOfNetwork
from implementation.detection.detection import Detection
class T1156_bashrc(Detection):
    """Detection for ATT&CK T1156 (.bash_profile / .bashrc persistence)."""
    def analysis(self):
        """Print the allowed commands for the matching technique entry."""
        tech = self.get_technique_by_name(".bash_profile and .bashrc")
        # NOTE(review): assumes the lookup returned at least one match --
        # tech[0] raises IndexError otherwise; confirm upstream guarantees.
        print(tech[0].x_tfm_commands_allowed)
        # TODO correlate with other data sources
    def detect(self):
        """Return True when the .bashrc file monitor reported any data."""
        # .bashrc monitoring
        # Index 0 matches the FileMonitoring entry in get_data_sources().
        bashrc_monitoring = self.get_data_sources_info()[0]
        if bashrc_monitoring["data"] != "":
            return True
        return False
    def get_data_sources(self):
        """Data sources consumed by this detection (order matters for detect())."""
        return [
            FileMonitoring(self.input_config, self.tech_config["bashrcfile"]),
            ProcessMonitoring(self.input_config, process="bash"),
            ProcessCommandLineParameters(self.input_config),
            ProcessUseOfNetwork(self.input_config, "bash")
        ]
| [
"ccaballero@stratio.com"
] | ccaballero@stratio.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.