blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ac8c87e05d0dfeae59cd3f11a91f16a4d2bafbbd | 47743d63962ae9af06a5710e41f06bfc7bad6c2c | /plot_functions.py | 20dfaabc05fd24aa06dd459bbbb414cc1c4d38ea | [] | no_license | dlahtou/wcl-raidnight-summarizer | 8cd9c3c7fd40b2de86d383a42db6e48e58a6334b | b4c97f835f0f5d9e9c5d14e6235be0200cf14193 | refs/heads/master | 2022-12-21T14:17:12.662887 | 2019-01-17T21:21:30 | 2019-01-17T21:21:30 | 135,497,536 | 0 | 0 | null | 2022-12-08T01:30:56 | 2018-05-30T21:04:45 | Python | UTF-8 | Python | false | false | 8,140 | py | from raid_night_summarizer import *
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from textwrap import wrap
import re
import pandas as pd
antorus_ordered_handles = {"Heroic Garothi Worldbreaker": 1,
"Heroic Felhounds of Sargeras": 2,
"Heroic The Defense of Eonar": 3,
"Heroic Portal Keeper Hasabel": 4,
"Heroic Antoran High Command": 5,
"Heroic Imonar the Soulhunter": 6,
"Heroic Kin'garoth": 7,
"Heroic Varimathras": 8,
"Heroic The Coven of Shivarra": 9,
"Heroic Aggramar": 10,
"Heroic Argus the Unmaker": 11}
uldir_ordered_handles = {"Heroic Taloc": 1,
"Heroic MOTHER": 2,
"Heroic Fetid Devourer": 3,
"Heroic Vectis": 4,
"Heroic Zek'voz": 5,
"Heroic Zul": 6,
"Heroic Mythrax": 7,
"Heroic G'huun": 8}
def get_parse_color(parse_number):
    """Map a WarcraftLogs parse percentile to its WoW item-quality hex color.

    Percentiles at or below each threshold get that tier's color; anything
    above 100 falls through to plain 'gray'.
    """
    # Thresholds mirror the WoW item-quality tiers; the first match wins.
    tiers = ((20, '#9d9d9d'),   # common - gray
             (50, '#1eff00'),   # uncommon - green
             (75, '#0070dd'),   # rare - blue
             (94, '#a335ee'),   # epic - purple
             (100, '#ff8000'))  # legendary - orange
    return next((color for limit, color in tiers if parse_number <= limit),
                'gray')
def make_avg_ilvl_parse_bar_plot(raidnight_object):
    """Bar-plot the raid-average ilvl parse percentile for each boss.

    raidnight_object: object exposing parse_scrapes[boss][player]['ilvl-performance'].
    Each bar (and its numeric label) is colored by parse tier via
    get_parse_color. Shows the figure; returns None.
    """
    average_parses = []
    for boss_name, boss_scrapes in raidnight_object.parse_scrapes.items():
        iparselist = [scrape['ilvl-performance'] for scrape in boss_scrapes.values()]
        average_parses.append((np.mean(iparselist), boss_name))

    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    # Wrap long boss names so the rotated x-axis labels stay readable.
    ax.bar(['\n'.join(wrap(name, 16)) for _, name in average_parses],
           [parse for parse, _ in average_parses],
           color=[get_parse_color(parse) for parse, _ in average_parses],
           edgecolor='black')
    plt.xticks(rotation=70)
    plt.yticks(np.arange(0, 110, 10))
    plt.xlabel('Boss')
    # Annotate each bar with its value (width 4, 3 significant digits).
    for idx, parse in enumerate(parse for parse, _ in average_parses):
        ax.text(idx, parse + 1, f'{parse:{4}.{3}}', color=get_parse_color(parse),
                fontweight='bold', horizontalalignment='center')
    ax.set_facecolor('#A9A9A9')
    fig.set_facecolor('gray')
    # Fix: lay out after all artists (including the annotations) exist, so
    # tight_layout accounts for everything; previously it ran too early.
    plt.tight_layout()
    plt.show()
def make_heroic_raid_avg_ilvl_parse_scatter_plot(raid_folder):
    """Scatter-plot each Heroic boss's raid-average overall parse by date.

    Reads every report file in raid_folder (names containing 'Ant' are
    skipped), averages the 'overall-performance' parse across players for
    each Heroic boss per night, and plots one dated point series per boss
    with the legend in Uldir progression order. Shows the figure;
    returns None.
    """
    boss_rows = []
    report_files = [f for f in listdir(raid_folder) if isfile(join(raid_folder, f))]
    for report_name in report_files:
        if 'Ant' in report_name:
            continue  # Antorus-era logs are excluded from this chart
        # NOTE(review): second argument is the hard-coded 'MyDudes' here,
        # while make_raidstats_chart passes raid_folder — confirm which
        # RaidnightData actually expects.
        raidnight = RaidnightData(report_name, 'MyDudes')
        night_date = pd.to_datetime(datetime.date.fromtimestamp(raidnight.raidnight_date))
        for boss_name, scrapes in raidnight.parse_scrapes.items():
            if re.search('Heroic', boss_name) is None:
                continue
            parse_values = [scrape['overall-performance'] for scrape in scrapes.values()]
            boss_rows.append((boss_name, np.mean(parse_values), night_date))
    boss_rows.sort(key=lambda row: row[0])

    parse_df = pd.DataFrame(boss_rows, columns=["Boss", "Parse", "Date"]).set_index("Date")

    fig = plt.figure(figsize=(15, 8))
    ax = fig.add_subplot(111)
    parse_df.groupby('Boss')['Parse'].plot(legend=True, marker='o', linestyle='')
    ax.set_title("Raid Overall Parses by Date")
    ax.set_ylabel("Parse Percentile")
    ax.set_xlabel("Date")
    # Re-order legend entries into boss progression order instead of
    # matplotlib's default ordering.
    handles, labels = ax.get_legend_handles_labels()
    ordered = sorted(zip(handles, labels), key=lambda pair: uldir_ordered_handles[pair[1]])
    handles, labels = zip(*ordered)
    ax.legend(handles, labels, bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.)
    plt.tight_layout()
    ax.set_yticks(np.arange(0, 110, 10))
    plt.show()
def make_ilvl_chart(raid_folder, playername=None):
    '''
    Plot best equipped item level per raid night as a time series.

    raid_folder: folder of raid-night report files.
    playername: if given, chart that player's best equipped ilvl;
        otherwise (default) chart the raid-average equipped ilvl.
    Shows the figure; returns None.
    '''
    title_string = "Raid Average Equipped ilvl"
    playerregex = ".*"
    if playername:
        playerregex = playername
        title_string = playername + " Best Equipped ilvl"
    average_parses = []
    for basename in [f for f in listdir(raid_folder) if isfile(join(raid_folder, f))]:
        # BUG FIX: test the bare file name for 'Ant' (Antorus-era logs), not
        # the joined path — a raid_folder whose name contained 'Ant' used to
        # skip every file.
        if 'Ant' in basename:
            continue
        # NOTE(review): sibling make_raidstats_chart passes raid_folder as the
        # second argument — confirm RaidnightData's expected signature.
        raidnight = RaidnightData(join(raid_folder, basename), 'MyDudes')
        date = pd.to_datetime(datetime.date.fromtimestamp(raidnight.raidnight_date))
        top_average_ilevel = 0
        for boss in raidnight.damage_done.keys():
            try:
                player_ilevels = [x['itemLevel'] for x in raidnight.damage_done[boss]['entries'] if re.match(playerregex, x['name'])]
            except KeyError:
                continue  # fight entry missing expected keys; skip it
            if not player_ilevels:
                # Avoid np.mean([]) -> nan and its RuntimeWarning when the
                # regex matched nobody in this fight.
                continue
            average_ilevel = np.mean(player_ilevels)
            if average_ilevel > top_average_ilevel:
                top_average_ilevel = average_ilevel
        if top_average_ilevel != 0:
            average_parses.append((date, top_average_ilevel))
    parse_df = pd.DataFrame(average_parses, columns=["Date", "ilevel"])
    parse_df.set_index("Date", inplace=True)
    fig = plt.figure(figsize=(15, 8))
    ax = fig.add_subplot(111)
    parse_df["ilevel"].plot(legend=None)
    ax.set_ylabel("ilevel")
    ax.set_title(title_string)
    plt.show()
def make_raidstats_chart(raid_folder):
    """Bar-chart each raid night's duration (formatted H:MM:SS) by date.

    Loads every report file in raid_folder (file names starting with 'Ant'
    are skipped), collects per-night lockout number, duration, and which
    Uldir bosses appear in the parse scrapes, then bar-plots Duration.
    Prints the DataFrame head for inspection and shows the figure;
    returns None.
    """
    #TODO: raid duration, cumulative bosses down (heroic only AND normal only)
    # One column per Uldir boss plus three summary columns; collected as
    # parallel lists keyed by column name.
    raidstats_data_columns = ["Date", "Lockout Number", "Duration"] + list(uldir_ordered_handles.keys())
    raidstats_dictionary = dict()
    for column_header in raidstats_data_columns:
        raidstats_dictionary[column_header] = []
    for filename in [f for f in listdir(raid_folder) if isfile(join(raid_folder, f))]:
        # Skip Antorus-era logs; only Uldir bosses are tracked here.
        if filename[:3] == "Ant":
            continue
        raidnight = RaidnightData(filename, raid_folder)
        raidstats_dictionary["Lockout Number"].append(raidnight.get_raid_lockout_period())
        date = pd.to_datetime(datetime.date.fromtimestamp(raidnight.raidnight_date))
        raidstats_dictionary["Date"].append(date)
        # fights['start'] / fights['end'] are divided by 1000 before
        # fromtimestamp — presumably epoch milliseconds; TODO confirm.
        raidnight_duration = pd.to_timedelta(datetime.datetime.fromtimestamp(raidnight.fights['end']//1000) - datetime.datetime.fromtimestamp(raidnight.fights['start']//1000))
        raidstats_dictionary["Duration"].append(raidnight_duration)
        # Record each boss present in the night's scrapes as its
        # progression-order number; absent bosses get None.
        for boss in raidstats_data_columns[3:]:
            if boss in raidnight.parse_scrapes.keys():
                raidstats_dictionary[boss].append(uldir_ordered_handles[boss])
            else:
                raidstats_dictionary[boss].append(None)
    raidstats_df = pd.DataFrame(raidstats_dictionary, columns=raidstats_data_columns)
    raidstats_df.set_index("Date", inplace=True)
    print(raidstats_df.head())
    def timeTicks(nanoseconds, pos):
        # Matplotlib tick formatter: the timedelta column plots in
        # nanoseconds, so convert each tick value to an H:MM:SS label.
        seconds = nanoseconds//1000000000
        hours = str(int(seconds//3600))
        minutes = str(int((seconds%3600)//60))
        seconds = str(int(seconds%60))
        return ':'.join([hours, minutes.zfill(2),seconds.zfill(2)])
    formatter = mpl.ticker.FuncFormatter(timeTicks)
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    '''for column in raidstats_df.keys():
        if column == "Duration":
            continue
        plt.scatter(raidstats_df.index, raidstats_df[column])
    ax.set_yticks(np.arange(12))'''
    plt.bar(raidstats_df.index, raidstats_df["Duration"])
    # Y ticks every 30 minutes up to 4 hours, expressed in nanoseconds to
    # match the plotted timedelta values.
    ax.set_yticks(np.arange(0,3600*1000000000*4, 1800*1000000000))
    ax.yaxis.set_major_formatter(formatter)
    plt.show()
make_heroic_raid_avg_ilvl_parse_scatter_plot('MyDudes') | [
"dlahtou@gmail.com"
] | dlahtou@gmail.com |
600d0751fd86b25e5c1a6b7538daf9855d479a4e | 78d12ba8401d2a7d79569d1a62a179c709421dad | /030-Master_Model_Python/Input_EIA_Crude_WTI.py | 7ec08edbfcf2cb99d4cd34dae96afdfaabb2c9f0 | [] | no_license | hawaii-clean-energy-metrics/hcem | 6f04ae93576c7a9872cdc3ef2f9ef179779e3976 | e0659daa6f211df7e7c5dfefe53129b37fb12e69 | refs/heads/master | 2020-03-28T07:02:04.303960 | 2018-10-26T20:45:49 | 2018-10-26T20:45:49 | 147,876,431 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 99,386 | py | # -*- coding: utf-8 -*-
from xl2py_excel_runtime import *
Input_EIA_Crude_WTI = Worksheet('Input_EIA_Crude_WTI', 10, 10)
@register(Input_EIA_Crude_WTI)
class A1():
# 'Input_EIA_Crude_WTI'!A1
value = "Sourcekey"
@register(Input_EIA_Crude_WTI)
class B1():
# 'Input_EIA_Crude_WTI'!B1
value = "RWTC"
@register(Input_EIA_Crude_WTI)
class A2():
# 'Input_EIA_Crude_WTI'!A2
value = "Date"
@register(Input_EIA_Crude_WTI)
class B2():
# 'Input_EIA_Crude_WTI'!B2
value = "Cushing, OK WTI Spot Price FOB (Dollars per Barrel)"
@register(Input_EIA_Crude_WTI)
class A3():
# 'Input_EIA_Crude_WTI'!A3
value = 31427
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B3():
# 'Input_EIA_Crude_WTI'!B3
value = 22.93
@register(Input_EIA_Crude_WTI)
class A4():
# 'Input_EIA_Crude_WTI'!A4
value = 31458
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B4():
# 'Input_EIA_Crude_WTI'!B4
value = 15.46
@register(Input_EIA_Crude_WTI)
class A5():
# 'Input_EIA_Crude_WTI'!A5
value = 31486
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B5():
# 'Input_EIA_Crude_WTI'!B5
value = 12.61
@register(Input_EIA_Crude_WTI)
class A6():
# 'Input_EIA_Crude_WTI'!A6
value = 31517
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B6():
# 'Input_EIA_Crude_WTI'!B6
value = 12.84
@register(Input_EIA_Crude_WTI)
class A7():
# 'Input_EIA_Crude_WTI'!A7
value = 31547
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B7():
# 'Input_EIA_Crude_WTI'!B7
value = 15.38
@register(Input_EIA_Crude_WTI)
class A8():
# 'Input_EIA_Crude_WTI'!A8
value = 31578
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B8():
# 'Input_EIA_Crude_WTI'!B8
value = 13.43
@register(Input_EIA_Crude_WTI)
class A9():
# 'Input_EIA_Crude_WTI'!A9
value = 31608
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B9():
# 'Input_EIA_Crude_WTI'!B9
value = 11.59
@register(Input_EIA_Crude_WTI)
class A10():
# 'Input_EIA_Crude_WTI'!A10
value = 31639
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B10():
# 'Input_EIA_Crude_WTI'!B10
value = 15.1
@register(Input_EIA_Crude_WTI)
class A11():
# 'Input_EIA_Crude_WTI'!A11
value = 31670
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B11():
# 'Input_EIA_Crude_WTI'!B11
value = 14.87
@register(Input_EIA_Crude_WTI)
class A12():
# 'Input_EIA_Crude_WTI'!A12
value = 31700
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B12():
# 'Input_EIA_Crude_WTI'!B12
value = 14.9
@register(Input_EIA_Crude_WTI)
class A13():
# 'Input_EIA_Crude_WTI'!A13
value = 31731
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B13():
# 'Input_EIA_Crude_WTI'!B13
value = 15.22
@register(Input_EIA_Crude_WTI)
class A14():
# 'Input_EIA_Crude_WTI'!A14
value = 31761
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B14():
# 'Input_EIA_Crude_WTI'!B14
value = 16.11
@register(Input_EIA_Crude_WTI)
class A15():
# 'Input_EIA_Crude_WTI'!A15
value = 31792
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B15():
# 'Input_EIA_Crude_WTI'!B15
value = 18.65
@register(Input_EIA_Crude_WTI)
class A16():
# 'Input_EIA_Crude_WTI'!A16
value = 31823
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B16():
# 'Input_EIA_Crude_WTI'!B16
value = 17.75
@register(Input_EIA_Crude_WTI)
class A17():
# 'Input_EIA_Crude_WTI'!A17
value = 31851
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B17():
# 'Input_EIA_Crude_WTI'!B17
value = 18.3
@register(Input_EIA_Crude_WTI)
class A18():
# 'Input_EIA_Crude_WTI'!A18
value = 31882
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B18():
# 'Input_EIA_Crude_WTI'!B18
value = 18.68
@register(Input_EIA_Crude_WTI)
class A19():
# 'Input_EIA_Crude_WTI'!A19
value = 31912
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B19():
# 'Input_EIA_Crude_WTI'!B19
value = 19.44
@register(Input_EIA_Crude_WTI)
class A20():
# 'Input_EIA_Crude_WTI'!A20
value = 31943
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B20():
# 'Input_EIA_Crude_WTI'!B20
value = 20.07
@register(Input_EIA_Crude_WTI)
class A21():
# 'Input_EIA_Crude_WTI'!A21
value = 31973
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B21():
# 'Input_EIA_Crude_WTI'!B21
value = 21.34
@register(Input_EIA_Crude_WTI)
class A22():
# 'Input_EIA_Crude_WTI'!A22
value = 32004
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B22():
# 'Input_EIA_Crude_WTI'!B22
value = 20.31
@register(Input_EIA_Crude_WTI)
class A23():
# 'Input_EIA_Crude_WTI'!A23
value = 32035
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B23():
# 'Input_EIA_Crude_WTI'!B23
value = 19.53
@register(Input_EIA_Crude_WTI)
class A24():
# 'Input_EIA_Crude_WTI'!A24
value = 32065
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B24():
# 'Input_EIA_Crude_WTI'!B24
value = 19.86
@register(Input_EIA_Crude_WTI)
class A25():
# 'Input_EIA_Crude_WTI'!A25
value = 32096
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B25():
# 'Input_EIA_Crude_WTI'!B25
value = 18.85
@register(Input_EIA_Crude_WTI)
class A26():
# 'Input_EIA_Crude_WTI'!A26
value = 32126
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B26():
# 'Input_EIA_Crude_WTI'!B26
value = 17.28
@register(Input_EIA_Crude_WTI)
class A27():
# 'Input_EIA_Crude_WTI'!A27
value = 32157
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B27():
# 'Input_EIA_Crude_WTI'!B27
value = 17.13
@register(Input_EIA_Crude_WTI)
class A28():
# 'Input_EIA_Crude_WTI'!A28
value = 32188
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B28():
# 'Input_EIA_Crude_WTI'!B28
value = 16.8
@register(Input_EIA_Crude_WTI)
class A29():
# 'Input_EIA_Crude_WTI'!A29
value = 32217
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B29():
# 'Input_EIA_Crude_WTI'!B29
value = 16.2
@register(Input_EIA_Crude_WTI)
class A30():
# 'Input_EIA_Crude_WTI'!A30
value = 32248
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B30():
# 'Input_EIA_Crude_WTI'!B30
value = 17.86
@register(Input_EIA_Crude_WTI)
class A31():
# 'Input_EIA_Crude_WTI'!A31
value = 32278
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B31():
# 'Input_EIA_Crude_WTI'!B31
value = 17.42
@register(Input_EIA_Crude_WTI)
class A32():
# 'Input_EIA_Crude_WTI'!A32
value = 32309
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B32():
# 'Input_EIA_Crude_WTI'!B32
value = 16.53
@register(Input_EIA_Crude_WTI)
class A33():
# 'Input_EIA_Crude_WTI'!A33
value = 32339
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B33():
# 'Input_EIA_Crude_WTI'!B33
value = 15.5
@register(Input_EIA_Crude_WTI)
class A34():
# 'Input_EIA_Crude_WTI'!A34
value = 32370
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B34():
# 'Input_EIA_Crude_WTI'!B34
value = 15.52
@register(Input_EIA_Crude_WTI)
class A35():
# 'Input_EIA_Crude_WTI'!A35
value = 32401
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B35():
# 'Input_EIA_Crude_WTI'!B35
value = 14.54
@register(Input_EIA_Crude_WTI)
class A36():
# 'Input_EIA_Crude_WTI'!A36
value = 32431
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B36():
# 'Input_EIA_Crude_WTI'!B36
value = 13.77
@register(Input_EIA_Crude_WTI)
class A37():
# 'Input_EIA_Crude_WTI'!A37
value = 32462
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B37():
# 'Input_EIA_Crude_WTI'!B37
value = 14.14
@register(Input_EIA_Crude_WTI)
class A38():
# 'Input_EIA_Crude_WTI'!A38
value = 32492
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B38():
# 'Input_EIA_Crude_WTI'!B38
value = 16.38
@register(Input_EIA_Crude_WTI)
class A39():
# 'Input_EIA_Crude_WTI'!A39
value = 32523
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B39():
# 'Input_EIA_Crude_WTI'!B39
value = 18.02
@register(Input_EIA_Crude_WTI)
class A40():
# 'Input_EIA_Crude_WTI'!A40
value = 32554
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B40():
# 'Input_EIA_Crude_WTI'!B40
value = 17.94
@register(Input_EIA_Crude_WTI)
class A41():
# 'Input_EIA_Crude_WTI'!A41
value = 32582
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B41():
# 'Input_EIA_Crude_WTI'!B41
value = 19.48
@register(Input_EIA_Crude_WTI)
class A42():
# 'Input_EIA_Crude_WTI'!A42
value = 32613
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B42():
# 'Input_EIA_Crude_WTI'!B42
value = 21.07
@register(Input_EIA_Crude_WTI)
class A43():
# 'Input_EIA_Crude_WTI'!A43
value = 32643
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B43():
# 'Input_EIA_Crude_WTI'!B43
value = 20.12
@register(Input_EIA_Crude_WTI)
class A44():
# 'Input_EIA_Crude_WTI'!A44
value = 32674
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B44():
# 'Input_EIA_Crude_WTI'!B44
value = 20.05
@register(Input_EIA_Crude_WTI)
class A45():
# 'Input_EIA_Crude_WTI'!A45
value = 32704
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B45():
# 'Input_EIA_Crude_WTI'!B45
value = 19.78
@register(Input_EIA_Crude_WTI)
class A46():
# 'Input_EIA_Crude_WTI'!A46
value = 32735
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B46():
# 'Input_EIA_Crude_WTI'!B46
value = 18.58
@register(Input_EIA_Crude_WTI)
class A47():
# 'Input_EIA_Crude_WTI'!A47
value = 32766
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B47():
# 'Input_EIA_Crude_WTI'!B47
value = 19.59
@register(Input_EIA_Crude_WTI)
class A48():
# 'Input_EIA_Crude_WTI'!A48
value = 32796
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B48():
# 'Input_EIA_Crude_WTI'!B48
value = 20.1
@register(Input_EIA_Crude_WTI)
class A49():
# 'Input_EIA_Crude_WTI'!A49
value = 32827
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B49():
# 'Input_EIA_Crude_WTI'!B49
value = 19.86
@register(Input_EIA_Crude_WTI)
class A50():
# 'Input_EIA_Crude_WTI'!A50
value = 32857
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B50():
# 'Input_EIA_Crude_WTI'!B50
value = 21.1
@register(Input_EIA_Crude_WTI)
class A51():
# 'Input_EIA_Crude_WTI'!A51
value = 32888
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B51():
# 'Input_EIA_Crude_WTI'!B51
value = 22.86
@register(Input_EIA_Crude_WTI)
class A52():
# 'Input_EIA_Crude_WTI'!A52
value = 32919
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B52():
# 'Input_EIA_Crude_WTI'!B52
value = 22.11
@register(Input_EIA_Crude_WTI)
class A53():
# 'Input_EIA_Crude_WTI'!A53
value = 32947
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B53():
# 'Input_EIA_Crude_WTI'!B53
value = 20.39
@register(Input_EIA_Crude_WTI)
class A54():
# 'Input_EIA_Crude_WTI'!A54
value = 32978
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B54():
# 'Input_EIA_Crude_WTI'!B54
value = 18.43
@register(Input_EIA_Crude_WTI)
class A55():
# 'Input_EIA_Crude_WTI'!A55
value = 33008
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B55():
# 'Input_EIA_Crude_WTI'!B55
value = 18.2
@register(Input_EIA_Crude_WTI)
class A56():
# 'Input_EIA_Crude_WTI'!A56
value = 33039
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B56():
# 'Input_EIA_Crude_WTI'!B56
value = 16.7
@register(Input_EIA_Crude_WTI)
class A57():
# 'Input_EIA_Crude_WTI'!A57
value = 33069
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B57():
# 'Input_EIA_Crude_WTI'!B57
value = 18.45
@register(Input_EIA_Crude_WTI)
class A58():
# 'Input_EIA_Crude_WTI'!A58
value = 33100
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B58():
# 'Input_EIA_Crude_WTI'!B58
value = 27.31
@register(Input_EIA_Crude_WTI)
class A59():
# 'Input_EIA_Crude_WTI'!A59
value = 33131
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B59():
# 'Input_EIA_Crude_WTI'!B59
value = 33.51
@register(Input_EIA_Crude_WTI)
class A60():
# 'Input_EIA_Crude_WTI'!A60
value = 33161
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B60():
# 'Input_EIA_Crude_WTI'!B60
value = 36.04
@register(Input_EIA_Crude_WTI)
class A61():
# 'Input_EIA_Crude_WTI'!A61
value = 33192
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B61():
# 'Input_EIA_Crude_WTI'!B61
value = 32.33
@register(Input_EIA_Crude_WTI)
class A62():
# 'Input_EIA_Crude_WTI'!A62
value = 33222
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B62():
# 'Input_EIA_Crude_WTI'!B62
value = 27.28
@register(Input_EIA_Crude_WTI)
class A63():
# 'Input_EIA_Crude_WTI'!A63
value = 33253
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B63():
# 'Input_EIA_Crude_WTI'!B63
value = 25.23
@register(Input_EIA_Crude_WTI)
class A64():
# 'Input_EIA_Crude_WTI'!A64
value = 33284
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B64():
# 'Input_EIA_Crude_WTI'!B64
value = 20.48
@register(Input_EIA_Crude_WTI)
class A65():
# 'Input_EIA_Crude_WTI'!A65
value = 33312
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B65():
# 'Input_EIA_Crude_WTI'!B65
value = 19.9
@register(Input_EIA_Crude_WTI)
class A66():
# 'Input_EIA_Crude_WTI'!A66
value = 33343
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B66():
# 'Input_EIA_Crude_WTI'!B66
value = 20.83
@register(Input_EIA_Crude_WTI)
class A67():
# 'Input_EIA_Crude_WTI'!A67
value = 33373
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B67():
# 'Input_EIA_Crude_WTI'!B67
value = 21.23
@register(Input_EIA_Crude_WTI)
class A68():
# 'Input_EIA_Crude_WTI'!A68
value = 33404
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B68():
# 'Input_EIA_Crude_WTI'!B68
value = 20.19
@register(Input_EIA_Crude_WTI)
class A69():
# 'Input_EIA_Crude_WTI'!A69
value = 33434
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B69():
# 'Input_EIA_Crude_WTI'!B69
value = 21.4
@register(Input_EIA_Crude_WTI)
class A70():
# 'Input_EIA_Crude_WTI'!A70
value = 33465
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B70():
# 'Input_EIA_Crude_WTI'!B70
value = 21.69
@register(Input_EIA_Crude_WTI)
class A71():
# 'Input_EIA_Crude_WTI'!A71
value = 33496
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B71():
# 'Input_EIA_Crude_WTI'!B71
value = 21.89
@register(Input_EIA_Crude_WTI)
class A72():
# 'Input_EIA_Crude_WTI'!A72
value = 33526
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B72():
# 'Input_EIA_Crude_WTI'!B72
value = 23.23
@register(Input_EIA_Crude_WTI)
class A73():
# 'Input_EIA_Crude_WTI'!A73
value = 33557
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B73():
# 'Input_EIA_Crude_WTI'!B73
value = 22.46
@register(Input_EIA_Crude_WTI)
class A74():
# 'Input_EIA_Crude_WTI'!A74
value = 33587
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B74():
# 'Input_EIA_Crude_WTI'!B74
value = 19.5
@register(Input_EIA_Crude_WTI)
class A75():
# 'Input_EIA_Crude_WTI'!A75
value = 33618
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B75():
# 'Input_EIA_Crude_WTI'!B75
value = 18.79
@register(Input_EIA_Crude_WTI)
class A76():
# 'Input_EIA_Crude_WTI'!A76
value = 33649
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B76():
# 'Input_EIA_Crude_WTI'!B76
value = 19.01
@register(Input_EIA_Crude_WTI)
class A77():
# 'Input_EIA_Crude_WTI'!A77
value = 33678
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B77():
# 'Input_EIA_Crude_WTI'!B77
value = 18.92
@register(Input_EIA_Crude_WTI)
class A78():
# 'Input_EIA_Crude_WTI'!A78
value = 33709
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B78():
# 'Input_EIA_Crude_WTI'!B78
value = 20.23
@register(Input_EIA_Crude_WTI)
class A79():
# 'Input_EIA_Crude_WTI'!A79
value = 33739
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B79():
# 'Input_EIA_Crude_WTI'!B79
value = 20.98
@register(Input_EIA_Crude_WTI)
class A80():
# 'Input_EIA_Crude_WTI'!A80
value = 33770
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B80():
# 'Input_EIA_Crude_WTI'!B80
value = 22.39
@register(Input_EIA_Crude_WTI)
class A81():
# 'Input_EIA_Crude_WTI'!A81
value = 33800
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B81():
# 'Input_EIA_Crude_WTI'!B81
value = 21.78
@register(Input_EIA_Crude_WTI)
class A82():
# 'Input_EIA_Crude_WTI'!A82
value = 33831
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B82():
# 'Input_EIA_Crude_WTI'!B82
value = 21.34
@register(Input_EIA_Crude_WTI)
class A83():
# 'Input_EIA_Crude_WTI'!A83
value = 33862
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B83():
# 'Input_EIA_Crude_WTI'!B83
value = 21.88
@register(Input_EIA_Crude_WTI)
class A84():
# 'Input_EIA_Crude_WTI'!A84
value = 33892
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B84():
# 'Input_EIA_Crude_WTI'!B84
value = 21.69
@register(Input_EIA_Crude_WTI)
class A85():
# 'Input_EIA_Crude_WTI'!A85
value = 33923
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B85():
# 'Input_EIA_Crude_WTI'!B85
value = 20.34
@register(Input_EIA_Crude_WTI)
class A86():
# 'Input_EIA_Crude_WTI'!A86
value = 33953
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B86():
# 'Input_EIA_Crude_WTI'!B86
value = 19.41
@register(Input_EIA_Crude_WTI)
class A87():
# 'Input_EIA_Crude_WTI'!A87
value = 33984
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B87():
# 'Input_EIA_Crude_WTI'!B87
value = 19.03
@register(Input_EIA_Crude_WTI)
class A88():
# 'Input_EIA_Crude_WTI'!A88
value = 34015
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B88():
# 'Input_EIA_Crude_WTI'!B88
value = 20.09
@register(Input_EIA_Crude_WTI)
class A89():
# 'Input_EIA_Crude_WTI'!A89
value = 34043
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B89():
# 'Input_EIA_Crude_WTI'!B89
value = 20.32
@register(Input_EIA_Crude_WTI)
class A90():
# 'Input_EIA_Crude_WTI'!A90
value = 34074
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B90():
# 'Input_EIA_Crude_WTI'!B90
value = 20.25
@register(Input_EIA_Crude_WTI)
class A91():
# 'Input_EIA_Crude_WTI'!A91
value = 34104
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B91():
# 'Input_EIA_Crude_WTI'!B91
value = 19.95
@register(Input_EIA_Crude_WTI)
class A92():
# 'Input_EIA_Crude_WTI'!A92
value = 34135
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B92():
# 'Input_EIA_Crude_WTI'!B92
value = 19.09
@register(Input_EIA_Crude_WTI)
class A93():
# 'Input_EIA_Crude_WTI'!A93
value = 34165
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B93():
# 'Input_EIA_Crude_WTI'!B93
value = 17.89
@register(Input_EIA_Crude_WTI)
class A94():
# 'Input_EIA_Crude_WTI'!A94
value = 34196
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B94():
# 'Input_EIA_Crude_WTI'!B94
value = 18.01
@register(Input_EIA_Crude_WTI)
class A95():
# 'Input_EIA_Crude_WTI'!A95
value = 34227
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B95():
# 'Input_EIA_Crude_WTI'!B95
value = 17.5
@register(Input_EIA_Crude_WTI)
class A96():
# 'Input_EIA_Crude_WTI'!A96
value = 34257
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B96():
# 'Input_EIA_Crude_WTI'!B96
value = 18.15
@register(Input_EIA_Crude_WTI)
class A97():
# 'Input_EIA_Crude_WTI'!A97
value = 34288
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B97():
# 'Input_EIA_Crude_WTI'!B97
value = 16.61
@register(Input_EIA_Crude_WTI)
class A98():
# 'Input_EIA_Crude_WTI'!A98
value = 34318
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B98():
# 'Input_EIA_Crude_WTI'!B98
value = 14.52
@register(Input_EIA_Crude_WTI)
class A99():
# 'Input_EIA_Crude_WTI'!A99
value = 34349
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B99():
# 'Input_EIA_Crude_WTI'!B99
value = 15.03
@register(Input_EIA_Crude_WTI)
class A100():
# 'Input_EIA_Crude_WTI'!A100
value = 34380
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B100():
# 'Input_EIA_Crude_WTI'!B100
value = 14.78
@register(Input_EIA_Crude_WTI)
class A101():
# 'Input_EIA_Crude_WTI'!A101
value = 34408
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B101():
# 'Input_EIA_Crude_WTI'!B101
value = 14.68
@register(Input_EIA_Crude_WTI)
class A102():
# 'Input_EIA_Crude_WTI'!A102
value = 34439
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B102():
# 'Input_EIA_Crude_WTI'!B102
value = 16.42
@register(Input_EIA_Crude_WTI)
class A103():
# 'Input_EIA_Crude_WTI'!A103
value = 34469
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B103():
# 'Input_EIA_Crude_WTI'!B103
value = 17.89
@register(Input_EIA_Crude_WTI)
class A104():
# 'Input_EIA_Crude_WTI'!A104
value = 34500
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B104():
# 'Input_EIA_Crude_WTI'!B104
value = 19.06
@register(Input_EIA_Crude_WTI)
class A105():
# 'Input_EIA_Crude_WTI'!A105
value = 34530
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B105():
# 'Input_EIA_Crude_WTI'!B105
value = 19.66
@register(Input_EIA_Crude_WTI)
class A106():
# 'Input_EIA_Crude_WTI'!A106
value = 34561
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B106():
# 'Input_EIA_Crude_WTI'!B106
value = 18.38
@register(Input_EIA_Crude_WTI)
class A107():
# 'Input_EIA_Crude_WTI'!A107
value = 34592
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B107():
# 'Input_EIA_Crude_WTI'!B107
value = 17.45
@register(Input_EIA_Crude_WTI)
class A108():
# 'Input_EIA_Crude_WTI'!A108
value = 34622
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B108():
# 'Input_EIA_Crude_WTI'!B108
value = 17.72
@register(Input_EIA_Crude_WTI)
class A109():
# 'Input_EIA_Crude_WTI'!A109
value = 34653
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B109():
# 'Input_EIA_Crude_WTI'!B109
value = 18.07
@register(Input_EIA_Crude_WTI)
class A110():
# 'Input_EIA_Crude_WTI'!A110
value = 34683
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B110():
# 'Input_EIA_Crude_WTI'!B110
value = 17.16
@register(Input_EIA_Crude_WTI)
class A111():
# 'Input_EIA_Crude_WTI'!A111
value = 34714
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B111():
# 'Input_EIA_Crude_WTI'!B111
value = 18.04
@register(Input_EIA_Crude_WTI)
class A112():
# 'Input_EIA_Crude_WTI'!A112
value = 34745
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B112():
# 'Input_EIA_Crude_WTI'!B112
value = 18.57
@register(Input_EIA_Crude_WTI)
class A113():
# 'Input_EIA_Crude_WTI'!A113
value = 34773
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B113():
# 'Input_EIA_Crude_WTI'!B113
value = 18.54
@register(Input_EIA_Crude_WTI)
class A114():
# 'Input_EIA_Crude_WTI'!A114
value = 34804
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B114():
# 'Input_EIA_Crude_WTI'!B114
value = 19.9
@register(Input_EIA_Crude_WTI)
class A115():
# 'Input_EIA_Crude_WTI'!A115
value = 34834
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B115():
# 'Input_EIA_Crude_WTI'!B115
value = 19.74
@register(Input_EIA_Crude_WTI)
class A116():
# 'Input_EIA_Crude_WTI'!A116
value = 34865
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B116():
# 'Input_EIA_Crude_WTI'!B116
value = 18.45
@register(Input_EIA_Crude_WTI)
class A117():
# 'Input_EIA_Crude_WTI'!A117
value = 34895
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B117():
# 'Input_EIA_Crude_WTI'!B117
value = 17.33
@register(Input_EIA_Crude_WTI)
class A118():
# 'Input_EIA_Crude_WTI'!A118
value = 34926
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B118():
# 'Input_EIA_Crude_WTI'!B118
value = 18.02
@register(Input_EIA_Crude_WTI)
class A119():
# 'Input_EIA_Crude_WTI'!A119
value = 34957
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B119():
# 'Input_EIA_Crude_WTI'!B119
value = 18.23
@register(Input_EIA_Crude_WTI)
class A120():
# 'Input_EIA_Crude_WTI'!A120
value = 34987
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B120():
# 'Input_EIA_Crude_WTI'!B120
value = 17.43
@register(Input_EIA_Crude_WTI)
class A121():
# 'Input_EIA_Crude_WTI'!A121
value = 35018
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B121():
# 'Input_EIA_Crude_WTI'!B121
value = 17.99
@register(Input_EIA_Crude_WTI)
class A122():
# 'Input_EIA_Crude_WTI'!A122
value = 35048
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B122():
# 'Input_EIA_Crude_WTI'!B122
value = 19.03
@register(Input_EIA_Crude_WTI)
class A123():
# 'Input_EIA_Crude_WTI'!A123
value = 35079
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B123():
# 'Input_EIA_Crude_WTI'!B123
value = 18.86
@register(Input_EIA_Crude_WTI)
class A124():
# 'Input_EIA_Crude_WTI'!A124
value = 35110
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B124():
# 'Input_EIA_Crude_WTI'!B124
value = 19.09
@register(Input_EIA_Crude_WTI)
class A125():
# 'Input_EIA_Crude_WTI'!A125
value = 35139
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B125():
# 'Input_EIA_Crude_WTI'!B125
value = 21.33
@register(Input_EIA_Crude_WTI)
class A126():
# 'Input_EIA_Crude_WTI'!A126
value = 35170
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B126():
# 'Input_EIA_Crude_WTI'!B126
value = 23.5
@register(Input_EIA_Crude_WTI)
class A127():
# 'Input_EIA_Crude_WTI'!A127
value = 35200
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B127():
# 'Input_EIA_Crude_WTI'!B127
value = 21.17
@register(Input_EIA_Crude_WTI)
class A128():
# 'Input_EIA_Crude_WTI'!A128
value = 35231
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B128():
# 'Input_EIA_Crude_WTI'!B128
value = 20.42
@register(Input_EIA_Crude_WTI)
class A129():
# 'Input_EIA_Crude_WTI'!A129
value = 35261
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B129():
# 'Input_EIA_Crude_WTI'!B129
value = 21.3
@register(Input_EIA_Crude_WTI)
class A130():
# 'Input_EIA_Crude_WTI'!A130
value = 35292
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B130():
# 'Input_EIA_Crude_WTI'!B130
value = 21.9
@register(Input_EIA_Crude_WTI)
class A131():
# 'Input_EIA_Crude_WTI'!A131
value = 35323
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B131():
# 'Input_EIA_Crude_WTI'!B131
value = 23.97
@register(Input_EIA_Crude_WTI)
class A132():
# 'Input_EIA_Crude_WTI'!A132
value = 35353
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B132():
# 'Input_EIA_Crude_WTI'!B132
value = 24.88
@register(Input_EIA_Crude_WTI)
class A133():
# 'Input_EIA_Crude_WTI'!A133
value = 35384
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B133():
# 'Input_EIA_Crude_WTI'!B133
value = 23.71
@register(Input_EIA_Crude_WTI)
class A134():
# 'Input_EIA_Crude_WTI'!A134
value = 35414
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B134():
# 'Input_EIA_Crude_WTI'!B134
value = 25.23
@register(Input_EIA_Crude_WTI)
class A135():
# 'Input_EIA_Crude_WTI'!A135
value = 35445
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B135():
# 'Input_EIA_Crude_WTI'!B135
value = 25.13
@register(Input_EIA_Crude_WTI)
class A136():
# 'Input_EIA_Crude_WTI'!A136
value = 35476
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B136():
# 'Input_EIA_Crude_WTI'!B136
value = 22.18
@register(Input_EIA_Crude_WTI)
class A137():
# 'Input_EIA_Crude_WTI'!A137
value = 35504
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B137():
# 'Input_EIA_Crude_WTI'!B137
value = 20.97
@register(Input_EIA_Crude_WTI)
class A138():
# 'Input_EIA_Crude_WTI'!A138
value = 35535
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B138():
# 'Input_EIA_Crude_WTI'!B138
value = 19.7
@register(Input_EIA_Crude_WTI)
class A139():
# 'Input_EIA_Crude_WTI'!A139
value = 35565
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B139():
# 'Input_EIA_Crude_WTI'!B139
value = 20.82
@register(Input_EIA_Crude_WTI)
class A140():
# 'Input_EIA_Crude_WTI'!A140
value = 35596
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B140():
# 'Input_EIA_Crude_WTI'!B140
value = 19.26
@register(Input_EIA_Crude_WTI)
class A141():
# 'Input_EIA_Crude_WTI'!A141
value = 35626
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B141():
# 'Input_EIA_Crude_WTI'!B141
value = 19.66
@register(Input_EIA_Crude_WTI)
class A142():
# 'Input_EIA_Crude_WTI'!A142
value = 35657
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B142():
# 'Input_EIA_Crude_WTI'!B142
value = 19.95
@register(Input_EIA_Crude_WTI)
class A143():
# 'Input_EIA_Crude_WTI'!A143
value = 35688
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B143():
# 'Input_EIA_Crude_WTI'!B143
value = 19.8
@register(Input_EIA_Crude_WTI)
class A144():
# 'Input_EIA_Crude_WTI'!A144
value = 35718
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B144():
# 'Input_EIA_Crude_WTI'!B144
value = 21.33
@register(Input_EIA_Crude_WTI)
class A145():
# 'Input_EIA_Crude_WTI'!A145
value = 35749
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B145():
# 'Input_EIA_Crude_WTI'!B145
value = 20.19
@register(Input_EIA_Crude_WTI)
class A146():
# 'Input_EIA_Crude_WTI'!A146
value = 35779
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B146():
# 'Input_EIA_Crude_WTI'!B146
value = 18.33
@register(Input_EIA_Crude_WTI)
class A147():
# 'Input_EIA_Crude_WTI'!A147
value = 35810
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B147():
# 'Input_EIA_Crude_WTI'!B147
value = 16.72
@register(Input_EIA_Crude_WTI)
class A148():
# 'Input_EIA_Crude_WTI'!A148
value = 35841
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B148():
# 'Input_EIA_Crude_WTI'!B148
value = 16.06
@register(Input_EIA_Crude_WTI)
class A149():
# 'Input_EIA_Crude_WTI'!A149
value = 35869
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B149():
# 'Input_EIA_Crude_WTI'!B149
value = 15.12
@register(Input_EIA_Crude_WTI)
class A150():
# 'Input_EIA_Crude_WTI'!A150
value = 35900
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B150():
# 'Input_EIA_Crude_WTI'!B150
value = 15.35
@register(Input_EIA_Crude_WTI)
class A151():
# 'Input_EIA_Crude_WTI'!A151
value = 35930
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B151():
# 'Input_EIA_Crude_WTI'!B151
value = 14.91
@register(Input_EIA_Crude_WTI)
class A152():
# 'Input_EIA_Crude_WTI'!A152
value = 35961
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B152():
# 'Input_EIA_Crude_WTI'!B152
value = 13.72
@register(Input_EIA_Crude_WTI)
class A153():
# 'Input_EIA_Crude_WTI'!A153
value = 35991
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B153():
# 'Input_EIA_Crude_WTI'!B153
value = 14.17
@register(Input_EIA_Crude_WTI)
class A154():
# 'Input_EIA_Crude_WTI'!A154
value = 36022
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B154():
# 'Input_EIA_Crude_WTI'!B154
value = 13.47
@register(Input_EIA_Crude_WTI)
class A155():
# 'Input_EIA_Crude_WTI'!A155
value = 36053
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B155():
# 'Input_EIA_Crude_WTI'!B155
value = 15.03
@register(Input_EIA_Crude_WTI)
class A156():
# 'Input_EIA_Crude_WTI'!A156
value = 36083
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B156():
# 'Input_EIA_Crude_WTI'!B156
value = 14.46
@register(Input_EIA_Crude_WTI)
class A157():
# 'Input_EIA_Crude_WTI'!A157
value = 36114
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B157():
# 'Input_EIA_Crude_WTI'!B157
value = 13
@register(Input_EIA_Crude_WTI)
class A158():
# 'Input_EIA_Crude_WTI'!A158
value = 36144
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B158():
# 'Input_EIA_Crude_WTI'!B158
value = 11.35
@register(Input_EIA_Crude_WTI)
class A159():
# 'Input_EIA_Crude_WTI'!A159
value = 36175
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B159():
# 'Input_EIA_Crude_WTI'!B159
value = 12.52
@register(Input_EIA_Crude_WTI)
class A160():
# 'Input_EIA_Crude_WTI'!A160
value = 36206
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B160():
# 'Input_EIA_Crude_WTI'!B160
value = 12.01
@register(Input_EIA_Crude_WTI)
class A161():
# 'Input_EIA_Crude_WTI'!A161
value = 36234
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B161():
# 'Input_EIA_Crude_WTI'!B161
value = 14.68
@register(Input_EIA_Crude_WTI)
class A162():
# 'Input_EIA_Crude_WTI'!A162
value = 36265
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B162():
# 'Input_EIA_Crude_WTI'!B162
value = 17.31
@register(Input_EIA_Crude_WTI)
class A163():
# 'Input_EIA_Crude_WTI'!A163
value = 36295
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B163():
# 'Input_EIA_Crude_WTI'!B163
value = 17.72
@register(Input_EIA_Crude_WTI)
class A164():
# 'Input_EIA_Crude_WTI'!A164
value = 36326
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B164():
# 'Input_EIA_Crude_WTI'!B164
value = 17.92
@register(Input_EIA_Crude_WTI)
class A165():
# 'Input_EIA_Crude_WTI'!A165
value = 36356
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B165():
# 'Input_EIA_Crude_WTI'!B165
value = 20.1
@register(Input_EIA_Crude_WTI)
class A166():
# 'Input_EIA_Crude_WTI'!A166
value = 36387
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B166():
# 'Input_EIA_Crude_WTI'!B166
value = 21.28
@register(Input_EIA_Crude_WTI)
class A167():
# 'Input_EIA_Crude_WTI'!A167
value = 36418
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B167():
# 'Input_EIA_Crude_WTI'!B167
value = 23.8
@register(Input_EIA_Crude_WTI)
class A168():
# 'Input_EIA_Crude_WTI'!A168
value = 36448
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B168():
# 'Input_EIA_Crude_WTI'!B168
value = 22.69
@register(Input_EIA_Crude_WTI)
class A169():
# 'Input_EIA_Crude_WTI'!A169
value = 36479
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B169():
# 'Input_EIA_Crude_WTI'!B169
value = 25
@register(Input_EIA_Crude_WTI)
class A170():
# 'Input_EIA_Crude_WTI'!A170
value = 36509
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B170():
# 'Input_EIA_Crude_WTI'!B170
value = 26.1
@register(Input_EIA_Crude_WTI)
class A171():
# 'Input_EIA_Crude_WTI'!A171
value = 36540
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B171():
# 'Input_EIA_Crude_WTI'!B171
value = 27.26
@register(Input_EIA_Crude_WTI)
class A172():
# 'Input_EIA_Crude_WTI'!A172
value = 36571
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B172():
# 'Input_EIA_Crude_WTI'!B172
value = 29.37
@register(Input_EIA_Crude_WTI)
class A173():
# 'Input_EIA_Crude_WTI'!A173
value = 36600
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B173():
# 'Input_EIA_Crude_WTI'!B173
value = 29.84
@register(Input_EIA_Crude_WTI)
class A174():
# 'Input_EIA_Crude_WTI'!A174
value = 36631
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B174():
# 'Input_EIA_Crude_WTI'!B174
value = 25.72
@register(Input_EIA_Crude_WTI)
class A175():
# 'Input_EIA_Crude_WTI'!A175
value = 36661
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B175():
# 'Input_EIA_Crude_WTI'!B175
value = 28.79
@register(Input_EIA_Crude_WTI)
class A176():
# 'Input_EIA_Crude_WTI'!A176
value = 36692
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B176():
# 'Input_EIA_Crude_WTI'!B176
value = 31.82
@register(Input_EIA_Crude_WTI)
class A177():
# 'Input_EIA_Crude_WTI'!A177
value = 36722
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B177():
# 'Input_EIA_Crude_WTI'!B177
value = 29.7
@register(Input_EIA_Crude_WTI)
class A178():
# 'Input_EIA_Crude_WTI'!A178
value = 36753
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B178():
# 'Input_EIA_Crude_WTI'!B178
value = 31.26
@register(Input_EIA_Crude_WTI)
class A179():
# 'Input_EIA_Crude_WTI'!A179
value = 36784
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B179():
# 'Input_EIA_Crude_WTI'!B179
value = 33.88
@register(Input_EIA_Crude_WTI)
class A180():
# 'Input_EIA_Crude_WTI'!A180
value = 36814
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B180():
# 'Input_EIA_Crude_WTI'!B180
value = 33.11
@register(Input_EIA_Crude_WTI)
class A181():
# 'Input_EIA_Crude_WTI'!A181
value = 36845
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B181():
# 'Input_EIA_Crude_WTI'!B181
value = 34.42
@register(Input_EIA_Crude_WTI)
class A182():
# 'Input_EIA_Crude_WTI'!A182
value = 36875
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B182():
# 'Input_EIA_Crude_WTI'!B182
value = 28.44
@register(Input_EIA_Crude_WTI)
class A183():
# 'Input_EIA_Crude_WTI'!A183
value = 36906
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B183():
# 'Input_EIA_Crude_WTI'!B183
value = 29.59
@register(Input_EIA_Crude_WTI)
class A184():
# 'Input_EIA_Crude_WTI'!A184
value = 36937
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B184():
# 'Input_EIA_Crude_WTI'!B184
value = 29.61
@register(Input_EIA_Crude_WTI)
class A185():
# 'Input_EIA_Crude_WTI'!A185
value = 36965
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B185():
# 'Input_EIA_Crude_WTI'!B185
value = 27.25
@register(Input_EIA_Crude_WTI)
class A186():
# 'Input_EIA_Crude_WTI'!A186
value = 36996
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B186():
# 'Input_EIA_Crude_WTI'!B186
value = 27.49
@register(Input_EIA_Crude_WTI)
class A187():
# 'Input_EIA_Crude_WTI'!A187
value = 37026
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B187():
# 'Input_EIA_Crude_WTI'!B187
value = 28.63
@register(Input_EIA_Crude_WTI)
class A188():
# 'Input_EIA_Crude_WTI'!A188
value = 37057
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B188():
# 'Input_EIA_Crude_WTI'!B188
value = 27.6
@register(Input_EIA_Crude_WTI)
class A189():
# 'Input_EIA_Crude_WTI'!A189
value = 37087
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B189():
# 'Input_EIA_Crude_WTI'!B189
value = 26.43
@register(Input_EIA_Crude_WTI)
class A190():
# 'Input_EIA_Crude_WTI'!A190
value = 37118
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B190():
# 'Input_EIA_Crude_WTI'!B190
value = 27.37
@register(Input_EIA_Crude_WTI)
class A191():
# 'Input_EIA_Crude_WTI'!A191
value = 37149
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B191():
# 'Input_EIA_Crude_WTI'!B191
value = 26.2
@register(Input_EIA_Crude_WTI)
class A192():
# 'Input_EIA_Crude_WTI'!A192
value = 37179
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B192():
# 'Input_EIA_Crude_WTI'!B192
value = 22.17
@register(Input_EIA_Crude_WTI)
class A193():
# 'Input_EIA_Crude_WTI'!A193
value = 37210
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B193():
# 'Input_EIA_Crude_WTI'!B193
value = 19.64
@register(Input_EIA_Crude_WTI)
class A194():
# 'Input_EIA_Crude_WTI'!A194
value = 37240
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B194():
# 'Input_EIA_Crude_WTI'!B194
value = 19.39
@register(Input_EIA_Crude_WTI)
class A195():
# 'Input_EIA_Crude_WTI'!A195
value = 37271
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B195():
# 'Input_EIA_Crude_WTI'!B195
value = 19.72
@register(Input_EIA_Crude_WTI)
class A196():
# 'Input_EIA_Crude_WTI'!A196
value = 37302
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B196():
# 'Input_EIA_Crude_WTI'!B196
value = 20.72
@register(Input_EIA_Crude_WTI)
class A197():
# 'Input_EIA_Crude_WTI'!A197
value = 37330
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B197():
# 'Input_EIA_Crude_WTI'!B197
value = 24.53
@register(Input_EIA_Crude_WTI)
class A198():
# 'Input_EIA_Crude_WTI'!A198
value = 37361
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B198():
# 'Input_EIA_Crude_WTI'!B198
value = 26.18
@register(Input_EIA_Crude_WTI)
class A199():
# 'Input_EIA_Crude_WTI'!A199
value = 37391
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B199():
# 'Input_EIA_Crude_WTI'!B199
value = 27.04
@register(Input_EIA_Crude_WTI)
class A200():
# 'Input_EIA_Crude_WTI'!A200
value = 37422
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B200():
# 'Input_EIA_Crude_WTI'!B200
value = 25.52
@register(Input_EIA_Crude_WTI)
class A201():
# 'Input_EIA_Crude_WTI'!A201
value = 37452
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B201():
# 'Input_EIA_Crude_WTI'!B201
value = 26.97
@register(Input_EIA_Crude_WTI)
class A202():
# 'Input_EIA_Crude_WTI'!A202
value = 37483
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B202():
# 'Input_EIA_Crude_WTI'!B202
value = 28.39
@register(Input_EIA_Crude_WTI)
class A203():
# 'Input_EIA_Crude_WTI'!A203
value = 37514
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B203():
# 'Input_EIA_Crude_WTI'!B203
value = 29.66
@register(Input_EIA_Crude_WTI)
class A204():
# 'Input_EIA_Crude_WTI'!A204
value = 37544
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B204():
# 'Input_EIA_Crude_WTI'!B204
value = 28.84
@register(Input_EIA_Crude_WTI)
class A205():
# 'Input_EIA_Crude_WTI'!A205
value = 37575
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B205():
# 'Input_EIA_Crude_WTI'!B205
value = 26.35
@register(Input_EIA_Crude_WTI)
class A206():
# 'Input_EIA_Crude_WTI'!A206
value = 37605
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B206():
# 'Input_EIA_Crude_WTI'!B206
value = 29.46
@register(Input_EIA_Crude_WTI)
class A207():
# 'Input_EIA_Crude_WTI'!A207
value = 37636
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B207():
# 'Input_EIA_Crude_WTI'!B207
value = 32.95
@register(Input_EIA_Crude_WTI)
class A208():
# 'Input_EIA_Crude_WTI'!A208
value = 37667
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B208():
# 'Input_EIA_Crude_WTI'!B208
value = 35.83
@register(Input_EIA_Crude_WTI)
class A209():
# 'Input_EIA_Crude_WTI'!A209
value = 37695
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B209():
# 'Input_EIA_Crude_WTI'!B209
value = 33.51
@register(Input_EIA_Crude_WTI)
class A210():
# 'Input_EIA_Crude_WTI'!A210
value = 37726
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B210():
# 'Input_EIA_Crude_WTI'!B210
value = 28.17
@register(Input_EIA_Crude_WTI)
class A211():
# 'Input_EIA_Crude_WTI'!A211
value = 37756
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B211():
# 'Input_EIA_Crude_WTI'!B211
value = 28.11
@register(Input_EIA_Crude_WTI)
class A212():
# 'Input_EIA_Crude_WTI'!A212
value = 37787
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B212():
# 'Input_EIA_Crude_WTI'!B212
value = 30.66
@register(Input_EIA_Crude_WTI)
class A213():
# 'Input_EIA_Crude_WTI'!A213
value = 37817
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B213():
# 'Input_EIA_Crude_WTI'!B213
value = 30.76
@register(Input_EIA_Crude_WTI)
class A214():
# 'Input_EIA_Crude_WTI'!A214
value = 37848
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B214():
# 'Input_EIA_Crude_WTI'!B214
value = 31.57
@register(Input_EIA_Crude_WTI)
class A215():
# 'Input_EIA_Crude_WTI'!A215
value = 37879
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B215():
# 'Input_EIA_Crude_WTI'!B215
value = 28.31
@register(Input_EIA_Crude_WTI)
class A216():
# 'Input_EIA_Crude_WTI'!A216
value = 37909
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B216():
# 'Input_EIA_Crude_WTI'!B216
value = 30.34
@register(Input_EIA_Crude_WTI)
class A217():
# 'Input_EIA_Crude_WTI'!A217
value = 37940
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B217():
# 'Input_EIA_Crude_WTI'!B217
value = 31.11
@register(Input_EIA_Crude_WTI)
class A218():
# 'Input_EIA_Crude_WTI'!A218
value = 37970
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B218():
# 'Input_EIA_Crude_WTI'!B218
value = 32.13
@register(Input_EIA_Crude_WTI)
class A219():
# 'Input_EIA_Crude_WTI'!A219
value = 38001
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B219():
# 'Input_EIA_Crude_WTI'!B219
value = 34.31
@register(Input_EIA_Crude_WTI)
class A220():
# 'Input_EIA_Crude_WTI'!A220
value = 38032
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B220():
# 'Input_EIA_Crude_WTI'!B220
value = 34.69
@register(Input_EIA_Crude_WTI)
class A221():
# 'Input_EIA_Crude_WTI'!A221
value = 38061
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B221():
# 'Input_EIA_Crude_WTI'!B221
value = 36.74
@register(Input_EIA_Crude_WTI)
class A222():
# 'Input_EIA_Crude_WTI'!A222
value = 38092
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B222():
# 'Input_EIA_Crude_WTI'!B222
value = 36.75
@register(Input_EIA_Crude_WTI)
class A223():
# 'Input_EIA_Crude_WTI'!A223
value = 38122
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B223():
# 'Input_EIA_Crude_WTI'!B223
value = 40.28
@register(Input_EIA_Crude_WTI)
class A224():
# 'Input_EIA_Crude_WTI'!A224
value = 38153
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B224():
# 'Input_EIA_Crude_WTI'!B224
value = 38.03
@register(Input_EIA_Crude_WTI)
class A225():
# 'Input_EIA_Crude_WTI'!A225
value = 38183
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B225():
# 'Input_EIA_Crude_WTI'!B225
value = 40.78
@register(Input_EIA_Crude_WTI)
class A226():
# 'Input_EIA_Crude_WTI'!A226
value = 38214
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B226():
# 'Input_EIA_Crude_WTI'!B226
value = 44.9
@register(Input_EIA_Crude_WTI)
class A227():
# 'Input_EIA_Crude_WTI'!A227
value = 38245
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B227():
# 'Input_EIA_Crude_WTI'!B227
value = 45.94
@register(Input_EIA_Crude_WTI)
class A228():
# 'Input_EIA_Crude_WTI'!A228
value = 38275
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B228():
# 'Input_EIA_Crude_WTI'!B228
value = 53.28
@register(Input_EIA_Crude_WTI)
class A229():
# 'Input_EIA_Crude_WTI'!A229
value = 38306
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B229():
# 'Input_EIA_Crude_WTI'!B229
value = 48.47
@register(Input_EIA_Crude_WTI)
class A230():
# 'Input_EIA_Crude_WTI'!A230
value = 38336
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B230():
# 'Input_EIA_Crude_WTI'!B230
value = 43.15
@register(Input_EIA_Crude_WTI)
class A231():
# 'Input_EIA_Crude_WTI'!A231
value = 38367
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B231():
# 'Input_EIA_Crude_WTI'!B231
value = 46.84
@register(Input_EIA_Crude_WTI)
class A232():
# 'Input_EIA_Crude_WTI'!A232
value = 38398
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B232():
# 'Input_EIA_Crude_WTI'!B232
value = 48.15
@register(Input_EIA_Crude_WTI)
class A233():
# 'Input_EIA_Crude_WTI'!A233
value = 38426
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B233():
# 'Input_EIA_Crude_WTI'!B233
value = 54.19
@register(Input_EIA_Crude_WTI)
class A234():
# 'Input_EIA_Crude_WTI'!A234
value = 38457
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B234():
# 'Input_EIA_Crude_WTI'!B234
value = 52.98
@register(Input_EIA_Crude_WTI)
class A235():
# 'Input_EIA_Crude_WTI'!A235
value = 38487
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B235():
# 'Input_EIA_Crude_WTI'!B235
value = 49.83
@register(Input_EIA_Crude_WTI)
class A236():
# 'Input_EIA_Crude_WTI'!A236
value = 38518
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B236():
# 'Input_EIA_Crude_WTI'!B236
value = 56.35
@register(Input_EIA_Crude_WTI)
class A237():
# 'Input_EIA_Crude_WTI'!A237
value = 38548
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B237():
# 'Input_EIA_Crude_WTI'!B237
value = 59
@register(Input_EIA_Crude_WTI)
class A238():
# 'Input_EIA_Crude_WTI'!A238
value = 38579
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B238():
# 'Input_EIA_Crude_WTI'!B238
value = 64.99
@register(Input_EIA_Crude_WTI)
class A239():
# 'Input_EIA_Crude_WTI'!A239
value = 38610
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B239():
# 'Input_EIA_Crude_WTI'!B239
value = 65.59
@register(Input_EIA_Crude_WTI)
class A240():
# 'Input_EIA_Crude_WTI'!A240
value = 38640
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B240():
# 'Input_EIA_Crude_WTI'!B240
value = 62.26
@register(Input_EIA_Crude_WTI)
class A241():
# 'Input_EIA_Crude_WTI'!A241
value = 38671
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B241():
# 'Input_EIA_Crude_WTI'!B241
value = 58.32
@register(Input_EIA_Crude_WTI)
class A242():
# 'Input_EIA_Crude_WTI'!A242
value = 38701
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B242():
# 'Input_EIA_Crude_WTI'!B242
value = 59.41
@register(Input_EIA_Crude_WTI)
class A243():
# 'Input_EIA_Crude_WTI'!A243
value = 38732
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B243():
# 'Input_EIA_Crude_WTI'!B243
value = 65.49
@register(Input_EIA_Crude_WTI)
class A244():
# 'Input_EIA_Crude_WTI'!A244
value = 38763
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B244():
# 'Input_EIA_Crude_WTI'!B244
value = 61.63
@register(Input_EIA_Crude_WTI)
class A245():
# 'Input_EIA_Crude_WTI'!A245
value = 38791
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B245():
# 'Input_EIA_Crude_WTI'!B245
value = 62.69
@register(Input_EIA_Crude_WTI)
class A246():
# 'Input_EIA_Crude_WTI'!A246
value = 38822
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B246():
# 'Input_EIA_Crude_WTI'!B246
value = 69.44
@register(Input_EIA_Crude_WTI)
class A247():
# 'Input_EIA_Crude_WTI'!A247
value = 38852
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B247():
# 'Input_EIA_Crude_WTI'!B247
value = 70.84
@register(Input_EIA_Crude_WTI)
class A248():
# 'Input_EIA_Crude_WTI'!A248
value = 38883
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B248():
# 'Input_EIA_Crude_WTI'!B248
value = 70.95
@register(Input_EIA_Crude_WTI)
class A249():
# 'Input_EIA_Crude_WTI'!A249
value = 38913
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B249():
# 'Input_EIA_Crude_WTI'!B249
value = 74.41
@register(Input_EIA_Crude_WTI)
class A250():
# 'Input_EIA_Crude_WTI'!A250
value = 38944
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B250():
# 'Input_EIA_Crude_WTI'!B250
value = 73.04
@register(Input_EIA_Crude_WTI)
class A251():
# 'Input_EIA_Crude_WTI'!A251
value = 38975
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B251():
# 'Input_EIA_Crude_WTI'!B251
value = 63.8
@register(Input_EIA_Crude_WTI)
class A252():
# 'Input_EIA_Crude_WTI'!A252
value = 39005
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B252():
# 'Input_EIA_Crude_WTI'!B252
value = 58.89
@register(Input_EIA_Crude_WTI)
class A253():
# 'Input_EIA_Crude_WTI'!A253
value = 39036
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B253():
# 'Input_EIA_Crude_WTI'!B253
value = 59.08
@register(Input_EIA_Crude_WTI)
class A254():
# 'Input_EIA_Crude_WTI'!A254
value = 39066
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B254():
# 'Input_EIA_Crude_WTI'!B254
value = 61.96
@register(Input_EIA_Crude_WTI)
class A255():
# 'Input_EIA_Crude_WTI'!A255
value = 39097
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B255():
# 'Input_EIA_Crude_WTI'!B255
value = 54.51
@register(Input_EIA_Crude_WTI)
class A256():
# 'Input_EIA_Crude_WTI'!A256
value = 39128
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B256():
# 'Input_EIA_Crude_WTI'!B256
value = 59.28
@register(Input_EIA_Crude_WTI)
class A257():
# 'Input_EIA_Crude_WTI'!A257
value = 39156
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B257():
# 'Input_EIA_Crude_WTI'!B257
value = 60.44
@register(Input_EIA_Crude_WTI)
class A258():
# 'Input_EIA_Crude_WTI'!A258
value = 39187
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B258():
# 'Input_EIA_Crude_WTI'!B258
value = 63.98
@register(Input_EIA_Crude_WTI)
class A259():
# 'Input_EIA_Crude_WTI'!A259
value = 39217
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B259():
# 'Input_EIA_Crude_WTI'!B259
value = 63.46
@register(Input_EIA_Crude_WTI)
class A260():
# 'Input_EIA_Crude_WTI'!A260
value = 39248
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B260():
# 'Input_EIA_Crude_WTI'!B260
value = 67.49
@register(Input_EIA_Crude_WTI)
class A261():
# 'Input_EIA_Crude_WTI'!A261
value = 39278
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B261():
# 'Input_EIA_Crude_WTI'!B261
value = 74.12
@register(Input_EIA_Crude_WTI)
class A262():
# 'Input_EIA_Crude_WTI'!A262
value = 39309
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B262():
# 'Input_EIA_Crude_WTI'!B262
value = 72.36
@register(Input_EIA_Crude_WTI)
class A263():
# 'Input_EIA_Crude_WTI'!A263
value = 39340
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B263():
# 'Input_EIA_Crude_WTI'!B263
value = 79.92
@register(Input_EIA_Crude_WTI)
class A264():
# 'Input_EIA_Crude_WTI'!A264
value = 39370
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B264():
# 'Input_EIA_Crude_WTI'!B264
value = 85.8
@register(Input_EIA_Crude_WTI)
class A265():
# 'Input_EIA_Crude_WTI'!A265
value = 39401
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B265():
# 'Input_EIA_Crude_WTI'!B265
value = 94.77
@register(Input_EIA_Crude_WTI)
class A266():
# 'Input_EIA_Crude_WTI'!A266
value = 39431
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B266():
# 'Input_EIA_Crude_WTI'!B266
value = 91.69
@register(Input_EIA_Crude_WTI)
class A267():
# 'Input_EIA_Crude_WTI'!A267
value = 39462
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B267():
# 'Input_EIA_Crude_WTI'!B267
value = 92.97
@register(Input_EIA_Crude_WTI)
class A268():
# 'Input_EIA_Crude_WTI'!A268
value = 39493
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B268():
# 'Input_EIA_Crude_WTI'!B268
value = 95.39
@register(Input_EIA_Crude_WTI)
class A269():
# 'Input_EIA_Crude_WTI'!A269
value = 39522
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B269():
# 'Input_EIA_Crude_WTI'!B269
value = 105.45
@register(Input_EIA_Crude_WTI)
class A270():
# 'Input_EIA_Crude_WTI'!A270
value = 39553
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B270():
# 'Input_EIA_Crude_WTI'!B270
value = 112.58
@register(Input_EIA_Crude_WTI)
class A271():
# 'Input_EIA_Crude_WTI'!A271
value = 39583
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B271():
# 'Input_EIA_Crude_WTI'!B271
value = 125.4
@register(Input_EIA_Crude_WTI)
class A272():
# 'Input_EIA_Crude_WTI'!A272
value = 39614
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B272():
# 'Input_EIA_Crude_WTI'!B272
value = 133.88
@register(Input_EIA_Crude_WTI)
class A273():
# 'Input_EIA_Crude_WTI'!A273
value = 39644
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B273():
# 'Input_EIA_Crude_WTI'!B273
value = 133.37
@register(Input_EIA_Crude_WTI)
class A274():
# 'Input_EIA_Crude_WTI'!A274
value = 39675
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B274():
# 'Input_EIA_Crude_WTI'!B274
value = 116.67
@register(Input_EIA_Crude_WTI)
class A275():
# 'Input_EIA_Crude_WTI'!A275
value = 39706
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B275():
# 'Input_EIA_Crude_WTI'!B275
value = 104.11
@register(Input_EIA_Crude_WTI)
class A276():
# 'Input_EIA_Crude_WTI'!A276
value = 39736
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B276():
# 'Input_EIA_Crude_WTI'!B276
value = 76.61
@register(Input_EIA_Crude_WTI)
class A277():
# 'Input_EIA_Crude_WTI'!A277
value = 39767
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B277():
# 'Input_EIA_Crude_WTI'!B277
value = 57.31
@register(Input_EIA_Crude_WTI)
class A278():
# 'Input_EIA_Crude_WTI'!A278
value = 39797
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B278():
# 'Input_EIA_Crude_WTI'!B278
value = 41.12
@register(Input_EIA_Crude_WTI)
class A279():
# 'Input_EIA_Crude_WTI'!A279
value = 39828
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B279():
# 'Input_EIA_Crude_WTI'!B279
value = 41.71
@register(Input_EIA_Crude_WTI)
class A280():
# 'Input_EIA_Crude_WTI'!A280
value = 39859
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B280():
# 'Input_EIA_Crude_WTI'!B280
value = 39.09
@register(Input_EIA_Crude_WTI)
class A281():
# 'Input_EIA_Crude_WTI'!A281
value = 39887
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B281():
# 'Input_EIA_Crude_WTI'!B281
value = 47.94
@register(Input_EIA_Crude_WTI)
class A282():
# 'Input_EIA_Crude_WTI'!A282
value = 39918
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B282():
# 'Input_EIA_Crude_WTI'!B282
value = 49.65
@register(Input_EIA_Crude_WTI)
class A283():
# 'Input_EIA_Crude_WTI'!A283
value = 39948
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B283():
# 'Input_EIA_Crude_WTI'!B283
value = 59.03
@register(Input_EIA_Crude_WTI)
class A284():
# 'Input_EIA_Crude_WTI'!A284
value = 39979
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B284():
# 'Input_EIA_Crude_WTI'!B284
value = 69.64
@register(Input_EIA_Crude_WTI)
class A285():
# 'Input_EIA_Crude_WTI'!A285
value = 40009
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B285():
# 'Input_EIA_Crude_WTI'!B285
value = 64.15
@register(Input_EIA_Crude_WTI)
class A286():
# 'Input_EIA_Crude_WTI'!A286
value = 40040
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B286():
# 'Input_EIA_Crude_WTI'!B286
value = 71.05
@register(Input_EIA_Crude_WTI)
class A287():
# 'Input_EIA_Crude_WTI'!A287
value = 40071
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B287():
# 'Input_EIA_Crude_WTI'!B287
value = 69.41
@register(Input_EIA_Crude_WTI)
class A288():
# 'Input_EIA_Crude_WTI'!A288
value = 40101
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B288():
# 'Input_EIA_Crude_WTI'!B288
value = 75.72
@register(Input_EIA_Crude_WTI)
class A289():
# 'Input_EIA_Crude_WTI'!A289
value = 40132
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B289():
# 'Input_EIA_Crude_WTI'!B289
value = 77.99
@register(Input_EIA_Crude_WTI)
class A290():
# 'Input_EIA_Crude_WTI'!A290
value = 40162
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B290():
# 'Input_EIA_Crude_WTI'!B290
value = 74.47
@register(Input_EIA_Crude_WTI)
class A291():
# 'Input_EIA_Crude_WTI'!A291
value = 40193
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B291():
# 'Input_EIA_Crude_WTI'!B291
value = 78.33
@register(Input_EIA_Crude_WTI)
class A292():
# 'Input_EIA_Crude_WTI'!A292
value = 40224
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B292():
# 'Input_EIA_Crude_WTI'!B292
value = 76.39
@register(Input_EIA_Crude_WTI)
class A293():
# 'Input_EIA_Crude_WTI'!A293
value = 40252
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B293():
# 'Input_EIA_Crude_WTI'!B293
value = 81.2
@register(Input_EIA_Crude_WTI)
class A294():
# 'Input_EIA_Crude_WTI'!A294
value = 40283
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B294():
# 'Input_EIA_Crude_WTI'!B294
value = 84.29
@register(Input_EIA_Crude_WTI)
class A295():
# 'Input_EIA_Crude_WTI'!A295
value = 40313
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B295():
# 'Input_EIA_Crude_WTI'!B295
value = 73.74
@register(Input_EIA_Crude_WTI)
class A296():
# 'Input_EIA_Crude_WTI'!A296
value = 40344
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B296():
# 'Input_EIA_Crude_WTI'!B296
value = 75.34
@register(Input_EIA_Crude_WTI)
class A297():
# 'Input_EIA_Crude_WTI'!A297
value = 40374
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B297():
# 'Input_EIA_Crude_WTI'!B297
value = 76.32
@register(Input_EIA_Crude_WTI)
class A298():
# 'Input_EIA_Crude_WTI'!A298
value = 40405
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B298():
# 'Input_EIA_Crude_WTI'!B298
value = 76.6
@register(Input_EIA_Crude_WTI)
class A299():
# 'Input_EIA_Crude_WTI'!A299
value = 40436
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B299():
# 'Input_EIA_Crude_WTI'!B299
value = 75.24
@register(Input_EIA_Crude_WTI)
class A300():
# 'Input_EIA_Crude_WTI'!A300
value = 40466
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B300():
# 'Input_EIA_Crude_WTI'!B300
value = 81.89
@register(Input_EIA_Crude_WTI)
class A301():
# 'Input_EIA_Crude_WTI'!A301
value = 40497
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B301():
# 'Input_EIA_Crude_WTI'!B301
value = 84.25
@register(Input_EIA_Crude_WTI)
class A302():
# 'Input_EIA_Crude_WTI'!A302
value = 40527
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B302():
# 'Input_EIA_Crude_WTI'!B302
value = 89.15
@register(Input_EIA_Crude_WTI)
class A303():
# 'Input_EIA_Crude_WTI'!A303
value = 40558
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B303():
# 'Input_EIA_Crude_WTI'!B303
value = 89.17
@register(Input_EIA_Crude_WTI)
class A304():
# 'Input_EIA_Crude_WTI'!A304
value = 40589
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B304():
# 'Input_EIA_Crude_WTI'!B304
value = 88.58
@register(Input_EIA_Crude_WTI)
class A305():
# 'Input_EIA_Crude_WTI'!A305
value = 40617
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B305():
# 'Input_EIA_Crude_WTI'!B305
value = 102.86
@register(Input_EIA_Crude_WTI)
class A306():
# 'Input_EIA_Crude_WTI'!A306
value = 40648
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B306():
# 'Input_EIA_Crude_WTI'!B306
value = 109.53
@register(Input_EIA_Crude_WTI)
class A307():
# 'Input_EIA_Crude_WTI'!A307
value = 40678
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B307():
# 'Input_EIA_Crude_WTI'!B307
value = 100.9
@register(Input_EIA_Crude_WTI)
class A308():
# 'Input_EIA_Crude_WTI'!A308
value = 40709
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B308():
# 'Input_EIA_Crude_WTI'!B308
value = 96.26
@register(Input_EIA_Crude_WTI)
class A309():
# 'Input_EIA_Crude_WTI'!A309
value = 40739
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B309():
# 'Input_EIA_Crude_WTI'!B309
value = 97.3
@register(Input_EIA_Crude_WTI)
class A310():
# 'Input_EIA_Crude_WTI'!A310
value = 40770
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B310():
# 'Input_EIA_Crude_WTI'!B310
value = 86.33
@register(Input_EIA_Crude_WTI)
class A311():
# 'Input_EIA_Crude_WTI'!A311
value = 40801
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B311():
# 'Input_EIA_Crude_WTI'!B311
value = 85.52
@register(Input_EIA_Crude_WTI)
class A312():
# 'Input_EIA_Crude_WTI'!A312
value = 40831
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B312():
# 'Input_EIA_Crude_WTI'!B312
value = 86.32
@register(Input_EIA_Crude_WTI)
class A313():
# 'Input_EIA_Crude_WTI'!A313
value = 40862
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B313():
# 'Input_EIA_Crude_WTI'!B313
value = 97.16
@register(Input_EIA_Crude_WTI)
class A314():
# 'Input_EIA_Crude_WTI'!A314
value = 40892
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B314():
# 'Input_EIA_Crude_WTI'!B314
value = 98.56
@register(Input_EIA_Crude_WTI)
class A315():
# 'Input_EIA_Crude_WTI'!A315
value = 40923
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B315():
# 'Input_EIA_Crude_WTI'!B315
value = 100.27
@register(Input_EIA_Crude_WTI)
class A316():
# 'Input_EIA_Crude_WTI'!A316
value = 40954
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B316():
# 'Input_EIA_Crude_WTI'!B316
value = 102.2
@register(Input_EIA_Crude_WTI)
class A317():
# 'Input_EIA_Crude_WTI'!A317
value = 40983
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B317():
# 'Input_EIA_Crude_WTI'!B317
value = 106.16
@register(Input_EIA_Crude_WTI)
class A318():
# 'Input_EIA_Crude_WTI'!A318
value = 41014
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B318():
# 'Input_EIA_Crude_WTI'!B318
value = 103.32
@register(Input_EIA_Crude_WTI)
class A319():
# 'Input_EIA_Crude_WTI'!A319
value = 41044
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B319():
# 'Input_EIA_Crude_WTI'!B319
value = 94.66
@register(Input_EIA_Crude_WTI)
class A320():
# 'Input_EIA_Crude_WTI'!A320
value = 41075
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B320():
# 'Input_EIA_Crude_WTI'!B320
value = 82.3
@register(Input_EIA_Crude_WTI)
class A321():
# 'Input_EIA_Crude_WTI'!A321
value = 41105
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B321():
# 'Input_EIA_Crude_WTI'!B321
value = 87.9
@register(Input_EIA_Crude_WTI)
class A322():
# 'Input_EIA_Crude_WTI'!A322
value = 41136
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B322():
# 'Input_EIA_Crude_WTI'!B322
value = 94.13
@register(Input_EIA_Crude_WTI)
class A323():
# 'Input_EIA_Crude_WTI'!A323
value = 41167
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B323():
# 'Input_EIA_Crude_WTI'!B323
value = 94.51
@register(Input_EIA_Crude_WTI)
class A324():
# 'Input_EIA_Crude_WTI'!A324
value = 41197
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B324():
# 'Input_EIA_Crude_WTI'!B324
value = 89.49
@register(Input_EIA_Crude_WTI)
class A325():
# 'Input_EIA_Crude_WTI'!A325
value = 41228
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B325():
# 'Input_EIA_Crude_WTI'!B325
value = 86.53
@register(Input_EIA_Crude_WTI)
class A326():
# 'Input_EIA_Crude_WTI'!A326
value = 41258
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B326():
# 'Input_EIA_Crude_WTI'!B326
value = 87.86
@register(Input_EIA_Crude_WTI)
class A327():
# 'Input_EIA_Crude_WTI'!A327
value = 41289
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B327():
# 'Input_EIA_Crude_WTI'!B327
value = 94.76
@register(Input_EIA_Crude_WTI)
class A328():
# 'Input_EIA_Crude_WTI'!A328
value = 41320
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B328():
# 'Input_EIA_Crude_WTI'!B328
value = 95.31
@register(Input_EIA_Crude_WTI)
class A329():
# 'Input_EIA_Crude_WTI'!A329
value = 41348
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B329():
# 'Input_EIA_Crude_WTI'!B329
value = 92.94
@register(Input_EIA_Crude_WTI)
class A330():
# 'Input_EIA_Crude_WTI'!A330
value = 41379
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B330():
# 'Input_EIA_Crude_WTI'!B330
value = 92.02
@register(Input_EIA_Crude_WTI)
class A331():
# 'Input_EIA_Crude_WTI'!A331
value = 41409
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B331():
# 'Input_EIA_Crude_WTI'!B331
value = 94.51
@register(Input_EIA_Crude_WTI)
class A332():
# 'Input_EIA_Crude_WTI'!A332
value = 41440
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B332():
# 'Input_EIA_Crude_WTI'!B332
value = 95.77
@register(Input_EIA_Crude_WTI)
class A333():
# 'Input_EIA_Crude_WTI'!A333
value = 41470
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B333():
# 'Input_EIA_Crude_WTI'!B333
value = 104.67
@register(Input_EIA_Crude_WTI)
class A334():
# 'Input_EIA_Crude_WTI'!A334
value = 41501
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B334():
# 'Input_EIA_Crude_WTI'!B334
value = 106.57
@register(Input_EIA_Crude_WTI)
class A335():
# 'Input_EIA_Crude_WTI'!A335
value = 41532
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B335():
# 'Input_EIA_Crude_WTI'!B335
value = 106.29
@register(Input_EIA_Crude_WTI)
class A336():
# 'Input_EIA_Crude_WTI'!A336
value = 41562
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B336():
# 'Input_EIA_Crude_WTI'!B336
value = 100.54
@register(Input_EIA_Crude_WTI)
class A337():
# 'Input_EIA_Crude_WTI'!A337
value = 41593
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B337():
# 'Input_EIA_Crude_WTI'!B337
value = 93.86
@register(Input_EIA_Crude_WTI)
class A338():
# 'Input_EIA_Crude_WTI'!A338
value = 41623
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B338():
# 'Input_EIA_Crude_WTI'!B338
value = 97.63
@register(Input_EIA_Crude_WTI)
class A339():
# 'Input_EIA_Crude_WTI'!A339
value = 41654
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B339():
# 'Input_EIA_Crude_WTI'!B339
value = 94.62
@register(Input_EIA_Crude_WTI)
class A340():
# 'Input_EIA_Crude_WTI'!A340
value = 41685
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B340():
# 'Input_EIA_Crude_WTI'!B340
value = 100.82
@register(Input_EIA_Crude_WTI)
class A341():
# 'Input_EIA_Crude_WTI'!A341
value = 41713
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B341():
# 'Input_EIA_Crude_WTI'!B341
value = 100.8
@register(Input_EIA_Crude_WTI)
class A342():
# 'Input_EIA_Crude_WTI'!A342
value = 41744
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B342():
# 'Input_EIA_Crude_WTI'!B342
value = 102.07
@register(Input_EIA_Crude_WTI)
class A343():
# 'Input_EIA_Crude_WTI'!A343
value = 41774
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B343():
# 'Input_EIA_Crude_WTI'!B343
value = 102.18
@register(Input_EIA_Crude_WTI)
class A344():
# 'Input_EIA_Crude_WTI'!A344
value = 41805
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B344():
# 'Input_EIA_Crude_WTI'!B344
value = 105.79
@register(Input_EIA_Crude_WTI)
class A345():
# 'Input_EIA_Crude_WTI'!A345
value = 41835
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B345():
# 'Input_EIA_Crude_WTI'!B345
value = 103.59
@register(Input_EIA_Crude_WTI)
class A346():
# 'Input_EIA_Crude_WTI'!A346
value = 41866
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B346():
# 'Input_EIA_Crude_WTI'!B346
value = 96.54
@register(Input_EIA_Crude_WTI)
class A347():
# 'Input_EIA_Crude_WTI'!A347
value = 41897
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B347():
# 'Input_EIA_Crude_WTI'!B347
value = 93.21
@register(Input_EIA_Crude_WTI)
class A348():
# 'Input_EIA_Crude_WTI'!A348
value = 41927
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B348():
# 'Input_EIA_Crude_WTI'!B348
value = 84.4
@register(Input_EIA_Crude_WTI)
class A349():
# 'Input_EIA_Crude_WTI'!A349
value = 41958
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B349():
# 'Input_EIA_Crude_WTI'!B349
value = 75.79
@register(Input_EIA_Crude_WTI)
class A350():
# 'Input_EIA_Crude_WTI'!A350
value = 41988
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B350():
# 'Input_EIA_Crude_WTI'!B350
value = 59.29
@register(Input_EIA_Crude_WTI)
class A351():
# 'Input_EIA_Crude_WTI'!A351
value = 42019
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B351():
# 'Input_EIA_Crude_WTI'!B351
value = 47.22
@register(Input_EIA_Crude_WTI)
class A352():
# 'Input_EIA_Crude_WTI'!A352
value = 42050
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B352():
# 'Input_EIA_Crude_WTI'!B352
value = 50.58
@register(Input_EIA_Crude_WTI)
class A353():
# 'Input_EIA_Crude_WTI'!A353
value = 42078
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B353():
# 'Input_EIA_Crude_WTI'!B353
value = 47.82
@register(Input_EIA_Crude_WTI)
class A354():
# 'Input_EIA_Crude_WTI'!A354
value = 42109
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B354():
# 'Input_EIA_Crude_WTI'!B354
value = 54.45
@register(Input_EIA_Crude_WTI)
class A355():
# 'Input_EIA_Crude_WTI'!A355
value = 42139
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B355():
# 'Input_EIA_Crude_WTI'!B355
value = 59.27
@register(Input_EIA_Crude_WTI)
class A356():
# 'Input_EIA_Crude_WTI'!A356
value = 42170
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B356():
# 'Input_EIA_Crude_WTI'!B356
value = 59.82
@register(Input_EIA_Crude_WTI)
class A357():
# 'Input_EIA_Crude_WTI'!A357
value = 42200
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B357():
# 'Input_EIA_Crude_WTI'!B357
value = 50.9
@register(Input_EIA_Crude_WTI)
class A358():
# 'Input_EIA_Crude_WTI'!A358
value = 42231
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B358():
# 'Input_EIA_Crude_WTI'!B358
value = 42.87
@register(Input_EIA_Crude_WTI)
class A359():
# 'Input_EIA_Crude_WTI'!A359
value = 42262
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B359():
# 'Input_EIA_Crude_WTI'!B359
value = 45.48
@register(Input_EIA_Crude_WTI)
class A360():
# 'Input_EIA_Crude_WTI'!A360
value = 42292
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B360():
# 'Input_EIA_Crude_WTI'!B360
value = 46.22
@register(Input_EIA_Crude_WTI)
class A361():
# 'Input_EIA_Crude_WTI'!A361
value = 42323
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B361():
# 'Input_EIA_Crude_WTI'!B361
value = 42.44
@register(Input_EIA_Crude_WTI)
class A362():
# 'Input_EIA_Crude_WTI'!A362
value = 42353
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B362():
# 'Input_EIA_Crude_WTI'!B362
value = 37.19
@register(Input_EIA_Crude_WTI)
class A363():
# 'Input_EIA_Crude_WTI'!A363
value = 42384
isdatetime = True
@register(Input_EIA_Crude_WTI)
class B363():
# 'Input_EIA_Crude_WTI'!B363
value = 31.68
@register(Input_EIA_Crude_WTI)
class A364():
# 'Input_EIA_Crude_WTI'!A364
value = 42415
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A365():
# 'Input_EIA_Crude_WTI'!A365
value = 42444
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A366():
# 'Input_EIA_Crude_WTI'!A366
value = 42475
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A367():
# 'Input_EIA_Crude_WTI'!A367
value = 42505
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A368():
# 'Input_EIA_Crude_WTI'!A368
value = 42536
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A369():
# 'Input_EIA_Crude_WTI'!A369
value = 42566
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A370():
# 'Input_EIA_Crude_WTI'!A370
value = 42597
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A371():
# 'Input_EIA_Crude_WTI'!A371
value = 42628
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A372():
# 'Input_EIA_Crude_WTI'!A372
value = 42658
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A373():
# 'Input_EIA_Crude_WTI'!A373
value = 42689
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A374():
# 'Input_EIA_Crude_WTI'!A374
value = 42719
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A375():
# 'Input_EIA_Crude_WTI'!A375
value = 42750
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A376():
# 'Input_EIA_Crude_WTI'!A376
value = 42781
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A377():
# 'Input_EIA_Crude_WTI'!A377
value = 42809
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A378():
# 'Input_EIA_Crude_WTI'!A378
value = 42840
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A379():
# 'Input_EIA_Crude_WTI'!A379
value = 42870
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A380():
# 'Input_EIA_Crude_WTI'!A380
value = 42901
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A381():
# 'Input_EIA_Crude_WTI'!A381
value = 42931
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A382():
# 'Input_EIA_Crude_WTI'!A382
value = 42962
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A383():
# 'Input_EIA_Crude_WTI'!A383
value = 42993
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A384():
# 'Input_EIA_Crude_WTI'!A384
value = 43023
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A385():
# 'Input_EIA_Crude_WTI'!A385
value = 43054
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A386():
# 'Input_EIA_Crude_WTI'!A386
value = 43084
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A387():
# 'Input_EIA_Crude_WTI'!A387
value = 43115
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A388():
# 'Input_EIA_Crude_WTI'!A388
value = 43146
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A389():
# 'Input_EIA_Crude_WTI'!A389
value = 43174
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A390():
# 'Input_EIA_Crude_WTI'!A390
value = 43205
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A391():
# 'Input_EIA_Crude_WTI'!A391
value = 43235
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A392():
# 'Input_EIA_Crude_WTI'!A392
value = 43266
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A393():
# 'Input_EIA_Crude_WTI'!A393
value = 43296
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A394():
# 'Input_EIA_Crude_WTI'!A394
value = 43327
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A395():
# 'Input_EIA_Crude_WTI'!A395
value = 43358
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A396():
# 'Input_EIA_Crude_WTI'!A396
value = 43388
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A397():
# 'Input_EIA_Crude_WTI'!A397
value = 43419
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A398():
# 'Input_EIA_Crude_WTI'!A398
value = 43449
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A399():
# 'Input_EIA_Crude_WTI'!A399
value = 43480
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A400():
# 'Input_EIA_Crude_WTI'!A400
value = 43511
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A401():
# 'Input_EIA_Crude_WTI'!A401
value = 43539
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A402():
# 'Input_EIA_Crude_WTI'!A402
value = 43570
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A403():
# 'Input_EIA_Crude_WTI'!A403
value = 43600
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A404():
# 'Input_EIA_Crude_WTI'!A404
value = 43631
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A405():
# 'Input_EIA_Crude_WTI'!A405
value = 43661
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A406():
# 'Input_EIA_Crude_WTI'!A406
value = 43692
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A407():
# 'Input_EIA_Crude_WTI'!A407
value = 43723
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A408():
# 'Input_EIA_Crude_WTI'!A408
value = 43753
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A409():
# 'Input_EIA_Crude_WTI'!A409
value = 43784
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A410():
# 'Input_EIA_Crude_WTI'!A410
value = 43814
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A411():
# 'Input_EIA_Crude_WTI'!A411
value = 43845
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A412():
# 'Input_EIA_Crude_WTI'!A412
value = 43876
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A413():
# 'Input_EIA_Crude_WTI'!A413
value = 43905
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A414():
# 'Input_EIA_Crude_WTI'!A414
value = 43936
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A415():
# 'Input_EIA_Crude_WTI'!A415
value = 43966
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A416():
# 'Input_EIA_Crude_WTI'!A416
value = 43997
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A417():
# 'Input_EIA_Crude_WTI'!A417
value = 44027
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A418():
# 'Input_EIA_Crude_WTI'!A418
value = 44058
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A419():
# 'Input_EIA_Crude_WTI'!A419
value = 44089
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A420():
# 'Input_EIA_Crude_WTI'!A420
value = 44119
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A421():
# 'Input_EIA_Crude_WTI'!A421
value = 44150
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A422():
# 'Input_EIA_Crude_WTI'!A422
value = 44180
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A423():
# 'Input_EIA_Crude_WTI'!A423
value = 44211
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A424():
# 'Input_EIA_Crude_WTI'!A424
value = 44242
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A425():
# 'Input_EIA_Crude_WTI'!A425
value = 44270
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A426():
# 'Input_EIA_Crude_WTI'!A426
value = 44301
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A427():
# 'Input_EIA_Crude_WTI'!A427
value = 44331
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A428():
# 'Input_EIA_Crude_WTI'!A428
value = 44362
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A429():
# 'Input_EIA_Crude_WTI'!A429
value = 44392
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A430():
# 'Input_EIA_Crude_WTI'!A430
value = 44423
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A431():
# 'Input_EIA_Crude_WTI'!A431
value = 44454
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A432():
# 'Input_EIA_Crude_WTI'!A432
value = 44484
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A433():
# 'Input_EIA_Crude_WTI'!A433
value = 44515
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A434():
# 'Input_EIA_Crude_WTI'!A434
value = 44545
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A435():
# 'Input_EIA_Crude_WTI'!A435
value = 44576
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A436():
# 'Input_EIA_Crude_WTI'!A436
value = 44607
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A437():
# 'Input_EIA_Crude_WTI'!A437
value = 44635
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A438():
# 'Input_EIA_Crude_WTI'!A438
value = 44666
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A439():
# 'Input_EIA_Crude_WTI'!A439
value = 44696
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A440():
# 'Input_EIA_Crude_WTI'!A440
value = 44727
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A441():
# 'Input_EIA_Crude_WTI'!A441
value = 44757
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A442():
# 'Input_EIA_Crude_WTI'!A442
value = 44788
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A443():
# 'Input_EIA_Crude_WTI'!A443
value = 44819
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A444():
# 'Input_EIA_Crude_WTI'!A444
value = 44849
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A445():
# 'Input_EIA_Crude_WTI'!A445
value = 44880
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A446():
# 'Input_EIA_Crude_WTI'!A446
value = 44910
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A447():
# 'Input_EIA_Crude_WTI'!A447
value = 44941
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A448():
# 'Input_EIA_Crude_WTI'!A448
value = 44972
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A449():
# 'Input_EIA_Crude_WTI'!A449
value = 45000
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A450():
# 'Input_EIA_Crude_WTI'!A450
value = 45031
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A451():
# 'Input_EIA_Crude_WTI'!A451
value = 45061
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A452():
# 'Input_EIA_Crude_WTI'!A452
value = 45092
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A453():
# 'Input_EIA_Crude_WTI'!A453
value = 45122
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A454():
# 'Input_EIA_Crude_WTI'!A454
value = 45153
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A455():
# 'Input_EIA_Crude_WTI'!A455
value = 45184
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A456():
# 'Input_EIA_Crude_WTI'!A456
value = 45214
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A457():
# 'Input_EIA_Crude_WTI'!A457
value = 45245
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A458():
# 'Input_EIA_Crude_WTI'!A458
value = 45275
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A459():
# 'Input_EIA_Crude_WTI'!A459
value = 45306
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A460():
# 'Input_EIA_Crude_WTI'!A460
value = 45337
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A461():
# 'Input_EIA_Crude_WTI'!A461
value = 45366
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A462():
# 'Input_EIA_Crude_WTI'!A462
value = 45397
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A463():
# 'Input_EIA_Crude_WTI'!A463
value = 45427
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A464():
# 'Input_EIA_Crude_WTI'!A464
value = 45458
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A465():
# 'Input_EIA_Crude_WTI'!A465
value = 45488
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A466():
# 'Input_EIA_Crude_WTI'!A466
value = 45519
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A467():
# 'Input_EIA_Crude_WTI'!A467
value = 45550
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A468():
# 'Input_EIA_Crude_WTI'!A468
value = 45580
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A469():
# 'Input_EIA_Crude_WTI'!A469
value = 45611
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A470():
# 'Input_EIA_Crude_WTI'!A470
value = 45641
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A471():
# 'Input_EIA_Crude_WTI'!A471
value = 45672
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A472():
# 'Input_EIA_Crude_WTI'!A472
value = 45703
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A473():
# 'Input_EIA_Crude_WTI'!A473
value = 45731
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A474():
# 'Input_EIA_Crude_WTI'!A474
value = 45762
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A475():
# 'Input_EIA_Crude_WTI'!A475
value = 45792
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A476():
# 'Input_EIA_Crude_WTI'!A476
value = 45823
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A477():
# 'Input_EIA_Crude_WTI'!A477
value = 45853
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A478():
# 'Input_EIA_Crude_WTI'!A478
value = 45884
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A479():
# 'Input_EIA_Crude_WTI'!A479
value = 45915
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A480():
# 'Input_EIA_Crude_WTI'!A480
value = 45945
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A481():
# 'Input_EIA_Crude_WTI'!A481
value = 45976
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A482():
# 'Input_EIA_Crude_WTI'!A482
value = 46006
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A483():
# 'Input_EIA_Crude_WTI'!A483
value = 46037
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A484():
# 'Input_EIA_Crude_WTI'!A484
value = 46068
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A485():
# 'Input_EIA_Crude_WTI'!A485
value = 46096
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A486():
# 'Input_EIA_Crude_WTI'!A486
value = 46127
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A487():
# 'Input_EIA_Crude_WTI'!A487
value = 46157
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A488():
# 'Input_EIA_Crude_WTI'!A488
value = 46188
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A489():
# 'Input_EIA_Crude_WTI'!A489
value = 46218
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A490():
# 'Input_EIA_Crude_WTI'!A490
value = 46249
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A491():
# 'Input_EIA_Crude_WTI'!A491
value = 46280
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A492():
# 'Input_EIA_Crude_WTI'!A492
value = 46310
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A493():
# 'Input_EIA_Crude_WTI'!A493
value = 46341
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A494():
# 'Input_EIA_Crude_WTI'!A494
value = 46371
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A495():
# 'Input_EIA_Crude_WTI'!A495
value = 46402
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A496():
# 'Input_EIA_Crude_WTI'!A496
value = 46433
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A497():
# 'Input_EIA_Crude_WTI'!A497
value = 46461
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A498():
# 'Input_EIA_Crude_WTI'!A498
value = 46492
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A499():
# 'Input_EIA_Crude_WTI'!A499
value = 46522
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A500():
# 'Input_EIA_Crude_WTI'!A500
value = 46553
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A501():
# 'Input_EIA_Crude_WTI'!A501
value = 46583
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A502():
# 'Input_EIA_Crude_WTI'!A502
value = 46614
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A503():
# 'Input_EIA_Crude_WTI'!A503
value = 46645
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A504():
# 'Input_EIA_Crude_WTI'!A504
value = 46675
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A505():
# 'Input_EIA_Crude_WTI'!A505
value = 46706
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A506():
# 'Input_EIA_Crude_WTI'!A506
value = 46736
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A507():
# 'Input_EIA_Crude_WTI'!A507
value = 46767
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A508():
# 'Input_EIA_Crude_WTI'!A508
value = 46798
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A509():
# 'Input_EIA_Crude_WTI'!A509
value = 46827
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A510():
# 'Input_EIA_Crude_WTI'!A510
value = 46858
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A511():
# 'Input_EIA_Crude_WTI'!A511
value = 46888
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A512():
# 'Input_EIA_Crude_WTI'!A512
value = 46919
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A513():
# 'Input_EIA_Crude_WTI'!A513
value = 46949
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A514():
# 'Input_EIA_Crude_WTI'!A514
value = 46980
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A515():
# 'Input_EIA_Crude_WTI'!A515
value = 47011
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A516():
# 'Input_EIA_Crude_WTI'!A516
value = 47041
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A517():
# 'Input_EIA_Crude_WTI'!A517
value = 47072
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A518():
# 'Input_EIA_Crude_WTI'!A518
value = 47102
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A519():
# 'Input_EIA_Crude_WTI'!A519
value = 47133
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A520():
# 'Input_EIA_Crude_WTI'!A520
value = 47164
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A521():
# 'Input_EIA_Crude_WTI'!A521
value = 47192
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A522():
# 'Input_EIA_Crude_WTI'!A522
value = 47223
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A523():
# 'Input_EIA_Crude_WTI'!A523
value = 47253
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A524():
# 'Input_EIA_Crude_WTI'!A524
value = 47284
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A525():
# 'Input_EIA_Crude_WTI'!A525
value = 47314
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A526():
# 'Input_EIA_Crude_WTI'!A526
value = 47345
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A527():
# 'Input_EIA_Crude_WTI'!A527
value = 47376
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A528():
# 'Input_EIA_Crude_WTI'!A528
value = 47406
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A529():
# 'Input_EIA_Crude_WTI'!A529
value = 47437
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A530():
# 'Input_EIA_Crude_WTI'!A530
value = 47467
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A531():
# 'Input_EIA_Crude_WTI'!A531
value = 47498
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A532():
# 'Input_EIA_Crude_WTI'!A532
value = 47529
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A533():
# 'Input_EIA_Crude_WTI'!A533
value = 47557
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A534():
# 'Input_EIA_Crude_WTI'!A534
value = 47588
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A535():
# 'Input_EIA_Crude_WTI'!A535
value = 47618
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A536():
# 'Input_EIA_Crude_WTI'!A536
value = 47649
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A537():
# 'Input_EIA_Crude_WTI'!A537
value = 47679
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A538():
# 'Input_EIA_Crude_WTI'!A538
value = 47710
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A539():
# 'Input_EIA_Crude_WTI'!A539
value = 47741
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A540():
# 'Input_EIA_Crude_WTI'!A540
value = 47771
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A541():
# 'Input_EIA_Crude_WTI'!A541
value = 47802
isdatetime = True
@register(Input_EIA_Crude_WTI)
class A542():
# 'Input_EIA_Crude_WTI'!A542
value = 47832
isdatetime = True | [
"hawaiicleanenergymetrics@gmail.com"
] | hawaiicleanenergymetrics@gmail.com |
a9851d4124db6cbf60e2964a3025c5fbf0291320 | 6bcd30d9fe661c500070bdeed3b0ceb1f543db55 | /server.py | b3d202d2278e08714814b00b7e0666b55bc5ce29 | [
"MIT"
] | permissive | baka-san/imagezmq | b6fa03d629226eabfca2ac29d3de5ccbe806f8d3 | fd9044000f6b43286d6224712a5a73e534fcfcf8 | refs/heads/master | 2020-06-23T21:32:02.040921 | 2019-08-16T08:38:35 | 2019-08-16T08:38:35 | 198,758,669 | 0 | 0 | MIT | 2019-07-25T04:42:32 | 2019-07-25T04:42:32 | null | UTF-8 | Python | false | false | 7,420 | py | import sys
import os
import pathlib
import argparse
import time
import darknet as dn
import numpy as np
import cv2
from PIL import Image, ImageFile
# A few variables
zmq_default = os.path.expanduser('~/') + 'imagezmq'
cwd = os.getcwd()
results_default = cwd + '/results/frames/'
# Parse arguments
ap = argparse.ArgumentParser()
ap.add_argument("--zmq", help="Full path to imagezmq main folder (defalut: ~/imagezmq).", default=zmq_default)
ap.add_argument("--cfg", help="Relative path to cfg file.", required=True)
ap.add_argument("--weights", help="Relative path to weights file.", required=True)
ap.add_argument("--data", help="Relative path to data file.", required=True)
ap.add_argument("--results", help="Relative path to results folder in format dir_1/dir_2/ (default: path_to_current_dir/results/frames/")
ap.add_argument("--save_original_img", help="Save original image without bounding box (default: false).", default=False)
args = ap.parse_args()
# Import imagezmq
zmq_path = args.zmq + '/imagezmq'
sys.path.insert(0, zmq_path) # imagezmq.py /imagezmq
import imagezmq
# Load the darknet
ImageFile.MAXBLOCK = 2**20
image_hub = imagezmq.ImageHub()
total_time = 0
frame_count = 0
dn.set_gpu(0)
net = dn.load_net(bytes(args.cfg, encoding='utf-8'), bytes(args.weights, encoding='utf-8'), 0)
meta = dn.load_meta(bytes(args.data, encoding='utf-8'))
# Set the results path and make it if it doesn't exist
if args.results:
#If the user included a forward slash at the start of the relative path, cut it
if args.results[0] == '/':
args.results = args.results[1:]
# Make the relative path into an absolute path
results_path = cwd + '/' + args.results
# If the user forgot the trailing forward slash cut it
if not args.results[-1:] == '/':
results_path = results_path + '/'
else:
results_path = results_default
os.makedirs(results_path, exist_ok=True)
# We're ready to go!
print('Neural net loaded. Ready for frames.')
def drawBoundingBoxes(detections, image):
# Initialize some variables
result = {
'image': image
}
label = 'Nothing detected'
try:
for detection in detections:
objectClass = detection[0].decode("utf-8")
confidence = detection[1]
label = objectClass + ': ' + str(np.rint(100 * confidence)) + '%'
# x-center, y-center, x-width, y-width
bounds = detection[2]
# Set the bounding coords
x1 = int(bounds[0]) - int(bounds[2]/2)
y1 = int(bounds[1]) - int(bounds[3]/2)
x2 = int(bounds[0]) + int(bounds[2]/2)
y2 = int(bounds[1]) + int(bounds[3]/2)
# Draw the bounding box
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 255), 5)
# Write a label
cv2.putText(image, label, (x1+5, y1+40), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
result = {
'detections': detections,
'image': image,
'caption': '\n<br/>'.join(label)
}
except Exception as e:
print("Unable to draw boxes: ", e)
return result
# Handle the received images
try:
while True: # show streamed images until Ctrl-C
# Receive frame
rpi_name, jpg_buffer = image_hub.recv_jpg()
# Set timer to track FPS
if frame_count > 0:
start_time = time.time()
# Decode the image
image = cv2.imdecode(np.fromstring(jpg_buffer, dtype='uint8'), -1)
# Save original image
if args.save_original_img:
try:
file_path = results_path + 'frame-' + str(frame_count) + '-original.jpg'
print('Filename: ', file_path)
cv2.imwrite(file_path, image)
except Exception as e:
print("Couldn't save file: ", e)
# Convert BGR (OpenCV) to RGB (Yolo)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Save image resolution
if frame_count == 0:
height, width = image.shape[:2]
# Detect objects
detections = dn.detect(net, meta, image, thresh=0.5, hier_thresh=0.5)
print('detections = ', detections)
# Draw bounding box on image
result = drawBoundingBoxes(detections, image)
# Save image
try:
file_path = results_path + 'frame-' + str(frame_count) + '.jpg'
print('Filename: ', file_path)
cv2.imwrite(file_path, result['image'])
except Exception as e:
print("Couldn't save file: ", e)
# Measure processing time
if frame_count > 0:
processing_time = time.time() - start_time
total_time += processing_time
print('Processing time: ', processing_time)
frame_count += 1
# Ask client for another frame
image_hub.send_reply(b'OK')
except KeyboardInterrupt:
pass # Ctrl-C was pressed to end program; FPS stats computed below
except Exception as e:
print('Python error: ', e)
finally:
print('')
print('=========== SUMMARY ===========')
print('Results: ', results_path)
print('Total images: {:,g}'.format(frame_count))
if frame_count == 0:
sys.exit()
print('Stream resolution: {}x{}'.format(width, height))
fps = frame_count/total_time
print('Approximate FPS: ', fps)
sys.exit()
# def drawBoundingBoxes(detections, image):
# try:
# from skimage import io, draw
# import numpy as np
# print("*** "+str(len(detections))+" Results, color coded by confidence ***")
# imcaption = []
# for detection in detections:
# label = detection[0].decode()
# confidence = detection[1]
# pstring = label+": "+str(np.rint(100 * confidence))+"%"
# imcaption.append(pstring)
# print(pstring)
# bounds = detection[2]
# shape = image.shape
# yExtent = int(bounds[3])
# xEntent = int(bounds[2])
# # Coordinates are around the center
# xCoord = int(bounds[0] - bounds[2]/2)
# yCoord = int(bounds[1] - bounds[3]/2)
# boundingBox = [
# [xCoord, yCoord],
# [xCoord, yCoord + yExtent],
# [xCoord + xEntent, yCoord + yExtent],
# [xCoord + xEntent, yCoord]
# ]
# # Wiggle it around to make a 3px border
# rr, cc = draw.polygon_perimeter([x[1] for x in boundingBox], [x[0] for x in boundingBox], shape= shape)
# rr2, cc2 = draw.polygon_perimeter([x[1] + 1 for x in boundingBox], [x[0] for x in boundingBox], shape= shape)
# rr3, cc3 = draw.polygon_perimeter([x[1] - 1 for x in boundingBox], [x[0] for x in boundingBox], shape= shape)
# rr4, cc4 = draw.polygon_perimeter([x[1] for x in boundingBox], [x[0] + 1 for x in boundingBox], shape= shape)
# rr5, cc5 = draw.polygon_perimeter([x[1] for x in boundingBox], [x[0] - 1 for x in boundingBox], shape= shape)
# boxColor = (int(255 * (1 - (confidence ** 2))), int(255 * (confidence ** 2)), 0)
# draw.set_color(image, (rr, cc), boxColor, alpha= 0.8)
# draw.set_color(image, (rr2, cc2), boxColor, alpha= 0.8)
# draw.set_color(image, (rr3, cc3), boxColor, alpha= 0.8)
# draw.set_color(image, (rr4, cc4), boxColor, alpha= 0.8)
# draw.set_color(image, (rr5, cc5), boxColor, alpha= 0.8)
# detections = {
# "detections": detections,
# "image": image,
# "caption": "\n<br/>".join(imcaption)
# }
# except Exception as e:
# print("Unable to draw boxes: "+str(e))
# return detections
| [
"grant.backes@A-NPC-000072.local"
] | grant.backes@A-NPC-000072.local |
8ccf4c9a412f6e1432129a8e3f52406951f701e4 | 3897a641c17c2a8664f4c061d94043f7abdd5ad8 | /formulario/gestion_empresa/models.py | 2d81a1e1324bf8f5d961fc9a5703f13c64e9ae98 | [] | no_license | irvincc/Proyecto1_django | fc900ce2e7b05a10e9685961625ffc10ac2c813c | af180f75ad249bb5aa87a641b80098d772500b45 | refs/heads/master | 2023-07-16T00:54:41.598910 | 2021-09-03T20:39:02 | 2021-09-03T20:39:02 | 402,890,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | from django.db import models
# Create your models here.
class Persona(models.Model):
id=models.AutoField(primary_key=True)
empresa=models.CharField(max_length=50)
departamento=models.CharField(max_length=20)
nombre=models.CharField(max_length=100)
apellido=models.CharField(max_length=100)
fecha_de_nacimineto=models.DateField()
correo=models.EmailField(max_length=200)
telefono=models.CharField(max_length=10)
fecha_ingrso=models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.nombre
| [
"edgar.irvincc@gmail.com"
] | edgar.irvincc@gmail.com |
dff8348d6d40546cebcb1b981709d02569b56dd3 | d47ed20026349e443451b22a71d5e145ba1bf2db | /EdytaZylinskaInformatyka/lab 2.py | 273649c57b26a21fce8a71d1f9dab78e78530275 | [] | no_license | ze39099/ZylinskaEdytaInformatyka- | c066b09e58dcee528554c5d4778a514f9557ddb2 | 304b53dcaf74e76a5ff3913d0a739fb0fef77e2a | refs/heads/main | 2023-06-02T04:21:53.465913 | 2021-06-18T21:50:49 | 2021-06-18T21:50:49 | 365,433,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 9 12:42:54 2021
@author: Edyta
"""
n = 5
silnia = 1
i = 1
print ('silnia = ', silnia)
print ('i = ', i)
while i <= n:
silnia = silnia * i
print ('silnia = ', silnia)
i = i + 1
print ('i = ', i)
print ('Koniec silnia = ', silnia)
| [
"noreply@github.com"
] | noreply@github.com |
2ff0ba4d7c5974ada32ac830e330d2b4e6702ab6 | 57254e4dde5bf8701d937c96a2fccb8e55d3012a | /SPOJ/Some-Solutions/FAST2.py | 7e386aa2c54fc6b27675d26973c7c491bb5209e5 | [] | no_license | congtrung2k1/Algorithms | d5bef7f00b2be701f362ea2e7c173f057a31d713 | 6d8e7b344082315d7fa9ea0915267266e703007c | refs/heads/master | 2020-11-25T01:56:24.963817 | 2020-01-07T19:21:27 | 2020-01-07T19:21:27 | 228,436,434 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | import math
moduler = 1298074214633706835075030044377087
max_n = 500
sum_of_exp = [1] + [None]*max_n
for i in xrange(1, max_n+1):
sum_of_exp[i] = sum_of_exp[i-1]*2+1
if sum_of_exp[i] > moduler:
sum_of_exp[i] %= moduler
T = int(raw_input())
for t in xrange(T):
print sum_of_exp[int(raw_input())]
| [
"42043537+congtrung2k1@users.noreply.github.com"
] | 42043537+congtrung2k1@users.noreply.github.com |
c381e2e6f7f896f8bbba8f23286755f0e30a19a2 | 73c34674dcec6d186224e19e6dd7294ea3408561 | /week8/blast_format.py | 378190e4170d2a6dec57e0290cb7dd85f2a91da3 | [] | no_license | kkchau/bimm185 | 47d9905183762e520f0ddb14f326e397b119de45 | 28b12712d9b04a0bf84b28be5e4bbdbb74621b98 | refs/heads/master | 2020-11-29T14:46:06.642985 | 2017-05-30T19:59:51 | 2017-05-30T19:59:51 | 87,493,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,120 | py | import pymysql
import getpass
# sql table field names
fields = ['qseqid', 'sseqid', 'qlen', 'slen', 'bitscore', 'evalue', 'pident',
'nident', 'length', 'qcovs', 'qstart', 'qend', 'sstart', 'send',
'scov']
# database connection
print("Connecting to bm185s-mysql.ucsd.edu as kkchau; using kkchau_db")
passwd = getpass.getpass("Input password: ")
sqlconnection = pymysql.connect(host='bm185s-mysql.ucsd.edu',
user='kkchau',
password=str(passwd),
db='kkchau_db',
cursorclass=pymysql.cursors.DictCursor)
c = sqlconnection.cursor()
with open('./blast_scratch1.out', 'r') as scratch:
for line in scratch:
record = line.strip().split()
record[0] = record[0].strip().split('.')[0]
record[1] = record[1].strip().split('.')[0]
record[1] = record[1].strip().split('|')[1]
# skip self alignments
if record[0] == record[1]:
continue
record.append(float(record[8]) / float(record[3])) # scov
record = ['\"{}\"'.format(str(x)) for x in record]
insert_command = "INSERT INTO blast_gid1_self({}) VALUES ({});".format(','.join(fields), ','.join(record))
print(insert_command)
c.execute(insert_command)
sqlconnection.commit()
with open('./blast_scratch2.out', 'r') as scratch:
for line in scratch:
record = line.strip().split()
record[0] = record[0].strip().split('.')[0]
record[1] = record[1].strip().split('.')[0]
record[1] = record[1].strip().split('|')[1]
# skip self alignments
if record[0] == record[1]:
continue
record.append(float(record[8]) / float(record[3])) # scov
record = ['\"{}\"'.format(str(x)) for x in record]
insert_command = "INSERT INTO blast_gid2_self({}) VALUES ({});".format(','.join(fields), ','.join(record))
print(insert_command)
c.execute(insert_command)
sqlconnection.commit()
sqlconnection.close()
| [
"kkhaichau@gmail.com"
] | kkhaichau@gmail.com |
9ad86092e385a8f8238bb7bb27ac3740c79a39f7 | 1ecb282756c95d9ae19035761c6e4bb480fdaf26 | /python/lsst/ctrl/stats/records/generic.py | a07b96fbfc651a578c7b2e48c3f7924b5d26cf16 | [] | no_license | provingground-moe/ctrl_stats | 58cba09f95a30007fc5df10d6d8992719b0f5368 | 14790770765b3a167d0d9f318b40e12bbb5df0bb | refs/heads/master | 2020-06-10T20:42:34.260304 | 2017-08-24T21:26:34 | 2017-08-24T21:26:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | #
# LSST Data Management System
# Copyright 2008-2012 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
from __future__ import absolute_import
from .record import Record
class Generic(Record):
"""Generic log event
Listed in documention as not used, but here for completeness.
Parameters
----------
year: `str`
the year to tag the job with
lines: list
the strings making up this record
"""
def __init__(self, year, lines):
Record.__init__(self, year, lines)
eventClass = Generic
eventCode = "008"
| [
"srp@ncsa.illinois.edu"
] | srp@ncsa.illinois.edu |
1cbae9af00ea9521607893f9d6d6e51e4fbb082c | 0168da9db55c0213b3bef378e4e5f45a64117028 | /manage.py | 68e12eb8e81287c81d959d7556a5fb7404984bd5 | [] | no_license | EpicDeveloperGuy/CrashBoard | 04dac8776680b4ff877912348e0686c39dec8eff | e451a358cb5033bfc6a5b3c000e1ef23a415aade | refs/heads/main | 2023-08-25T13:11:23.475000 | 2021-11-01T16:35:03 | 2021-11-01T16:35:03 | 420,816,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crashboard.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"xg54@cornell.edu"
] | xg54@cornell.edu |
17680a092a0687b4a3669fef2c938e8909595f61 | cacb12e343c537dc35224b745daa0af9d9a38cf3 | /setup.py | 9cab4626db7d5e96d08175b8d30c019b4a2f9eb5 | [] | no_license | AlexLexx706/RobotHand | 2881ff41d776ee89c989859ced963d17160816ee | fcc3828dc7c34a65ded95e7b57ee1e7ce6cd1215 | refs/heads/master | 2021-01-01T17:10:27.711414 | 2018-07-25T14:55:38 | 2018-07-25T14:55:38 | 31,978,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | from setuptools import setup, find_packages
setup(
name='robothand',
version='0.1',
author='alexlexx',
author_email='alexlexx1@gmail.com',
packages=find_packages(),
license='GPL',
zip_safe=False,
entry_points={
'console_scripts': [
'configurator = robothand.configurator.widget:main'
],
},
package_data={
'robothand': [
'configurator/*.ui',
'servos_settings/*.ui']
},
)
| [
"alexlexx1@gmail.com"
] | alexlexx1@gmail.com |
aac36e5e97effc021d51bddce00836cf86108ad9 | e1fe1ed4f2ba8ab0146ce7c08d65bc7947150fc8 | /credit11315/pipelines.py | 6e80a0ff0684dd2011f6c21e58ced8a6f581ef7f | [] | no_license | yidun55/credit11315 | 0d88ceef314efa444de58eb5da8939c1acff3abe | b048ec9db036a382287d5faacb9490ccbf50735c | refs/heads/master | 2021-01-20T01:03:30.617914 | 2015-07-31T09:58:24 | 2015-07-31T09:58:24 | 38,853,611 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy import log
import os
os.chdir("/home/dyh/data/credit11315/infoDetail")
class Credit11315Pipeline(object):
def process_item(self, item, spider):
if len(item.keys()) == 1: #存的是content
try:
os.chdir("/home/dyh/data/credit11315/infoDetail")
with open(spider.writeInFile,"a") as f:
f.write(item["content"])
except Exception,e:
log.msg("content pipeline error_info=%s"%e, level=log.ERROR)
else:
for key in item.iterkeys():
try:
os.chdir("/home/dyh/data/credit11315/infoDetail")
with open('detailInfoScrapy_'+key,"a") as f:
f.write(item[key]+"\n")
except Exception,e:
log.msg("DetailInformation(Item) pipeline error_info=%s"%e, level=log.ERROR)
| [
"heshang1203@sina.com"
] | heshang1203@sina.com |
689be72dd1a8ec11ab24d4187e86a076f0a776b9 | 5de4ca71780651d4d6a8f8a4e27ccf0c6468eed7 | /venv/bin/pip3 | aafdfd3929dd6a51da17fdd90276145c93b3cca5 | [] | no_license | PlayerForever/object | 72d8bb813567561bb577a398d7b27024bab3dd70 | 92ca30613f1ae065540d2df93a8d1c1418957f62 | refs/heads/master | 2020-05-16T23:18:57.698838 | 2019-04-25T05:15:17 | 2019-04-25T05:15:17 | 183,360,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | #!/Users/hujia/PycharmProjects/object/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"chyneya@gmail.com"
] | chyneya@gmail.com | |
a8e2eab657b40aabce4eae7e0fdea44b81d773da | a96c4cf554ac046a42652d0d5f55a0ada598d596 | /resid-plot.py | 61e370edb6d3ecb1b3a29532393ce9ec6a5b32ae | [
"MIT"
] | permissive | naveenluke/ccr | a7282ad2d6f7ffe36d8573853d1a84dc3d92ab39 | 2eed5e1674a9e1b1ead4efdb148d6d24e6b4b1d2 | refs/heads/master | 2022-11-17T20:13:38.520072 | 2020-07-20T12:02:30 | 2020-07-20T12:02:30 | 279,649,169 | 0 | 0 | MIT | 2020-07-14T17:24:15 | 2020-07-14T17:21:32 | null | UTF-8 | Python | false | false | 8,436 | py | import toolshed as ts
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import OLSInfluence
import scipy.stats as ss
from statsmodels.formula.api import ols
import pandas as pd
from scipy.stats.mstats import hmean
from sklearn import preprocessing
import csv
import sys
csv.field_size_limit(34365000)
import cPickle as pickle
from cyvcf2 import VCF
import utils as u
from collections import defaultdict
X = defaultdict(list)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cpg", help="cpg added to regression model", action="store_true", default=False)
parser.add_argument("-s", "--synonymous", help="synonymous added to regression model", action="store_true", default=False)
parser.add_argument("-f", "--file", help="regions input file, from exac-regions.py", required=True)
parser.add_argument("-n", "--nosingletons", help="if you do NOT want singletons", action="store_true", default=False)
parser.add_argument("-w", "--varflag", help="if you want separation by variant flags", action="store_true", default=False)
parser.add_argument("-p", "--chromosomes", nargs='*', help="any chromosomes you want to capture explicitly", default=[])
parser.add_argument("-x", "--exclude", nargs='*', help="any chromosomes you want to exclude explicitly", default=['Y'])
parser.add_argument("-q", "--xweighted", action="store_true", help="this adds special weighting to the X chromosome if you want to run the full model", default=False)
parser.add_argument("-r", "--removesyn", action="store_true", help="if you don't want to run the model with synonymous density AND CpG", default=False)
args=parser.parse_args()
cpg=args.cpg
synonymous=args.synonymous
nosingletons=args.nosingletons
rfile=args.file
varflag=args.varflag
chromosomes=args.chromosomes
exclude=args.exclude
xweighted=args.xweighted
removesyn=args.removesyn
gnomad=VCF('data/gnomad-vep-vt.vcf.gz')
kcsq = gnomad["CSQ"]["Description"].split(":")[1].strip(' "').split("|")
ys, genes = [], []
def syn_density(pairs, d, gnomad, kcsq, nosingletons, varflag):
syn=0
prevvar=None
if varflag:
if 'VARTRUE' in d['varflag']: # don't need syn for a 0 bp region, i.e., variant, so give it the lowest possible, 0
return syn
for pair in pairs:
if varflag:
r0=str(int(pair[0])+1); r1=str(int(pair[1])); #in this case, does not include a variant at the end coordinate
else:
r0=str(int(pair[0])+1); r1=str(int(pair[1])-1);
if not varflag:
if int(r0)-int(r1)==1: continue # don't need syn for a region of length 1 (0 bp region), which it would be if a variant was included at the end coordinate
for v in gnomad(d['chrom']+':'+r0+'-'+r1):
if v.INFO['AC']==1 and nosingletons: continue
if prevvar is not None and str(v.start)+str(v.end)+str(v.ALT[0])==prevvar: continue
if not (v.FILTER is None or v.FILTER in ["PASS", "SEGDUP", "LCR"]):
continue
info = v.INFO
try:
as_filter=info['AS_FilterStatus'].split(",")[0]
if as_filter not in ["PASS", "SEGDUP", "LCR"] :
continue
except KeyError:
pass
info = v.INFO
try:
csqs = [dict(zip(kcsq, c.split("|"))) for c in info['CSQ'].split(",")]
except KeyError:
continue
for csq in (c for c in csqs if c['BIOTYPE'] == 'protein_coding'):
if csq['Feature'] == '' or csq['EXON'] == '' or csq['cDNA_position'] == '' or csq['SYMBOL']!=d['gene']: continue #in case non-exonic or not the same gene
if u.issynonymous(csq):
syn+=1; break
prevvar=str(v.start)+str(v.end)+str(v.ALT[0])
return syn
varrow = []
for i, d in enumerate(ts.reader(rfile)):
if chromosomes and d['chrom'] not in chromosomes: continue
if d['chrom'] in exclude: continue
pairs = [x.split("-") for x in d['ranges'].strip().split(",")]
if 'VARTRUE' in d['varflag']:
varrow.append((d['chrom'], str(d['start']), str(d['end']), d['gene'], d['transcript'], d['exon'], d['ranges'], d['varflag'], 0, 0))
continue
row=(d['chrom'], str(d['start']), str(d['end']), d['gene'], d['transcript'], d['exon'], d['ranges'], d['varflag'])
if synonymous:
syn=syn_density(pairs, d, gnomad, kcsq, nosingletons, varflag)
if int(d['n_bases'])>1:
if varflag:
if 'VARTRUE' not in d['varflag']: # code here in case we decided to downweight differently later
d['syn_density']=syn/(float(d['n_bases'])); #+","+str(syn)+"/"+d['n_bases']
else:
d['syn_density']=syn/(float(d['n_bases'])-1); #+","+str(syn)+"/"+d['n_bases']; # -1 because we can't count the end coordinate, which is by default a variant
else:
if varflag:
if 'VARTRUE' not in d['varflag']: # code here in case we decided to downweight differently later
d['syn_density']=0
else:
d['syn_density']=0
X['syn'].append(float(d['syn_density'])) # 1-syn if we want to use as a measure of constraint; syn as a measure of mutability
row = row + ("%.3f" % float(d['syn_density']),)
else:
d['syn_density']="na" # calculating synonymous density is really slow, so if we don't need to, we'd rather not.
row = row + (d['syn_density'],)
if cpg:
if varflag:
if 'VARTRUE' not in d['varflag']: # code here in case we decided to downweight differently later
X['CpG'].append(float(d['cg_content']))
else:
X['CpG'].append(float(d['cg_content']))
row = row + ("%.3f" % float(d['cg_content']),)
genes.append(row)
coverage=[]
for val in d['coverage'].split(","):
if val:
val = float(val)
if varflag:
if 'VARTRUE' not in d['varflag']: # code here in case we decided to downweight differently later
if d['chrom'] == 'X' and xweighted:
val = val*(178817.0/(123136*2)) # max AN not in PARs
coverage.append(val)
else:
coverage.append(val)
if not coverage:
if varflag:
if 'VARTRUE' not in d['varflag']: # code here in case we decided to downweight differently later
coverage=[0]
else:
coverage=[0]
ys.append(sum(coverage))
X['intercept'] = np.ones(len(ys))
if removesyn:
X.pop('syn', None)
X = pd.DataFrame(X)
results = sm.OLS(ys, X, hasconst=True).fit()
resid = OLSInfluence(results).get_resid_studentized_external()
#variables={}
#variables['cpg']=X['CpG']
#variables['cov']=ys
#variables['resid']=resid
#variables['rawresid']=results.resid
#variables['genes']=genes
#variables['gerp']=gerp
#variables['intercept']=results.params['intercept']
#variables['cpgcoef']=results.params['CpG']
#pickle.dump(variables, open("var.pickle", "wb"))
lowestresidual=np.min(resid)-.001
#for i, row in enumerate(genes):
# if "VARTRUE" in row[7] and varflag: #row[7] is varflag
# resid[i]=lowestresidual
resid=resid.tolist()
for i, row in enumerate(varrow):
resid.append(lowestresidual)
genes.append(row)
ys.append(0)
X_train=np.array(resid).reshape(len(resid),1)
min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0,100))
resid_pctile = min_max_scaler.fit_transform(X_train)
#resid_pctile = 101.0 * np.sort(resid).searchsorted(resid) / float(len(resid))
assert len(genes) == len(ys) == len(resid)
print "chrom\tstart\tend\tgene\ttranscript\texon\tranges\tvarflag\tsyn_density\tcpg\tcov_score\tresid\tresid_pctile"
for i, row in enumerate(genes):
#if "VARTRUE" in row[7] and varflag: #row[7] is varflag
vals = ["%.3f" % ys[i], "%.3f" % resid[i], "%.9f" % resid_pctile[i]]
#if not "," in row[-1]:
# if not row[-1]:
# row=list(row)
# row[-1]=row[1]+"-"+row[2]
# print "\t".join(list(row) + vals)
# continue
ranges = [x.split("-") for x in row[6].split(",")]
row=list(row)
for s, e in ranges:
row[1], row[2] = s, e
print "\t".join(map(str,list(row) + vals))
| [
"u1021864@kingspeak23.wasatch.peaks"
] | u1021864@kingspeak23.wasatch.peaks |
fe65624b0bc29c7a7a544917b5c1a6bb5e82431a | b38520185366643cb4eba890db77eea3b8547713 | /05_oreilly02/chapter2_02_blur.py | 17d1cc78070755b823812839347a8c07a56672f4 | [] | no_license | dasanchez/opencv_study | c4c021244309362e64071ac34749266b2d980715 | 2f02ef5830b8bcebf9ddc516236f9fda777372a6 | refs/heads/master | 2020-08-28T22:47:39.982100 | 2017-12-05T17:52:45 | 2017-12-05T17:52:45 | 94,382,449 | 0 | 0 | null | 2017-06-25T15:04:37 | 2017-06-15T00:03:44 | Python | UTF-8 | Python | false | false | 598 | py | import cv2
""" Load and display an AVI video file. """
fps = 40
cv2.namedWindow("Video window")
cv2.namedWindow("Blurred vision")
cap = cv2.VideoCapture('face2.avi')
while cap.isOpened():
ret, frame = cap.read()
if ret:
#blurFrame = cv2.GaussianBlur(frame,(15,15),0)
#blurFrame = cv2.medianBlur(frame,31)
blurFrame = cv2.bilateralFilter(frame,21,85,85)
cv2.imshow("Video window", frame)
cv2.imshow("Blurred vision", blurFrame)
else:
break
if cv2.waitKey(fps) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| [
"dante.a.sanchez@gmail.com"
] | dante.a.sanchez@gmail.com |
44769b6f7771b26655d7a767babdc40ca1f56e46 | 08d5b3662bfd0dc304da30fb005f5500358c73b0 | /pythonsrc/tracker-viewer.py | 1853eb740a3e2024f29138ab732335facc860a64 | [] | no_license | somaproject/tracker | 26f74ea0218654f55dc6529c549be1e6064261d6 | f9781fcfc9e2d971d4925c23eee791704f714619 | refs/heads/master | 2021-01-22T18:27:56.480640 | 2009-01-08T20:33:48 | 2009-01-08T20:33:48 | 103,466 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,824 | py | #p!/usr/bin/env python
import pygst, pygtk
pygst.require('0.10')
pygtk.require('2.0')
import gst, gtk, gtk.glade, dbus, dbus.glib, gobject
from somaTrackerViewerPipeline import SomaTrackerViewerPipeline
class TrackerViewer():
def __init__(self):
self.pipe = SomaTrackerViewerPipeline().getTrackerPipeline()
self.bus = self.pipe.get_bus()
self.bus.add_signal_watch()
self.bus.enable_sync_message_emission()
self.bus.connect("message", self.on_message)
self.bus.connect("sync-message::element", self.on_sync_message)
self.gladeFile = 'trackerViewer.glade'
windowName = "mainWindow"
self.wTree = gtk.glade.XML(self.gladeFile, windowName)
self.window = self.wTree.get_widget(windowName)
self.window.show()
if (self.window):
self.window.connect("destroy", self.quit)
dic = {
"on_thold_scale_value_changed" : self.thold_change,
"on_thold_lock_btn_toggled" : self.thold_lock_toggle,
"on_playpause_btn_toggled" : self.play_pause,
"on_reset_trail_btn_clicked" : self.reset_trail,
"on_pos_overlay_toggled" : self.overlay_toggled,
"on_enable_trail_toggled" : self.trail_toggled,
}
self.wTree.signal_autoconnect(dic)
dBus = dbus.SessionBus()
self.tracker = dBus.get_object('soma.tracker.TrackerCore', "/SomaTracker")
def on_message(self, bus, message):
# print message
return
def on_sync_message(self, bus, message):
if message.structure is None:
return
message_name = message.structure.get_name()
if message_name == "prepare-xwindow-id":
print "\tAsking for xwindow-id"
drawArea = self.wTree.get_widget("draw_area")
imagesink = message.src
imagesink.set_property("force-aspect-ratio", True)
imagesink.set_xwindow_id(drawArea.window.xid)
## --- GTK METHODS --- ##
def quit(self, *args):
print "Quitting, shutting down tracker-core"
self.tracker.kill_tracker_core()
gtk.main_quit(*args)
def thold_change(self, widget):
thold = widget.get_value()
print "New threshold selected:", thold
def thold_lock_toggle(self, widget):
print "Threshold Lock:", widget.get_active()
self.wTree.get_widget('thold_scale').set_sensitive(not(widget.get_active()))
def play_pause(self, widget):
active = widget.get_active()
print "Playing: ", active
if active:
widget.set_label("gtk-media-pause")
print "\tStarting VIEWER pipeline"
self.pipe.set_state(gst.STATE_PLAYING)
self.tracker.start_tracker()
else:
widget.set_label("gtk-media-play")
self.tracker.stop_tracker()
self.pipe.set_state(gst.STATE_NULL)
def reset_trail(self, widget):
print "Reset Trail overlay trail"
def overlay_toggled(self, widget):
active = widget.get_active()
print "Enable Overlay: ", active
self.wTree.get_widget('enable_trail').set_sensitive(active)
if not(active):
self.wTree.get_widget('reset_trail_btn').set_sensitive(active)
else:
self.wTree.get_widget('reset_trail_btn').set_sensitive(self.wTree.get_widget("enable_trail").get_active())
    def trail_toggled(self, widget):
        """Trail checkbox: the reset button is only usable while trails are on."""
        active = widget.get_active()
        print "Enable Trails: ", active
        self.wTree.get_widget('reset_trail_btn').set_sensitive(active)
# Script entry point: build the viewer window and run the GTK main loop.
if __name__=="__main__":
    viewer = TrackerViewer()
    gtk.main()
| [
"slayton@mit.edu"
] | slayton@mit.edu |
7f7be7515b49d2339d45739a3d6096151dc8de80 | 9381c0a73251768441dc45c7e181548742b9bdbc | /src/educative/fibonacci_numbers/house_thief_memo.py | dfe266791fa02380306c6208bd07804a7c2fbd97 | [] | no_license | Flaeros/leetcode | 45cc510ec513bfb26dbb762aa1bd98f3b42dce18 | 1dcea81a21bd39fee3e3f245a1418526bd0a5e8f | refs/heads/master | 2022-06-02T14:15:31.539238 | 2022-04-18T14:44:18 | 2022-04-18T14:49:05 | 250,183,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | def find_max_steal(wealth):
memo = [-1 for _ in range(len(wealth))]
return find_max_steal_rec(wealth, memo, 0)
def find_max_steal_rec(wealth, memo, index):
    """Maximum loot obtainable from house `index` onward, never robbing two
    adjacent houses ("house thief" / house-robber recurrence).

    memo[i] == -1 marks a not-yet-computed entry; computed answers are cached
    in place so each index is solved at most once.
    """
    if index >= len(wealth):
        return 0
    if memo[index] != -1:
        return memo[index]
    # either rob this house and skip the next, or skip this house entirely
    take = wealth[index] + find_max_steal_rec(wealth, memo, index + 2)
    skip = find_max_steal_rec(wealth, memo, index + 1)
    memo[index] = take if take > skip else skip
    return memo[index]
def main():
    """Demo: print the maximum loot for two sample streets."""
    for street in ([2, 5, 1, 3, 6, 2, 4], [2, 10, 14, 8, 1]):
        print(find_max_steal(street))


if __name__ == '__main__':
    main()
| [
"flaeross@yandex-team.ru"
] | flaeross@yandex-team.ru |
da64530ce81ca5192823694b51c6a6a433d982b0 | 13c6f4664c37e130a3dac6feacbd3486bcdc65d4 | /api/viewsets/DocumentViewSet.py | dca446423fd01d22991e737c30dfd66970645113 | [] | no_license | daviaroldi/trabSisWeb | c49fb64cccc7554e79f71d776a6d9c9a4fb96dd6 | fc7e3e548ad1a1fca7bd5924edebabe453026c09 | refs/heads/master | 2021-05-17T09:24:29.166936 | 2020-04-03T02:09:25 | 2020-04-03T02:09:25 | 250,725,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | from django.shortcuts import render
from django.http import JsonResponse
from django.db import models
from rest_framework.response import Response
from rest_framework.decorators import api_view, authentication_classes, permission_classes, parser_classes
from rest_framework.permissions import IsAuthenticated
from ..models.DocumentModel import Document
from _datetime import datetime
import json
from rest_framework import viewsets
from ..serializers.DocumentSerializer import DocumentSerializer
from oauth2_provider.contrib.rest_framework import TokenHasReadWriteScope
class DocumentViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for Document, listed in name order.

    ModelViewSet supplies list/retrieve/create/update/destroy actions.
    """
    queryset = Document.objects.all().order_by('name')
    serializer_class = DocumentSerializer
    # permission_classes = [TokenHasReadWriteScope]
"davi.aroldi@gmail.com"
] | davi.aroldi@gmail.com |
7d9cdab7eb54945ad09ba92fd102c826a45e0c8a | 5b926bf12340e03deced7495e818958b8866ada5 | /src/09-10/ubc-tbird-ros-pkg/sb_joystick/scripts/keyboard_js_sim.py | 101834096db3ddab2a906ef2eb56cc83214f57a9 | [] | no_license | jpearkes/snowbots | 6514b19e24f246ee1d4291b18090135e2f4998e6 | 52bacd9f58524090e0ab421a47714629249ca273 | refs/heads/master | 2021-05-27T02:02:15.119679 | 2014-05-23T01:19:24 | 2014-05-23T01:19:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | #!/usr/bin/env python
"""
Curses-based furious state monitor node.
"""
# Configuration constants
NODE_NAME = "keyboard_js_sim"  # ROS node name
PUBLISH_TOPIC = "joy"  # topic the simulated joystick messages go out on
LOOP_FREQ = 2  # Hz -- keyboard polling rate
# Standard library imports
import curses, curses.wrapper
# ROS imports
import roslib
roslib.load_manifest("sb_joystick")
import rospy
# furious package imports
from joy.msg import Joy
def main(screen):
    """Curses main loop: map number-key presses (0-9) to simulated joystick
    button presses published as Joy messages; 'q' shuts the node down.

    screen -- curses window supplied by curses.wrapper.
    """
    screen.nodelay(1)  # make getch() a non-blocking call
    pub = rospy.Publisher(PUBLISH_TOPIC, Joy)
    rospy.init_node(NODE_NAME)
    rate = rospy.Rate(LOOP_FREQ)
    while not rospy.is_shutdown():
        keypress = screen.getch()
        if keypress > -1:
            keyval = chr(keypress)
            if keyval == "q":  # quit
                rospy.signal_shutdown("User requested shutdown.")
            elif keyval.isdigit():
                # Bug fix: the original called int(keyval) on ANY non-'q'
                # key, so pressing e.g. a letter raised ValueError and
                # killed the node. Non-digit keys are now ignored.
                button = int(keyval)
                if button < 10:
                    msg = Joy()
                    # one-hot button vector: only the pressed button is set
                    msg.buttons = [0 for x in range(10)]
                    msg.buttons[button] = 1
                    pub.publish(msg)
        rate.sleep()
if __name__ == "__main__":
curses.wrapper(main)
rospy.loginfo("Shutting down...")
| [
"navid.fattahi@snowbots.ca"
] | navid.fattahi@snowbots.ca |
6887f767b739578e9d02e5c9df4963584ffc4eb2 | 8eeeb807a9010e94c07b9622e521ec5e266c21b6 | /bin/pip | 729a43ac175a3a57d52a1d519424622e53c05f7e | [] | no_license | HazyPlanet/my-first-blog | 7e88e242544c1a8c6517e76469dbb6a773ebb40e | 628585aec0a667ee1f56c5babcea38b7abd68964 | refs/heads/master | 2023-03-02T16:52:19.656773 | 2021-01-25T13:24:25 | 2021-01-25T13:24:25 | 332,500,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | #!/System/Volumes/Data/local/evans/computing/swDevel/python/proj/books-examples-practice/django/djangogirls/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Normalize argv[0]: strip the "-script.pyw"/".exe" suffix added by
    # Windows entry-point wrappers before handing control to pip's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"chris@hazyplanet"
] | chris@hazyplanet | |
d1a7945bfa5fc7770292c82fd27cfa0775b820fe | e9543720e53de3e387133497e66af3b039129dd8 | /apps/user/models.py | 299cfa0e5490cd19f0fd04fa6dac228e1eac5ff1 | [] | no_license | weicunheng/BookStore | d0e5782e45578bf84a36c98c2e029dfc10582959 | d2fd226e130627ae3b39470260ef0961796900a4 | refs/heads/master | 2020-03-25T08:35:07.862245 | 2018-08-17T13:42:33 | 2018-08-17T13:42:33 | 143,620,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
    """Custom user model extending Django's AbstractUser.

    Verbose names are in Chinese (the site's admin language).
    """
    # Real name (optional).
    name = models.CharField(max_length=30, null=True, blank=True, verbose_name="姓名")
    # Date of birth (optional).
    birthday = models.DateField(null=True, blank=True, verbose_name="出生年月")
    # Gender, stored as "male"/"female"; defaults to "female".
    gender = models.CharField(max_length=6, choices=(("male", u"男"), ("female", "女")), default="female", verbose_name="性别")
    # Phone number (optional; 11 chars — presumably a Chinese mobile number).
    mobile = models.CharField(null=True, blank=True, max_length=11, verbose_name="电话")
    # E-mail address (optional).
    email = models.EmailField(max_length=100, null=True, blank=True, verbose_name="邮箱")
    class Meta:
        verbose_name = "用户"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.username
class VerifyCode(models.Model):
    """SMS verification code sent to a phone number."""
    code = models.CharField(max_length=10, verbose_name="验证码")
    mobile = models.CharField(max_length=11, verbose_name="电话")
    # Set automatically when the row is created.
    add_time = models.DateTimeField(auto_now_add=True, verbose_name="添加时间")
    class Meta:
        verbose_name = "短信验证码"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.code
| [
"1261360292@qq.com"
] | 1261360292@qq.com |
84740688fcf4822e320b0acf8ec9148e1ca8121b | 8ea49fe02789aee5076e9aa56fd2e67cc85765bd | /DBMS_Prashant/Student/tasks/forms.py | 0aa1fc5707371f53ebfd4687a26d84c0257e48ec | [] | no_license | prashant-pandit/Python_Django_CRUD | 8fdc9160faa1b68080cc03a365ba930511dbc69e | c842ac3581aa0c1b32960756c4ec3d3cd763db2c | refs/heads/main | 2023-02-07T09:21:07.243427 | 2021-01-01T11:19:52 | 2021-01-01T11:19:52 | 325,964,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | from django import forms
from django.forms import ModelForm
from .models import *
class TaskForm(forms.ModelForm):
    """ModelForm for Task that overrides four fields to add placeholder text."""
    usn = forms.CharField(widget= forms.TextInput(attrs={'placeholder': 'Enter Your USN'}))
    Name = forms.CharField(widget= forms.TextInput(attrs={'placeholder': 'Enter Your Name'}))
    Platform = forms.CharField(widget= forms.TextInput(attrs={'placeholder': 'Course Platform'}))
    Course = forms.CharField(widget= forms.TextInput(attrs={'placeholder': 'Enter Course Name'}))
    class Meta:
        model = Task
        fields = '__all__'
"noreply@github.com"
] | noreply@github.com |
653e6c64350626aa236b8279825acc6265310b27 | b3a8718fba0b2d6922f8fb49ec035cd1d7df879d | /8.python-basics-ii/88-92_scope.py | 75d1b894e3043ced1c2bfb16610f5d6a52df9b63 | [] | no_license | CodingCCarpenter/ZTM-Python-Complete | 0c1cd3c03d01ac21f532d64cfedddc80596737be | 8d4dc287bf0a5f789d117fda2914dbde9dd8817f | refs/heads/master | 2022-12-02T05:09:20.803799 | 2020-08-13T03:21:32 | 2020-08-13T03:21:32 | 283,321,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | # Scope - what variables do I have access to?
# Python has functional scope
# global scope - accessible by all code
total = 100
def some_func():
# functional scope - not available outside of some_func
counter = 100
return counter
# we have access to total
print (total)
# but we do not have access to counter - will throw error
# uncomment line 17 and run to view error
#print(counter)
"""
functional scope only applies when we define a function.
Variables created by loops and conditionals are still
accessible globally
"""
if True:
x = 10
# we still have access to print x:
print(x)
# SCOPE GAME!!!
# What variables do I have access to?
a = 1
def confusion():
a = 5
return a
# what will be the output of the following?
print(a) # 1 - value of global scope variable a
print(confusion()) # 5 - value functional scope variable a
"""
Scope rules that python interpreter follows:
1 - starts with checking local scope (same scope as expression that's looking for it)
2 - if nothing in the local scope, checks in the parent's local scope
3 - global scope
4 - build in python functions
"""
# GLOBAL KEYWORD
abacus = 0
def count():
# use the global variable to access any global variables
global abacus
# after we have access to it we can use it in our function
abacus += 1
return abacus
print(count())
"""
note:
it's arguably cleaner to simply pass a global variable
into a function as an argument, and then create an expression
to update the global variable as needed upon function call
"""
# NONLOCAL KEYWORD
def outer():
x = 'local'
def inner():
# used to access the parent's local variable
nonlocal x
# here we are reassigning the parent's local variable value
x = 'nonlocal'
print('inner:', x)
inner()
print('outer:', x)
outer() | [
"Christineassists@aol.com"
] | Christineassists@aol.com |
dfbeaa25e10c97304810a2de289007e2e095bd42 | 637eb4a6475d3732da9f162ae06762c4db0d1193 | /addatomic/500000_par/benchmark.py | 3957da1d89b339e2a12f57016cd6450398a1c00a | [] | no_license | jamillan/warp_reduce_vs_addatomic_hoomd_blue | 57d1b9b70548c8b162a5d28a9724fcdfa3edb3a6 | 93e11d8d59e024a2e2511c7e1aa7be883140df1b | refs/heads/master | 2021-04-30T09:44:05.342996 | 2018-02-15T20:30:09 | 2018-02-15T20:30:09 | 121,316,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,825 | py | from __future__ import print_function, division
import sys
# cluster-specific HOOMD-blue build locations
sys.path.append('/projects/b1030/hoomd/hoomd-2.1.9-new_walls/')
sys.path.append('/projects/b1030/hoomd/hoomd-2.1.9-new_walls/hoomd')
#sys.path.append('/home/jaime/software/hoomd-install')
#sys.path.append('/home/jaime/software/hoomd-install/hoomd')
import os
import random
#os.environ['LD_LIBRARY_PATH'] = "/home/jaimemillan/boost/lib"
#print os.environ['LD_LIBRARY_PATH']
from hoomd import *
from hoomd.md import *
import hoomd.deprecated as deprecated
import numpy
c=context.initialize()
# output directory: first --user argument if given, else current directory
if len(option.get_user()) == 0:
    workspace = '.';
else:
    workspace = option.get_user()[0]
# load the 250k-particle configuration and double it along x -> 500k particles
system = deprecated.init.read_xml("250000.xml")
system.replicate(nx=2,ny=1,nz=1)
group_A = group.type(name='a-particles', type = 'A')
#introduce SLJ forces between particles to make them 'hard'
nl =nlist.cell()
c.sorter.disable()
slj= pair.slj(r_cut = 3.5, nlist=nl)
slj.set_params(mode="shift")
# only A-A pairs interact; B is inert (epsilon 0)
slj.pair_coeff.set('A','A' , epsilon=1.0, sigma=1.0 )
slj.pair_coeff.set('A','B' , epsilon=0.0, sigma=1.0 )
slj.pair_coeff.set('B','B' , epsilon=0.0, sigma=1.0 )
#introduce Yukawa Walls bounding from above/below Z direction
sigma=1.0
walls = wall.group()
walls.set_tag(0);
# planes one length-unit inside the box faces, normals pointing inward
wall_pos = 0.5*system.box.Lz - 1
walls.add_plane((0,0,wall_pos),(0.,0.,-1.))
walls.add_plane((0,0,-wall_pos),(0.,0.,1.))
wall_force_slj=wall.lj(walls, r_cut=4.0,active_planes=[0,1])
wall_force_slj.force_coeff.set('A', epsilon= 1.5,r_cut=4,sigma=sigma,r_extrap = 0.05)
wall_force_slj.force_coeff.set('B', epsilon= 0,r_cut=4,sigma=sigma,r_extrap = 0.05)
# random initial velocities in [-1, 1) per component; uniform diameter
for p in system.particles:
    vx = (2.0 * random.random()- 1.0 )
    vy = (2.0 * random.random()- 1.0 )
    vz = (2.0 * random.random()- 1.0 )
    p.velocity = (vx,vy,vz)
    p.diameter = 1.0
#log Thermos
logger = analyze.log(quantities=['temperature' , 'potential_energy', 'kinetic_energy'],
period=5e2, filename='log.log', overwrite=True)
#Create Trajectory
integrate.mode_standard(dt=0.001)
#NVE Integration -- short, displacement-limited relaxation of overlaps
integrator = integrate.nve(group_A , limit = 0.0001)
zero = update.zero_momentum(period =100)
run(1e3)
integrator.disable()
zero.disable()
#NVT interation to reached target temperature
tf=0.01
integrator = integrate.nvt(group=group_A , tau = 0.65 , kT = 0.001)
# ramp kT to 0.75 over 2e6 steps, then back down to tf over another 2e6
integrator.set_params(kT=variant.linear_interp(points=[(0, logger.query('temperature')), (2e6, 0.75)]))
run(2e6)
integrator.set_params(kT=variant.linear_interp(points=[(0, logger.query('temperature')), (2e6, tf)]))
run(2e6)
# start benchmark
tps = benchmark.series(warmup=0, repeat=4, steps=70000, limit_hours=20.0/3600.0)
# particle-timesteps/second, averaged over the benchmark repeats
ptps = numpy.average(tps) * len(system.particles);
print("Hours to complete 10e6 steps: {0}".format(10e6/(ptps/len(system.particles))/3600));
meta.dump_metadata(filename = workspace+"/metadata.json", user = {'mps': ptps, 'tps': tps});
| [
"jaime.millan@northwestern.edu"
] | jaime.millan@northwestern.edu |
e46900d97662189164e1e244374a46d1087e1235 | d07e0d2c5bba88f96c6fb71d339936c307d49527 | /farhe_celcius.py | 20cb23fc9867d76f1276f1ae6f654bfb70633c77 | [] | no_license | Tapan-24/python | 92bde9dc8a7b5c7b18644dfd9f6c6ac8a63cb33e | 2ce5905229cb62ed3101bce49ca2171e707abab2 | refs/heads/master | 2022-12-28T11:52:58.084060 | 2020-10-03T13:07:42 | 2020-10-03T13:07:42 | 282,006,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | x=float(input("Enter Temperature in Fahrenheit "))
# Fahrenheit -> Celsius: C = (F - 32) * 5/9
x=(x-32)*(5/9)
print(str(x)+ 'Celcius')  # NOTE(review): "Celcius" is a typo for "Celsius" in the output text
| [
"tapan.24.96.vaghela@gmail.com"
] | tapan.24.96.vaghela@gmail.com |
9fb1ac2822486473f47947f5b02cf3414a5d66bb | 1b8f6104616803e893dc54f7bad3b7f7a58e2fc1 | /ALGO_V2/sort/insertion-sort.py | d7260e86ba85c10d2677c9dd793c2a639074fe01 | [] | no_license | hanrick2000/DSAL | bbb1af525b9e56105c2f6c2b5e20af8211729608 | ae8fba686ea94ceb05085ae8323b16a636afad57 | refs/heads/master | 2022-08-24T19:25:52.691731 | 2020-05-26T04:09:49 | 2020-05-26T04:09:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py |
def insertion_sort(list):
    """Sort `list` in place with insertion sort and return it.

    NOTE: the parameter name shadows the builtin `list`; it is kept
    unchanged for interface compatibility with existing callers.
    """
    for i in range(1, len(list)):
        current_value = list[i]
        pointer_idx = i
        # Bug fix: check the index bound BEFORE indexing. The original
        # evaluated list[pointer_idx - 1] first, which reads list[-1]
        # (the last, unsorted element) once pointer_idx reaches 0.
        while pointer_idx > 0 and list[pointer_idx - 1] > current_value:
            # shift the larger element one slot to the right
            list[pointer_idx] = list[pointer_idx - 1]
            pointer_idx -= 1
        list[pointer_idx] = current_value
    return list
# Demo run. The result variable is renamed: the original bound it to the
# name `sorted`, shadowing the builtin of the same name.
test = [5,7,8,3,1,5,3]
result = insertion_sort(test)
print(result)
"herongrong2011@gmail.com"
] | herongrong2011@gmail.com |
d801910e63215836f5d7be60291bd2d66c972eeb | 7fcc5041d5e185e94f8a5114d4d6b21174f98011 | /2、decisionTree/test.py | c858911bef244a733b58067bfa8d95cc474b0f73 | [] | no_license | masonCaminer/ml_learn | a80902dd903cfce28c76a562d74b9fe83b298315 | 2af8239d457614cbabd04aceb7b92bc460a0808b | refs/heads/master | 2020-04-15T20:28:43.284068 | 2019-03-02T01:51:57 | 2019-03-02T01:51:57 | 164,994,960 | 1 | 0 | null | 2019-03-02T01:51:58 | 2019-01-10T05:19:41 | Python | UTF-8 | Python | false | false | 170 | py | from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# NOTE(review): fit() returns the fitted encoder itself, so this first
# assignment to `a` is immediately overwritten by the transform() result.
a = le.fit([1,5,67,100])
# transform maps each value to its index in the sorted class list -> [0 1 2 3]
a = le.transform([1,5,67,100])
# a = le.fit_transform([1,1,100,67,5])
print(a)
"mason@caminer.io"
] | mason@caminer.io |
c77ff7221169be3d60f29bf546fdc5560b0a91de | c169b62296b88035be0262b751ec84f43e49b35d | /samarati.py | a2f00ce2f936b4e14c2481de89e483b48ec57066 | [] | no_license | xzwj1699/USTC_DP_Lab1 | a5a1f63924d462f848da4764675af983c733e1bb | 6426bd53584d7905b1682cabb21249b25427cad0 | refs/heads/master | 2023-06-16T11:52:04.385469 | 2021-07-14T14:30:20 | 2021-07-14T14:30:20 | 385,968,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,845 | py | from os import X_OK, write
from typing import DefaultDict
import math
import sys
def samarati(data_file, k, maxSup, GT, vectors_list, LossTable):
    """Samarati's algorithm: binary-search the generalization-height lattice
    for the lowest total height whose generalization makes the quasi-identifier
    (age, gender, race, marital_status) k-anonymous with at most maxSup
    suppressed records; among satisfying vectors, keep the one with minimum
    loss metric.

    data_file    -- raw CSV lines of the adult dataset
    k            -- anonymity parameter
    maxSup       -- maximum number of records allowed to be suppressed
    GT           -- dict of generalization trees keyed by attribute name
    vectors_list -- vectors_list[h] = all height vectors whose components sum to h
    LossTable    -- per-attribute loss tables (see cal_generalization_loss)

    Returns (best_vector, suppressed_keys, loss); best_vector is None if no
    vector ever satisfied the constraint.
    """
    datas = []
    out_count = 0
    count = 0
    # low/high bound the total generalization height; 8 is the maximum
    # possible sum of the four tree heights (age 4 + gender 1 + race 1 + marital 2).
    low, high = 0, 8
    # Parse the quasi-identifier columns, skipping records with missing values ('?').
    for lines in data_file:
        out_count = out_count + 1
        if not '?' in lines:
            lines = lines.strip()
            age,gender,race,marital_state = lines.split(',')[0],lines.split(',')[9],lines.split(',')[8],lines.split(',')[5]
            datas.append([age,gender.strip(),race.strip(),marital_state.strip()])
    sol = None
    unsatisfy_list = []
    #pre define a large loss, and aim to minimize loss
    loss = 300.0
    while low < high: # binary search for the minimum height that can satisfy k-anonymity
        mid = math.floor((low + high) / 2)
        print("high : %d, low : %d, mid : %d" % (high, low, mid))
        vectors = vectors_list[mid]
        reach_k = False
        for vector in vectors:
            satisfy_or_not, temp_list = satisfy(datas, k, vector, GT, maxSup)
            if satisfy_or_not:
                # among satisfying vectors at this height, keep the lowest loss
                lm = cal_loss_metric(datas, vector, GT, LossTable, temp_list)
                if lm < loss:
                    sol = vector
                    unsatisfy_list = temp_list
                    loss = lm
                reach_k = True
        if reach_k:
            high = mid
        else:
            low = mid + 1
    return sol, unsatisfy_list, loss
def cal_loss_metric(datas, sol, GT, LossTable, unsatisfy_list):
    """Average per-record loss of generalizing `datas` by height vector `sol`.

    Each record's four attributes are climbed sol[i] levels up their
    generalization trees; records whose generalized key is suppressed
    (in unsatisfy_list) contribute the maximum loss of 1 per attribute,
    all others contribute the per-attribute values from LossTable.
    """
    trees = [GT[key] for key in ('age', 'gender', 'race', 'marital_status')]
    tables = [LossTable[key] for key in ('age', 'gender', 'race', 'marital')]
    # one running sum per attribute (kept separate to match the original
    # accumulation order exactly, including floating-point rounding)
    sums = [0, 0, 0, 0]
    for record in datas:
        values = list(record[:4])
        for attr in range(4):
            tree = trees[attr]
            for _ in range(sol[attr]):
                values[attr] = tree[values[attr]]
        if ''.join(values) in unsatisfy_list:
            for attr in range(4):
                sums[attr] += 1
        else:
            for attr in range(4):
                sums[attr] += tables[attr][values[attr]]
    return (sums[0] + sums[1] + sums[2] + sums[3]) / len(datas)
def satisfy(datas, k, vec, GT, maxSup):
    """Check whether generalizing `datas` by height vector `vec` yields
    k-anonymity once equivalence classes smaller than k are suppressed.

    Returns (True, suppressed_keys) when the total number of records in
    undersized classes is at most maxSup, otherwise (False, None).
    """
    trees = (GT['age'], GT['gender'], GT['race'], GT['marital_status'])
    counts = {}
    for record in datas:
        values = list(record[:4])
        # climb each attribute vec[attr] levels up its generalization tree
        for attr in range(4):
            tree = trees[attr]
            for _ in range(vec[attr]):
                values[attr] = tree[values[attr]]
        key = ''.join(values)
        counts[key] = counts.get(key, 0) + 1
    # equivalence classes that fall short of k must be suppressed entirely
    small = [key for key, cnt in counts.items() if cnt < k]
    suppressed = sum(counts[key] for key in small)
    if suppressed > maxSup:
        return False, None
    return True, small
#return True if node x is a (transitive) descendant of node y in the
#child -> parent mapping GT
def is_child(GT, x, y):
    while x in GT:
        parent = GT[x]
        if parent == y:
            return True
        x = parent
    return False

#count the leaf nodes (nodes that never appear as a parent) in the
#subtree rooted at x
def cal_node_num(GT, x):
    # Perf fix: the original rebuilt/ scanned GT.values() (O(n)) for every
    # key; compute the parent set once per call.
    parents = set(GT.values())
    num = 0
    for key in GT:
        if key not in parents and is_child(GT, key, x):
            num += 1
    return num

def cal_generalization_loss(GT):
    """Loss-metric table for generalization tree GT (child -> parent dict).

    Leaves score 0; an internal node scores (leaves_below - 1) / (total_leaves - 1),
    i.e. the fraction of distinguishable values merged by generalizing to it.
    Raises ZeroDivisionError for a tree with a single leaf (as the original did).
    """
    parents = set(GT.values())
    leaves = [key for key in GT if key not in parents]
    tree_node = len(leaves)
    loss = {key: 0 for key in leaves}
    # Perf fix: iterate each distinct parent once; the original looped over
    # GT.values() and recomputed the same node for every duplicate entry.
    for value in parents:
        loss[value] = (cal_node_num(GT, value) - 1) / (tree_node - 1)
    # print(loss)
    return loss
def main():
    """Driver: build the four generalization trees and their loss tables,
    run Samarati on the adult dataset, and write the anonymized output.

    Usage: samarati.py [k maxSup]  (defaults: k=3, maxSup=20)
    """
    data_file = open("data_privacy_lab1/adult.data",'r').readlines()
    generalization_tree = {}
    loss_table = {}
    #to calculate the loss of age, need get age tree node list
    age_list = []
    for lines in data_file:
        if not '?' in lines:
            age = lines.split(',')[0]
            if not age in age_list:
                age_list.append(age)
    print(sorted(age_list))
    #build generalization trees
    #build gender generalization tree (child,parent pairs, one per line)
    gender_tree = {}
    gender_height = 1
    for lines in open("data_privacy_lab1/adult_gender.txt",'r').readlines():
        lines = lines.strip()
        son_node,father_node = lines.split(',')[0], lines.split(',')[1]
        gender_tree[son_node] = father_node
    generalization_tree['gender'] = gender_tree
    # print(gender_tree)
    #build gender loss table
    gender_loss = cal_generalization_loss(gender_tree)
    loss_table['gender'] = gender_loss
    #build race generalization tree
    race_tree = {}
    race_height = 1
    for lines in open("data_privacy_lab1/adult_race.txt",'r'):
        lines = lines.strip()
        son_node,father_node = lines.split(',')[0], lines.split(',')[1]
        race_tree[son_node] = father_node
    generalization_tree['race'] = race_tree
    race_loss = cal_generalization_loss(race_tree)
    loss_table['race'] = race_loss
    #build marital_status generalization tree (skip blank lines)
    marital_tree = {}
    marital_height = 2
    for lines in open("data_privacy_lab1/adult_marital_status.txt",'r').readlines():
        if lines.split():
            lines = lines.strip()
            son_node,father_node = lines.split(',')[0], lines.split(',')[1]
            marital_tree[son_node] = father_node
    generalization_tree['marital_status'] = marital_tree
    marital_loss = cal_generalization_loss(marital_tree)
    loss_table['marital'] = marital_loss
    #build age generalization tree: 4 levels of ranges
    # (20-year -> 10-year -> 5-year -> exact age), rooted at '*'
    age_tree = {}
    age_height = 4
    for i in range(0,5):
        str_1 = str(i * 20) + '~' + str((i + 1) * 20 - 1)
        age_tree[str_1] = '*'
        for j in range(0,2):
            str_2 = str(i * 20 + j * 10) + '~' + str(i * 20 + (j + 1) * 10 - 1)
            age_tree[str_2] = str_1
            for k in range(0,2):
                str_3 = str(i * 20 + j * 10 + k * 5) + '~' + str(i * 20 + j * 10 + (k + 1) * 5 - 1)
                age_tree[str_3] = str_2
                for l in range(0,5):
                    str_4 = str(i * 20 + j * 10 + k * 5 + l)
                    #delete not exist age
                    if str_4 in age_list:
                        age_tree[str_4] = str_3
    # repeatedly prune internal range nodes that ended up with no children
    # (ages absent from the dataset), until the tree is stable
    while True:
        pop_list = []
        for key in age_tree.keys():
            if not key in age_list and not key in age_tree.values():
                pop_list.append(key)
        if len(pop_list) == 0:
            break
        for key in pop_list:
            age_tree.pop(key)
    generalization_tree['age'] = age_tree
    age_loss = cal_generalization_loss(age_tree)
    loss_table['age'] = age_loss
    # for key in age_tree.keys():
    #     print(str(key) + ' : ' + age_tree[key])
    # defaults; optionally overridden by two command-line arguments
    k_anonimity = 3
    maxSup = 20
    if not (len(sys.argv) == 1 or len(sys.argv) == 3):
        print("error arg number!")
        print(len(sys.argv))
    elif len(sys.argv) == 3:
        k_anonimity = int(sys.argv[1])
        maxSup = int(sys.argv[2])
    # group all height vectors by total height; works because
    # typing.DefaultDict(list) instantiates a collections.defaultdict
    # (deprecated alias -- prefer collections.defaultdict directly)
    vectors = DefaultDict(list)
    for i in range(0,5):
        for j in range(0,2):
            for k in range(0,2):
                for l in range(0,3):
                    vectors[i + j + k + l].append([i,j,k,l])
    sol, unsatisfy_list, total_loss = samarati(data_file, k_anonimity, maxSup, generalization_tree, vectors, loss_table)
    print(sol)
    print(unsatisfy_list)
    print("the loss metric is %f" % total_loss)
    # print(total_loss)
    # write out the generalized records, dropping suppressed equivalence classes
    write_file = open("samarati_k-anonymity_adult.data", 'w')
    for lines in data_file:
        if not '?' in lines:
            read_content = []
            for i in range(0,15):
                read_content.append(lines.split(',')[i].strip())
            age,gender,race,marital_state, occupation = read_content[0],read_content[9],read_content[8],read_content[5],read_content[6]
            # generalization data in origin file
            for i in range(0, sol[0]):
                age = age_tree[age]
            for i in range(0, sol[1]):
                gender = gender_tree[gender]
            for i in range(0, sol[2]):
                race = race_tree[race]
            for i in range(0, sol[3]):
                marital_state = marital_tree[marital_state]
            if not age+gender+race+marital_state in unsatisfy_list:
                write_content = []
                write_content.append(age)
                write_content.append(gender)
                write_content.append(race)
                write_content.append(marital_state)
                write_content.append(occupation)
                write_file.write(','.join(write_content))
                write_file.write('\n')
# Script entry point.
if __name__ == '__main__':
    main()
"xzwj@ustc.edu.cn"
] | xzwj@ustc.edu.cn |
9f856c8c5bc965ad745f96710fbc2c42a0f8ac3b | cef4c18160c6ae81cb6f549567fc9399a502dfc3 | /ARC/ARC102/D.py | 5215735cf13fd307eb0c1f42f78033359fdb9ffd | [] | no_license | nobishino/AtCoder | 0a6c918e7f0bb1acbb63a58894f7132e6d640a98 | 5c55861d3649fa38c20e77113d2ea5297900a480 | refs/heads/master | 2022-01-02T09:16:50.051125 | 2021-12-25T14:00:04 | 2021-12-25T14:00:04 | 147,978,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | L = int(input())
def divideBin(n):
    """Decompose n into its binary place values, least significant first.

    Each element is either 0 or the power of two for that bit position,
    e.g. divideBin(6) -> [0, 2, 4] because 6 = 2 + 4.
    """
    parts = []
    place = 1
    while n > 0:
        n, bit = divmod(n, 2)
        parts.append(bit * place)
        place <<= 1
    return parts
# Decompose L into binary place values; one graph vertex per bit position.
divided = divideBin(L)
N = len(divided)  # number of vertices
# edge count: two parallel edges per step of the 1..N chain, plus one
# shortcut edge per set bit of L except the highest one
M = 2*(N-1) + len([x for x in divided if x!=0]) - 1
print(N,M)
# chain edges i -> i+1, one labeled 0 and one labeled 2^(i-1)
for i in range(1,N):
    print(i,i+1,0)
    print(i,i+1,pow(2,i-1))
# shortcut edges node -> N carrying the accumulated value of the higher set bits
node = N-1
value = divided[N-1]
while node > 0:
    if divided[node-1] != 0:
        print(node,N,value)
        value += divided[node-1]
    node -= 1
"john@example.com"
] | john@example.com |
507f1fbfc71ddef13899a4a1feede26c648efd48 | b5702c7abbd2bf546d8b8329be33a8ace077bb45 | /home/data/winner.py | fada443ce8ca9c93fdbd5f6d9c14513a96b22824 | [] | no_license | divanshu79/Africa-Cup | e0fe77a74615df5e3e52360d8349aae472d7c7f8 | 975e70a222e5618df9e2a2aa2a654e1760ac840e | refs/heads/master | 2020-03-28T21:09:01.547020 | 2018-09-24T18:54:36 | 2018-09-24T18:54:36 | 149,132,966 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import requests
from bs4 import BeautifulSoup
import pandas as pd
from collections import defaultdict
import os
os.environ["HTTPS_PROXY"] = "https://ipg_2014037:Divanshu79@192.168.1.107:3128"
link = 'https://www.worldfootball.net/winner/afrika-cup/'
r = requests.get(link)
soup = BeautifulSoup(r.content, "html.parser")
tr = soup.find('table', {'class': 'standard_tabelle', 'cellpadding':'3'})
td = tr.find_all('td')
data = defaultdict(list)
for j in range(len(td)):
i = j%5
if i == 0:
txt = td[j].text
txt = txt[1:5]
data['year'].append(txt)
elif i == 2:
text = td[j].text
data['team'].append(text)
df = pd.DataFrame(data)
df.to_csv('winners.csv', sep=',', index=False)
| [
"divanshu79@github.com"
] | divanshu79@github.com |
e898e48a7a702259949e11506a6791a7b8add7dc | 6a36ace6e5c5a68d1695e956553e56a9e328d3d7 | /tests/test_search.py | 27fc835541b6c64c79470d414e9b6edc2c833534 | [
"BSD-3-Clause"
] | permissive | ltucker/giblets | d93bb3bb63d66939d6aac05e08991a7893df93bf | c852bd5d2d0319de522720c829fa95d2e67ab5c1 | refs/heads/main | 2023-03-05T12:35:45.847845 | 2010-06-12T13:58:37 | 2010-06-12T13:58:37 | 306,022 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,192 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2010 Luke Tucker
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: Luke Tucker <voxluci@gmail.com>
#
from giblets import ExtensionInterface
class TestPathInterface(ExtensionInterface):
    """Marker interface for plugins discovered by scanning a filesystem path."""
    pass
class TestEggInterface(ExtensionInterface):
    """Marker interface for plugins discovered via setuptools entry points."""
    pass
def test_load_from_path():
    """Plugins on disk are invisible until find_plugins_in_path() scans the
    directory; afterwards exactly the three expected plugins are registered."""
    from giblets.core import ComponentManager, Component, ExtensionPoint
    from giblets.search import find_plugins_in_path
    class PluginFinder(Component):
        found_plugins = ExtensionPoint(TestPathInterface)
    mgr = ComponentManager()
    pf = PluginFinder(mgr)
    # to start with, nothing should be found
    assert len(pf.found_plugins) == 0
    find_plugins_in_path('test_plugin_path')
    expected_plugins = ['TestPathPlugin1', 'TestPathPlugin2', 'TestPathPlugin3']
    got_plugins = set()
    assert len(pf.found_plugins) == len(expected_plugins)
    # every discovered plugin must be expected...
    for plugin in pf.found_plugins:
        plugin_name = plugin.__class__.__name__
        assert plugin_name in expected_plugins
        got_plugins.add(plugin_name)
    # ...and every expected plugin must have been discovered
    for plugin_name in expected_plugins:
        assert plugin_name in got_plugins
def test_load_from_entry_point():
    """Plugins registered under a setuptools entry-point group are invisible
    until find_plugins_by_entry_point() loads them; afterwards exactly the
    three expected plugins are registered."""
    from giblets.core import ComponentManager, Component, ExtensionPoint
    from giblets.search import find_plugins_by_entry_point
    class PluginFinder(Component):
        found_plugins = ExtensionPoint(TestEggInterface)
    mgr = ComponentManager()
    pf = PluginFinder(mgr)
    # to start with, nothing should be found
    assert len(pf.found_plugins) == 0
    find_plugins_by_entry_point('giblets_load_from_entry_point_test')
    expected_plugins = ['TestEggPlugin1', 'TestEggPlugin2', 'TestEggPlugin3']
    got_plugins = set()
    assert len(pf.found_plugins) == len(expected_plugins)
    # every discovered plugin must be expected...
    for plugin in pf.found_plugins:
        plugin_name = plugin.__class__.__name__
        assert plugin_name in expected_plugins
        got_plugins.add(plugin_name)
    # ...and every expected plugin must have been discovered
    for plugin_name in expected_plugins:
        assert plugin_name in got_plugins
"ltucker@openplans.org"
] | ltucker@openplans.org |
75ff04700bbef3333f0e5b04408e5c6d166a6e34 | 2caa47f0bdb2f03469a847c3ba39496de315d992 | /Contest/ABC117/b/main.py | d60a1bb7a504f56ae3ca6140c9cb43c9ca6653d3 | [
"CC0-1.0"
] | permissive | mpses/AtCoder | 9023e44885dc67c4131762281193c24b69d3b6da | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | refs/heads/master | 2023-03-23T17:00:11.646508 | 2021-03-20T12:21:19 | 2021-03-20T12:21:19 | 287,489,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | #!/usr/bin/env python3
# AtCoder ABC117 B: a polygon can be drawn iff the longest side is strictly
# shorter than the sum of all the other sides.
_ = input()  # N (side count) -- unused, implicit in the next line
*l, m = sorted(map(int, input().split()))  # m = longest side, l = the rest
print("Yes" if sum(l) > m else "No")
"nsorangepv@gmail.com"
] | nsorangepv@gmail.com |
b42085623bfb37fdf6726a4fdc713eb4af185cb2 | 8dd73da1aefe1a1cee86f65c04b75a800de6d817 | /czy_liczba_jest_pierwsza.py | a42c5a06ee20b39ea3bdb130637a30f5043e8c0d | [] | no_license | pawel-turowski/pawelturowski | 5268e537e5a120778f2689d5d74c20fe71fed9db | 409fea327083ea18ec85d8b41684439c850adcd7 | refs/heads/master | 2020-08-03T09:01:41.804711 | 2020-04-05T16:44:20 | 2020-04-05T16:44:20 | 211,693,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from math import *
def liczba_pierwsza1(n):
    """Return True if n is prime, by trial division up to sqrt(n)."""
    # Bug fix: 0, 1 and negative numbers are not prime; the original
    # returned True for them because the loop body never ran.
    if n < 2:
        return False
    # isqrt avoids float-rounding errors that int(sqrt(n)) has for large n
    for i in range(2, isqrt(n) + 1):
        if n % i == 0:
            return False
    return True
def liczba_pierwsza2(n):
    """Return True if n is prime; while-loop variant of liczba_pierwsza1."""
    # Bug fix: 0, 1 and negative numbers are not prime; the original
    # returned True for them because the loop body never ran.
    if n < 2:
        return False
    # isqrt avoids float-rounding errors that int(sqrt(n)) has for large n
    pom = isqrt(n)
    i = 2
    while i <= pom:
        if n % i == 0:
            return False
        i += 1
    return True
# Demo: 61 is prime (True); 105 = 3*5*7 is composite (False).
print(liczba_pierwsza1(61))
print(liczba_pierwsza2(105))
| [
"pawel1546@gmail.com"
] | pawel1546@gmail.com |
2ce9d1fda2c36f6d8142ac08861968fab7388a2e | 2688202f9f40f916f90c92dbeac423523c2ebad2 | /library/catalog/forms.py | 9c74a6d55869d3260eb4b57a4a81d79a3afc6714 | [] | no_license | bykoviu/Final | 45f6417c5dae055c632298a34c9be595aee7478a | 5b7f0fa54012770fefe8afe1731dbac219606516 | refs/heads/main | 2023-01-14T05:46:00.057861 | 2020-11-25T11:51:33 | 2020-11-25T11:51:33 | 315,565,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | from .models import Comment
from django.forms import ModelForm, TextInput, CharField, PasswordInput, ValidationError
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
class CommentForm(ModelForm):
    """Form for posting a Comment (author name in `title`, text in `comm`)."""
    class Meta:
        model = Comment
        fields = ['title', 'comm']
        # Bootstrap styling plus placeholder text for both inputs.
        widgets = {
            'title': TextInput(attrs={
                'class': 'form-control',
                'placeholder': 'Enter your name'
            }),
            'comm': TextInput(attrs={
                'class': 'form-control',
                'placeholder': 'Add your comment'
            })
        }
class AuthUserForm(AuthenticationForm, ModelForm):
    """Login form with custom field labels and Russian-language validation
    errors for unknown users and wrong passwords."""
    password = CharField(widget=PasswordInput)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['username'].label = 'Enter your nickname'
        self.fields['password'].label = 'Enter your password'
    def clean(self):
        """Validate the credentials; error messages are user-facing Russian text."""
        username = self.cleaned_data['username']
        password = self.cleaned_data['password']
        # unknown user -> "user {username} is not registered"
        if not User.objects.filter(username=username).exists():
            raise ValidationError(f'Пользователь {username} не зарегистрирован.')
        user = User.objects.filter(username=username).first()
        if user:
            # wrong password -> "incorrect password"
            if not user.check_password(password):
                raise ValidationError('Не верный пароль')
        return self.cleaned_data
    class Meta:
        model = User
        fields = ['username', 'password']
class RegisterUserForm(ModelForm):
    """Registration form with a repeated-password confirmation field."""
    password = CharField(label='Password', widget=PasswordInput)
    password2 = CharField(label='Repeat password', widget=PasswordInput)
    class Meta:
        model = User
        fields = ('username', 'password', 'email')
    def clean_password2(self):
        """Reject the form when the two password entries differ."""
        cd = self.cleaned_data
        if cd['password'] != cd['password2']:
            raise ValidationError('Passwords don\'t match.')
        return cd['password2']
    # def __init__(self, *args, **kwargs):
    #     super().__init__(*args, **kwargs)
    #     for field in self.fields:
    #         self.fields[field].widget.attrs['class'] = 'form-control'
    # def save(self, commit=True):
    #     user = super().save(commit=False)
    #     user.set_password(self.cleaned_data['password'])
    #     if commit:
    #         user.save()
    #     return user
"71722112+bykoviu@users.noreply.github.com"
] | 71722112+bykoviu@users.noreply.github.com |
0a95b3c9b7871e8e52bb94277a1aea59e8f86d74 | aea41ddc786d3291bdd25a42a1a3a0e3339e7af3 | /src/myastro/log.py | daa6a2d0b99f6dc8785e0490c7035ea39e2f39f9 | [
"Apache-2.0"
] | permissive | benitocm/practical-astronomy | 1b842c099684852e6f62aab74b4faa608d5e64b0 | 4bfea9d5b2bb49997f35e8c7b1ada2708ee6c978 | refs/heads/main | 2023-09-01T06:22:47.173613 | 2021-10-12T17:58:17 | 2021-10-12T17:58:17 | 416,433,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | import logging
import sys
from logging.handlers import TimedRotatingFileHandler
#https://www.toptal.com/python/in-depth-python-logging
# Single shared record layout used by every handler this module creates.
FORMATTER = logging.Formatter("%(asctime)s — %(name)s — %(levelname)s — %(funcName)s:%(lineno)d — %(message)s")
# Log file path, relative to the process working directory.
LOG_FILE = "my_app.log"
def get_console_handler():
    """Build a handler that writes formatted log records to stdout."""
    stdout_handler = logging.StreamHandler(stream=sys.stdout)
    stdout_handler.setFormatter(FORMATTER)
    return stdout_handler
def get_file_handler():
    """Build a handler that writes to LOG_FILE, rotating the file at midnight."""
    rotating_handler = TimedRotatingFileHandler(LOG_FILE, when='midnight')
    rotating_handler.setFormatter(FORMATTER)
    return rotating_handler
def get_logger(logger_name):
    """Return a WARNING-level logger named *logger_name* with a rotating
    file handler attached.

    logging.getLogger() returns the same object for the same name, so guard
    against attaching a second handler on repeated calls -- the original
    added a new handler every time, duplicating every log line.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.WARNING)
    if not logger.handlers:
        logger.addHandler(get_file_handler())
    # with this pattern, it's rarely necessary to propagate the error up to parent
    logger.propagate = False
    return logger
if __name__ == "__main__":
    # Smoke test: emit one record through the configured file handler.
    logger = get_logger(__name__)
    logger.error("Test")
| [
"benitocm@gmail.com"
] | benitocm@gmail.com |
e3bf79405992f5ccd885cf735c0396700ac2fc5e | 7c434c37d56ff4c640f8d42b1ff0c0dc1732445e | /pelicanconf.py | 83f1a9a4673408446f522fccd20434b644b51491 | [] | no_license | sambuc/42 | 732926a64ecb9c273d56777ad3c97130a9a1d849 | e8d1aeac0e14d47fe0044a3f1856870b48ea78b1 | refs/heads/master | 2021-01-01T06:27:04.484549 | 2014-09-02T20:30:25 | 2014-09-02T20:30:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,174 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
DELETE_OUTPUT_DIRECTORY = False
RELATIVE_URLS = True
#GOOGLE_ANALYTICS = 'UA-51482621-1'
SITEURL = 'http://localhost:8000'
# End of Dev-specific
AUTHOR = u'Lionel Sambuc'
SITENAME = u'42 – Random Thoughts on Programming, OS and Everything Else.'
SITESUBTITLE = u'Random Thoughts on Programming, OS and Everything Else.'
THEME = '../pelican-themes/pelican-bootstrap3'
# Light theme
BOOTSTRAP_THEME = 'spacelab'
CUSTOM_CSS = 'theme/css/custom-spacelab.css'
# Dark theme -- NOTE: this pair reassigns (and therefore overrides) the
# light-theme values above; comment these two lines out to use the light theme.
BOOTSTRAP_THEME = 'cyborg'
CUSTOM_CSS = 'theme/css/custom-cyborg.css'
#SITELOGO = 'images/MegaTokyo.png'
#SITELOGO_SIZE = 50
#FAVICON = 'images/favicon.png'
#CC_LICENSE = 'CC-BY-NC-SA'
# Facebook stuff
USE_OPEN_GRAPH = False
#OPEN_GRAPH_IMAGE = <relative to static image path>
COLORBOX_THEME = 'dark'
COLORBOX_PARAMS = 'transition:"none", width:"75%", height:"75%"'
# Overrides the line above -- only the 'elastic' transition takes effect.
COLORBOX_PARAMS = 'transition:"elastic"'
# GitHub
#GITHUB_USER = 'sambuc'
#GITHUB_REPO_COUNT =
#GITHUB_SKIP_FORK = True
#GITHUB_SHOW_USER_LINK = True
DEFAULT_PAGINATION = 10
GALLERY_IMG_PER_ROW = 3
#RELATED_POSTS_MAX = 5
#EXIF_INFO_DEFAULT = False
BOOTSTRAP_NAVBAR_INVERSE = True
DISPLAY_BREADCRUMBS = True
DISPLAY_CATEGORY_IN_BREADCRUMBS = True
DISPLAY_CATEGORIES_ON_SIDEBAR = False
DISPLAY_TAGS_ON_SIDEBAR = True
DISPLAY_RECENT_POSTS_ON_SIDEBAR = True
#RECENT_POSTS_COUNT = 5
DISPLAY_CATEGORIES_ON_MENU = True
DISPLAY_PAGES_ON_MENU = True
# code blocks with line numbers
PYGMENTS_RST_OPTIONS = {'linenos': 'table'}
#MD_EXTENSIONS = (['codehilite(css_class=highlight)', 'extra'])
# Plugins used
PLUGIN_PATH = '../pelican-plugins'
PLUGINS = ['related_posts', 'gallery', 'exif_info']
# Main Settings
TIMEZONE = 'Europe/Amsterdam'
DEFAULT_LANG = u'en'
LOCALE = ('en_US')
# Extra Items in the top menu
#MENUITEMS = (
# ('HOME', 'http://www.minix3.org'),
# )
# Blogroll
LINKS = (
('MINIX 3', 'http://www.minix3.org'),
('Ohloh', 'https://www.ohloh.net/accounts/sambuc'),
)
# Social widget
SOCIAL = (
('facebook', 'https://www.facebook.com/lionel.sambuc'),
('linkedin', 'https://www.linkedin.com/in/lionelsambuc'),
('google+', 'https://plus.google.com/113198308632164585389/posts'),
('github', 'http://github.com/sambuc'),
('RSS', 'feeds/rss.xml'),
('Atom', 'feeds/atom.xml'),
)
# path-specific metadata
EXTRA_PATH_METADATA = {
'theme-extra/css/colorbox.css': {'path': 'theme/css/colorbox.css'},
'theme-extra/css/colorbox.dark.css': {'path': 'theme/css/colorbox.dark.css'},
'theme-extra/css/colorbox.light.css': {'path': 'theme/css/colorbox.light.css'},
'theme-extra/css/custom-cyborg.css': {'path': 'theme/css/custom-cyborg.css'},
'theme-extra/css/custom-spacelab.css': {'path': 'theme/css/custom-spacelab.css'},
'theme-extra/css/images/controls.png': {'path': 'theme/css/images/controls.png'},
'theme-extra/css/images/loading.gif': {'path': 'theme/css/images/loading.gif'},
'theme-extra/js/i18n/jquery.colorbox-ar.js': {'path': 'theme/js/i18n/jquery.colorbox-ar.js'},
'theme-extra/js/i18n/jquery.colorbox-bg.js': {'path': 'theme/js/i18n/jquery.colorbox-bg.js'},
'theme-extra/js/i18n/jquery.colorbox-ca.js': {'path': 'theme/js/i18n/jquery.colorbox-ca.js'},
'theme-extra/js/i18n/jquery.colorbox-cs.js': {'path': 'theme/js/i18n/jquery.colorbox-cs.js'},
'theme-extra/js/i18n/jquery.colorbox-da.js': {'path': 'theme/js/i18n/jquery.colorbox-da.js'},
'theme-extra/js/i18n/jquery.colorbox-de.js': {'path': 'theme/js/i18n/jquery.colorbox-de.js'},
'theme-extra/js/i18n/jquery.colorbox-es.js': {'path': 'theme/js/i18n/jquery.colorbox-es.js'},
'theme-extra/js/i18n/jquery.colorbox-et.js': {'path': 'theme/js/i18n/jquery.colorbox-et.js'},
'theme-extra/js/i18n/jquery.colorbox-fa.js': {'path': 'theme/js/i18n/jquery.colorbox-fa.js'},
'theme-extra/js/i18n/jquery.colorbox-fi.js': {'path': 'theme/js/i18n/jquery.colorbox-fi.js'},
'theme-extra/js/i18n/jquery.colorbox-fr.js': {'path': 'theme/js/i18n/jquery.colorbox-fr.js'},
'theme-extra/js/i18n/jquery.colorbox-gl.js': {'path': 'theme/js/i18n/jquery.colorbox-gl.js'},
'theme-extra/js/i18n/jquery.colorbox-gr.js': {'path': 'theme/js/i18n/jquery.colorbox-gr.js'},
'theme-extra/js/i18n/jquery.colorbox-he.js': {'path': 'theme/js/i18n/jquery.colorbox-he.js'},
'theme-extra/js/i18n/jquery.colorbox-hr.js': {'path': 'theme/js/i18n/jquery.colorbox-hr.js'},
'theme-extra/js/i18n/jquery.colorbox-hu.js': {'path': 'theme/js/i18n/jquery.colorbox-hu.js'},
'theme-extra/js/i18n/jquery.colorbox-id.js': {'path': 'theme/js/i18n/jquery.colorbox-id.js'},
'theme-extra/js/i18n/jquery.colorbox-it.js': {'path': 'theme/js/i18n/jquery.colorbox-it.js'},
'theme-extra/js/i18n/jquery.colorbox-ja.js': {'path': 'theme/js/i18n/jquery.colorbox-ja.js'},
'theme-extra/js/i18n/jquery.colorbox-kr.js': {'path': 'theme/js/i18n/jquery.colorbox-kr.js'},
'theme-extra/js/i18n/jquery.colorbox-lt.js': {'path': 'theme/js/i18n/jquery.colorbox-lt.js'},
'theme-extra/js/i18n/jquery.colorbox-lv.js': {'path': 'theme/js/i18n/jquery.colorbox-lv.js'},
'theme-extra/js/i18n/jquery.colorbox-my.js': {'path': 'theme/js/i18n/jquery.colorbox-my.js'},
'theme-extra/js/i18n/jquery.colorbox-nl.js': {'path': 'theme/js/i18n/jquery.colorbox-nl.js'},
'theme-extra/js/i18n/jquery.colorbox-no.js': {'path': 'theme/js/i18n/jquery.colorbox-no.js'},
'theme-extra/js/i18n/jquery.colorbox-pl.js': {'path': 'theme/js/i18n/jquery.colorbox-pl.js'},
'theme-extra/js/i18n/jquery.colorbox-pt-br.js': {'path': 'theme/js/i18n/jquery.colorbox-pt-br.js'},
'theme-extra/js/i18n/jquery.colorbox-ro.js': {'path': 'theme/js/i18n/jquery.colorbox-ro.js'},
'theme-extra/js/i18n/jquery.colorbox-ru.js': {'path': 'theme/js/i18n/jquery.colorbox-ru.js'},
'theme-extra/js/i18n/jquery.colorbox-si.js': {'path': 'theme/js/i18n/jquery.colorbox-si.js'},
'theme-extra/js/i18n/jquery.colorbox-sk.js': {'path': 'theme/js/i18n/jquery.colorbox-sk.js'},
'theme-extra/js/i18n/jquery.colorbox-sr.js': {'path': 'theme/js/i18n/jquery.colorbox-sr.js'},
'theme-extra/js/i18n/jquery.colorbox-sv.js': {'path': 'theme/js/i18n/jquery.colorbox-sv.js'},
'theme-extra/js/i18n/jquery.colorbox-tr.js': {'path': 'theme/js/i18n/jquery.colorbox-tr.js'},
'theme-extra/js/i18n/jquery.colorbox-uk.js': {'path': 'theme/js/i18n/jquery.colorbox-uk.js'},
'theme-extra/js/i18n/jquery.colorbox-zh-CN.js': {'path': 'theme/js/i18n/jquery.colorbox-zh-CN.js'},
'theme-extra/js/i18n/jquery.colorbox-zh-TW.js': {'path': 'theme/js/i18n/jquery.colorbox-zh-TW.js'},
'theme-extra/js/jquery.colorbox-min.js': {'path': 'theme/js/jquery.colorbox-min.js'},
}
# static paths will be copied without parsing their contents
STATIC_PATHS = [
'images',
'theme-extra/css',
'theme-extra/js'
]
# Feed generation is usually not desired when developing
FEED_DOMAIN = SITEURL
FEED_ATOM = 'feeds/atom.xml'
FEED_ALL_ATOM = 'feeds/atom/all.xml'
CATEGORY_FEED_ATOM = 'feeds/atom/cat/%s.xml'
TAG_FEED_ATOM = 'feeds/atom/tag/%s.xml'
FEED_RSS = 'feeds/rss.xml'
FEED_ALL_RSS = 'feeds/rss/all.xml'
CATEGORY_FEED_RSS = 'feeds/rss/cat/%s.xml'
TAG_FEED_RSS = 'feeds/rss/tag/%s.xml'
# How files are saved and accessed from the web.
ARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/'
ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
DAY_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/index.html'
MONTH_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/index.html'
YEAR_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/index.html'
ARCHIVES_URL = 'posts/'
ARCHIVES_SAVE_AS = 'posts/index.html'
AUTHOR_URL = 'author/{slug}/'
AUTHOR_SAVE_AS = 'author/{slug}/index.html'
AUTHORS_URL = 'author/'
AUTHORS_SAVE_AS = 'author/index.html'
CATEGORY_URL = 'category/{slug}/'
CATEGORY_SAVE_AS = 'category/{slug}/index.html'
CATEGORIES_URL = 'category/'
CATEGORIES_SAVE_AS = 'category/index.html'
TAG_URL = 'tag/{slug}/'
TAG_SAVE_AS = 'tag/{slug}/index.html'
TAGS_URL = 'tag/'
TAGS_SAVE_AS = 'tag/index.html' | [
"lionel.sambuc@gmail.com"
] | lionel.sambuc@gmail.com |
931832eb7a9a34f6a8107ee403c52344f9ebbd67 | be06876f460373dafe57d41dfaa225a512074572 | /167_Two Sum II - Input array is sorted/167.py | 66e418c07375485691b6bf0877b50f5bce5c1511 | [] | no_license | Uthergogogo/LeetCode | d5f909f3c21db81461e7ff205c4b685d595cbb2c | 35bd9250b8b672937acdc9b812a8ac519210949f | refs/heads/master | 2023-03-19T22:46:04.284681 | 2021-03-17T21:31:28 | 2021-03-17T21:31:28 | 294,833,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # One-pass Hash Table
def twoSum(numbers, target):
    """One-pass hash map: return the 1-based positions of the two entries
    of *numbers* summing to *target*, or None when no pair exists."""
    seen = {}  # value -> 1-based position of a previously visited element
    for position, value in enumerate(numbers, start=1):
        complement = target - value
        if complement in seen:
            return [seen[complement], position]
        seen[value] = position
# two pointers
def twoSum2(numbers, target):
    """Two-pointer scan over a sorted list: return the 1-based positions of
    the pair summing to *target*, or None when no pair exists."""
    lo, hi = 0, len(numbers) - 1
    while lo < hi:
        pair_sum = numbers[lo] + numbers[hi]
        if pair_sum == target:
            return [lo + 1, hi + 1]
        if pair_sum < target:
            lo += 1      # sum too small: advance the low pointer
        else:
            hi -= 1      # sum too large: retreat the high pointer
print(twoSum2([1, 3, 4, 5, 6], 11))
| [
"48381328+Uthergogogo@users.noreply.github.com"
] | 48381328+Uthergogogo@users.noreply.github.com |
132aceeae2d71d56670267f03dfd282b37b63a65 | be2035ce315b8d1ead9559764375bb2de928c5d6 | /RA.py | 4be6cfd72506b58ef506eefebc37cdb6768817a4 | [] | no_license | sumeetsachdev/Mutual-Exclusion-Algorithms-Simulation | ebdafa78fcf3b4970bbcc1f2f78ae5f090689ea0 | bd60aaa68ea3ca5dfee63efedfad6821667f4e2c | refs/heads/master | 2020-05-09T13:27:28.535962 | 2019-04-13T10:18:49 | 2019-04-13T10:18:49 | 181,152,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py | #Ricart-Agrawala
from random import choice
from random import random
import time
class Process:
    """A simulated process competing for the critical section (CS)."""

    def __init__(self, name, ts, wants_to_go, is_inside=False):
        self.name = name                 # process identifier
        self.ts = ts                     # timestamp used to order CS requests
        self.wants_to_go = wants_to_go   # True while the process requests the CS
        self.is_inside = is_inside       # True while the process holds the CS

    def __lt__(self, other):
        # Requests are served lowest-timestamp-first (Ricart-Agrawala order).
        # The parameter no longer shadows the class name as it did originally.
        return self.ts < other.ts

    def print_process(self):
        """Return a human-readable status line for this process."""
        res = "Process " + str(self.name)
        temp = [" does not want to go", " wants to go, ", " is not inside critical section", " is inside critical section"]
        if self.wants_to_go and self.is_inside:
            res += temp[1] + temp[3]
        elif not self.wants_to_go:
            res += temp[0]
        else:
            # BUG FIX: a process that wants the CS but does not hold it is
            # *not* inside; the original appended temp[3] here, making this
            # branch report "is inside critical section" incorrectly.
            res += temp[1] + temp[2]
        res += " and has timestamp " + str(self.ts)
        return res

    def __str__(self):
        return str(self.name)
def remove_process_index(queue, process):
    """Return the index in *queue* of the first entry whose ``name`` equals
    ``process.name``, or None when no such entry exists.

    The original walked the list with a manual counter; enumerate() expresses
    the same scan idiomatically, and the terminal ``return None`` is now
    explicit rather than an implicit fall-off.
    """
    for index, candidate in enumerate(queue):
        if candidate.name == process.name:
            return index
    return None
# ---- Simulation driver: Ricart-Agrawala style critical-section handover ----
# Build 8 processes with random timestamps and random CS intent.
choices = [True,False]
p = list()
for i in range(8):
    p.append(Process(i,random(),choice(choices)))
# Process 1 is forced to want the CS (it becomes the initial holder below).
p[1].wants_to_go, p[1].is_inside = True, False
print("Processes\t\tWants to go\tInside CS\t\tTimestamp")
for process in p:
    print(str(process.name) + "\t\t" + str(process.wants_to_go) + "\t\t " + str(process.is_inside) + "\t\t" + str(process.ts))
# Processes still waiting to enter the critical section.
p_true = [i for i in p if i.wants_to_go]
cs = [p[1]]  # cs[-1] is the current critical-section holder
print("Currently in CS: ", cs[-1].name)
queue = []
# The current holder no longer waits for entry.
p_true.pop(remove_process_index(p_true,cs[-1]))
while p_true:
    # Rebuild the request queue from every process still wanting the CS.
    for i in p:
        if i not in cs and i.wants_to_go == True:
            queue.append(i)
    queue = sorted(queue)  # lowest timestamp first (Process.__lt__)
    queue_names = [i.name for i in queue]
    print("Queued Processes: ", queue_names)
    print("Next to go to CS: ", min(queue))
    time.sleep(1)  # simulate time spent inside the critical section
    print("Process " + str(cs[-1]) + " has come out of CS")
    cs[-1].wants_to_go = False
    # Hand the CS to the queued request with the smallest timestamp.
    cs = [min(queue)]
    queue = list()
    p_true.pop(remove_process_index(p_true,cs[-1]))
    print("Currently in CS: ",cs[-1])
    print("\n")
| [
"noreply@github.com"
] | noreply@github.com |
c4a7483285447f23d952a6eeec8c31795bc30414 | 46aba0929e6061b818ba5205b61ebf73678b6cc7 | /pure_upload.py | d476321ace86a7c1d41f9d420963276359946305 | [] | no_license | edison1105/douyin2bilibili | 7e6af5ab959722cc3bf3e354ee2fda70e7446498 | 35dac5182354cc6bb55c6e0201a902f94e88789d | refs/heads/master | 2020-04-23T03:06:23.419733 | 2019-01-21T12:58:50 | 2019-01-21T12:58:50 | 170,868,058 | 1 | 1 | null | 2019-02-15T13:19:57 | 2019-02-15T13:19:57 | null | UTF-8 | Python | false | false | 3,425 | py |
# Selenium automation: logs into the caasdata publishing console with a
# pre-authenticated Chrome profile, fills in a short-video upload form from
# local text files, triggers helper .exe uploaders, and submits.
# NOTE(review): depends on a Windows machine, local .exe helpers and a
# logged-in Chrome profile -- not runnable in isolation.
import requests, shutil, time, os, re
import http.cookiejar
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
profile_dir=r"C:\Users\Administrator\AppData\Local\Google\Chrome\User Data" # path to your Chrome user-data directory (reuses existing login session)
chrome_options=webdriver.ChromeOptions()
chrome_options.add_argument("user-data-dir="+os.path.abspath(profile_dir))
sel=webdriver.Chrome(chrome_options=chrome_options)
time.sleep(1)
sel.get("https://publish.caasdata.com/homeIndex/program_upload_index")
time.sleep(1)
# Open the account selector and pick the short-video account group.
elem_select_account = sel.find_element_by_class_name("account_sel")
elem_select_account.click()
time.sleep(1)
elem_select_account_microvideo = sel.find_element_by_xpath("//li[text()='抖音快手短视频']")
elem_select_account_microvideo.click()
time.sleep(1)
# Tick the target distribution accounts by checkbox id.
sel.find_element_by_id("accounts49").click()
sel.find_element_by_id("accounts189").click()
sel.find_element_by_id("accounts188").click()
sel.find_element_by_id("accounts185").click()
sel.find_element_by_id("accounts190").click()
sel.find_element_by_id("accounts94").click()
#sel.find_element_by_id("accounts180").click()# sometimes elements are greyed out and unselectable -- remember to comment those out, e.g. this Baidu account
sel.find_element_by_id("accounts58").click()
sel.find_element_by_id("accounts51").click()
sel.find_element_by_id("accounts76").click()
sel.find_element_by_id("accounts77").click()
sel.find_element_by_id("accounts178").click()
# Read the video title from a local file; the line after the marker '标题1'
# becomes the programme name.
flp = open("视频标题.txt")
title_text = flp.read()
title_str = re.findall(r'标题1(.*)',title_text)[0]
elem_input_microvideotitle = sel.find_element_by_xpath('//input[@placeholder="请输入节目名称(注:在美拍、秒拍等UGC平台中,该名称不显示)"]')
elem_input_microvideotitle.send_keys(title_str)
flpv = open("视频基本信息.txt")# holds tags and intro; the date is auto-filled later and is not stored in this file
video_tag = flpv.readline()
video_intro = flpv.readline()
#print(video_tag,video_intro)
elem_input_microvideotag = sel.find_element_by_id('input_center')
elem_input_microvideotag.send_keys(video_tag)
# Prefix the intro with today's date, e.g. "[2019.01.21]".
timemark = time.strftime('[%Y.%m.%d]\n',time.localtime(time.time()))
elem_input_microvideoinform = sel.find_element_by_xpath('//textarea[@placeholder="请输入简介"]')
elem_input_microvideoinform.send_keys(timemark+video_intro)
time.sleep(1)
# Start the upload; the external helper drives the native file dialog.
sel.find_element_by_id("uploadBtn").click()
os.system(r'C:\\python-spider-master\douyin\upload.exe')
time.sleep(1)
timemark2 = time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time()))
shutil.move(os.path.join('output.mp4'),os.path.join('video_temp',timemark2+'.mp4'))# archive the uploaded video into video_temp under a timestamped name
target = sel.find_element_by_class_name("unifine_label")
sel.execute_script("arguments[0].scrollIntoView();", target)
time.sleep(1)
# Upload the cover/background image via the second helper.
sel.find_element_by_xpath('//label[@class="unifine_label"]').click()
os.system(r'C:\\python-spider-master\douyin\upload_bg.exe')
time.sleep(1)
sel.find_element_by_xpath('//span[@id="postfiles"]').click()
time.sleep(50)
# Keep clicking the publish button until it disappears (page submitted).
# NOTE(review): the bare except swallows every error, including real
# failures -- it is relied on here purely as the loop-exit condition.
while True:
    try:
        time.sleep(5)
        sel.find_element_by_xpath('//div[@class="unified_catalogue catalogue_form_config"]/div/div[@class="issue_box"]/button[@class="issue_btn"]').click()
    except:
        break
flp.close()
flpv.close()
| [
"noreply@github.com"
] | noreply@github.com |
bec987b46ec463a48ccfb01582519267edeb81fd | 8cb210f5a7b9a46dcdd1c4f4cdebb9b006e16d2f | /scripts/gridengine/paramsearch/runScript.py | 8f25665b32d3f6f045302200aca7832f8ad4e096 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | drwiner/Py3Dial | 0ed4572c3d110907a27a8a8f3167299db0de1919 | 0aa5b68f4548bb15e9d167165c17306fd267ee4f | refs/heads/master | 2020-03-26T19:13:53.511897 | 2018-08-18T21:45:32 | 2018-08-18T21:45:32 | 145,254,529 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 23,102 | py | import os
import sys
import argparse
""" DETAILS:
# THIS FILE EXPLORES GP/REGR/FF/LSTM MODELS
-- Try varying AT LEAST the following network parameters:
a) network structures: n_hideen, L1, L2, acitivation
b) learning rate, decay, and regularisation
"""
################################################
### repository path
################################################
repository_path = os.path.abspath(os.path.join(os.getcwd(),'../../../'))
def config_text(domains, root, seed,
screen_level,
maxturns,
belieftype, useconfreq, policytype, startwithhello, inpolicyfile, outpolicyfile, learning,
maxiter, gamma, learning_rate, tau, replay_type, minibatch_size, capacity,
exploration_type, epsilon_start, epsilon_end, n_in, features, max_k, \
learning_algorithm, architecture, h1_size, h2_size,
kernel,
random, scale,
usenewgoalscenarios,
nbestsize,
patience,
penaliseallturns,
wrongvenuepenalty,
notmentionedvaluepenalty,
sampledialogueprobs,
save_step,
confscorer,
oldstylepatience,
forcenullpositive,
file_level,
maxinformslots,
informmask,
informcountaccepted,
requestmask, confusionmodel, byemask,
n_samples, alpha_divergence, alpha, sigma_eps, sigma_prior,
stddev_var_mu, stddev_var_logsigma, mean_log_sigma,
nbestgeneratormodel,
delta, beta, is_threshold, train_iters_per_episode, training_frequency,
no_head, keep_prob, dropout_start,
old_style_parameter_sampling):
text = '[GENERAL]' + '\n'
text += 'domains = ' + domains + '\n'
text += 'singledomain = True' + '\n'
text += 'root = ' + root + '\n'
text += 'seed = ' + seed + '\n'
text += '\n'
text += '[conditional]' + '\n'
text += 'conditionalsimuser = True\n'
text += 'conditionalbeliefs = True\n'
text += '\n'
text += '[agent]' + '\n'
text += 'maxturns = ' + maxturns + '\n'
text += '\n'
text += '[logging]' + '\n'
text += 'screen_level = ' + screen_level + '\n'
text += 'file_level = ' + file_level + '\n'
text += '\n'
text += '[simulate]' + '\n'
text += 'mindomainsperdialog = 1\n'
text += 'maxdomainsperdialog = 1\n'
text += 'forcenullpositive = ' + forcenullpositive + '\n'
text += '\n'
text += '[policy]' + '\n'
text += 'maxinformslots = ' + maxinformslots + '\n'
text += 'informmask = ' + informmask + '\n'
text += 'informcountaccepted = ' + informcountaccepted + '\n'
text += 'requestmask = ' + requestmask + '\n'
text += 'byemask = ' + byemask + '\n'
text += '\n'
text += '[policy_' + domains + ']' + '\n'
text += 'belieftype = ' + belieftype + '\n'
text += 'useconfreq = ' + useconfreq + '\n'
text += 'policytype = ' + policytype + '\n'
text += 'startwithhello = ' + startwithhello + '\n'
text += 'inpolicyfile = ' + inpolicyfile + '\n'
text += 'outpolicyfile = ' + outpolicyfile + '\n'
text += 'learning = ' + learning + '\n'
text += 'save_step = ' + save_step + '\n'
text += '\n'
text += '[dqnpolicy_' + domains + ']' + '\n'
text += 'maxiter = ' + maxiter + '\n'
text += 'gamma = ' + gamma + '\n'
text += 'learning_rate = ' + learning_rate + '\n'
text += 'tau = ' + tau + '\n'
text += 'replay_type = ' + replay_type + '\n'
text += 'minibatch_size = ' + minibatch_size + '\n'
text += 'capacity = ' + capacity + '\n'
text += 'exploration_type = ' + exploration_type + '\n'
text += 'epsilon_start = ' + epsilon_start + '\n'
text += 'epsilon_end = ' + epsilon_end + '\n'
text += 'n_in = ' + n_in + '\n'
text += 'features = ' + features + '\n'
text += 'max_k = ' + max_k + '\n'
text += 'learning_algorithm = ' + learning_algorithm + '\n'
text += 'architecture = ' + architecture + '\n'
text += 'h1_size = ' + h1_size + '\n'
text += 'h2_size = ' + h2_size + '\n'
text += 'training_frequency = ' + training_frequency + '\n'
# Bayesian parameters
text += 'n_samples = ' + n_samples + '\n'
text += 'stddev_var_mu = ' + stddev_var_mu + '\n'
text += 'stddev_var_logsigma = ' + stddev_var_logsigma + '\n'
text += 'mean_log_sigma = ' + mean_log_sigma + '\n'
text += 'sigma_prior = ' + sigma_prior + '\n'
text += 'alpha =' + alpha + '\n'
text += 'alpha_divergence =' + alpha_divergence + '\n'
text += 'sigma_eps = ' + sigma_eps + '\n'
text += 'no_head = ' + no_head + '\n'
text += 'keep_prob = ' + keep_prob + '\n'
text += 'dropout_start = ' + dropout_start + '\n'
text += '\n'
# ACER
text += 'delta = ' + delta + '\n'
text += 'beta = ' + beta + '\n'
text += 'is_threshold = ' + is_threshold + '\n'
text += 'train_iters_per_episode = ' + train_iters_per_episode + '\n'
text += '\n'
text += '[gppolicy_' + domains + ']' + '\n'
text += 'kernel = ' + kernel + '\n'
text += '\n'
text += '[gpsarsa_' + domains + ']' + '\n'
text += 'random = ' + random + '\n'
text += 'scale = ' + scale + '\n'
text += '\n'
text += '[usermodel]' + '\n'
text += 'usenewgoalscenarios = ' + usenewgoalscenarios + '\n'
text += 'sampledialogueprobs = ' + sampledialogueprobs + '\n'
text += 'oldstylepatience = ' + oldstylepatience + '\n'
text += 'oldstylesampling = ' + old_style_parameter_sampling + '\n'
text += '\n'
text += '[errormodel]' + '\n'
text += 'nbestsize = ' + nbestsize + '\n'
text += 'confusionmodel = ' + confusionmodel + '\n'
text += 'nbestgeneratormodel = ' + nbestgeneratormodel + '\n'
text += 'confscorer = ' + confscorer + '\n'
text += '\n'
text += '[goalgenerator]' + '\n'
text += 'patience = ' + patience + '\n'
text += '\n'
text += '[eval]' + '\n'
text += 'rewardvenuerecommended = 0' + '\n'
text += 'penaliseallturns = ' + penaliseallturns + '\n'
text += 'wrongvenuepenalty = ' + wrongvenuepenalty + '\n'
text += 'notmentionedvaluepenalty = ' + notmentionedvaluepenalty + '\n'
text += '\n'
text += '[eval_' + domains + ']' + '\n'
text += 'successmeasure = objective' + '\n'
text += 'successreward = 20' + '\n'
text += '\n'
return text
def run_on_grid(targetDir, step, iter_in_step, test_iter_in_step, parallel, execDir, configName, text, mode,
                error):
    """Write the generated config *text* to <repository_path><configName>.cfg
    and launch the matching train/test runner via os.system.

    mode is a (phase, machine) tuple such as ('train', 'grid'); error is the
    semantic error-rate string forwarded to the runner. (Python 2 code:
    print statements.)
    """
    ################################################
    ### config file
    config = repository_path + configName + '.cfg'
    # if the directory does not exist, then create one
    config_dir = repository_path + 'configures/'
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
    with open(config, 'w') as f:
        f.write(text)
    runStr = 'running ' + config
    print '{0:*^60}'.format(runStr)
    # command = 'python run_grid_pyGPtraining_rpg.py ' + targetDir + ' 3 10000 1 ' + execDir + ' 15 1 ' + config
    # Select the runner script for the (phase, machine) combination; all
    # arguments are positional and consumed by the run_* scripts.
    if mode == ('train', 'grid'):
        command = 'python run_grid_pyGPtraining_rpg.py ' + targetDir + ' ' + step + ' ' + \
                  iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
    elif mode == ('test', 'grid'):
        command = 'python run_grid_pyGPtraining_rpg_test.py ' + targetDir + ' TEST ' + step + ' ' + \
                  test_iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
    elif mode == ('train', 'own'):
        command = 'python run_own_pyGPtraining_rpg.py ' + targetDir + ' ' + step + ' ' + \
                  iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
    elif mode == ('test', 'own'):
        command = 'python run_own_pyGPtraining_rpg_test.py ' + targetDir + ' TEST ' + step + ' ' + \
                  test_iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
    print command
    os.system(command)
def main(argv):
step = '10'
iter_in_step = '100'
test_iter_in_step = '100'
save_step = '100'
parallel = '1'
maxiter = str(int(step) * int(iter_in_step))
################################################
### Domain information
################################################
domains = 'CamRestaurants' # SF restaurants
if len(argv) > 4:
repository_path = argv[4]
root = repository_path
seed = argv[3]
screen_level = 'warning'
file_level = 'warning'
maxturns = '25'
################################################
### General policy information
################################################
belieftype = 'focus'
useconfreq = 'False'
policytype_vary = ['bdqn']#dropout', 'concrete', 'bootstrapped'] #'dqn', 'bbqn', 'bdqn'] # 'dropout', 'concrete'
startwithhello = 'False'
inpolicyfile = 'policyFile'
outpolicyfile = 'policyFile'
learning = 'True'
maxinformslots = '5' # Maximum number of slot values that are presented in the inform summary action
informmask = 'True' # Decides if the mask over inform type actions is used or not (having the mask active speeds up learning)
informcountaccepted = '4' # number of accepted slots needed to unmask the inform_byconstraints action
requestmask = 'True' # Decides if the mask over inform type actions is used or not
byemask = 'True'
################################################
### DNN architecture options
################################################
gamma = '0.99' # discount factor
learning_rate = '0.001' # learning rate
tau_vary = ['0.02'] # target policy network update frequency 0.02 is equal to update policy after 50 epochs
replay_type_vary = ['vanilla'] # ['vanilla'] experience replay
minibatch_size_vary = ['64'] # how many turns are in the batch
capacity_vary = ['1000'] # how many turns/dialogues are in ER
exploration_type_vary = ['e-greedy'] # 'e-greedy', 'Boltzman'
epsilon_s_e_vary = [('0.9', '0.0')] # , ('0.3', '0.0')]#, ('0.5', '0.1')]
training_frequency = '2' # how often train the model, episode_count % frequency == 0
features = '["discourseAct", "method", "requested", "full", "lastActionInformNone", "offerHappened", "inform_info"]'
max_k = '5'
learning_algorithm = 'dqn'
architecture = 'vanilla'
h1_size = ['130']#, '200', '300']
h2_size = ['50']#, '75', '100']
################################################
### Bayesian estimation parameters
################################################
n_samples = '1' # number of samples for action choice
alpha_divergence = 'False' # use alpha divergence?
alpha = '0.85'
sigma_eps = '0.01' # variance size for sampling epsilon
sigma_prior = '1.5' # prior for variance in KL term
stddev_var_mu = '0.01' # stdv for weights
stddev_var_logsigma = '0.01' # stdv of variance for variance
mean_log_sigma = '0.000001' # prior mean for variance
no_head = '3' # number of heads used for
keep_prob = '0.9' # dropout level
dropout_start = '0.2' # concrete dropout level
################################################
### ACER parameters
################################################
beta = '0.95'
delta = '1.0'
is_threshold = '5.0'
train_iters_per_episode = '1'
################################################
### User model and environment model info.
################################################
usenewgoalscenarios = 'True'
sampledialogueprobs = 'True'
old_style_parameter_sampling = 'True' # for bdqn True
confusionmodel = 'RandomConfusions'
confscorer = 'additive' # 'additive'
nbestgeneratormodel = 'SampledNBestGenerator'
nbestsize = '3'
patience = '3'
penaliseallturns = 'True'
wrongvenuepenalty = '0'
notmentionedvaluepenalty = '0'
oldstylepatience = 'True'
forcenullpositive = 'False'
runError_vary = ['0']
if domains is 'CamRestaurants':
n_in = '268'
elif domains is 'CamHotels':
n_in = '111'
elif domains is 'SFRestaurants':
n_in = '636'
elif domains is 'SFHotels':
n_in = '438'
elif domains is 'Laptops11':
n_in = '257'
elif domains is 'TV':
n_in = '188'
elif domains is 'Booking':
n_in = '188'
################################################
### GP policy training options
################################################
kernel = 'polysort'
random = 'False'
scale = '3'
ConfigCounter = 0
listFile = open(argv[0], 'w')
runMode = ('train', 'grid')
if argv[1] not in ('train', 'test') or argv[2] not in ('grid', 'own'):
print '\n!!!!! WRONG COMMAND !!!!!\n'
print 'EXAMPLE: python runScript.py list [train|test] [grid|own]\n'
exit(1)
elif argv[1] == 'train':
if argv[2] == 'grid':
runMode = ('train', 'grid')
elif argv[2] == 'own':
runMode = ('train', 'own')
elif argv[1] == 'test':
if argv[2] == 'grid':
runMode = ('test', 'grid')
elif argv[2] == 'own':
runMode = ('test', 'own')
listOutput = '{0: <6}'.format('PARAM') + '\t'
listOutput += '{0: <10}'.format('type') + '\t'
listOutput += '{0: <10}'.format('actor_lr') + '\t'
listOutput += '{0: <10}'.format('critic_lr') + '\t'
listOutput += '{0: <10}'.format('replaytype') + '\t'
listOutput += '{0: <10}'.format('nMini') + '\t'
listOutput += '{0: <10}'.format('capacity') + '\t'
listOutput += '{0: <10}'.format('runError') + '\t'
listFile.write(listOutput + '\n')
for policytype in policytype_vary:
for tau in tau_vary:
for replay_type in replay_type_vary:
for minibatch_size in minibatch_size_vary:
for exploration_type in exploration_type_vary:
for capacity in capacity_vary:
for epsilon_s_e in epsilon_s_e_vary:
epsilon_start, epsilon_end = epsilon_s_e
for h1 in h1_size:
for h2 in h2_size:
for runError in runError_vary:
execDir = repository_path
if policytype == 'gp':
targetDir = 'CamRestaurants_gp_'
elif policytype == 'dqn' or policytype == 'dqn_vanilla':
targetDir = 'CamRestaurants_dqn_'
elif policytype == 'a2c':
targetDir = 'CamRestaurants_a2c_'
elif policytype == 'enac':
targetDir = 'CamRestaurants_enac_'
elif policytype == 'bdqn':
targetDir = 'CamRestaurants_bdqn_'
elif policytype == 'bbqn':
targetDir = 'CamRestaurants_bbqn_'
elif policytype == 'concrete':
targetDir = 'CamRestaurants_concrete_'
elif policytype == 'bootstrapped':
targetDir = 'CamRestaurants_bootstrapped_'
elif policytype == 'dropout':
targetDir = 'CamRestaurants_dropout_'
elif policytype == 'acer':
targetDir = 'CamRestaurants_acer_'
elif policytype == 'a2cis':
targetDir = 'CamRestaurants_a2cis_'
elif policytype == 'tracer':
targetDir = 'CamRestaurants_tracer_'
listOutput = '{0: <10}'.format(targetDir) + '\t'
listOutput += '{0: <10}'.format(policytype) + '\t'
listOutput += '{0: <10}'.format(learning_rate) + '\t'
listOutput += '{0: <10}'.format(replay_type) + '\t'
listOutput += '{0: <10}'.format(minibatch_size) + '\t'
listOutput += '{0: <10}'.format(capacity) + '\t'
listOutput += '{0: <10}'.format(runError) + '\t'
targetDir += 'learning_rate' + learning_rate + '_replay_type' + replay_type + \
'_minibatch_size' + minibatch_size + '_capacity' + capacity + '_runError' + runError
text = config_text(domains, root, seed,
screen_level,
maxturns,
belieftype, useconfreq, policytype, startwithhello,
inpolicyfile, outpolicyfile, learning,
maxiter, gamma, learning_rate, tau, replay_type,
minibatch_size, capacity,
exploration_type, epsilon_start, epsilon_end, n_in,
features, max_k, learning_algorithm, architecture, h1,
h2,
kernel,
random, scale,
usenewgoalscenarios,
nbestsize,
patience,
penaliseallturns,
wrongvenuepenalty,
notmentionedvaluepenalty,
sampledialogueprobs,
save_step,
confscorer,
oldstylepatience,
forcenullpositive,
file_level,
maxinformslots, informmask,informcountaccepted,requestmask, confusionmodel, byemask,
n_samples, alpha_divergence, alpha, sigma_eps, sigma_prior,
stddev_var_mu, stddev_var_logsigma, mean_log_sigma,
nbestgeneratormodel,
delta, beta, is_threshold, train_iters_per_episode, training_frequency,
no_head, keep_prob, dropout_start,
old_style_parameter_sampling)
# run_on_grid(targetDir, execDir, configName, text)
tmpName = 'gRun' + str(ConfigCounter)
run_on_grid(tmpName, step, iter_in_step, test_iter_in_step, parallel, execDir, tmpName, text,
runMode, runError)
listFile.write(tmpName + '\t' + listOutput + '\n')
ConfigCounter += 1
if __name__ == "__main__":
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description='DeepRL parameter search')
parser.add_argument('-s', '--seed', help='set the random seed', required=False, type=str, default="123")
parser.add_argument('-tn', '--train', help='script is set to train policies (default)', action='store_true')
parser.add_argument('-tt', '--test', help='script is set to test/evaluate policies', action='store_true')
parser.add_argument('--own', help='run on local machine (default)', action='store_true')
parser.add_argument('--grid', help='run on grid', action='store_true')
parser.add_argument('-f', '--file', help='the list file', required=False, type=str, default='list')
parser.add_argument('-p', '--pydial', help='the path to pydial', required=False, type=str, default='../../../')
if len(argv) > 0 and not argv[0][0] == '-':
if len(sys.argv) != 5:
parser.print_help()
# print '\n!!!!! WRONG COMMAND !!!!!\n'
# print 'EXAMPLE: python runScript.py list [train|test] [grid|own]\n'
exit(1)
# main(argv)
else:
# parser = argparse.ArgumentParser(description='DeepRL parameter search')
# parser.add_argument('-s', '--seed', help='set the random seed', required=False, type=str, default="123")
# parser.add_argument('-tn', '--train', help='script is set to train policies (default)', action='store_true')
# parser.add_argument('-tt', '--test', help='script is set to test/evaluate policies', action='store_true')
# parser.add_argument('--own', help='run on local machine (default)', action='store_true')
# parser.add_argument('--grid', help='run on grid', action='store_true')
# parser.add_argument('-f', '--file', help='the list file', required=False, type=str, default='list')
# parser.add_argument('-p', '--pydial', help='the path to pydial', required=False, type=str, default='../../../')
args = parser.parse_args()
own = not args.grid
grid = not args.own and args.grid
if own == grid:
pass # issue error with parameter help
train = not args.test
test = not args.train and args.test
if train == test:
pass # issue error with parameter help
pydialpath = os.path.abspath(os.path.join(os.getcwd(),args.pydial))
argv = [args.file, 'test' if test else 'train', 'grid' if grid else 'own', args.seed, pydialpath]
# print argv
main(argv)
# END OF FILE
| [
"drwiner131@gmail.com"
] | drwiner131@gmail.com |
18c49d5a40295776c09bfc78af948a4058b35bf1 | 30aa7375dd22c230fd7f92fe0d0098f1015d910c | /banks/admin.py | 1aff1a7460b917a79eb5b49360ab07d6de829b9a | [] | no_license | malep2007/bank_app | 4e413f058f44706eab6b42218c36fc609c5542f9 | f7192359e4daecbcce18b4f33cb096d28e446c0f | refs/heads/master | 2021-08-08T14:21:34.735130 | 2017-11-08T13:57:32 | 2017-11-08T13:57:32 | 109,974,307 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | from django.contrib import admin
from . models import Bank
class BankAdmin(admin.ModelAdmin):
    """Admin configuration for the Bank model: show name and branch in the list view."""

    # NOTE(review): the original tuple contained 'branch location' (with a space),
    # which is not a valid field name and fails Django's admin system check
    # (admin.E108) at startup. Assuming the model field is 'branch_location' —
    # confirm against banks/models.py.
    list_display = ('bank_name', 'branch_location')


admin.site.register(Bank, BankAdmin)
# Register your models here.
| [
"malep2007@gmail.com"
] | malep2007@gmail.com |
6cb3ca413f3d1a5caea21acc9581a07855acf673 | 6d2ce82c05835dbd45254b9062a8f2ba2e3fc1bf | /doglovetest/wsgi.py | 075fd30a92c2fa3d577686d0580d03b4fb5bfd30 | [] | no_license | devfest-ufrn/DogLove | 3bbd83b07832912a5eefc0ddfbbef8b34f8f0b11 | a95923a9240dd654cbd6b8dfebf30c9812871156 | refs/heads/master | 2021-08-18T21:30:59.995001 | 2017-11-23T23:28:14 | 2017-11-23T23:28:14 | 104,280,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for doglovetest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at this project's settings module before the application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "doglovetest.settings")

# WSGI callable used by application servers (gunicorn, uWSGI, mod_wsgi, ...).
application = get_wsgi_application()
| [
"tiago.ccdbarros@bct.ect.ufrn.br"
] | tiago.ccdbarros@bct.ect.ufrn.br |
f5091359c432f41a979c5f7ea58580f16ea94418 | 3ea8f462bb08176a1352784434ac6c78e88ded5d | /cartloop_assignment/app/models.py | 0416e724754c3d69b6ba59a7fab2dcdfc589bf61 | [] | no_license | eyadnawar/Backend-Chat-App | a1451879095b15b43b2d3eb46258540e7c46a0e5 | b89822a13e3016e67dc7307d59f71e81ad8c94e4 | refs/heads/master | 2023-06-19T17:57:54.055926 | 2021-07-12T12:43:37 | 2021-07-12T12:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | from django.db import models
# Create your models here.
class Client(models.Model):
    """A chat client (end user)."""

    # Externally supplied integer id used as the primary key (not auto-generated).
    client_id = models.IntegerField(primary_key=True, db_index=True)
    username = models.CharField(max_length=20)
    location = models.CharField(max_length=20)
class Operator(models.Model):
    """A support operator belonging to a store and an operator group."""

    # Externally supplied integer id used as the primary key (not auto-generated).
    operator_id = models.IntegerField(primary_key=True, db_index=True)
    operator_name = models.CharField(max_length=20)
    operator_group = models.CharField(max_length=20)
    # Plain integer reference to a store; no ForeignKey is declared here.
    store_id = models.IntegerField()
class Conversation(models.Model):
    """A conversation between one client and one operator at a store.

    References are stored as plain integer ids (no ForeignKey constraints),
    so referential integrity is the application's responsibility.
    """

    conversation_id = models.IntegerField(primary_key=True, db_index=True)
    store_id = models.IntegerField()
    operator_id = models.IntegerField()
    client_id = models.IntegerField()
    operator_group = models.CharField(max_length=20)
class Chat(models.Model):
    """A single message exchanged within a conversation."""

    # primary_key=True already implies unique and indexed; the extra flags
    # are redundant but harmless.
    chat_id = models.IntegerField(primary_key=True, unique=True, db_index=True)
    conversation_id = models.IntegerField()
    # Message body text.
    payload = models.CharField(max_length=300)
    client_id = models.IntegerField()
    operator_id = models.IntegerField()
    utc_date = models.DateTimeField()
    # Short status code, max 4 characters — meaning not shown here; see callers.
    status = models.CharField(max_length=4)
| [
"efarouknawar@gmail.com"
] | efarouknawar@gmail.com |
c44ddf404c981875be599dd9c76309c34519cb56 | 93358388ca52322b92c835d410decf444d3e717f | /sim/lab4_mcore/MemNetPRTL.py | fec637cb077d639f5da9aa2205b8d7abb9f37526 | [] | no_license | 2php/Multi-stage_Pipeline_Multi-core_CPU | 097e359f423abbc50e6717718a1598fec388fd70 | 26bed2fe4ae9a9d580fe7e16383c128d814dd1eb | refs/heads/master | 2021-04-05T21:01:23.745589 | 2018-02-07T18:01:25 | 2018-02-07T18:01:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,744 | py | #=========================================================================
# MemNetPRTL.py
#=========================================================================
#
# num_reqers (n): num of requester ports, {1,2,..,n}
# num_reqees (m): num of requestee ports, {1,2,..,m}
#
# reqer_req: the memory request port of the requesters -- inport
# reqer_resp: the memory response port of the requesters -- outport
#
# reqee_req: the memory request port of the requestees -- outport
# reqee_resp: the memory response port of the requestees -- inport
#
# Because the request net receives requests from requesters and forward
# them to the requestees, the request port from requester is an inport
# for this module, and the request port to requestee is an outport.
# The response network is the other way around.
#
# +-----------------------------------+
# | MemNet Module |
# | +---------+ |
# | | | |
# | +-----+ | | |
# reqer_req [ 0 ]--|M>| |-N>| req | +-----+ |
# | | U | | |-N>| |-M|->reqee_req [ 0 ]
# reqer_resp[ 0 ]<-|M-| [0] |<N-| | | D | |
# | +-----+ | and |<N-| [0] |<M|<-reqee_resp[ 0 ]
# | +-----+ | | +-----+ |
# reqer_req [ 1 ]--|M>| |-N>| | |
# | | U | | resp | |
# reqer_resp[ 1 ]<-|M-| [1] |<N-| | |
# | +-----+ | | ..... |
# | | network | |
# ...... | ..... | | |
# | | in | +-----+ |
# | +-----+ | the |-N>| |-M|->reqee_req [m-1]
# reqer_req [n-1]--|M>| |-N>| same | | D | |
# | | U | | box |<N-| [0] |<M|<-reqee_resp[m-1]
# reqer_resp[n-1]<-|M-|[n-1]|<N-| | +-----+ |
# | +-----+ | | |
# | | | |
# | +---------+ |
# | M - memory, N - network |
# +-----------------------------------+
#
# This is the architecture of a general memory network. In MemNet, there
# are 4 requesters and 1 requestee (memory port). However, we will let the
# network have 4 ports but only use one to unify the network message
# format.
# Requesters are renamed to cachereqs since the requesters are basically
# caches. The requestee is the memory port.
from pymtl import *
from pclib.ifcs import InValRdyBundle, OutValRdyBundle
from pclib.ifcs import MemMsg, MemReqMsg, MemRespMsg
from lab4_network import BusNetRTL #, RingNetRTL
from MsgAdapters import UpstreamMsgAdapter as UpsAdapter
from MsgAdapters import DownstreamMsgAdapter as DownsAdapter
class MemNetPRTL( Model ):
    """Memory network: 4 cache-side requesters muxed onto 1 memory port.

    Requests from the caches flow through upstream adapters into a request
    bus network, then through downstream adapters out to the main-memory
    port; responses travel the reverse path (see the ASCII diagram above).
    """

    def __init__( s ):

        # Parameters
        num_reqers    = 4 # 4 data caches
        num_reqees    = 1 # 1 memory port
        num_ports     = max( num_reqers, num_reqees ) # We still have 4 ports
        nopaque_nbits = 8
        mopaque_nbits = 8
        addr_nbits    = 32
        data_nbits    = 128 # MemNet deals with 128 bit refill requests

        #---------------------------------------------------------------------
        # Interface
        #---------------------------------------------------------------------
        # Cache-facing request/response bundles (one per port) and the
        # memory-facing bundles on the far side of the networks.
        s.memifc     = MemMsg( mopaque_nbits, addr_nbits, data_nbits )
        s.mainmemifc = MemMsg( mopaque_nbits, addr_nbits, data_nbits )

        s.memreq      = InValRdyBundle [num_ports]( s.memifc.req )
        s.memresp     = OutValRdyBundle[num_ports]( s.memifc.resp )

        s.mainmemreq  = OutValRdyBundle[num_ports]( s.mainmemifc.req )
        s.mainmemresp = InValRdyBundle [num_ports]( s.mainmemifc.resp )

        #---------------------------------------------------------------------
        # Components
        #---------------------------------------------------------------------
        single_reqee = True  # 1 memory port so single reqee
        single_reqer = False # 4 caches so not single reqer

        # Upstream adapters: wrap cache memory messages into network messages.
        s.u_adpt = UpsAdapter[num_ports]( single_reqee,
            mopaque_nbits, addr_nbits, data_nbits, # mem msg parameter
            nopaque_nbits, num_ports )             # net msg parameter

        # One can also use RingNetRTL
        s.reqnet  = BusNetRTL( s.memifc.req.nbits )
        s.respnet = BusNetRTL( s.memifc.resp.nbits )

        # Downstream adapters: unwrap network messages back into memory messages.
        s.d_adpt = DownsAdapter[num_ports]( single_reqer,
            mopaque_nbits, addr_nbits, data_nbits, # mem msg parameter
            nopaque_nbits, num_ports )             # net msg parameter

        #---------------------------------------------------------------------
        # Connections
        #---------------------------------------------------------------------
        # Cache side: cache <-> upstream adapter <-> request/response networks.
        for i in xrange( num_ports ):
            s.connect( s.u_adpt[i].src_id, i )

            s.connect( s.memreq[i],         s.u_adpt[i].memreq )
            s.connect( s.u_adpt[i].netreq,  s.reqnet.in_[i] )

            s.connect( s.memresp[i],        s.u_adpt[i].memresp )
            s.connect( s.u_adpt[i].netresp, s.respnet.out[i] )

        # Memory side: networks <-> downstream adapter <-> main memory port.
        for i in xrange( num_ports ):
            s.connect( s.d_adpt[i].src_id, i )

            s.connect( s.reqnet.out[i],    s.d_adpt[i].netreq )
            s.connect( s.d_adpt[i].memreq, s.mainmemreq[i] )

            s.connect( s.respnet.in_[i],    s.d_adpt[i].netresp )
            s.connect( s.d_adpt[i].memresp, s.mainmemresp[i] )

    def line_trace( s ):
        # Trace the request network, then the response network.
        return s.reqnet.line_trace() + " >>> "+s.respnet.line_trace()
| [
"catching@bu.edu"
] | catching@bu.edu |
20960a8e28e6193bfa60d2c8cb8a62ad7f805264 | 3afb558d8565033a7cae9fb4864c1c8f52b390cc | /code_oldboy/模块/configparse模块/configparse模块.py | ee226f8c9feca4d01c2ed02263dd22c8a765ef5e | [] | no_license | L-IG/PythonProject | d3fdc099f8e0a3102022ff75e9e2eed369faf30b | 943baf33e95273ac37346a8581af3387070bf838 | refs/heads/master | 2020-09-21T22:43:33.461044 | 2020-01-05T11:07:27 | 2020-01-05T11:07:27 | 224,958,671 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,896 | py | '''
作者:lg
日期:2019/11/29
文件描述:
缺陷:
'''
import configparser
# 生成文件
# config = configparser.ConfigParser()
#
# config["DEFAULT"] = {'ServerAliveInterval': '45',
# 'Compression': 'yes',
# 'CompressionLevel': '9',
# 'ForwardX11': 'yes'
# }
#
# config['bitbucket.org'] = {'User': 'hg'}
#
# config['topsecret.server.com'] = {'Host Port': '50022', 'ForwardX11': 'no'}
#
# with open('example.ini', 'w') as configfile:
# config.write(configfile)
# 查找文件
# config = configparser.ConfigParser()
# print(config.sections())
#
# config.read('example.ini')
# print(config.sections())
# # ['bitbucket.org', 'topsecret.server.com']
#
# print('bytebong.com' in config)
# print('bitbucket.org' in config)
#
# print(config['bitbucket.org']['user'])
# print(config['DEFAULT']['Compression'])
# print(config['topsecret.server.com']['ForwardX11'])
#
# print(config['bitbucket.org'])
# # <Section: bitbucket.org> 返回一个对象
#
# for key in config['bitbucket.org']:
# print(key)
#
# print(config.options('bitbucket.org'))
# # ['user', 'serveraliveinterval', 'compression', 'compressionlevel', 'forwardx11']
# # 同for循环,找到'bitbucket.org'下所有键
#
# print(config.items('bitbucket.org'))
# # [('serveraliveinterval', '45'), ('compression', 'yes'), ('compressionlevel', '9'), ('forwardx11', 'yes'), ('user', 'hg')]
#
# print(config.get('bitbucket.org','compression'))
# 增删改操作
# Add / remove / modify operations on an existing INI file.
config = configparser.ConfigParser()
config.read('example.ini')

config.add_section('yuan')
config.remove_section('bitbucket.org')
config.remove_option('topsecret.server.com', "forwardx11")
config.set('topsecret.server.com', 'k1', '1111111')
config.set('yuan', 'k2', '22222')

# Everything so far lives only in memory; it must be written to a file to
# persist. Use a context manager so the handle is flushed and closed —
# the original `config.write(open('new2.ini', "w"))` leaked the file object.
with open('new2.ini', 'w') as configfile:
    config.write(configfile)
| [
"2990875927@qq.com"
] | 2990875927@qq.com |
8dd6db002b7cfee421083e2f1a14012671d69f19 | 3941f6b431ccb00ab75f19c52e40e5dad2e98b9b | /Dasymetric/dasym_tables.py | 41bc20d47b5ee4b9da2c2f6b66632d0c1d6ba20e | [
"Apache-2.0"
] | permissive | scw/global-threats-model | 70c375c1633e8578f1e41f278b443f1501ceb0ec | 11caa662373c5dbfbb08bb0947f3dd5eedc0b4e0 | refs/heads/master | 2016-09-05T11:25:13.056352 | 2013-08-22T22:10:13 | 2013-08-22T22:10:13 | 3,566,652 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,033 | py | # ---------------------------------------------------------------------------
# dasym_tables.py
# Created on: Wed Jan 11 2006
# Written by: Matthew Perry
# Usage: See the "script arguments" section
# ---------------------------------------------------------------------------
#================================================================#
# Prepare Environment
# Import system modules
import sys, string, os, win32com.client
# Create the Geoprocessor object (ArcGIS 9.x COM dispatch interface).
gp = win32com.client.Dispatch("esriGeoprocessing.GpDispatch.1")
# Set the necessary product code
gp.SetProduct("ArcInfo")
# Check out any necessary licenses (Spatial Analyst is required below).
gp.CheckOutExtension("spatial")
# Load required toolboxes...
gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Spatial Analyst Tools.tbx")
gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Conversion Tools.tbx")
gp.AddToolbox("C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Data Management Tools.tbx")
#----------------------------------------#
# Script Arguments
Temp_Workspace = "C:\\WorkSpace\\temp"

# Read the eleven I/O paths from the command line; if any are missing, fall
# back to the hard-coded defaults below. Only IndexError (too few arguments)
# is expected here — the original bare `except:` silently swallowed every
# error, which could mask genuine bugs in the try body.
try:
    # INPUTS
    Spatial_Units_Raster = sys.argv[1]               # raster containing country code
    Attribute_Lookup_Table = sys.argv[2]             # dbf containing countries and all attributes of interest
    Attribute_Lookupt_Table_Join_Item = sys.argv[3]  # country code
    Attribute_Lookup_Table_Value_Item = sys.argv[4]  # the variable of interest
    Aux_Raster = sys.argv[5]                         # landcover
    Weighting_Table = sys.argv[6]                    # Table relating land cover classes to relative weights
    Weighting_Table_Join_Field = sys.argv[7]         # column with landcover codes
    Weighting_Table_Weight_Field = sys.argv[8]       # column with relative weights
    # OUTPUTS
    Combined_Raster = sys.argv[9]                    # output of aml, input to gp script
    Combined_Raster_Table = sys.argv[10]             # output of aml, input to gp script
    Output_Raster = sys.argv[11]                     # the dasymetric map
except IndexError:
    # Defaults for an interactive / argument-less run.
    # INPUTS
    Spatial_Units_Raster = "C:\\WorkSpace\\FAO\\dasym\\units\\units_as"
    Attribute_Lookup_Table = "C:\\WorkSpace\\FAO\\dasym\\lookups\\faocia.dbf"
    Attribute_Lookupt_Table_Join_Item = "CODE"
    Attribute_Lookup_Table_Value_Item = "FERT"
    Aux_Raster = "C:\\WorkSpace\\clipped_rusle_inputs\\as_igbp"
    Weighting_Table = "C:\\WorkSpace\\FAO\\dasym\\weights\\C.dbf"
    Weighting_Table_Join_Field = "LANDCOVER"
    Weighting_Table_Weight_Field = "WEIGHT"
    # OUTPUTS
    Combined_Raster = Temp_Workspace + "\\ctpc"
    Combined_Raster_Table = Temp_Workspace + "\\ctpc.dbf"
    Output_Raster = "C:\\WorkSpace\\FAO\\dasym\\outputs\\as_fertC"
#--------------------------------#
# Constants
# Names of intermediate artifacts created in the temp workspace.
Joined_Output_Table_Name = "combine_weight_join"
Joined_Output_Table = Temp_Workspace + "\\" + Joined_Output_Table_Name + ".dbf"
# NOTE(review): Combine_Reclass is defined but never referenced below — confirm
# whether it is dead or used by the AML.
Combine_Reclass = Temp_Workspace + "\\combine2_rcl"
Temp_Raster = Temp_Workspace + "\\temp_dasy"
Combined_Raster_Table_Variable_Field = "VOI" # Should be constant
#================================================================#
# Main
#---------------------------------#
# Call the AML as the first step
# b/c ArcGIS can't handle raster attribute tables
# Build the Arc/Info Workstation command line: &run dasym_combine.aml with all
# input/output paths appended as positional arguments, then run it blocking.
amlPath = os.path.dirname(sys.argv[0]) + "\\"
sCommandLine = "arc.exe \"&run\" \"" + amlPath + "dasym_combine.aml \" "
sCommandLine += Spatial_Units_Raster + " " + Attribute_Lookup_Table + " "
sCommandLine += Attribute_Lookupt_Table_Join_Item + " " + Attribute_Lookup_Table_Value_Item + " "
sCommandLine += Aux_Raster + " "
# NOTE(review): the trailing "'" has no matching opening quote — it looks
# accidental; confirm against dasym_combine.aml's expected argument syntax.
sCommandLine += Combined_Raster + " " + Combined_Raster_Table + " " + Temp_Workspace + "'"
os.system(sCommandLine)
# gp.AddMessage(" ****** Combined Layers")
print " ****** Combined Layers"
#------------------------------------------------#
# Determine the column names based on user input
# Derive dBASE column names from the user-supplied paths.
def _stem(path):
    # File name portion before the first dot (not just the last extension).
    return os.path.basename(path).split(".")[0]

combinedPrefix = _stem(Combined_Raster_Table)
weightedPrefix = _stem(Weighting_Table)
auxprefix = _stem(Aux_Raster)[:10]

# dBASE limits field names to 10 characters, hence the [:10] truncations.
Variable_Field = (combinedPrefix + "_VOI")[:10]                                   # e.g. "ctfc_VOI"
Weight_Field = (weightedPrefix + "_" + Weighting_Table_Weight_Field)[:10]         # e.g. "TFC_WEIGHT"
Count_Field = (combinedPrefix + "_COUNT")[:10]
Value_Field = (combinedPrefix + "_VALUE")[:10]
Combined_Raster_Table_Join_Field = auxprefix.upper()                              # e.g. "LANDCOVER2"
# Main geoprocessing sequence; any failure prints the geoprocessor's message
# log and aborts the script.
try:
    #------------------------------------------------#
    # Join Tables and create new output table
    gp.MakeTableView_management(Combined_Raster_Table, "ctable")
    gp.AddJoin_management("ctable", Combined_Raster_Table_Join_Field, Weighting_Table, Weighting_Table_Join_Field, "KEEP_ALL")
    gp.TableToTable_conversion("ctable", Temp_Workspace, Joined_Output_Table_Name)
    print " ****** Created joined table"
    #------------------------------------------------#
    # Add fields
    gp.AddField_management(Joined_Output_Table, "totalpc", "DOUBLE", "", "", "", "", "NON_NULLABLE", "NON_REQUIRED", "")
    gp.AddField_management(Joined_Output_Table, "valuepp", "LONG", "", "", "", "", "NON_NULLABLE", "NON_REQUIRED", "")
    gp.MakeTableView_management(Joined_Output_Table, "jtable")
    print " ****** Added Fields and reloaded table view"
    #------------------------------------------------#
    # Calculate Total of Variable Per Auxillary Data Class
    gp.CalculateField_management("jtable", "totalpc", "[" + Variable_Field + "] * [" + Weight_Field + "]")
    # Calculate Value of variable per pixel
    # (scaled by 10000 so it fits in the LONG integer field; undone below)
    gp.CalculateField_management("jtable", "valuepp", "int( [totalpc] * 10000.0 / [" + Count_Field + "]) ")
    print " ****** Calculated New Fields"
    #------------------------------------------------#
    # Reclass by Table...
    gp.ReclassByTable_sa(Combined_Raster, "jtable", Value_Field , Value_Field, "valuepp", Temp_Raster , "DATA")
    print " ****** Reclassed Raster"
    #------------------------------------------------#
    # Scale Raster to original units (reverses the 10000x scaling above)
    Map_Algebra_expression = Temp_Raster + " / 10000.0"
    gp.SingleOutputMapAlgebra_sa(Map_Algebra_expression, Output_Raster)
    print " ****** Scaled raster"
except:
    # Geoprocessor error path: surface ArcGIS's own diagnostics, then abort.
    print gp.GetMessages()
    sys.exit(1)
| [
"perrygeo@gmail.com"
] | perrygeo@gmail.com |
cda1cec5f75f855ac1366790d5d9dd982190b0bb | 86922bfc81a7790f2d516bec16d4c23a60b52af3 | /demo_email.py | 5de0743a3d8e65d602dc81e259d18d6c9c2859bc | [] | no_license | XGongVentes/email_suggest | 8065d6c555e36edcee74a300b25da5af5445f367 | 95b31dd32e76a4c5c53a1b1fef806e4a58c8c295 | refs/heads/master | 2021-01-22T04:48:53.660476 | 2017-09-03T14:59:11 | 2017-09-03T14:59:11 | 102,101,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,360 | py | from gevent import monkey; monkey.patch_all()
from bottle import route, run, request,template,redirect
import email_utils
import name_parse
import email_suggest
import json
import time
import verifyemail
# Module-level mutable state shared across requests (single-process demo app).
result = {}
verified = {}
vcodes = []

# Reload previously recorded verification results from disk at startup:
# one JSON array [email, vcode, rank] per line.
try:
    with open('test_results.jsonr','r') as f:
        _tmp = [json.loads(z) for z in f.read().strip().split('\n')]
    test_results = {z[0]:[z[1],z[2],k] for k,z in enumerate(_tmp)}
except Exception as err:
    # Best effort: missing/corrupt file just means an empty history.
    print err
    test_results = {}

# Append-only log of new results; buffer size 0 = unbuffered writes (Python 2).
ftest = open('test_results.jsonr','a',0)
def update_stats(results):
    """Summarize verification results.

    `results` maps email -> [vcode, rank, order], where vcode == 1 means the
    address verified successfully and rank is the suggestion's position.

    Returns a dict with:
      vnum     -- number of recorded results
      accurate -- percentage of results with vcode == 1 (-1 when no results)
      score    -- mean rank of the successful results (-1 when there are none)
    """
    if results:
        hits = [1 if entry[0] == 1 else 0 for entry in results.values()]
        ranks = [entry[1] for entry in results.values() if entry[0] == 1]
        vnum = len(results)
        accurate = round(sum(hits) * 1.0 / len(hits), 3) * 100
        # Guard against ZeroDivisionError when nothing verified successfully;
        # the original divided by len(ranks) unconditionally and crashed
        # whenever every recorded vcode was != 1.
        score = round(sum(ranks) * 1.0 / len(ranks), 2) if ranks else -1
    else:
        vnum, accurate, score = 0, -1, -1
    return {'vnum': vnum, 'accurate': accurate, 'score': score}
def get_verify(email):
    """Send a verification email and poll its delivery status.

    Returns: 1 delivered, 0 failed, -1 polling timed out (~20s), -2 any
    other/unknown event reported by the provider.
    """
    r = verifyemail.send_email(email)
    nwait = 0
    while True:
        time.sleep(2)
        nwait += 1
        r = verifyemail.check_status(email)
        # The status payload may not exist yet; keep polling until it does.
        try:
            _status = r.json()['items'][0]['event']
            break
        except:
            pass
        print nwait
        if nwait >=10:
            _status = 'timeout'
            break
    print _status
    vcode = 1 if _status=='delivered' else 0 if _status=='failed' else -1 if _status=='timeout' else -2
    return vcode
@route('/email_suggest', method='get')
def get_start():
    """Render the landing page with fresh form fields and current test stats."""
    global result
    global test_results
    test_stats = update_stats(test_results)
    # Reset the shared page state to the initial (stage 0) form.
    result.update({'stage':0,'name':'', 'domain':'','verification':{},'test_results':test_results,'test_stats':test_stats})
    return template('demo_email.tpl',**result)
@route('/email_suggest', method='post')
def get_suggest():
    """Handle both form actions on the page.

    Two POST paths:
      * 'suggest' button: generate email-format suggestions for name+domain.
      * 'verify_<rank>_<email>' / 'known_...' / 'false_...' buttons: record a
        verification result for one suggested address.
    """
    global result
    global test_results
    global vcodes
    if request.forms.get('suggest'):
        name = request.forms.get('name')
        domain = request.forms.get('domain')
        test_stats = update_stats(test_results)
        if name and domain:
            nname = name_parse.normalize_name(name)
            suggests,total = email_suggest.email_suggest(name,domain)
            # Reshape to (probability-as-percent-string, pattern, [addresses]).
            suggests = [(str(round(z[1],3)*100)+'%',z[2], [zz for zz in z[0]]) for z in suggests]
            # One verification slot per suggested address, initially unknown.
            mm = sum([len(z[2]) for z in suggests])
            vcodes = [None]*mm
            print vcodes
            print suggests
            print name
            result.update({'stage':1, 'verification':{}, 'suggests':suggests, 'vcodes': vcodes, 'total':total,'domain':domain, 'name':name,
                           'test_results':test_results,'nname':nname, 'test_stats':test_stats})
            return template('demo_email.tpl', **result)
        else:
            # Missing input: bounce back to the blank form.
            redirect('http://13.76.171.208:8080/email_suggest')
    else:
        # Find which verification button was pressed; its key encodes
        # "<action>_<rank>_<email>".
        keys = request.forms.keys()
        bverify = [z for z in keys if z.startswith('verify_')]
        bknown = [z for z in keys if z.startswith('known_')]
        bfalse = [z for z in keys if z.startswith('false_')]
        print keys, bverify, bknown, bfalse
        if bknown or bfalse or bverify:
            key = bverify[0] if bverify else bknown[0] if bknown else bfalse[0]
            rank, email = key.split('_',1)[1].split('_',1)
            rank = int(rank)
            # Reuse cached results; otherwise known/false are recorded
            # directly and 'verify' triggers a live delivery check.
            if email in test_results:
                if bverify:
                    vcode = test_results[email][0]
                else:
                    vcode = 1 if bknown else 0
            else:
                vcode = 1 if bknown else 0 if bfalse else get_verify(email)
            # Persist the result (append-only JSON-lines log), then refresh state.
            ftest.write(json.dumps((email, vcode, rank)))
            ftest.write('\n')
            test_results.update({email:[vcode, rank, len(test_results)+1]})
            vcodes[rank-1] = vcode
            print vcodes
            test_stats = update_stats(test_results)
            result.update({'stage':1, 'vcodes':vcodes, 'verification':{'cemail':email, 'vcode':vcode, 'rank':rank}, 'test_stats':test_stats, 'test_results':test_results})
            return template('demo_email.tpl',**result)
        else:
            redirect('http://13.76.171.208:8080/email_suggest')
            # return template('demo_email.tpl',**result)
# Serve the demo app on localhost:8080 using the gevent worker (blocking call).
run(host='localhost',port=8080,server='gevent',debug=True)
| [
"xiaofeng@leadbook.com"
] | xiaofeng@leadbook.com |
3538ed16c8ab27eb7a661ab38a1775640bb2d91b | 9ed8791a8eee409aee38f385687575d6edef7d45 | /run.py | de19931efa22aa0f4211df77fa2ea8397dc777f4 | [] | no_license | Alberto-Vivar/app_store_sales_check | 0fba2c865f7510656dd88d962183675f8133c02a | 5b4acfef44430b59270269863d0fd38e48a89c97 | refs/heads/master | 2023-02-19T18:59:33.057531 | 2022-12-16T10:01:21 | 2022-12-16T10:01:21 | 228,256,291 | 0 | 0 | null | 2023-02-08T00:59:47 | 2019-12-15T21:35:31 | Python | UTF-8 | Python | false | false | 933 | py | import sys
from appstoreconnect_library import request_maker
import datetime
# App Store Connect API credentials — placeholders; replace with real values
# before running.
KEY_IDENTIFIER = 'YOUR_KEY_IDENTIFIER'
ISSUER = 'YOUR_ISSUER_IDENTIFIER'
VENDOR_NUMBER = 'YOUR_APP_VENDOR_NUMBER'
# The input arguments of this script are:
# 1.- The date to request the daily report.
# Entry point. Single argument: the date of the daily report ("YYYY-MM-DD").
if __name__ == '__main__':
    # Guard clause: a date argument is mandatory.
    if len(sys.argv) == 1:
        sys.exit('The program should be called with one parameter, the date. This should be formatted as "YYYY-MM-DD".')
    report_date = sys.argv[1]
    # Validate the format up front; abort with the parse error otherwise.
    try:
        datetime.datetime.fromisoformat(report_date)
    except ValueError as v:
        sys.exit('The date is not properly entered: {}.'.format(v))
    print(
        request_maker.pull_sales_report(
            key_identifier=KEY_IDENTIFIER,
            issuer=ISSUER,
            vendor_number=VENDOR_NUMBER,
            requested_date=sys.argv[1]
        )
    )
| [
"Alberto-Vivar@users.noreply.github.com"
] | Alberto-Vivar@users.noreply.github.com |
5d95d043ddb3d7c2d7618dcf77409373afffc1ec | 0eca9211fc805b6a00eec2caba429e7b06bf094e | /bcsproject/mainPage/filters.py | 0e5d89fabeeef7133f9114d570eeac9139210bc1 | [] | no_license | hungryhost/bcs_test_app | ff4ee7bb10cfdcf84f15ed708c346b5234f02602 | 3dec97288a1fe82145eba18578c1dc499959f86e | refs/heads/main | 2023-06-23T06:30:38.348132 | 2021-07-15T12:56:03 | 2021-07-15T12:56:03 | 386,146,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | import django_filters
from .models import Block
from django import forms
class RequestFilter(django_filters.FilterSet):
    """Filter Block records by the calendar date of their ISO timestamp."""

    # Compare on the date part only via the __date lookup.
    iso_timestamp = django_filters.DateTimeFilter('iso_timestamp__date')

    class Meta:
        model = Block
        fields = [
            'iso_timestamp'
        ]
| [
"yuiborodin@miem.hse.ru"
] | yuiborodin@miem.hse.ru |
c77e6da35556ae2a00c17b13e382f4aa3e3b0ce3 | a8bf45a661641abf7391b3827b7e220d43c44a4c | /SP-Hashing/WINNER_OF_THE_ELECTION.py | 8fd5784db17c6d3978055438134003f3866df054 | [] | no_license | taurus05/gfg | 45b1c534b26713697a3fbe87de8a799b2ace1c96 | 235e320a050b765cb95636dad15ec655e83dd468 | refs/heads/master | 2020-03-27T07:52:38.578713 | 2018-10-11T18:53:43 | 2018-10-11T18:53:43 | 146,200,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from collections import defaultdict
t = int(input())
for i in range(t):
n = int(input())
d = defaultdict(int)
s = list(input().split())
for i in s:
d[i] += 1
s = sorted(d.items(),key= lambda x : (-x[1],x[0]))
print(*s[0])
| [
"vaibhavrocks0501@gmail.com"
] | vaibhavrocks0501@gmail.com |
0ad73be05ea4a42a3b2118023282236427d3145d | 6a95112805b64322953429270a305d01fef3faea | /dist/weewx-4.3.0/examples/stats.py | 86a1e5c5b193afe5fb375e4eef30098d3dbc84b2 | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | tomdotorg/docker-weewx | c6d59dc492a9e53f3bc898f7b9f593717092d72c | 7085654f455d39b06acc688738fde27e1f78ad1e | refs/heads/main | 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 | Apache-2.0 | 2022-10-19T23:46:26 | 2016-03-17T11:39:29 | Dockerfile | UTF-8 | Python | false | false | 4,052 | py | # Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com>
# See the file LICENSE.txt for your rights.
"""Example of how to extend the search list used by the Cheetah generator.
*******************************************************************************
This search list extension offers two extra tags:
'alltime': All time statistics.
For example, "what is the all time high temperature?"
'seven_day': Statistics for the last seven days.
That is, since midnight seven days ago.
*******************************************************************************
To use this search list extension:
1) Copy this file to the user directory. See https://bit.ly/33YHsqX for where your user
directory is located.
2) Modify the option search_list in the skin.conf configuration file, adding
the name of this extension. When you're done, it will look something like
this:
[CheetahGenerator]
search_list_extensions = user.stats.MyStats
You can then use tags such as $alltime.outTemp.max for the all-time max
temperature, or $seven_day.rain.sum for the total rainfall in the last
seven days.
*******************************************************************************
"""
import datetime
import time
from weewx.cheetahgenerator import SearchList
from weewx.tags import TimespanBinder
from weeutil.weeutil import TimeSpan
class MyStats(SearchList):                                                  # 1

    def __init__(self, generator):                                          # 2
        SearchList.__init__(self, generator)

    def get_extension_list(self, timespan, db_lookup):                      # 3
        """Returns a search list extension with two additions.

        Parameters:
          timespan: An instance of weeutil.weeutil.TimeSpan. This will
                    hold the start and stop times of the domain of
                    valid times.

          db_lookup: This is a function that, given a data binding
                     as its only parameter, will return a database manager
                     object.
        """
        # First, create TimespanBinder object for all time. This one is easy
        # because the object timespan already holds all valid times to be
        # used in the report.
        all_stats = TimespanBinder(timespan,
                                   db_lookup,
                                   context='year',
                                   formatter=self.generator.formatter,
                                   converter=self.generator.converter,
                                   skin_dict=self.generator.skin_dict)      # 4

        # Now get a TimespanBinder object for the last seven days. This one we
        # will have to calculate. First, calculate the time at midnight, seven
        # days ago. The variable week_dt will be an instance of datetime.date.
        week_dt = datetime.date.fromtimestamp(timespan.stop) \
            - datetime.timedelta(weeks=1)                                   # 5

        # Convert it to unix epoch time (local midnight, per time.mktime):
        week_ts = time.mktime(week_dt.timetuple())                          # 6

        # Form a TimespanBinder object, using the time span we just
        # calculated:
        seven_day_stats = TimespanBinder(TimeSpan(week_ts, timespan.stop),
                                         db_lookup,
                                         context='week',
                                         formatter=self.generator.formatter,
                                         converter=self.generator.converter,
                                         skin_dict=self.generator.skin_dict)  # 7

        # Now create a small dictionary with keys 'alltime' and 'seven_day':
        search_list_extension = {'alltime'   : all_stats,
                                 'seven_day' : seven_day_stats}             # 8

        # Finally, return our extension as a list:
        return [search_list_extension]                                      # 9
| [
"tom@tom.org"
] | tom@tom.org |
08cd007029079ebf8a6ac3ee9f7c8802a04e7b02 | b4162a7937afb27db824289bfa1174330a58e4a0 | /gal/migrations/0002_auto__add_image.py | 89e6769656c95ce9ec8bf2da3085b777f9d75e9a | [] | no_license | zoranzaric/django-gal | 962164e54e62820e5490f797f3ea905e683ae655 | bcef433a2c38355a06f36c5bf7fbc2f4c73378eb | refs/heads/master | 2021-01-01T15:18:13.706953 | 2013-05-08T15:05:16 | 2013-05-08T15:05:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the gal_image table for the Image model."""

    def forwards(self, orm):
        # Adding model 'Image'
        db.create_table('gal_image', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('filename', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('gal', ['Image'])

    def backwards(self, orm):
        # Deleting model 'Image'
        db.delete_table('gal_image')

    # Frozen model definitions used by South's ORM; auto-generated — do not
    # edit by hand.
    models = {
        'gal.image': {
            'Meta': {'object_name': 'Image'},
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }
complete_apps = ['gal'] | [
"zz@zoranzaric.de"
] | zz@zoranzaric.de |
720eac545b43b93784ed7f35718f54ede04f9117 | 50eec156a6591ac97597c153049bea7003536441 | /11 - Collections/ranges.py | 7efcbe65ebcb0150ddcebcba8e23de27c7d379bb | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | mfigand/start_with_python | 7b6548ae134b50a9ddefaf595c629934944cb088 | 5dc952417781e980fe61af98fa602acb5f14c991 | refs/heads/master | 2021-01-08T00:18:11.334425 | 2020-02-20T10:53:29 | 2020-02-20T10:53:29 | 241,860,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | names = ['Susan', 'Christopher', 'Bill']
presenters = names[:2] # Get the first two items
# Starting index and number of items to retrieve
print(names)
print(presenters)
| [
"m.figand@gmail.com"
] | m.figand@gmail.com |
d18bba0a105578fc5d84c9a8df9be9a20de78b77 | 7b99f1fd359ad0f6a45ce596ef3c6319d7ba4eb1 | /validation_kmers.py | 8e0ad4d5b3bd8b1191c05eea286b10886baf32ba | [] | no_license | farid7/bioinformatica | 9a5a645372304d106fd283666ec93c64a24692b8 | 1e9ce03795c6913f1321660cf00f2e7ea264e96d | refs/heads/master | 2021-01-20T14:03:16.826483 | 2017-03-02T03:42:01 | 2017-03-02T03:42:01 | 82,733,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | from sys import argv
from collections import Counter,defaultdict
import numpy as np
window = 10
misses = 2
chain = list('CACAGTAGGCGCCGGCACACACAGCCCCGGGCCCCGGGCCGCCCCGGGCCGGCGGCCGCCGGCGCCGGCACACCGGCACAGCCGTACCGGCACAGTAGTACCGGCCGGCCGGCACACCGGCACACCGGGTACACACCGGGGCGCACACACAGGCGGGCGCCGGGCCCCGGGCCGTACCGGGCCGCCGGCGGCCCACAGGCGCCGGCACAGTACCGGCACACACAGTAGCCCACACACAGGCGGGCGGTAGCCGGCGCACACACACACAGTAGGCGCACAGCCGCCCACACACACCGGCCGGCCGGCACAGGCGGGCGGGCGCACACACACCGGCACAGTAGTAGGCGGCCGGCGCACAGCC')
#Smoothing de Lindstone
def pr(x, cond, N, l=1):
    """Lidstone-smoothed probability estimate: (x + l) / (cond + l*N)."""
    numerator = x + l
    denominator = cond + l * N
    return numerator / denominator
def hammingDistance(a, b):
    """Number of positions where a and b differ.

    Iterates over len(a) positions, so b must be at least as long as a.
    """
    return sum(1 for i in range(len(a)) if a[i] != b[i])
def base2num(x):
    """Map a nucleotide letter ('A'/'T'/'C'/'G') to its integer code."""
    mapping = {'A': 0, 'T': 1, 'C': 2, 'G': 3}
    return mapping[x]
# Base -> index mapping (duplicates base2num above).
bases = {'A':0, 'T':1, 'C':2, 'G':3}
n = len(chain)
n_bases = len(bases)

# Observation counts: how often each base occurs in the chain.
obs = np.zeros(n_bases)
for i in chain:
    obs[bases[i]] += 1

# Smoothed unigram distribution over bases.
nobs = np.zeros(n_bases)
for i in xrange(n_bases):
    nobs[i] = pr(obs[i], sum(obs), n_bases)

# Transition counts: tally adjacent base pairs from the chain.
tranx = np.zeros((4,4))
chains = Counter(zip(chain,chain[1:]))
for (t,t_ant), c_ws in chains.iteritems():
    # (t, t_ant) is a (current, next) pair from zip(chain, chain[1:]).
    tranx[bases[t], bases[t_ant]] = c_ws

# Row-normalize with smoothing so each row is a probability distribution.
ntranx = np.zeros((4,4))
for i in xrange(4):
    aux = sum(tranx[i,:])
    cond = n_bases
    if (aux != 0):
        for j in xrange(4):
            ntranx[i,j] = pr(tranx[i,j], aux, cond)

print obs
print nobs
print tranx
print ntranx
########################################################
# Monte Carlo search: sample `iteraciones` candidate k-mers from the learned
# first-order Markov model and count, for each candidate, how many windows of
# the chain it matches within `misses` mismatches.
d = dict()
iteraciones = 5000
contador = 0
bases1 = list('ATCG')
while iteraciones > contador:
    # Sample a k-mer: first base from the unigram distribution, subsequent
    # bases from the transition row conditioned on the previous base.
    prop = ['A']*window
    for j in range(window):  # range instead of Python-2-only xrange
        if j == 0:
            aux = np.random.choice(4, p = nobs)
        else:
            aux = np.random.choice(4, p=ntranx[prev,:])
        prev = aux
        prop[j] = bases1[aux]
    # Count chain windows within `misses` Hamming distance of the candidate.
    matches = 0
    for i in range(n - window+1):
        aux = chain[i:i+window]
        if hammingDistance(aux, list(prop)) <= misses:
            matches += 1
    temp = ''.join(prop)
    # The original's `if temp in d and matches > 0 ... elif matches > 0`
    # branches had identical bodies; a single condition is equivalent.
    if matches > 0:
        d[temp] = matches
    contador += 1
# Sort ascending by match count (tuples sort by first element) and report.
d_view = [ (v,k) for k,v in d.items() ]  # .items() works on Python 2 and 3
d_view.sort()
for v,k in d_view:
    print("%s: %d " % (k,v))
| [
"diraf_thechild@hotmail.com"
] | diraf_thechild@hotmail.com |
a14afa187d4f50330213a5ff213bba44714005c5 | b97c4720b667dcdcfbfc114c051bca39c2cf3fe9 | /Algorithm/maximum_subarray.py | 78056c5bf7b49d97b6699d5bbc9bbfc59703fdc0 | [] | no_license | 8luebottle/DataStructure-N-Algorithm | 595b03413720def4827e6012a30af19d20cb3358 | 69b933c36d79b6cc5a0936db70033538f46ec378 | refs/heads/master | 2022-11-17T22:40:33.563344 | 2020-07-11T09:03:26 | 2020-07-11T09:03:26 | 219,773,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | def max_sub_array(nums):
    # All-negative input: the best subarray is the single largest element.
    # (The Kadane loop below clamps at 0, which is only correct when a
    # non-negative running sum is reachable.)
    if max(nums) < 0:
        return max(nums)
    # Kadane's algorithm: local_max is the best sum of a subarray ending at
    # the current element (clamped at 0), global_max the best seen anywhere.
    local_max, global_max = 0, 0
    for num in nums:
        local_max = max(0, local_max + num)
        global_max = max(global_max, local_max)
    return global_max
| [
"itbiz.irs@gmail.com"
] | itbiz.irs@gmail.com |
5c904ea926f69d8697c460dc95cba06e53b9cb19 | 2dd28b1ab2bd56f55e457c4b7c1f5f96856a795d | /core/urls.py | 46b2a95a22acdbb19ee9f9fadf92b3d6d35223b6 | [] | no_license | Aldo-lima/testeconfirmar | e34b2d37133262d331a755ffe9a6f339fa263c9c | 2da947237f7c5b7724ef8890642fac679b7f8229 | refs/heads/main | 2023-08-17T10:53:32.013554 | 2021-09-16T15:02:24 | 2021-09-16T15:02:24 | 407,159,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | from .views import home
from django.urls import path
urlpatterns = [
path('', home, name='home_core'),
] | [
"57098697+Aldo-lima@users.noreply.github.com"
] | 57098697+Aldo-lima@users.noreply.github.com |
f51558bfe5192cb59b9736a74ce3591e50b861b9 | d8b201ba6bf57db0101d88836429bbcb3a10b857 | /Math/TriangleQuest.py | c1a6cd2a1b0e05a20a09b0582cb16f0e25c80188 | [
"MIT"
] | permissive | MaxCodeXTC/PythonHackerRankSolutions | 32ad41df3fbd33f8651cdc5099c8ec3d37d9bc17 | 987618b61b71fe5e9a40275fb348476657bbea57 | refs/heads/master | 2022-06-28T06:00:19.126751 | 2020-05-07T09:23:37 | 2020-05-07T09:23:37 | 262,471,271 | 1 | 0 | null | 2020-05-09T02:24:11 | 2020-05-09T02:24:10 | null | UTF-8 | Python | false | false | 127 | py | '''
Title : Triangle Quest
Subdomain : Math
Domain : Python
Author : codeperfectplus
Created : 17 January 2020
'''
| [
"54245038+perfect104@users.noreply.github.com"
] | 54245038+perfect104@users.noreply.github.com |
c5494429aada5dcdccc2bbc0b645c615689a2219 | 22b431be09922ad767d5fcd6b0fa1bb9eb2cc4d3 | /split.py | cf219cdfd9174b3787f639a4d1182655d2cef5ba | [] | no_license | gsy/leetcode | b2de916cc5e01eea8b7d026c7a935b4db1164e26 | 3b13a02f9c8273f9794a57b948d2655792707f37 | refs/heads/master | 2021-01-10T10:58:07.561282 | 2020-05-25T12:47:09 | 2020-05-25T12:47:09 | 48,165,088 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | # coding: utf-8
def car(s, sep):
    """
    Return the leading piece of ``s`` up to (not including) the first
    occurrence of the delimiter character ``sep``.

    :param s: string to be split
    :param sep: delimiter character
    :return: first split string
    >>> car("", ',')
    ''
    >>> car("a", ',')
    'a'
    >>> car("a,b", ',')
    'a'
    >>> car(",", ',')
    ''
    """
    end = 0
    while end < len(s) and s[end] != sep:
        end += 1
    return s[:end]
def cdr(s, sep):
    """
    Return the remainder of ``s`` after the first occurrence of the
    delimiter character ``sep`` (empty string when there is no delimiter).

    :param s: string to be split
    :param sep: delimiter character
    :return: everything after the first ``sep``
    >>> cdr("a", ',')
    ''
    >>> cdr("a,", ',')
    ''
    >>> cdr("a,b", ',')
    'b'
    >>> cdr("a,b,", ',')
    'b,'
    >>> cdr("a,b,c", ',')
    'b,c'
    """
    pos = 0
    while pos < len(s) and s[pos] != sep:
        pos += 1
    return s[pos + 1:] if pos < len(s) else ""
def split(s, sep):
    """
    Return a list of the words in the string, using sep as the delimiter
    character. A single trailing delimiter does not produce a trailing
    empty word.

    :param s: string to split
    :param sep: delimiter character
    :return: list of pieces
    >>> split("a", ',')
    ['a']
    >>> split("a,", ',')
    ['a']
    >>> split("a,,", ',')
    ['a', '']
    >>> split("a,b", ',')
    ['a', 'b']
    """
    pieces = []
    while s:
        cut = 0
        while cut < len(s) and s[cut] != sep:
            cut += 1
        pieces.append(s[:cut])
        s = s[cut + 1:] if cut < len(s) else ""
    return pieces
if __name__ == '__main__':
    # Run the doctests embedded in car/cdr/split when executed directly.
    import doctest
    doctest.testmod()
| [
"xuanguang.chen@qunar.com"
] | xuanguang.chen@qunar.com |
3aa00b6c6c2a67cbd2bb189284eaadae9f218fc2 | 9a1865f4a8b7ea956bc3a25f147db6189ee88cb8 | /network/migrations/0002_auto_20210315_1617.py | 89cba4ea39c49a245d90ee54bc56dd6bff4d92c0 | [] | no_license | benomac/network-refresh- | b94a4ac34b864fd95bec7dcaf504e22e1edbc784 | 7e5caa1f3871071f8be7df380dc441399ef478c5 | refs/heads/master | 2023-03-30T19:35:42.997789 | 2021-03-17T20:09:28 | 2021-03-17T20:09:28 | 348,138,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | # Generated by Django 3.1.4 on 2021-03-15 16:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; adds follower/followed
    # counters to the user model and introduces the UserPosts and Following
    # tables. Avoid hand-editing beyond comments.
    dependencies = [
        ('network', '0001_initial'),
    ]
    operations = [
        # Denormalized follow counters on the existing user model.
        migrations.AddField(
            model_name='user',
            name='followed',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='user',
            name='followers',
            field=models.IntegerField(default=0),
        ),
        # A user's posts: 280-char body, auto creation timestamp, like counter.
        migrations.CreateModel(
            name='UserPosts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.CharField(max_length=280)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('likes', models.IntegerField(default=0)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='poster', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Follow relationship between two users (presumably `user` follows
        # `following`, per related_name='follows' — confirm against views).
        migrations.CreateModel(
            name='Following',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('following', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follows', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"64007619+benomac@users.noreply.github.com"
] | 64007619+benomac@users.noreply.github.com |
1238c6bb3af77479daab7b19dbced09ddd2276ee | ebe36eba494b9ab9bb658686e69c9253c43cb524 | /Competitions/Atcoder/ABC_193/B.py | b314661dcbc62026428ec8940be7c8174c8b6815 | [] | no_license | Dutta-SD/CC_Codes | 4840d938de2adc5a2051a32b48c49390e6ef877f | ec3652ec92d0d371fd3ce200f393f3f69ed27a68 | refs/heads/main | 2023-07-03T11:46:30.868559 | 2021-08-19T12:03:56 | 2021-08-19T12:03:56 | 316,813,717 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | N = int(input())
for i in range(N):
| [
"duttasandip11100@gmail.com"
] | duttasandip11100@gmail.com |
ac6bb52507bdfb51c99246ed05a626b6fbf84f40 | 47b28ab257af2153d0054ce142905bd5ab458b58 | /example.py | 0d55bc96fe8b21b1b5e158349506dcefcc028ff7 | [] | no_license | fredhsu/eApiExampleVlans | 042f6f55db499322c18bd335ec50491d025e3410 | 4a488cf0391f32ff3b6dfaf41270269fe665d21e | refs/heads/master | 2016-09-09T23:18:52.179311 | 2014-02-03T23:54:22 | 2014-02-03T23:54:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | import json
from jsonrpclib import Server
switches = ["172.22.28.156", "172.22.28.157", "172.22.28.158"]
username = "admin"
password = "admin"
# Going through all the switch IP addresses listed above
for switch in switches:
urlString = "https://{}:{}@{}/command-api".format(username, password, switch)
switchReq = Server( urlString )
# Display the current vlan list
response = switchReq.runCmds( 1, ["show vlan"] )
print "Switch : " + switch + " VLANs: "
print response[0]["vlans"].keys()
# Add vlan 100 to the switch
print "Adding vlan 100"
response = switchReq.runCmds( 1, ["enable", "configure", "vlan 100"] )
# List the vlans again to show vlan 100 configured
response = switchReq.runCmds( 1, ["show vlan"] )
print "Switch : " + switch + " VLANs: "
print response[0]["vlans"].keys()
print
print "\n*** Done adding vlan to switches ***\n"
# Go through them again to remove the vlan
for switch in switches:
urlString = "https://{}:{}@{}/command-api".format(username, password, switch)
switchReq = Server( urlString )
# Remove vlan 100
print switch + " : removing vlan 100"
response = switchReq.runCmds( 1, ["enable", "configure", "no vlan 100", "end"] )
print response
print "\n*** Script done ***"
| [
"fredlhsu@aristanetworks.com"
] | fredlhsu@aristanetworks.com |
4af2c8b5383925ce88a9cf268ae8035c4c3aaa34 | b2671809f47911c1226d71ab6c315cf58f72377a | /COMP90049 Introduction to Machine Learning/Project/project1/Project1_MuTong_28_04_2019/code/global_edit_distance.py | b23d695cd1545796eb324ddd629da71830845846 | [] | no_license | MooTong123/Unimelb-Subject | 426ebc616e44aef7d3e04c6ac965e0a268b12bb8 | a4185318efe251c9664af46027ba1332c1963906 | refs/heads/master | 2020-11-26T17:50:25.915775 | 2019-12-22T11:05:16 | 2019-12-22T11:05:16 | 229,163,363 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py |
import editdistance
def global_edit_dis():
    """Spell-correct each word of misspell.txt against dict.txt.

    For every misspelled token, finds the dictionary word(s) at minimum
    global edit (Levenshtein) distance and writes the candidates,
    space-separated, one input word per line, to the result file. Tokens
    already in the dictionary map to themselves.

    Fixes over the original: files are now closed via ``with`` even on
    error paths, the builtin name ``dict`` is no longer shadowed, and
    exact-match lookups use a set instead of an O(n) list scan.
    """
    with open("../data/dict.txt", 'r') as dict_file:
        # Strip newlines; keep list order so distance ties preserve
        # dictionary order in the output, exactly as before.
        words = [line.strip() for line in dict_file]
    word_set = set(words)  # O(1) membership test for exact matches
    with open("../data/misspell.txt", 'r') as misspell_file, \
            open("../result/global_edit_distance_result.txt", 'w') as result_file:
        for line in misspell_file:
            line = line.strip()
            if line in word_set:
                result = [line]
            else:
                # Track every dictionary word at the current minimum distance.
                result = []
                min_value = 99999
                for word in words:
                    distance = editdistance.eval(word, line)
                    if distance < min_value:
                        min_value = distance
                        result = [word]
                    elif distance == min_value:
                        result.append(word)
            for candidate in result:
                result_file.write(candidate + " ")
                print(candidate + " ")
            result_file.write('\n')
def main():
    # Entry point wrapper so the module can be imported without side effects.
    global_edit_dis()
if __name__ == "__main__":
    main()
| [
"1756389851@qq.com"
] | 1756389851@qq.com |
ee33b62f4ef3e6784c0aa32be6050caa9281e220 | 4b2496796dd8f0c07c58a7768098f757b91b2df8 | /Decision_Tree_scratch.py | e43d5eafb7d2dc8d961f87ffa195e2d6e7b4b9f0 | [] | no_license | shivamkc01/MachineLearning_Algorithm__from_scratch | 39f4207bc1d449ade1a850bf6e0b67286324a76b | e644a81255404e33e16ab5ac2eed210fe28d3c3c | refs/heads/main | 2023-07-09T22:50:17.956743 | 2021-08-11T16:35:05 | 2021-08-11T16:35:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,070 | py | """
Decision Tree implementation from scratch
This code you can use for learning purpose.
programmed by Shivam Chhetry
** 11-08-2021
"""
import numpy as np
from collections import Counter
from sklearn import datasets
from sklearn.model_selection import train_test_split
"""
Calculating Entropy -> Entropy measure of purity in a node.
Range[0,1]
0 - Best purity
1 - worst purity
formula:-
H(s) = -p(+)log(p+) - p(-)log(p(-))
p+ = % of +ve class
p- = % of -ve class
p(X) = #x/n
where, #x is no of occurrences
n is no of total samples
E = - np.sum([p(X).log2(p(X))])
"""
def entropy(y):
    """Shannon entropy (base 2) of an array of non-negative integer labels."""
    counts = np.bincount(y)          # occurrences of every label value
    probs = counts / len(y)
    probs = probs[probs > 0]         # 0 * log2(0) is taken as 0, so drop zeros
    return -np.sum(probs * np.log2(probs))
"""
Let's create a helper class to store the information for our node.
we want to store
1. The best split feature(feature)
2. The best split threshold
3. The left and the right child trees
4. If we are at a leaf node we also want to store the actual value , the most
common class label
"""
class Node:
    """A single node of a decision tree.

    Internal nodes carry the split (feature index and threshold) plus their
    two subtrees; leaf nodes carry only the predicted label in ``value``.
    """

    def __init__(self, feature=None, threshold=None, left=None, right=None, *, value=None):
        self.feature = feature      # index of the feature this node splits on
        self.threshold = threshold  # split threshold for that feature
        self.left = left            # subtree for samples <= threshold
        self.right = right          # subtree for samples > threshold
        self.value = value          # predicted label if leaf, else None

    def is_leaf_node(self):
        """A node is a leaf exactly when it stores a prediction value."""
        return self.value is not None
class DecisionTree:
    """CART-style decision-tree classifier split on information gain.

    Growth stops when ``max_depth`` is reached, when a node holds fewer than
    ``min_samples_split`` samples, or when a node is pure (one label left).
    ``n_feats`` limits how many randomly chosen features each split may
    consider (None = all features), as in random forests.
    """
    def __init__(self, min_samples_split=2, max_depth=100, n_feats=None):
        self.min_samples_split = min_samples_split
        self.max_depth = max_depth
        self.n_feats = n_feats
        self.root = None
    def fit(self, X, y):
        # Cap n_feats at the actual feature count; `not self.n_feats` also
        # treats an explicit 0 as "use all features".
        self.n_feats = X.shape[1] if not self.n_feats else min(self.n_feats, X.shape[1])
        self.root = self._grow_tree(X, y)
    def _grow_tree(self, X, y, depth=0):
        # Recursively grow the tree; returns the root Node of this subtree.
        n_samples, n_features = X.shape
        n_labels = len(np.unique(y))
        # Stopping criteria: depth limit, pure node, or too few samples —
        # in each case emit a leaf holding the majority label.
        if (
            depth >= self.max_depth
            or n_labels == 1
            or n_samples < self.min_samples_split
        ):
            leaf_value = self._most_common_label(y)
            return Node(value= leaf_value)
        # Randomly pick which features this node may split on (no replacement).
        feat_idxs = np.random.choice(n_features, self.n_feats, replace=False)
        # Greedy search over candidate features and all observed values
        # of each feature as candidate thresholds.
        best_feat, best_thresh = self._best_criteria(X, y, feat_idxs)
        # Grow the children that result from the best split.
        left_idxs, right_idxs = self._split(X[:, best_feat], best_thresh)
        left = self._grow_tree(X[left_idxs, :], y[left_idxs], depth + 1)
        right = self._grow_tree(X[right_idxs, :], y[right_idxs], depth + 1)
        return Node(best_feat, best_thresh, left, right)
    def _best_criteria(self, X, y, feat_idxs):
        # Return the (feature index, threshold) pair with maximum
        # information gain over all candidates.
        best_gain = -1
        split_idx, split_thresh = None, None
        for feat_idx in feat_idxs:
            X_column = X[:, feat_idx]
            thresholds = np.unique(X_column)
            for threshold in thresholds:
                gain = self._information_gain(y, X_column, threshold)
                if gain > best_gain:
                    best_gain = gain
                    split_idx = feat_idx
                    split_thresh = threshold
        return split_idx, split_thresh
    def _information_gain(self, y, X_column, split_thersh):
        """
        Information gain of splitting ``X_column`` at ``split_thersh`` (sic):

            IG = E(parent) - [weighted average] * E(children)

        Example:
            S = [0,0,0,0,0,1,1,1,1,1], S1 = [0,0,1,1,1,1,1], S2 = [0,0,0]
            IG = E(S) - [(7/10)*E(S1) + (3/10)*E(S2)]
               = 1 - [(7/10)*0.863 + (3/10)*0] = 0.395

        The split with the highest information gain is preferred.
        """
        # parent entropy
        parent_entropy = entropy(y)
        # generate the split
        left_idxs, right_idxs = self._split(X_column, split_thersh)
        if len(left_idxs) == 0 or len(right_idxs) == 0:
            return 0
        # weighted average of the children's entropies
        n = len(y)
        n_left_samples, n_right_samples = len(left_idxs), len(right_idxs)
        entropy_left, entropy_right = entropy(y[left_idxs]), entropy(y[right_idxs])
        child_entropy = (n_left_samples/n) * entropy_left + (n_right_samples/n) * entropy_right
        # return IG
        ig = parent_entropy - child_entropy
        return ig
    def _split(self, X_column, split_thersh):
        # Indices of samples going left (<= threshold) / right (> threshold).
        left_idxs = np.argwhere(X_column <= split_thersh).flatten()
        right_idxs = np.argwhere(X_column > split_thersh).flatten()
        return left_idxs, right_idxs
    def predict(self,X):
        # Predict a label for every row of X by walking the fitted tree.
        return np.array([self._traverse_tree(x, self.root) for x in X])
    def _traverse_tree(self, x, node):
        # Descend from `node` to a leaf following the split tests.
        if node.is_leaf_node():
            return node.value
        if x[node.feature] <= node.threshold:
            return self._traverse_tree(x, node.left)
        return self._traverse_tree(x, node.right)
    def _most_common_label(self, y):
        # Majority label in y; Counter tallies occurrences of each label.
        counter = Counter(y)
        most_common = counter.most_common(1)[0][0]  # most_common(1) -> [(label, count)]
        return most_common
if __name__ == '__main__':
    # Smoke test: train on sklearn's breast-cancer dataset with a fixed
    # split and report hold-out accuracy.
    data = datasets.load_breast_cancer()
    X = data.data
    y = data.target
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=1234
    )
    clf = DecisionTree(max_depth=10)
    clf.fit(X_train, y_train)
    def accuracy(y_true, y_pred):
        # Fraction of exactly matching predictions.
        acu = np.sum(y_true == y_pred)/len(y_pred)
        return acu
    y_pred = clf.predict(X_test)
    acc = accuracy(y_test, y_pred)
print("Accuracy : ", acc) | [
"noreply@github.com"
] | noreply@github.com |
5aae57fc607a70052c54ad09b04cbd25840d0f28 | 9fc6604ae98e1ae91c490e8201364fdee1b4222a | /eg_msg_base/models/msg_status.py | 4501bdc8b00a217bae754eaa0a5b5c32b395123c | [] | no_license | nabiforks/baytonia | b65e6a7e1c7f52a7243e82f5fbcc62ae4cbe93c4 | 58cb304d105bb7332f0a6ab685015f070988ba56 | refs/heads/main | 2023-03-23T21:02:57.862331 | 2021-01-04T03:40:58 | 2021-01-04T03:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from odoo import models, fields
class MsgStatus(models.Model):
    """Odoo model recording a status entry for an SMS instance."""
    _name = "msg.status"
    # Status label; read-only in the UI, set programmatically.
    name = fields.Char(string="Status", readonly=True)
    # Flags the final status in the message lifecycle.
    is_last_status = fields.Boolean(string="Is Last Status")
    # The sms.instance this status record belongs to.
    sms_instance_id = fields.Many2one(comodel_name="sms.instance", string="Sms Instance", readonly=True)
| [
"ash@odoxsofthub.com"
] | ash@odoxsofthub.com |
386938533cc7a4fbfb7fee0046c5d0c2e4406d89 | c194aa3baf239edef12747b51053beaaebd25a80 | /src/dashboard/views.py | d7c7cc173ece58e2bc6251f040e1b99d1152fc9b | [] | no_license | cplax14/dailydashboard | e7d37d2b10198175539c7b6f6cea82fe8de5e5f9 | 8766c97f2a54761af367c09a987661966a661bf5 | refs/heads/master | 2021-01-12T14:08:26.548007 | 2016-10-02T20:47:06 | 2016-10-02T20:47:06 | 69,757,833 | 0 | 0 | null | 2016-10-02T20:47:25 | 2016-10-01T19:07:24 | Python | UTF-8 | Python | false | false | 374 | py | from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.db.models import Q
from .models import Area
def areas_list(request):
    """List view: render every Area record via the areas_list.html template."""
    queryset = Area.objects.all()
    # Template context mirrors Django's generic ListView naming.
    context = {
        "object_list":queryset
    }
return render(request,"areas_list.html",context) | [
"cplax14@gmail.com"
] | cplax14@gmail.com |
afe69ae31a6285f10b876f9f4c269a0bde8cf181 | 0049d7959ff872e2ddf6ea3ce83b6c26512425a6 | /advtempproject/advtempproject/wsgi.py | 0eb3d019ab04a1ac8d64047a343bd0a726103e5c | [] | no_license | srazor09/Django_projects | 9806ab25d966af780cdabe652a1792220c7806a8 | 8d664ba4c9478bd93c8e5bcbcaf594e8ffe6ce93 | refs/heads/master | 2023-04-18T02:13:15.993393 | 2021-05-04T20:34:05 | 2021-05-04T20:34:05 | 364,379,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for advtempproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'advtempproject.settings')
application = get_wsgi_application()
| [
"sourabhaws09@gmail.com"
] | sourabhaws09@gmail.com |
827a78cc47cd4d1f99e0ace0cf6603aca4abcfc0 | 7a04774e01e3b2065200cd0d57aacc19bbdd4653 | /python_sources/ModelTraining.py | f6d8f1194d72a3dd625fa1013cafb970c9231a4d | [] | no_license | gerykiss86/master-project-ml | 22c7ca7906850f8c5b8da3c93209c011f25c6673 | a1e6698fb89774e5c3e63ea2771c995ca89ccf83 | refs/heads/master | 2020-03-21T23:32:34.789318 | 2019-03-11T00:34:33 | 2019-03-11T00:34:33 | 139,192,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | import numpy as np
from keras.layers import Dense, Activation
from keras.models import Sequential
def my_train(week):
    """Train and save a one-neuron linear Keras model for one week.

    Loads TrainingData.csv, keeps the first ``week`` feature columns plus
    the label (original last column), fits Dense(1) with MSE loss, and
    saves the model to .\\models\\weekNN.h5. Returns 0.
    """
    def keep_columns(passed_array, last_column_to_keep):
        # Keep columns [0, last_column_to_keep) and re-append the label
        # (the original last column) as the new final column.
        arr = passed_array
        label = arr[:, arr.shape[1] - 1:]
        arr2 = arr[:, :last_column_to_keep]
        arr3 = np.insert(arr2, arr2.shape[1], label.flatten(), axis=1)
        return arr3
    def load_my_data(path='TrainingData.csv', test_split=0.2, seed=113):
        # Load CSV (skipping the header), truncate features to `week`
        # columns, shuffle deterministically, then split train/test.
        assert 0 <= test_split < 1
        my_data = np.genfromtxt(path, delimiter=',', skip_header=1)
        my_data = keep_columns(my_data, week)
        x = my_data[:, 0:(my_data.shape[1] - 1)]
        y = my_data[:, (my_data.shape[1] - 1):my_data.shape[1]]
        np.random.seed(seed)
        indices = np.arange(len(x))
        np.random.shuffle(indices)
        x = x[indices]
        y = y[indices]
        x_train = np.array(x[:int(len(x) * (1 - test_split))])
        y_train = np.array(y[:int(len(x) * (1 - test_split))])
        # y slices are (n, 1); transpose + [0] flattens them to shape (n,).
        y_train = y_train.transpose()
        y_train = y_train[0]
        x_test = np.array(x[int(len(x) * (1 - test_split)):])
        y_test = np.array(y[int(len(x) * (1 - test_split)):])
        y_test = y_test.transpose()
        y_test = y_test[0]
        return (x_train, y_train), (x_test, y_test)
    (X_train, Y_train), (X_test, Y_test) = load_my_data()
    nFeatures = X_train.shape[1]
    # A single linear unit trained with MSE: effectively linear regression
    # optimized by RMSprop.
    model = Sequential()
    model.add(Dense(1, input_shape=(nFeatures,), kernel_initializer='uniform'))
    model.add(Activation('linear'))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mse', 'mae'])
    model.fit(X_train, Y_train, batch_size=4, epochs=500)
    model.summary()
    model.evaluate(X_test, Y_test, verbose=True)
    Y_pred = model.predict(X_test)
    print(Y_test[:5])
    print(Y_pred[:5, 0])
    # NOTE(review): the backslashes make this Windows-only, and "\m"/"\w"
    # are invalid escape sequences (DeprecationWarning on Python 3.6+);
    # prefer a raw string or os.path.join.
    filename = ".\models\week" + str(week).zfill(2) + ".h5"
    model.save(filename)
    return 0
# Train one model per week, counting down from week 52 to week 1.
for i in range(52, 0, -1):
    print("week" + str(i).zfill(2))
    my_train(i)
print("done")
| [
"gergo.laszlo.kiss@gmail.com"
] | gergo.laszlo.kiss@gmail.com |
4e59b315ea0eefd8f148888dc07903bba562e531 | 59182ffe28c054d9f33ee9b8885a52fd5944440c | /twilio/rest/wireless/v1/usage_record.py | 91867ba874f25cc0138ed624ec37517e3a4b31c9 | [
"MIT"
] | permissive | NCPlayz/twilio-python | 652b508e086ee7e6658015e74f3bd19572012502 | 08898a4a1a43b636a64c9e98fbb0b6ee1792c687 | refs/heads/master | 2020-08-12T22:24:06.816467 | 2019-10-09T19:25:08 | 2019-10-09T19:25:08 | 214,854,286 | 0 | 0 | MIT | 2019-10-13T16:29:39 | 2019-10-13T16:29:39 | null | UTF-8 | Python | false | false | 9,237 | py | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class UsageRecordList(ListResource):
""" """
def __init__(self, version):
"""
Initialize the UsageRecordList
:param Version version: Version that contains the resource
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordList
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordList
"""
super(UsageRecordList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/UsageRecords'.format(**self._solution)
def stream(self, end=values.unset, start=values.unset, granularity=values.unset,
limit=None, page_size=None):
"""
Streams UsageRecordInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param datetime end: Only include usage that has occurred on or before this date
:param datetime start: Only include usage that has occurred on or after this date
:param UsageRecordInstance.Granularity granularity: The time-based grouping that results are aggregated by
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.wireless.v1.usage_record.UsageRecordInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(end=end, start=start, granularity=granularity, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, end=values.unset, start=values.unset, granularity=values.unset,
limit=None, page_size=None):
"""
Lists UsageRecordInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param datetime end: Only include usage that has occurred on or before this date
:param datetime start: Only include usage that has occurred on or after this date
:param UsageRecordInstance.Granularity granularity: The time-based grouping that results are aggregated by
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.wireless.v1.usage_record.UsageRecordInstance]
"""
return list(self.stream(
end=end,
start=start,
granularity=granularity,
limit=limit,
page_size=page_size,
))
def page(self, end=values.unset, start=values.unset, granularity=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of UsageRecordInstance records from the API.
Request is executed immediately
:param datetime end: Only include usage that has occurred on or before this date
:param datetime start: Only include usage that has occurred on or after this date
:param UsageRecordInstance.Granularity granularity: The time-based grouping that results are aggregated by
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordPage
"""
params = values.of({
'End': serialize.iso8601_datetime(end),
'Start': serialize.iso8601_datetime(start),
'Granularity': granularity,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return UsageRecordPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of UsageRecordInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return UsageRecordPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Wireless.V1.UsageRecordList>'
class UsageRecordPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the UsageRecordPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordPage
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordPage
"""
super(UsageRecordPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of UsageRecordInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
"""
return UsageRecordInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Wireless.V1.UsageRecordPage>'
class UsageRecordInstance(InstanceResource):
""" """
class Granularity(object):
HOURLY = "hourly"
DAILY = "daily"
ALL = "all"
def __init__(self, version, payload):
"""
Initialize the UsageRecordInstance
:returns: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
:rtype: twilio.rest.wireless.v1.usage_record.UsageRecordInstance
"""
super(UsageRecordInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'period': payload['period'],
'commands': payload['commands'],
'data': payload['data'],
}
# Context
self._context = None
self._solution = {}
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def period(self):
"""
:returns: The time period for which usage is reported
:rtype: dict
"""
return self._properties['period']
@property
def commands(self):
"""
:returns: An object that describes the aggregated Commands usage for all SIMs during the specified period
:rtype: dict
"""
return self._properties['commands']
@property
def data(self):
"""
:returns: An object that describes the aggregated Data usage for all SIMs over the period
:rtype: dict
"""
return self._properties['data']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Wireless.V1.UsageRecordInstance>'
| [
"twilio-ci@twilio.com"
] | twilio-ci@twilio.com |
93991c44c4934ca2e346f1c389f11852fa134472 | 2edc41743511a39951c9b125e0644e2b7b187583 | /scripts/classification/imagenet/train_imagenet.py | f8646259a331651e9ec353acb22076d180a8e8cb | [
"Apache-2.0"
] | permissive | Angzz/fpn-gluon-cv | 04477968c0d4ff7eec3cba39d223ef9cd7b3fe71 | d0e730799757910b450d17f19db07e3ffe424d9f | refs/heads/master | 2022-12-01T23:44:12.971267 | 2019-08-07T14:16:43 | 2019-08-07T14:16:43 | 166,018,194 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 17,634 | py | import argparse, time, logging, os, math
import numpy as np
import mxnet as mx
from mxnet import gluon, nd
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
from gluoncv.data import imagenet
from gluoncv.model_zoo import get_model
from gluoncv.utils import makedirs, LRScheduler
# CLI
parser = argparse.ArgumentParser(description='Train a model for image classification.')
parser.add_argument('--data-dir', type=str, default='~/.mxnet/datasets/imagenet',
help='training and validation pictures to use.')
parser.add_argument('--rec-train', type=str, default='~/.mxnet/datasets/imagenet/rec/train.rec',
help='the training data')
parser.add_argument('--rec-train-idx', type=str, default='~/.mxnet/datasets/imagenet/rec/train.idx',
help='the index of training data')
parser.add_argument('--rec-val', type=str, default='~/.mxnet/datasets/imagenet/rec/val.rec',
help='the validation data')
parser.add_argument('--rec-val-idx', type=str, default='~/.mxnet/datasets/imagenet/rec/val.idx',
help='the index of validation data')
parser.add_argument('--use-rec', action='store_true',
help='use image record iter for data input. default is false.')
parser.add_argument('--batch-size', type=int, default=32,
help='training batch size per device (CPU/GPU).')
parser.add_argument('--dtype', type=str, default='float32',
help='data type for training. default is float32')
parser.add_argument('--num-gpus', type=int, default=0,
help='number of gpus to use.')
parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int,
help='number of preprocessing workers')
parser.add_argument('--num-epochs', type=int, default=3,
help='number of training epochs.')
parser.add_argument('--lr', type=float, default=0.1,
help='learning rate. default is 0.1.')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum value for optimizer, default is 0.9.')
parser.add_argument('--wd', type=float, default=0.0001,
help='weight decay rate. default is 0.0001.')
parser.add_argument('--lr-mode', type=str, default='step',
help='learning rate scheduler mode. options are step, poly and cosine.')
parser.add_argument('--lr-decay', type=float, default=0.1,
help='decay rate of learning rate. default is 0.1.')
parser.add_argument('--lr-decay-period', type=int, default=0,
help='interval for periodic learning rate decays. default is 0 to disable.')
parser.add_argument('--lr-decay-epoch', type=str, default='40,60',
help='epochs at which learning rate decays. default is 40,60.')
parser.add_argument('--warmup-lr', type=float, default=0.0,
help='starting warmup learning rate. default is 0.0.')
parser.add_argument('--warmup-epochs', type=int, default=0,
help='number of warmup epochs.')
parser.add_argument('--last-gamma', action='store_true',
help='whether to init gamma of the last BN layer in each bottleneck to 0.')
parser.add_argument('--mode', type=str,
help='mode in which to train the model. options are symbolic, imperative, hybrid')
parser.add_argument('--model', type=str, required=True,
help='type of model to use. see vision_model for options.')
parser.add_argument('--input-size', type=int, default=224,
help='size of the input image size. default is 224')
parser.add_argument('--crop-ratio', type=float, default=0.875,
help='Crop ratio during validation. default is 0.875')
parser.add_argument('--use-pretrained', action='store_true',
help='enable using pretrained model from gluon.')
parser.add_argument('--use_se', action='store_true',
help='use SE layers or not in resnext. default is false.')
parser.add_argument('--mixup', action='store_true',
help='whether train the model with mix-up. default is false.')
parser.add_argument('--mixup-alpha', type=float, default=0.2,
help='beta distribution parameter for mixup sampling, default is 0.2.')
parser.add_argument('--mixup-off-epoch', type=int, default=0,
help='how many last epochs to train without mixup, default is 0.')
parser.add_argument('--label-smoothing', action='store_true',
help='use label smoothing or not in training. default is false.')
parser.add_argument('--no-wd', action='store_true',
help='whether to remove weight decay on bias, and beta/gamma for batchnorm layers.')
parser.add_argument('--batch-norm', action='store_true',
help='enable batch normalization or not in vgg. default is false.')
parser.add_argument('--save-frequency', type=int, default=10,
help='frequency of model saving.')
parser.add_argument('--save-dir', type=str, default='params',
help='directory of saved models')
parser.add_argument('--resume-epoch', type=int, default=0,
help='epoch to resume training from.')
parser.add_argument('--resume-params', type=str, default='',
help='path of parameters to load from.')
parser.add_argument('--resume-states', type=str, default='',
help='path of trainer state to load from.')
parser.add_argument('--log-interval', type=int, default=50,
help='Number of batches to wait before logging.')
parser.add_argument('--logging-file', type=str, default='train_imagenet.log',
help='name of training log file')
opt = parser.parse_args()
filehandler = logging.FileHandler(opt.logging_file)
streamhandler = logging.StreamHandler()
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
logger.info(opt)
batch_size = opt.batch_size
classes = 1000
num_training_samples = 1281167
num_gpus = opt.num_gpus
batch_size *= max(1, num_gpus)
context = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]
num_workers = opt.num_workers
lr_decay = opt.lr_decay
lr_decay_period = opt.lr_decay_period
if opt.lr_decay_period > 0:
lr_decay_epoch = list(range(lr_decay_period, opt.num_epochs, lr_decay_period))
else:
lr_decay_epoch = [int(i) for i in opt.lr_decay_epoch.split(',')]
num_batches = num_training_samples // batch_size
lr_scheduler = LRScheduler(mode=opt.lr_mode, baselr=opt.lr,
niters=num_batches, nepochs=opt.num_epochs,
step=lr_decay_epoch, step_factor=opt.lr_decay, power=2,
warmup_epochs=opt.warmup_epochs)
model_name = opt.model
kwargs = {'ctx': context, 'pretrained': opt.use_pretrained, 'classes': classes}
if model_name.startswith('vgg'):
kwargs['batch_norm'] = opt.batch_norm
elif model_name.startswith('resnext'):
kwargs['use_se'] = opt.use_se
if opt.last_gamma:
kwargs['last_gamma'] = True
optimizer = 'nag'
optimizer_params = {'wd': opt.wd, 'momentum': opt.momentum, 'lr_scheduler': lr_scheduler}
if opt.dtype != 'float32':
optimizer_params['multi_precision'] = True
net = get_model(model_name, **kwargs)
net.cast(opt.dtype)
# Compare by value: `is not` against a string literal tests object identity,
# which is implementation-defined and raises SyntaxWarning on CPython >= 3.8.
if opt.resume_params != '':
    net.load_parameters(opt.resume_params, ctx=context)
# Two functions for reading data from record file or raw images
def get_data_rec(rec_train, rec_train_idx, rec_val, rec_val_idx, batch_size, num_workers):
    """Build RecordIO-backed ImageNet train/val iterators.

    Returns ``(train_data, val_data, batch_fn)`` where ``batch_fn`` splits a
    DataBatch across a list of contexts.  Augmentation and input-size
    settings are read from the module-level ``opt``.
    """
    rec_train = os.path.expanduser(rec_train)
    rec_train_idx = os.path.expanduser(rec_train_idx)
    rec_val = os.path.expanduser(rec_val)
    rec_val_idx = os.path.expanduser(rec_val_idx)
    jitter_param = 0.4
    lighting_param = 0.1
    input_size = opt.input_size
    crop_ratio = opt.crop_ratio if opt.crop_ratio > 0 else 0.875
    # Validation images are resized so that the center crop covers
    # `crop_ratio` of the shorter side.
    resize = int(math.ceil(input_size / crop_ratio))
    # Per-channel ImageNet RGB statistics on the 0-255 pixel scale.
    mean_rgb = [123.68, 116.779, 103.939]
    std_rgb = [58.393, 57.12, 57.375]
    def batch_fn(batch, ctx):
        # Split one DataBatch across the devices in `ctx` along the batch axis.
        data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return data, label
    train_data = mx.io.ImageRecordIter(
        path_imgrec = rec_train,
        path_imgidx = rec_train_idx,
        preprocess_threads = num_workers,
        shuffle = True,
        batch_size = batch_size,
        data_shape = (3, input_size, input_size),
        mean_r = mean_rgb[0],
        mean_g = mean_rgb[1],
        mean_b = mean_rgb[2],
        std_r = std_rgb[0],
        std_g = std_rgb[1],
        std_b = std_rgb[2],
        rand_mirror = True,
        random_resized_crop = True,
        max_aspect_ratio = 4. / 3.,
        min_aspect_ratio = 3. / 4.,
        max_random_area = 1,
        min_random_area = 0.08,
        brightness = jitter_param,
        saturation = jitter_param,
        contrast = jitter_param,
        pca_noise = lighting_param,
    )
    val_data = mx.io.ImageRecordIter(
        path_imgrec = rec_val,
        path_imgidx = rec_val_idx,
        preprocess_threads = num_workers,
        shuffle = False,
        batch_size = batch_size,
        resize = resize,
        data_shape = (3, input_size, input_size),
        mean_r = mean_rgb[0],
        mean_g = mean_rgb[1],
        mean_b = mean_rgb[2],
        std_r = std_rgb[0],
        std_g = std_rgb[1],
        std_b = std_rgb[2],
    )
    return train_data, val_data, batch_fn
def get_data_loader(data_dir, batch_size, num_workers):
    """Build folder-of-images ImageNet train/val DataLoaders.

    Returns ``(train_data, val_data, batch_fn)``; ``batch_fn`` splits a
    (data, label) batch across a list of contexts.  Mirrors the
    augmentation pipeline of :func:`get_data_rec`, but with normalization
    statistics on the 0-1 pixel scale (after ToTensor).
    """
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    jitter_param = 0.4
    lighting_param = 0.1
    input_size = opt.input_size
    crop_ratio = opt.crop_ratio if opt.crop_ratio > 0 else 0.875
    # Resize so the validation center crop covers `crop_ratio` of the image.
    resize = int(math.ceil(input_size / crop_ratio))
    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return data, label
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        normalize
    ])
    transform_test = transforms.Compose([
        transforms.Resize(resize, keep_ratio=True),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        normalize
    ])
    train_data = gluon.data.DataLoader(
        imagenet.classification.ImageNet(data_dir, train=True).transform_first(transform_train),
        batch_size=batch_size, shuffle=True, last_batch='discard', num_workers=num_workers)
    val_data = gluon.data.DataLoader(
        imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
        batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_data, val_data, batch_fn
# Select the input pipeline: RecordIO iterators or a folder-backed DataLoader.
if opt.use_rec:
    train_data, val_data, batch_fn = get_data_rec(opt.rec_train, opt.rec_train_idx,
                                                  opt.rec_val, opt.rec_val_idx,
                                                  batch_size, num_workers)
else:
    train_data, val_data, batch_fn = get_data_loader(opt.data_dir, batch_size, num_workers)
# Mixup produces dense (soft) targets, so training progress is tracked with
# RMSE against them instead of hard-label accuracy.
if opt.mixup:
    train_metric = mx.metric.RMSE()
else:
    train_metric = mx.metric.Accuracy()
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
save_frequency = opt.save_frequency
if opt.save_dir and save_frequency:
    save_dir = opt.save_dir
    makedirs(save_dir)
else:
    # Disable checkpointing entirely when no directory/frequency is given.
    save_dir = ''
    save_frequency = 0
def mixup_transform(label, classes, lam=1, eta=0.0):
    """Build mixup soft targets for each label batch.

    Each batch's one-hot encoding is blended (weight ``lam``) with the
    encoding of the same batch in reversed order -- the partner ordering
    used when mixing the inputs.  ``eta`` applies label smoothing to the
    one-hot values before blending.
    """
    batches = [label] if isinstance(label, nd.NDArray) else label
    on_val = 1 - eta + eta / classes
    off_val = eta / classes
    mixed = []
    for batch in batches:
        straight = batch.one_hot(classes, on_value=on_val, off_value=off_val)
        flipped = batch[::-1].one_hot(classes, on_value=on_val, off_value=off_val)
        mixed.append(lam * straight + (1 - lam) * flipped)
    return mixed
def smooth(label, classes, eta=0.1):
    """Return label-smoothed one-hot targets for each label batch.

    A single NDArray is treated as a one-element list; the smoothed
    encodings are returned as a list in the same order.
    """
    batches = [label] if isinstance(label, nd.NDArray) else label
    on_val = 1 - eta + eta / classes
    off_val = eta / classes
    return [batch.one_hot(classes, on_value=on_val, off_value=off_val)
            for batch in batches]
def test(ctx, val_data):
    """Evaluate the module-level ``net`` on the validation set.

    Returns ``(top1_error, top5_error)``.  Uses the shared ``acc_top1`` /
    ``acc_top5`` metrics (reset here before accumulating) and ``batch_fn``
    to spread each batch over the contexts in ``ctx``.
    """
    if opt.use_rec:
        val_data.reset()
    acc_top1.reset()
    acc_top5.reset()
    for i, batch in enumerate(val_data):
        data, label = batch_fn(batch, ctx)
        outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
        acc_top1.update(label, outputs)
        acc_top5.update(label, outputs)
    _, top1 = acc_top1.get()
    _, top5 = acc_top5.get()
    # Convert accuracies to error rates for logging/checkpoint selection.
    return (1-top1, 1-top5)
def train(ctx):
    """Run the full training loop for ``opt.num_epochs`` epochs.

    Parameters
    ----------
    ctx : mx.Context or list of mx.Context
        Device(s) to train on; a single context is wrapped into a list.

    Relies on module-level state (``net``, ``opt``, ``train_data``,
    ``val_data``, ``batch_fn``, ``train_metric``, ``lr_scheduler``,
    ``optimizer``/``optimizer_params``, ``logger``, ``save_dir`` ...).
    Saves the best-so-far and periodic checkpoints under ``save_dir``.
    """
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    # Value comparisons here and below: the original used `is`/`is not`
    # against '' (object identity), which is implementation-defined and a
    # SyntaxWarning on CPython >= 3.8.
    if opt.resume_params == '':
        net.initialize(mx.init.MSRAPrelu(), ctx=ctx)

    if opt.no_wd:
        # Exclude batch-norm scale/shift and biases from weight decay.
        for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
            v.wd_mult = 0.0

    trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
    if opt.resume_states != '':
        trainer.load_states(opt.resume_states)

    if opt.label_smoothing or opt.mixup:
        # Soft (dense) targets require sparse_label=False.
        L = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=False)
    else:
        L = gluon.loss.SoftmaxCrossEntropyLoss()

    best_val_score = 1
    for epoch in range(opt.resume_epoch, opt.num_epochs):
        tic = time.time()
        if opt.use_rec:
            train_data.reset()
        train_metric.reset()
        btic = time.time()

        for i, batch in enumerate(train_data):
            data, label = batch_fn(batch, ctx)

            if opt.mixup:
                lam = np.random.beta(opt.mixup_alpha, opt.mixup_alpha)
                # Optionally train the last epochs without mixup.
                if epoch >= opt.num_epochs - opt.mixup_off_epoch:
                    lam = 1
                data = [lam*X + (1-lam)*X[::-1] for X in data]

                if opt.label_smoothing:
                    eta = 0.1
                else:
                    eta = 0.0
                label = mixup_transform(label, classes, lam, eta)

            elif opt.label_smoothing:
                # Keep the hard labels for the accuracy metric below.
                hard_label = label
                label = smooth(label, classes)

            with ag.record():
                outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
                loss = [L(yhat, y.astype(opt.dtype, copy=False)) for yhat, y in zip(outputs, label)]
            for l in loss:
                l.backward()
            lr_scheduler.update(i, epoch)
            trainer.step(batch_size)

            if opt.mixup:
                output_softmax = [nd.SoftmaxActivation(out.astype('float32', copy=False))
                                  for out in outputs]
                train_metric.update(label, output_softmax)
            else:
                if opt.label_smoothing:
                    train_metric.update(hard_label, outputs)
                else:
                    train_metric.update(label, outputs)

            if opt.log_interval and not (i+1)%opt.log_interval:
                train_metric_name, train_metric_score = train_metric.get()
                logger.info('Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\t%s=%f\tlr=%f'%(
                            epoch, i, batch_size*opt.log_interval/(time.time()-btic),
                            train_metric_name, train_metric_score, trainer.learning_rate))
                btic = time.time()

        train_metric_name, train_metric_score = train_metric.get()
        throughput = int(batch_size * i /(time.time() - tic))

        err_top1_val, err_top5_val = test(ctx, val_data)

        logger.info('[Epoch %d] training: %s=%f'%(epoch, train_metric_name, train_metric_score))
        logger.info('[Epoch %d] speed: %d samples/sec\ttime cost: %f'%(epoch, throughput, time.time()-tic))
        logger.info('[Epoch %d] validation: err-top1=%f err-top5=%f'%(epoch, err_top1_val, err_top5_val))

        if err_top1_val < best_val_score:
            best_val_score = err_top1_val
            net.save_parameters('%s/%.4f-imagenet-%s-%d-best.params'%(save_dir, best_val_score, model_name, epoch))
            trainer.save_states('%s/%.4f-imagenet-%s-%d-best.states'%(save_dir, best_val_score, model_name, epoch))

        if save_frequency and save_dir and (epoch + 1) % save_frequency == 0:
            net.save_parameters('%s/imagenet-%s-%d.params'%(save_dir, model_name, epoch))
            trainer.save_states('%s/imagenet-%s-%d.states'%(save_dir, model_name, epoch))

    if save_frequency and save_dir:
        net.save_parameters('%s/imagenet-%s-%d.params'%(save_dir, model_name, opt.num_epochs-1))
        trainer.save_states('%s/imagenet-%s-%d.states'%(save_dir, model_name, opt.num_epochs-1))
def main():
    """Entry point: optionally hybridize the network, then run training."""
    if opt.mode == 'hybrid':
        # Static alloc/shape lets the cached symbolic graph reuse buffers.
        net.hybridize(static_alloc=True, static_shape=True)
    train(context)
if __name__ == '__main__':
main()
| [
"liang@megvii.com"
] | liang@megvii.com |
f095c17c392697ec5fb7da951dd4309508663a2f | c3d0a0b6336a3ff73724fe1615eb1809dbdaaed8 | /Hacker Rank/Day3_04_02_20.py | c7cd53f8193dae0cfdc503af27bf0d8b26745ef5 | [] | no_license | Silentsoul04/FTSP_2020 | db0dae6cd9c371f3daa9219f86520dfa66348236 | 7e603af918da2bcfe4949a4cf5a33107c837894f | refs/heads/master | 2022-12-21T20:44:32.031640 | 2020-09-20T12:29:58 | 2020-09-20T12:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 4 22:00:31 2020
@author: Rajesh
"""
def swap_case(s):
    """Return *s* with the case of every alphabetic character inverted."""
    return s.swapcase()
if __name__ == '__main__':
s = input()
result = swap_case(s)
print(result)
#########################
a = "this is a string"
b = a.split(" ") # a is converted to a list of strings.
print(b)
c= "-".join(b)
print(c)
#####################
def b(a):
    """Join the whitespace-separated words of *a* with hyphens."""
    return "-".join(a.split())
if __name__ == '__main__':
line = input()
result = b(line)
print(result)
######################
def print_full_name(a, b):
    """Print the greeting for first name *a* and last name *b*."""
    tail = b + "! You just delved into python."
    print("Hello", a, tail)
if __name__ == '__main__':
first_name = input()
last_name = input()
print_full_name(first_name, last_name)
##############################
def mutate_string(string, position, character):
    """Return *string* with the character at index *position* replaced by
    *character*.  Strings are immutable, so rebuild through a char list
    (which also keeps negative-index semantics)."""
    chars = list(string)
    chars[position] = character
    return ''.join(chars)
if __name__ == '__main__':
s = input()
i, c = input().split()
s_new = mutate_string(s, int(i), c)
print(s_new)
| [
"sharma90126@gmail.com"
] | sharma90126@gmail.com |
37d773e766ec4128fa05bb23fd58b27dc1a4e3a1 | 8df6cbbc3a0a10147a17c7b699110a123c493ea3 | /Algorithm Analysis and Design/coin.py | 2736c6356219595609dc09c1868518255ae3fcd6 | [
"BSD-3-Clause"
] | permissive | tolgahanakgun/School-Projects | 7982cd3a3841f1509434c3342b1cafe807e28354 | 3aecfa3887bc69f3fff44bd9509ff355c99ab1f4 | refs/heads/master | 2021-03-19T07:44:58.181555 | 2018-07-08T11:26:39 | 2018-07-08T11:26:39 | 86,336,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | '''
Created on 31 May 2016
@author: TOLGAHAN
'''
def coin(coins, x):
    """Maximum sum obtainable from coins[1..x] when taking the coin at
    position i forbids taking the one at i-1 (house-robber recurrence).

    Rewritten as O(x) bottom-up dynamic programming; the original
    top-down recursion max(coins[x] + coin(x-2), coin(x-1)) re-solved
    overlapping subproblems and ran in exponential time.  Results are
    identical for all inputs.
    """
    if x == 0:
        return 0
    if x == 1:
        return coins[1]
    best_two_back = 0        # optimum over positions 1..i-2
    best_prev = coins[1]     # optimum over positions 1..i-1
    for i in range(2, x + 1):
        best_two_back, best_prev = best_prev, max(coins[i] + best_two_back, best_prev)
    return best_prev
a = [0, 5, 1, 2, 10, 6, 2]
# Use the print() call form: the original `print coin(a,6)` is Python 2-only
# statement syntax and fails to parse under Python 3.
print(coin(a, 6))
"tlghnakgn@gmail.com"
] | tlghnakgn@gmail.com |
d1b61c78e3318835f3a4dd7f322f293ed0539d4b | 25cd9515e4566c90c53cf809b1a4933c15c95a1d | /utils.py | 0652f17cf0d8aa064f22976ccc720c13e9e41405 | [] | no_license | raynardj/npsg | fdc2ea3b41d9786e7bf300a10198f4aa7061ce60 | aaaf02b07ee9443c45f702e4fec97c2eec2e5ee8 | refs/heads/master | 2021-09-04T16:20:42.862853 | 2018-01-20T08:26:44 | 2018-01-20T08:26:44 | 115,770,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from keras.layers import Conv2D, Dense, MaxPool2D, Flatten, Activation, Input, BatchNormalization, Dropout
from keras.layers import Reshape, Lambda, UpSampling2D, Multiply, Add
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Model, Sequential
from keras.optimizers import Adam,SGD
import numpy as np
import pandas as pd
import keras.backend as K
import tensorflow as tf
try:
from tqdm import tqdm,trange
except:
pass
| [
"zhangxiaochen@zenmen.com"
] | zhangxiaochen@zenmen.com |
9612b0b0b057de21705640a3164b0b31063f3364 | 09dc545b0a4645e694463c7e5826931ceb473430 | /giteapy/models/transfer_repo_option.py | a10a6ab56580cfd5167c932c48667c50e28442e2 | [] | no_license | mheden/giteapy | 912ec8a207c922004e731b81fe86a13b5393dd72 | 9b15486fb41bfed2c9f779741fa5dcae836d9ddc | refs/heads/master | 2023-08-26T18:17:06.131693 | 2021-10-19T09:04:01 | 2021-10-19T09:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,501 | py | # coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.15.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from giteapy.configuration import Configuration
class TransferRepoOption(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Request-body model for transferring a Gitea repository to a new owner.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> declared swagger type (consumed by to_dict()).
    swagger_types = {
        'new_owner': 'str',
        'team_ids': 'list[int]'
    }
    # attribute name -> JSON key on the wire.
    attribute_map = {
        'new_owner': 'new_owner',
        'team_ids': 'team_ids'
    }
    def __init__(self, new_owner=None, team_ids=None, _configuration=None):  # noqa: E501
        """TransferRepoOption - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self._new_owner = None
        self._team_ids = None
        self.discriminator = None
        # Assigning through the property runs the required-field validation.
        self.new_owner = new_owner
        if team_ids is not None:
            self.team_ids = team_ids
    @property
    def new_owner(self):
        """Gets the new_owner of this TransferRepoOption.  # noqa: E501

        :return: The new_owner of this TransferRepoOption.  # noqa: E501
        :rtype: str
        """
        return self._new_owner
    @new_owner.setter
    def new_owner(self, new_owner):
        """Sets the new_owner of this TransferRepoOption.

        :param new_owner: The new_owner of this TransferRepoOption.  # noqa: E501
        :type: str
        """
        if self._configuration.client_side_validation and new_owner is None:
            raise ValueError("Invalid value for `new_owner`, must not be `None`")  # noqa: E501
        self._new_owner = new_owner
    @property
    def team_ids(self):
        """Gets the team_ids of this TransferRepoOption.  # noqa: E501

        ID of the team or teams to add to the repository. Teams can only be added to organization-owned repositories.  # noqa: E501

        :return: The team_ids of this TransferRepoOption.  # noqa: E501
        :rtype: list[int]
        """
        return self._team_ids
    @team_ids.setter
    def team_ids(self, team_ids):
        """Sets the team_ids of this TransferRepoOption.

        ID of the team or teams to add to the repository. Teams can only be added to organization-owned repositories.  # noqa: E501

        :param team_ids: The team_ids of this TransferRepoOption.  # noqa: E501
        :type: list[int]
        """
        self._team_ids = team_ids
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: this branch is dead for this class (it does
        # not subclass dict) but is emitted for every swagger model.
        if issubclass(TransferRepoOption, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TransferRepoOption):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, TransferRepoOption):
            return True
        return self.to_dict() != other.to_dict()
| [
"mikael@heden.net"
] | mikael@heden.net |
2be5096f18e4dcd89cb7a54714ee19851b1e8f91 | f6667b7e0d5ad29227efcae1bbededd5e7b06e70 | /blog_project/mysite/mysite/settings.py | 2c89687942de2efee35739a162c5e6736b975961 | [] | no_license | Abhishek-b1/Django_Level_one_to_five | 4e74e2555fe7b31fb7ef8c7a0e9b1d2aa119c9d7 | 8b1b9cecc88553c61bfc9357451c67c3c243f68a | refs/heads/master | 2020-03-15T20:23:21.490706 | 2018-05-18T14:19:54 | 2018-05-18T14:19:54 | 132,331,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,312 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.12.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR, 'blog/templates/blog')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u%*fittqz(8q74sal&hn^#fw$+0)4mi1$(z&u$qm%rc41arv&9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/' # After logging into the url will redirect to home page.
| [
"abhishek.banagunde@kisanhub.com"
] | abhishek.banagunde@kisanhub.com |
9d80a655db28a1a02e70343ae7a7c6be7e6d2e61 | 81ad6eceaaa2334393b6c5ddd1948ed3f7fb5850 | /customers/views.py | be31d3213c47869c21cd4c672880dc7ae17ca88d | [] | no_license | hmoskva/kankara | deb7bb1ca3172d5dee262c16d664b98f50d029b0 | 283d462b27451f0e9e5a40e5a6ae134910c66d3b | refs/heads/master | 2021-09-10T22:29:08.422507 | 2018-04-03T12:39:59 | 2018-04-03T12:39:59 | 118,362,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from django.shortcuts import render
from django.views.generic import ListView
from .models import Customer
class CustomerList(ListView):
    """List view of all active customers for the home page."""
    template_name = 'customers/home.html'
    context_object_name = 'customer_list'
    # Class-level queryset: Django querysets are lazy, so the filter is
    # re-evaluated per request rather than frozen at import time.
    queryset = Customer.objects.filter(active=True)
    def get_context_data(self, *args, **kwargs):
        """Extend the template context with the count of inactive customers."""
        context = super(CustomerList, self).get_context_data(*args, **kwargs)
        context['no_inactive'] = Customer.objects.filter(active=False).count()
        return context
| [
"dejisogbesan@rocketmail.com"
] | dejisogbesan@rocketmail.com |
cfe33aa198b6cd99ffac40afbdfd05b36d63b654 | a0bb8fae61343d0eb5d78ee212e2d8859be28a9d | /src/data_loader.py | 178ee2f9523501264f561a208db135c44433b48b | [] | no_license | nikhil741/lstmAndGruFromScratch | 03466c17cb52422ec9df6097161f2cd5d4d90fbb | a3da113d62f91b43d5eedfb74ebf7a27c1b1ab79 | refs/heads/master | 2021-11-25T23:15:03.430511 | 2018-03-07T21:45:54 | 2018-03-07T21:45:54 | 124,290,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | from zipfile import ZipFile
import numpy as np
'''load your data here'''
class DataLoader(object):
    """Load MNIST-style image/label pairs stored as zip archives.

    The archives are expected at ``<data_dir><mode>_labels.zip`` and
    ``<data_dir><mode>_images.zip``, each containing a single IDX-format
    member named ``<mode>_labels`` / ``<mode>_images``.
    """

    def __init__(self, data_dir='../data/'):
        # The original hard-coded '../data/' in load_data() and left an
        # unused local here; the directory is now a parameter with the same
        # default, so existing `DataLoader()` callers are unaffected.  Kept
        # as a plain string prefix (must end with a path separator) to
        # preserve the original concatenation-based path building.
        self.data_dir = data_dir

    # Returns images and labels corresponding for training and testing. Default mode is train.
    # For retrieving test data pass mode as 'test' in function call.
    def load_data(self, mode='train'):
        """Return ``(images, labels)`` for the given mode ('train'/'test').

        images -- uint8 array of shape (n_samples, 784)
        labels -- uint8 array of shape (n_samples,)
        """
        label_filename = mode + '_labels'
        image_filename = mode + '_images'
        label_zip = self.data_dir + label_filename + '.zip'
        image_zip = self.data_dir + image_filename + '.zip'
        # IDX files carry an 8-byte (labels) / 16-byte (images) header,
        # skipped via the frombuffer offsets below.
        with ZipFile(label_zip, 'r') as lblzip:
            labels = np.frombuffer(lblzip.read(label_filename), dtype=np.uint8, offset=8)
        with ZipFile(image_zip, 'r') as imgzip:
            images = np.frombuffer(imgzip.read(image_filename), dtype=np.uint8,
                                   offset=16).reshape(len(labels), 784)
        return images, labels

    def create_batches(self):
        # Unimplemented in the original; kept as a stub for interface
        # compatibility.
        pass
| [
"nikhilagrawal741@gmail.com"
] | nikhilagrawal741@gmail.com |
8abec5eec6897caceff5720c533f4f4702b732a3 | 0e944edbe19033b2bb7356842cf4b66f8bbdb643 | /pretrain/train_mdnet.py | 0cdcdb4a02ac20c7ad39dc66b32fddc30bada208 | [] | no_license | zyxxmu/Fast-Vital | b9067fd894a0f2b8704a577c0db7c345202e0584 | 71f086c76fb72d49d6481c7ff2d56c9a672eda72 | refs/heads/master | 2022-03-25T21:27:42.297454 | 2019-12-24T07:43:10 | 2019-12-24T07:43:10 | 228,348,195 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | import os, sys
import pickle
import yaml
import time
import argparse
import numpy as np
import torch
sys.path.insert(0,'.')
from data_prov import RegionDataset
from modules.model import MDNet, set_optimizer, BCELoss, Precision
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
def train_mdnet(opts):
    """Pretrain MDNet's shared layers over K tracking sequences (domains).

    ``opts`` is a dict loaded from YAML; expected keys include
    'data_path', 'init_model_path', 'model_path', 'use_gpu', 'ft_layers',
    'lr', 'lr_mult', 'n_cycles' and optionally 'lr_decay', 'gamma',
    'batch_accum', 'grad_clip'.  Saves the shared-layer state dict to
    opts['model_path'] after every cycle.
    """
    # Init dataset
    with open(opts['data_path'], 'rb') as fp:
        data = pickle.load(fp)
    K = len(data)
    dataset = [None] * K
    for k, seq in enumerate(data.values()):
        #print(seq)
        dataset[k] = RegionDataset(seq['images'], seq['gt'], opts)
    # Init model
    model = MDNet(opts['init_model_path'], K)
    if opts['use_gpu']:
        model = model.cuda()
    model.set_learnable_params(opts['ft_layers'])
    # Init criterion and optimizer
    criterion = BCELoss()
    evaluator = Precision()
    optimizer = set_optimizer(model, opts['lr'], opts['lr_mult'])
    # Main training loop
    for i in range(opts['n_cycles']):
        print('==== Start Cycle {:d}/{:d} ===='.format(i + 1, opts['n_cycles']))
        if i in opts.get('lr_decay', []):
            print('decay learning rate')
            for param_group in optimizer.param_groups:
                param_group['lr'] *= opts.get('gamma', 0.1)
        # Training: visit the K domains in a fresh random order each cycle.
        model.train()
        prec = np.zeros(K)
        k_list = np.random.permutation(K)
        for j, k in enumerate(k_list):
            tic = time.time()
            # training
            # NOTE(review): RegionDataset exposes a `.next()` method
            # (Python 2 iterator style) -- confirm against data_prov.
            pos_regions, neg_regions = dataset[k].next()
            if opts['use_gpu']:
                pos_regions = pos_regions.cuda()
                neg_regions = neg_regions.cuda()
            pos_score = model(pos_regions, k)
            neg_score = model(neg_regions, k)
            loss = criterion(pos_score, neg_score)
            # Gradient accumulation: zero at the start of each window, step
            # at its end (or at the final domain of the cycle).
            batch_accum = opts.get('batch_accum', 1)
            if j % batch_accum == 0:
                model.zero_grad()
            loss.backward()
            if j % batch_accum == batch_accum - 1 or j == len(k_list) - 1:
                if 'grad_clip' in opts:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), opts['grad_clip'])
                optimizer.step()
            prec[k] = evaluator(pos_score, neg_score)
            toc = time.time()-tic
            print('Cycle {:2d}/{:2d}, Iter {:2d}/{:2d} (Domain {:2d}), Loss {:.3f}, Precision {:.3f}, Time {:.3f}'
                    .format(i, opts['n_cycles'], j, len(k_list), k, loss.item(), prec[k], toc))
        print('Mean Precision: {:.3f}'.format(prec.mean()))
        print('Save model to {:s}'.format(opts['model_path']))
        # Checkpoint on CPU so the saved tensors are device-independent.
        if opts['use_gpu']:
            model = model.cpu()
        states = {'shared_layers': model.layers.state_dict()}
        torch.save(states, opts['model_path'])
        if opts['use_gpu']:
            model = model.cuda()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset', default='imagenet', help='training dataset {vot, imagenet}')
    args = parser.parse_args()
    # Close the YAML config deterministically instead of leaking the handle
    # (the original passed a bare open() to yaml.safe_load).
    with open('pretrain/options_{}.yaml'.format(args.dataset), 'r') as f:
        opts = yaml.safe_load(f)
    train_mdnet(opts)
| [
"yxzhangxmu@163.com"
] | yxzhangxmu@163.com |
3707942092f8a2717e1e159fd36fc8769e28c5ee | 5d22d9b2cb5cad7970c1055aeef55d2e2a5acb8e | /py/topcoder/TCCC 2003 Semifinals 2/TicSolver.py | a7805ff035c64217729de5ff4c0bd9d4ebc789e0 | [
"MIT"
] | permissive | shhuan/algorithms | 36d70f1ab23dab881bf1a15573fbca7b2a3f4235 | 2830c7e2ada8dfd3dcdda7c06846116d4f944a27 | refs/heads/master | 2021-05-07T14:21:15.362588 | 2017-11-07T08:20:16 | 2017-11-07T08:20:16 | 109,799,698 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,968 | py | # -*- coding: utf-8 -*-
import math,string,itertools,fractions,heapq,collections,re,array,bisect
class TicSolver:
    """TopCoder problem shell (TCCC 2003 Semifinals 2, 500-point problem)."""
    def whoWins(self, board):
        """Stub: the real solution was never filled in; always returns ''."""
        return ""
# CUT begin
# TEST CODE FOR PYTHON {{{
import sys, time, math
def tc_equal(expected, received):
    """Compare a testcase answer against *expected* after coercing it to
    the expected value's type.

    Lists/tuples are compared element-wise (recursively), floats with a
    relative tolerance of 1e-9, everything else with ``==``.  Any failure
    to coerce or compare counts as a mismatch.
    """
    try:
        _t = type(expected)
        received = _t(received)
        if _t == list or _t == tuple:
            if len(expected) != len(received):
                return False
            return all(tc_equal(e, r) for (e, r) in zip(expected, received))
        elif _t == float:
            eps = 1e-9
            d = abs(received - expected)
            return not math.isnan(received) and not math.isnan(expected) and d <= eps * max(1.0, abs(expected))
        else:
            return expected == received
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit raised mid-comparison.
    except Exception:
        return False
def pretty_str(x):
    """Render a value for test output: strings are double-quoted, tuples
    are comma-joined recursively, everything else falls back to str()."""
    kind = type(x)
    if kind is str:
        return '"%s"' % x
    if kind is tuple:
        return '(%s)' % ','.join(pretty_str(item) for item in x)
    return str(x)
def do_test(board, __expected):
    """Run one testcase through TicSolver.whoWins and compare against
    __expected; prints PASSED/FAILED/RUNTIME ERROR and returns 1 on pass,
    0 otherwise."""
    startTime = time.time()
    instance = TicSolver()
    exception = None
    try:
        __result = instance.whoWins(board);
    except:
        import traceback
        exception = traceback.format_exc()
    elapsed = time.time() - startTime # in sec
    if exception is not None:
        sys.stdout.write("RUNTIME ERROR: \n")
        sys.stdout.write(exception + "\n")
        return 0
    if tc_equal(__expected, __result):
        sys.stdout.write("PASSED! " + ("(%.3f seconds)" % elapsed) + "\n")
        return 1
    else:
        sys.stdout.write("FAILED! " + ("(%.3f seconds)" % elapsed) + "\n")
        sys.stdout.write("           Expected: " + pretty_str(__expected) + "\n")
        sys.stdout.write("           Received: " + pretty_str(__result) + "\n")
        return 0
def run_tests():
    """Read test cases from TicSolver.sample, run them, and print a TopCoder-style score.

    CLI args (integers) select which zero-based case indices to SKIP.
    """
    sys.stdout.write("TicSolver (500 Points)\n\n")
    passed = cases = 0
    case_set = set()
    for arg in sys.argv[1:]:
        case_set.add(int(arg))
    with open("TicSolver.sample", "r") as f:
        while True:
            # Each case is preceded by a '--' separator line.
            label = f.readline()
            if not label.startswith("--"): break
            board = []
            for i in range(0, int(f.readline())):
                board.append(f.readline().rstrip())
            board = tuple(board)
            f.readline()
            __answer = f.readline().rstrip()
            cases += 1
            if len(case_set) > 0 and (cases - 1) in case_set: continue
            sys.stdout.write("  Testcase #%d ... " % (cases - 1))
            passed += do_test(board, __answer)
    sys.stdout.write("\nPassed : %d / %d cases\n" % (passed, cases))
    # Score formula uses elapsed wall-clock time since a hard-coded epoch
    # (the moment the problem was opened) — TopCoder scoring convention.
    T = time.time() - 1430750694
    PT, TT = (T / 60.0, 75.0)
    points = 500 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))
    sys.stdout.write("Time   : %d minutes %d secs\n" % (int(T/60), T%60))
    sys.stdout.write("Score  : %.2f points\n" % points)
if __name__ == '__main__':
run_tests()
# }}}
# CUT end
| [
"shuangquanhuang@gmail.com"
] | shuangquanhuang@gmail.com |
297e5849c3166455025dbc3088c25b4a4b337fd6 | 6b50a7e1aff1203f31189c997ee7e76e17236066 | /cifar10_alexnet.py | 28f1104b00c0134815dc4654a9157cb8824b6170 | [] | no_license | ZhangYi19941217/alexnet_vgg_cifar10 | 485f98aedafacd4c9f3d6da3473f02b84eedee44 | 673b216deca9dfa54031475275e3a0a170b59e29 | refs/heads/master | 2020-04-08T06:46:34.793043 | 2018-08-01T07:15:39 | 2018-08-01T07:15:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,925 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 50,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_float('keep_prob', 0.5,
"""Probability of the dropout.""")
tf.app.flags.DEFINE_string('data_dir', 'cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
    """Attach TensorBoard summaries (histogram + sparsity) to an activation tensor.

    Strips the 'tower_N/' prefix so multi-GPU runs share summary names.
    """
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(tensor_name + '/activations', x)
    tf.summary.scalar(tensor_name + '/sparsity',
                                       tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
    """Create (or reuse) a variable pinned to CPU memory.

    Dtype is float16 when FLAGS.use_fp16 is set, float32 otherwise.
    """
    with tf.device('/cpu:0'):
        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
    return var
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Create a truncated-normal-initialized variable with optional L2 weight decay.

    When `wd` is not None, wd * l2_loss(var) is added to the 'losses'
    collection so it is summed into the total loss.
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _variable_on_cpu(name,
                           shape,
                           tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def distorted_inputs():
    """Build the augmented (distorted) training input pipeline for CIFAR-10.

    Returns (images, labels) tensors, optionally cast to float16.
    Raises ValueError if FLAGS.data_dir is unset.
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                    batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
def inputs(eval_data):
    """Build the undistorted input pipeline (for evaluation or plain training).

    eval_data selects the test split when truthy. Returns (images, labels).
    Raises ValueError if FLAGS.data_dir is unset.
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.inputs(eval_data=eval_data,
                                          data_dir=data_dir,
                                          batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
def inference(images):
    """Build the AlexNet-style CIFAR-10 graph: 5 conv layers, 2 FC layers, output layer.

    Args:
        images: 4-D image batch tensor (presumably [batch, 32, 32, 3] per
            CIFAR-10 — TODO confirm against cifar10_input).
    Returns:
        Pre-softmax output tensor of shape [batch_size, NUM_CLASSES].
    """
    # conv1
    with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weight',
                                             shape=[5, 5, 3, 64],
                                             stddev=5e-2,
                                             wd=None)
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('bias', [64], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv1)
    # pool1
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME', name='pool1')
    # norm1 (local response normalization, as in AlexNet)
    norm1 = tf.nn.lrn(pool1, 4, bias=1, alpha=0.001/9.0,
                      beta=0.75, name='norm1')
    # conv2
    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weight',
                                             shape=[3, 3, 64, 96],
                                             stddev=5e-2,
                                             wd=None)
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [96], tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv2)
    # norm2
    norm2 = tf.nn.lrn(conv2, 4, bias=1, alpha=0.001/9.0,
                      beta=0.75, name='norm2')
    # pool2
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME', name='pool2')
    # conv3
    with tf.variable_scope('conv3') as scope:
        kernel = _variable_with_weight_decay('weight',
                                             shape=[3, 3, 96, 256],
                                             stddev=5e-2,
                                             wd=None)
        conv = tf.nn.conv2d(pool2, kernel, strides=[1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name='conv3')
        _activation_summary(conv3)
    # conv4
    with tf.variable_scope('conv4') as scope:
        kernel = _variable_with_weight_decay('weight',
                                             shape=[3, 3, 256, 256],
                                             stddev=5e-2,
                                             wd=None)
        conv = tf.nn.conv2d(conv3, kernel, strides=[1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.2))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(pre_activation, name='conv4')
        _activation_summary(conv4)
    # conv5
    with tf.variable_scope('conv5') as scope:
        kernel = _variable_with_weight_decay('weight',
                                             shape=[3, 3, 256, 120],
                                             stddev=5e-2,
                                             wd=None)
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('bias', [120], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv5)
    # pool5
    pool5 = tf.nn.max_pool(conv5, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME', name='pool5')
    # fc6 (first fully-connected layer, with dropout)
    keep_prob = FLAGS.keep_prob
    with tf.variable_scope('fc6') as scope:
        # 4*4*120 assumes the spatial size has been reduced to 4x4 by the
        # three 2x2 poolings — TODO confirm for the actual input resolution.
        kernel = _variable_with_weight_decay('weight',
                                             shape=[4*4*120, 256],
                                             stddev=5e-2,
                                             wd=None)
        pool5_flat = tf.reshape(pool5, [FLAGS.batch_size, -1])
        biases = _variable_on_cpu('bias', [256], tf.constant_initializer(0.0))
        # keep_prob = _variable_on_cpu('keep_prob', [1], tf.constant_initializer(0.0))
        fc6 = tf.nn.relu(tf.matmul(pool5_flat, kernel) + biases)
        fc6_drop = tf.nn.dropout(fc6, keep_prob)
        _activation_summary(fc6_drop)
    # fc7
    with tf.variable_scope('fc7') as scope:
        kernel = _variable_with_weight_decay('weight',
                                             shape=[256, 256],
                                             stddev=5e-2,
                                             wd=None)
        biases = _variable_on_cpu('bias', [256], tf.constant_initializer(0.0))
        # keep_prob = _variable_on_cpu('keep_prob', [1], tf.constant_initializer(0.0))
        fc7 = tf.nn.relu(tf.matmul(fc6_drop, kernel) + biases)
        fc7_drop = tf.nn.dropout(fc7, keep_prob)
        _activation_summary(fc7_drop)
    # soft_max (output layer; softmax itself is applied inside the loss)
    with tf.variable_scope('soft_max') as scope:
        kernel = _variable_with_weight_decay('weight',
                                             shape=[256, NUM_CLASSES],
                                             stddev=5e-2,
                                             wd=None)
        biases = _variable_on_cpu('bias', [NUM_CLASSES], tf.constant_initializer(0.0))
        # NOTE(review): applying ReLU to the final logits is unusual — it
        # zeroes all negative logits before the softmax cross-entropy.
        # Verify this is intentional.
        fc_out = tf.nn.relu(tf.matmul(fc7_drop, kernel) + biases)
        _activation_summary(fc_out)
    return fc_out
def loss(logits, labels):
    """Total loss: mean softmax cross-entropy plus all 'losses'-collection terms.

    Args:
        logits: output of inference(), [batch_size, NUM_CLASSES].
        labels: integer class labels, [batch_size].
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
    """Add scalar summaries (raw + exponential moving average) for all losses.

    Returns the op that updates the loss moving averages.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    for l in losses + [total_loss]:
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    return loss_averages_op
def train(total_loss, global_step):
    """Build the training op: staircase-decayed SGD plus variable moving averages.

    Args:
        total_loss: scalar loss tensor from loss().
        global_step: step counter variable, incremented by apply_gradients.
    Returns:
        The op that applies gradients and then updates the moving averages.
    """
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
    # Learning rate decays by LEARNING_RATE_DECAY_FACTOR every decay_steps.
    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
    tf.summary.scalar('learning_rate', lr)
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)
    # Compute gradients (only after the loss-average update has run).
    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)
    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)
    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    with tf.control_dependencies([apply_gradient_op]):
        variables_averages_op = variable_averages.apply(tf.trainable_variables())
    return variables_averages_op
def maybe_download_and_extract():
    """Download and extract the tarball from Alex's website.

    Idempotent: skips the download if the archive exists and skips
    extraction if the extracted directory already exists.
    """
    dest_directory = FLAGS.data_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Inline download progress indicator (percentage on one line).
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
    if not os.path.exists(extracted_dir_path):
        tarfile.open(filepath, 'r:gz').extractall(dest_directory)
| [
"1536419711@qq.com"
] | 1536419711@qq.com |
9b7ddeee42889b99669d49cb0f7ffd393e7285eb | 48f670ce1e7f6b0a868fcd1817c3b878ad2093e2 | /app/views.py | ce72a2f5a9773444a42d0d890a4206099202d64b | [] | no_license | cdutsov/tdcr_analysis-v2 | fa96047b2e6d9b37487f949e2dd7d7a0235f8414 | fad82dc4f7d3920b5abdb34971d6adba78d0e40b | refs/heads/master | 2021-01-19T07:31:29.456038 | 2018-03-08T07:52:05 | 2018-03-08T07:52:05 | 87,550,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,542 | py | import os
import pickle
import re
from datetime import datetime
from subprocess import call, check_output
from flask_login import login_required
from flask_login import login_user
from flask_login import logout_user, current_user
from sqlalchemy import and_, engine
from app import app, db, login_manager
from flask import g
from flask import make_response, jsonify
from flask import render_template, request, redirect, url_for
from .db_management import extract_bundle, add_columns, write_csv, import_files, get_or_create, check_warnings, \
delete_series
from .forms import UploadForm, ExportForm, LoginForm, DeleteForm
from .models import Measurement, Cocktail, User
from collections import OrderedDict
from .config import basedir
ALLOWED_EXTENSIONS = {'tdc'}
def allowed_file(filename):
    """Return True if the filename has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
@login_required
def index():
    """Main page: shows the import (upload) and export forms.

    Query args 'session_committed'/'commit_canceled' toggle status banners.
    """
    form = UploadForm()
    g.user = current_user
    export_form = ExportForm().new(user=current_user.username)
    # Flags arrive as optional query-string args; absent -> False.
    try:
        session_committed = request.args['session_committed']
    except:
        session_committed = False
    try:
        commit_canceled = request.args['commit_canceled']
    except:
        commit_canceled = False
    if export_form.validate_on_submit():
        return redirect(url_for('export'))
    return render_template('index.html',
                           import_form=form,
                           export_form=export_form,
                           session_committed=session_committed,
                           commit_canceled=commit_canceled)
login_manager.login_view = "login"
@login_manager.user_loader
def load_user(userid):
    """Flask-Login callback: load a User by primary key (None if not found)."""
    return User.query.filter(User.id == userid).first()
@app.before_request
def before_request():
    """Expose the current user as g.user for every request."""
    g.user = current_user
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in; already-authenticated users are sent to the index.

    A wrong password redirects back to /login; an unknown username 404s
    (first_or_404).
    """
    if g.user is not None and g.user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first_or_404()
        if user.is_correct_password(form.password.data):
            login_user(user)
            return redirect(url_for('index'))
        else:
            return redirect(url_for('login'))
    return render_template('login.html', form=form)
@app.route('/logout')
def signout():
    """Log the current user out and return to the index page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/commit-session')
def commit():
    """Persist the pickled upload session (.tmp_pickle) into the database.

    Each pickled measurement dict has its 'uploader'/'cocktail' string fields
    resolved to ORM instances before insertion. The pickle file is deleted
    up-front, so a mid-loop failure loses the staged session.
    """
    upload_folder = app.config['UPLOAD_FOLDER']
    path = upload_folder + current_user.username + "/.tmp_pickle"
    if os.path.isfile(path=path):
        with open(path, "rb") as f:
            all_meas = pickle.load(f)
        os.remove(path)
        for meas in all_meas:
            uploader = get_or_create(db.session, User, username=meas["uploader"])
            # Cocktail names are sanitized to alphanumerics and spaces.
            cocktail_instance = get_or_create(db.session, Cocktail,
                                              cocktail_name=re.sub('[^A-Za-z0-9 ]+', '', meas["cocktail"]),
                                              cocktail_uploader=uploader)
            meas['cocktail'] = cocktail_instance
            meas['uploader'] = uploader
            db.session.add(Measurement(**meas))
        db.session.commit()
        g.session_committed = True
        return redirect(url_for("index", session_committed=True))
    else:
        return redirect(url_for("index", session_not_committed=True))
@app.route('/_clear_pickle')
def clear():
    """Cancel the staged upload session (redirect with the cancel banner).

    NOTE(review): despite its name/route, this does not delete the
    .tmp_pickle file — confirm whether cleanup happens elsewhere.
    """
    return redirect(url_for("index", commit_canceled=True))
@app.route('/uploadajax', methods=['GET', 'POST'])
def session():
    """AJAX upload endpoint: parse uploaded .tdc files and return a preview table.

    Files are staged via import_files(); the parsed metadata and count-rate
    columns are rendered into upload_table.html (with per-row warnings) and
    returned as JSON.
    """
    files = request.files.getlist("files")
    series_name = request.form['series_name_field']
    results = import_files(user_folder=current_user.username, files=files, series_name=series_name)
    list_of_dicts = []
    warnings_list = []
    d = OrderedDict()
    for result in results:
        try:
            d = OrderedDict(
                [('File name', result["filename"]),
                 ('Run number', result["run_number"]),
                 ('Start time', result["datetime"]),
                 ('Real time', result["preset_time"]),
                 ('Series name', result["series_name"]),
                 ('Radionuclide', result["radionuclide"]),
                 ('LS cocktail', result["cocktail"]),
                 ('Coincidence window N', result["coinc_window_n"]),
                 ('Coincidence window M', result["coinc_window_m"]),
                 ('EXT DT 1', result["ext_dt1"]),
                 ('EXT DT 2', result["ext_dt2"])])
        except ValueError:
            # Row keeps the previous dict `d` when the file format is unknown.
            print('New file format error!')
        d.update(extract_bundle(result["cps_bundle"], fields=['N1', 'N2', 'M1', 'M2']))
        warnings_list.append(check_warnings(user=current_user, d=d))
        list_of_dicts.append(d)
    warnings = add_columns(warnings_list)
    for key, values in warnings.items():
        warnings[key] = set(values)
    return jsonify({'template': render_template('upload_table.html', table=list_of_dicts, warnings=warnings)})
@app.route("/export", methods=['GET', 'POST'])
def export():
show_raw_cps = False
series_name = request.form['series_name']
coinc_window_n = request.form['coinc_window_n']
coinc_window_m = request.form['coinc_window_m']
ext_dt1 = request.form['ext_dt1']
ext_dt2 = request.form['ext_dt2']
radionuclide = request.form['radionuclide']
results = db.session.query(Measurement).join(User).filter(
and_(Measurement.series_name == series_name if series_name else True,
Measurement.radionuclide == radionuclide if radionuclide else True,
Measurement.coinc_window_n == coinc_window_n if coinc_window_n and not coinc_window_n == '0' else True,
Measurement.coinc_window_m == coinc_window_m if coinc_window_m and not coinc_window_m == '0' else True,
Measurement.ext_dt1 == ext_dt1 if ext_dt1 and not ext_dt1 == '0' else True,
Measurement.ext_dt2 == ext_dt2 if ext_dt2 and not ext_dt2 == '0' else True
)).filter(User.username == g.user.username).all()
l = []
for result in results:
d = OrderedDict(
[('File name', result.filename),
('Run number', result.run_number),
('Start time', result.datetime),
('Real time', result.preset_time),
('Series name', result.series_name),
('Radionuclide', result.radionuclide),
('LS cocktail', result.cocktail.cocktail_name),
('Coincidence window N' if not coinc_window_n == '0' else False, result.coinc_window_n),
('Coincidence window M' if not coinc_window_m == '0' else False, result.coinc_window_m),
('EXT DT 1' if not ext_dt1 == '0' else False, result.ext_dt1),
('EXT DT 2' if not ext_dt2 == '0' else False, result.ext_dt2)])
fields = ['Raw' if show_raw_cps else '',
('N1' if not coinc_window_n == '0' else '') if not ext_dt1 == '0' else '',
('N2' if not coinc_window_n == '0' else '') if not ext_dt2 == '0' else '',
('M1' if not coinc_window_m == '0' else '') if not ext_dt1 == '0' else '',
('M2' if not coinc_window_m == '0' else '') if not ext_dt2 == '0' else ''
]
d.update(extract_bundle(result.cps_bundle, fields=fields))
l.append(d)
rows = add_columns(l)
filename = app.config['UPLOAD_FOLDER'] + current_user.username + "/Exports/Export-" \
+ datetime.now().strftime("%a_%d_%b_%Y_%H:%M:%S") + ".csv "
write_csv(filename=filename, d=rows)
response = make_response(open(filename, 'r').read())
cd = 'attachment; filename=' + os.path.basename(filename)
response.headers['Content-Disposition'] = cd
response.mimetype = 'text/csv'
return response
@app.route("/_series_exists")
def series_exists():
series_name = request.args.get('series_name', '', type=str)
results = db.session.query(Measurement).join(User).filter(Measurement.series_name == series_name) \
.filter(User.username == current_user.username).all()
return jsonify(response=len(results))
@app.route("/_export_form_data")
def form_data():
coinc_window_n_vals, coinc_window_m_vals, radionuclide_vals, series_name_vals, ext_dt1_vals, ext_dt2_vals = \
'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined'
series_name = request.args.get('series_name', '', type=str)
radionuclide = request.args.get('radionuclide', '', type=str)
coinc_window_n = request.args.get('coinc_window_n', '', type=int)
coinc_window_m = request.args.get('coinc_window_m', '', type=int)
ext_dt1 = request.args.get('ext_dt1', '', type=float)
ext_dt2 = request.args.get('ext_dt2', '', type=float)
results = db.session.query(Measurement).join(User).filter(
and_((Measurement.series_name == series_name if series_name else True),
Measurement.radionuclide == radionuclide if radionuclide else True,
Measurement.coinc_window_n == coinc_window_n if coinc_window_n else True,
Measurement.coinc_window_m == coinc_window_m if coinc_window_m else True,
Measurement.ext_dt1 == ext_dt1 if ext_dt1 else True,
Measurement.ext_dt2 == ext_dt2 if ext_dt2 else True,
)).filter(User.username == g.user.username).all()
number_results = len(results)
if not coinc_window_n:
coinc_window_n_vals = sorted(set([result.coinc_window_n for result in results]))
if not coinc_window_m:
coinc_window_m_vals = sorted(set([result.coinc_window_m for result in results]))
if not ext_dt1:
ext_dt1_vals = sorted(set([result.ext_dt1 for result in results]))
if not ext_dt2:
ext_dt2_vals = sorted(set([result.ext_dt2 for result in results]))
if not series_name:
series_name_vals = sorted(set([result.series_name for result in results]), reverse=True)
if not radionuclide:
radionuclide_vals = sorted(set([result.radionuclide for result in results]))
return jsonify(coinc_window_n_vals=coinc_window_n_vals,
coinc_window_m_vals=coinc_window_m_vals,
series_name_vals=series_name_vals,
radionuclide_vals=radionuclide_vals,
ext_dt1_vals=ext_dt1_vals,
ext_dt2_vals=ext_dt2_vals,
number_results=number_results)
@app.route('/about')
def about():
    """About page: show last code-modification date and host/Apache uptime."""
    date_modified = datetime.fromtimestamp(os.path.getmtime(basedir + "/db_management.py")).strftime('%d %B %Y')
    uptime = check_output(['uptime', '-p']).decode('utf-8')
    try:
        # NOTE(review): if no line contains 'Active', apache_uptime is never
        # assigned and the render below raises UnboundLocalError — verify.
        for line in check_output(['service', 'apache2', 'status']).decode('utf-8').split('\n'):
            if 'Active' in line:
                apache_uptime = line.split(';')[1]
    except FileNotFoundError:
        apache_uptime = 'Service not started'
    return render_template('about.html', modified=date_modified, uptime=uptime, apache_uptime=apache_uptime)
@app.route('/change_db', methods=['GET', 'POST'])
def change():
    """Render the database-maintenance page with the series-deletion form."""
    delete_form = DeleteForm.new(user=current_user.username)
    return render_template('change.html', delete_form=delete_form)
@app.route('/_delete_series')
def delete():
    """AJAX helper: delete all of the current user's measurements in a series.

    The JSON payload ('kur') is a placeholder acknowledgement.
    """
    series_name = request.args.get('series_name', '', type=str)
    delete_series(user=current_user, series_name=series_name)
    return jsonify('kur')
| [
"chavcho93@gmail.com"
] | chavcho93@gmail.com |
cb2de01dabc6e614b7ec50462818de83e904f403 | ea6c2a0029b261fb1438ffc4089cd92b2ad63619 | /censor.py | 1441a55d59695f91165029d7a60aa247775d0917 | [] | no_license | mmcelroy75/censordispenser | f99fae7b79d7363f8e7c72fd48988d0370fcda18 | ab89b5b24ccf334d6f4d6f03b8f8bf3a924ffdb5 | refs/heads/master | 2022-04-20T06:39:41.894166 | 2020-04-17T04:30:14 | 2020-04-17T04:30:14 | 255,747,074 | 0 | 0 | null | 2020-04-17T04:30:16 | 2020-04-14T22:59:03 | Python | UTF-8 | Python | false | false | 1,599 | py | # These are the emails you will be censoring. The open() function is opening the text file that the emails are contained in and the .read() method is allowing us to save their contexts to the following variables:
email_one = open("email_one.txt", "r").read()
email_two = open("email_two.txt", "r").read()
email_three = open("email_three.txt", "r").read()
email_four = open("email_four.txt", "r").read()
#print(email_one)
#def censor_text(text, phrase):
# new_text = text.replace(phrase, "******")
# return new_text
#print(censor_text(email_one, "learning algorithms"))
#print(email_two)
proprietary_terms = ["she", "personality matrix", "sense of self", "self-preservation", "learning algorithm", "her", "herself"]
proprietary_terms += [term.capitalize() for term in proprietary_terms]
negative_words = ["concerned", "behind", "danger", "dangerous", "alarming", "alarmed", "out of control", "help", "unhappy", "bad", "upset", "awful", "broken", "damage", "damaging", "dismal", "distressed", "distressed", "concerning", "horrible", "horribly", "questionable"]
negative_words += [word.capitalize() for word in negative_words]
def censor_two(input_text, censored_list):
    """Censor every occurrence of each listed phrase in the text.

    Each non-space character of a phrase is replaced with 'X'; spaces within
    multi-word phrases are preserved. Matching is plain substring replacement
    (case-sensitive, no word boundaries).
    """
    for phrase in censored_list:
        mask = "".join(" " if character == " " else "X" for character in phrase)
        input_text = input_text.replace(phrase, mask)
    return input_text
#print(censor_two(email_two, proprietary_terms))
def censor_three(input_text1, censored_list1, censored_list2):
    """Censor every phrase from both lists in the given text.

    Each non-space character of a matched phrase becomes 'X' (spaces inside
    multi-word phrases are kept), mirroring censor_two's behavior but over
    two term lists.

    Fixes the original, which was a syntax error: the def line lacked a
    colon and the function had no body.
    """
    for phrase in censored_list1 + censored_list2:
        mask = "".join(" " if character == " " else "X" for character in phrase)
        input_text1 = input_text1.replace(phrase, mask)
    return input_text1
#print(email_three)
"Mark@Excerceo-Novus.local"
] | Mark@Excerceo-Novus.local |
b60e15383bddc7ea3a0cfc31e4fa61c1a89fc56b | 3761ee7c8bf2217804a8127cb93087c506f10743 | /Ex7.2-Adaboost识别手写体/svm-test.py | 1677d1020afe958de558728541f668fefbba26e9 | [] | no_license | zfg88287508/CV-code | 599918ba4340363d1d8462cbf031a8f903bb7e48 | d61b01ece58588e838cdcb5730700c6949b7ab01 | refs/heads/master | 2023-05-13T15:53:43.014302 | 2019-11-06T17:02:41 | 2019-11-06T17:02:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | """
svm 测试例子
"""
import numpy as np
import pandas as pd
from sklearn import svm
import matplotlib.colors
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from time import time
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
trainData = pd.read_csv("../dataSet/mnist_train.csv").values
train_data = trainData[0:10000, 1:]
train_label = trainData[0:10000, 0]
testData = pd.read_csv("../dataSet/mnist_test.csv").values
test_data = testData[0:1000, 1:]
test_label = testData[0:1000, 0]
print ('SVC test accuracy:0.85122442689')
"""
model = svm.SVC(C=10.0, kernel='rbf', gamma=0.1)#设置模型参数
tt1 = time()
model.fit(train_data, train_label)#训练模型
tt2 = time()
delta_tt = tt2 - tt1
print ('SVMxun time:%dmin%.3fsec' % ((int)(delta_tt / 60), delta_tt - 60*((int)(delta_tt/60))))
y_hat = model.predict(test_data)#做预测
print ('SVC test accuracy:', accuracy_score(test_label, y_hat))
""" | [
"576261090@qq.com"
] | 576261090@qq.com |
23d602305de31d96583e88e385bc79e6420cdb96 | 52950b2783a7aebf23689c9c5397cf381d0dde7d | /oss/deepm2/pathways.py | a2cd8fc4f713105042427c37207672397c0dd7e2 | [] | no_license | zhouyong64/academiccoder | 9b4a8f9555b99dc364a0c0e4157faa582b542e90 | 5415a43889a18795fb98960ff7700dbcdd5138df | refs/heads/master | 2020-05-17T11:46:15.143345 | 2017-12-05T06:57:14 | 2017-12-05T06:57:14 | 29,723,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,959 | py | # Copyright (c) 2016, Konstantinos Kamnitsas
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the BSD license. See the accompanying LICENSE file
# or read the terms at https://opensource.org/licenses/BSD-3-Clause.
import numpy
import numpy as np
import copy
from math import ceil
import theano.tensor as T
from deepmedic.pathwayTypes import PathwayTypes
from deepmedic.cnnLayerTypes import ConvLayer, LowRankConvLayer
from deepmedic.cnnHelpers import calcReceptiveFieldDims
#################################################################
# Pathway Types #
#################################################################
def cropRczOf5DimArrayToMatchOther(array5DimToCrop, dimensionsOf5DimArrayToMatchInRcz):
    """Crop the trailing R, C, Z axes of a 5D array down to the target shape.

    dimensionsOf5DimArrayToMatchInRcz: [batch size, num of fms, r, c, z];
    only the last three entries are used. Batch and FM axes pass through.
    """
    targetR = dimensionsOf5DimArrayToMatchInRcz[2]
    targetC = dimensionsOf5DimArrayToMatchInRcz[3]
    targetZ = dimensionsOf5DimArrayToMatchInRcz[4]
    return array5DimToCrop[:, :, :targetR, :targetC, :targetZ]
def repeatRcz5DimArrayByFactor(array5Dim, factor3Dim):
    """Upsample the R, C, Z axes of a 5D array by repeating each element.

    array5Dim: [batch size, num of FMs, r, c, z], a la conv layer input/output.
    factor3Dim: per-axis repeat factors for (r, c, z).
    """
    upsampled = array5Dim
    for axis, factor in zip((2, 3, 4), factor3Dim):
        upsampled = upsampled.repeat(factor, axis=axis)
    return upsampled
def upsampleRcz5DimArrayAndOptionalCrop(array5dimToUpsample,
                                        upsamplingFactor,
                                        upsamplingScheme="repeat",
                                        dimensionsOf5DimArrayToMatchInRcz=None) :
    # Upsample the RCZ axes of a 5D tensor by voxel repetition, then
    # optionally crop to match a target shape. Python 2 code (uses the
    # `<>` operator and the print statement).
    # array5dimToUpsample : [batch_size, numberOfFms, r, c, z].
    if upsamplingScheme == "repeat" :
        upsampledOutput = repeatRcz5DimArrayByFactor(array5dimToUpsample, upsamplingFactor)
    else :
        # Only "repeat" upsampling is implemented; anything else aborts.
        print "NOT IMPLEMENTED! EXITING!"; exit(1)
    if dimensionsOf5DimArrayToMatchInRcz <> None :
        # If the central-voxels are eg 10, the susampled-part will have 4 central voxels. \
        #Which above will be repeated to 3*4 = 12.
        # I need to clip the last ones, to have the same dimension as the input from 1st pathway, \
        #which will have dimensions equal to the centrally predicted voxels (10)
        output = cropRczOf5DimArrayToMatchOther(upsampledOutput, dimensionsOf5DimArrayToMatchInRcz)
    else :
        output = upsampledOutput
    return output
def getMiddlePartOfFms(fms, listOfNumberOfCentralVoxelsToGetPerDimension) :
    """Extract the central sub-volume of the spatial axes of a feature-map tensor.

    fms: a 5D (symbolic) tensor, [batch, fms, r, c, z].
    listOfNumberOfCentralVoxelsToGetPerDimension: how many central voxels to
    keep per spatial dimension (2 or 3 entries).
    Returns the sliced tensor, or -1 if the dimension count is unsupported.
    NOTE: relies on Python 2 integer division in the `/ 2` expressions.
    """
    fmsShape = T.shape(fms) #fms.shape works too, but this is clearer theano grammar.
    # if part is of even width, one voxel to the left is the centre.
    rCentreOfPartIndex = (fmsShape[2] - 1) / 2
    rIndexToStartGettingCentralVoxels = rCentreOfPartIndex - \
                                        (listOfNumberOfCentralVoxelsToGetPerDimension[0] - 1) / 2
    rIndexToStopGettingCentralVoxels = rIndexToStartGettingCentralVoxels + \
                                       listOfNumberOfCentralVoxelsToGetPerDimension[0] # Excluding
    cCentreOfPartIndex = (fmsShape[3] - 1) / 2
    cIndexToStartGettingCentralVoxels = cCentreOfPartIndex - (listOfNumberOfCentralVoxelsToGetPerDimension[1] - 1) / 2
    cIndexToStopGettingCentralVoxels = cIndexToStartGettingCentralVoxels + \
                                       listOfNumberOfCentralVoxelsToGetPerDimension[1] # Excluding
    if len(listOfNumberOfCentralVoxelsToGetPerDimension) == 2: # the input FMs are of 2 dimensions (for future use)
        return fms[ :, :,
                    rIndexToStartGettingCentralVoxels : rIndexToStopGettingCentralVoxels,
                    cIndexToStartGettingCentralVoxels : cIndexToStopGettingCentralVoxels]
    elif len(listOfNumberOfCentralVoxelsToGetPerDimension) == 3 : # the input FMs are of 3 dimensions
        zCentreOfPartIndex = (fmsShape[4] - 1) / 2
        zIndexToStartGettingCentralVoxels = zCentreOfPartIndex - (listOfNumberOfCentralVoxelsToGetPerDimension[2] - 1) / 2
        zIndexToStopGettingCentralVoxels = zIndexToStartGettingCentralVoxels + \
                                           listOfNumberOfCentralVoxelsToGetPerDimension[2] # Excluding
        return fms[ :, :,
                    rIndexToStartGettingCentralVoxels : rIndexToStopGettingCentralVoxels,
                    cIndexToStartGettingCentralVoxels : cIndexToStopGettingCentralVoxels,
                    zIndexToStartGettingCentralVoxels : zIndexToStopGettingCentralVoxels]
    else : # wrong number of dimensions!
        return -1
def makeResidualConnectionBetweenLayersAndReturnOutput(myLogger,
                                                       deeperLayerOutputImagesTrValTest,
                                                       deeperLayerOutputImageShapesTrValTest,
                                                       earlierLayerOutputImagesTrValTest,
                                                       earlierLayerOutputImageShapesTrValTest):
    """Add the outputs of two layers (residual connection) and return the sums.

    Each *TrValTest argument is a (train, val, test) triple; shapes follow the
    [batchSize, FMs, r, c, z] convention. The result has exactly the same shape
    as the output of the deeper layer — both #FMs and spatial dimensions.
    """
    # The deeper FMs can be spatially greater only when there is upsampling.
    # To do residuals then, the earlier FMs would need upsampling: not implemented.
    datasetNames = ["train", "val", "test"]
    for (deeperShape, earlierShape) in zip(deeperLayerOutputImageShapesTrValTest,
                                           earlierLayerOutputImageShapesTrValTest):
        if np.any(np.asarray(deeperShape[2:]) > np.asarray(earlierShape[2:])):
            myLogger.print3("ERROR: In function [makeResidualConnectionBetweenLayersAndReturnOutput] the RCZ-dimensions of a deeper layer FMs were found greater than the earlier layers. Not implemented functionality. Exiting!")
            for (datasetName, dShape, eShape) in zip(datasetNames,
                                                     deeperLayerOutputImageShapesTrValTest,
                                                     earlierLayerOutputImageShapesTrValTest):
                myLogger.print3("\t (" + datasetName + ") Dimensions of Deeper Layer=" + str(dShape) +
                                ". Dimensions of Earlier Layer=" + str(eShape))
            exit(1)

    numFMsDeeper = deeperLayerOutputImageShapesTrValTest[0][1]
    numFMsEarlier = earlierLayerOutputImageShapesTrValTest[0][1]
    outputsOfResConn = []
    for (deeperOutput, deeperShape, earlierOutput) in zip(deeperLayerOutputImagesTrValTest,
                                                          deeperLayerOutputImageShapesTrValTest,
                                                          earlierLayerOutputImagesTrValTest):
        # Crop the earlier layer's FMs down to the deeper layer's spatial dims.
        croppedEarlierFms = getMiddlePartOfFms(earlierOutput, deeperShape[2:])
        if numFMsDeeper >= numFMsEarlier:
            # Deeper layer has at least as many FMs: add the earlier FMs onto its
            # first numFMsEarlier channels (implicit zero-padding of the rest).
            outputsOfResConn.append(T.inc_subtensor(deeperOutput[:, :numFMsEarlier, :, :, :],
                                                    croppedEarlierFms, inplace=False))
        else:
            # Deeper FMs are fewer than earlier. Should not happen in most
            # architectures: add only the first numFMsDeeper earlier channels.
            outputsOfResConn.append(deeperOutput + croppedEarlierFms[:, :numFMsDeeper, :, :, :])
    # Dimensions of output are the same as those of the deeper layer.
    return (outputsOfResConn[0], outputsOfResConn[1], outputsOfResConn[2])
#################################################################
# Classes of Pathways #
#################################################################
class Pathway(object):
    """Virtual base class for a CNN pathway.

    A pathway is an ordered stack of convolutional layers that maps symbolic
    input tensors (kept separately for train / validation / test) to output
    feature maps. Concrete subclasses (NormalPathway, SubsampledPathway,
    FcPathway) set self._pType and implement getStringType().
    """
    # This is a virtual class.
    def __init__(self, pName=None):
        self._pName = pName  # Optional human-readable name of the pathway.
        self._pType = None  # Pathway Type. Set by subclasses (a PathwayTypes value).
        # === Input to the pathway ===
        # Symbolic input tensors and their shapes, one per dataset.
        self._inputTrain = None
        self._inputVal = None
        self._inputTest = None
        self._inputShapeTrain = None
        self._inputShapeVal = None
        self._inputShapeTest = None
        # === Basic architecture parameters ===
        self._layersInPathway = []  # Conv layer instances, in forward order.
        self._subsFactor = [1, 1, 1]  # Subsampling factor per spatial axis (r, c, z).
        self._recField = None  # Receptive field at the end of pathway.
        # === Output of the block ===
        self._outputTrain = None
        self._outputVal = None
        self._outputTest = None
        self._outputShapeTrain = None
        self._outputShapeVal = None
        self._outputShapeTest = None

    # Joe: renamed from 'makeLayersOfThisPathwayAndReturnDimensionsOfOutputFM'
    def makeLayersAndReturnDimsOfOutputFM(self,
                                          myLogger,
                                          inputTrain,
                                          inputVal,
                                          inputTest,
                                          inputDimsTrain,
                                          inputDimsVal,
                                          inputDimsTest,
                                          numKernsPerLayer,
                                          kernelDimsPerLayer,
                                          initializationTechniqueClassic0orDelvingInto1,
                                          useBnPerLayer,  # As a flag for case that I want to apply
                                          # BN on input image. I want to apply to input of FC.
                                          rollingAverageForBNOverThatManyBatches,
                                          activFuncPerLayer,
                                          dropoutRatesPerLayer=[],
                                          poolingParamsStructureForThisPathwayType=[],
                                          indicesOfLowerRankLayersForPathway=[],
                                          ranksOfLowerRankLayersForPathway=[],
                                          indicesOfLayersToConnectResidualsInOutputForPathway=[]):
        """Build every conv layer of this pathway and wire them together.

        Sets the pathway's input/output attributes as a side effect. Residual
        connections are made at the layers listed in
        indicesOfLayersToConnectResidualsInOutputForPathway, adding the input
        of the previous layer onto the current layer's output.
        NOTE(review): the mutable default arguments ([]) are only read, never
        mutated here, so the shared-default pitfall does not bite — verify
        this stays true if the method is modified.
        """
        rng = numpy.random.RandomState(55789)  # Fixed seed: deterministic weight init.
        myLogger.print3("[Pathway_" + str(self.getStringType()) + "] is being built...")
        self._recField = self.calcRecFieldOfPathway(kernelDimsPerLayer)
        self._setInputAttributes(inputTrain, inputVal, inputTest, inputDimsTrain, inputDimsVal, inputDimsTest)
        myLogger.print3("\t[Pathway_" + str(self.getStringType()) + "]: Input's Shape: (Train) " + str(self._inputShapeTrain) +
                        ", (Val) " + str(self._inputShapeVal) + ", (Test) " + str(self._inputShapeTest))
        inputToNextLayerTrain = self._inputTrain; inputToNextLayerVal = self._inputVal;
        inputToNextLayerTest = self._inputTest
        inputToNextLayerShapeTrain = self._inputShapeTrain; inputToNextLayerShapeVal = self._inputShapeVal;
        inputToNextLayerShapeTest = self._inputShapeTest
        numOfLayers = len(numKernsPerLayer)
        for layer_i in xrange(0, numOfLayers):
            # Filter shape: [#kernels, #input channels] + spatial kernel dims.
            thisLayerFilterShape = [numKernsPerLayer[layer_i], inputToNextLayerShapeTrain[1]] + kernelDimsPerLayer[layer_i]
            thisLayerUseBn = useBnPerLayer[layer_i]
            # 0 relu, 1 prelu, -1 linear (no activ, for 1st layer over raw input).
            thisLayerActivFunc = activFuncPerLayer[layer_i]
            thisLayerDropoutRate = dropoutRatesPerLayer[layer_i] if dropoutRatesPerLayer else 0
            thisLayerPoolingParameters = poolingParamsStructureForThisPathwayType[layer_i]
            myLogger.print3("\t[Conv.Layer_" + str(layer_i) + "], Filter Shape: " + str(thisLayerFilterShape))
            myLogger.print3("\t[Conv.Layer_" + str(layer_i) + "], Input's Shape: (Train) " + str(inputToNextLayerShapeTrain) +
                            ", (Val) " + str(inputToNextLayerShapeVal) + ", (Test) " + str(inputToNextLayerShapeTest))
            if layer_i in indicesOfLowerRankLayersForPathway:
                # Low-rank layer: its rank is looked up by position in the indices list.
                layer = LowRankConvLayer(ranksOfLowerRankLayersForPathway[indicesOfLowerRankLayersForPathway.index(layer_i)])
            else:  # normal conv layer
                layer = ConvLayer()
            layer.makeLayer(rng,
                            inputToLayerTrain=inputToNextLayerTrain,
                            inputToLayerVal=inputToNextLayerVal,
                            inputToLayerTest=inputToNextLayerTest,
                            inputToLayerShapeTrain=inputToNextLayerShapeTrain,
                            inputToLayerShapeVal=inputToNextLayerShapeVal,
                            inputToLayerShapeTest=inputToNextLayerShapeTest,
                            filterShape=thisLayerFilterShape,
                            poolingParameters=thisLayerPoolingParameters,
                            initializationTechniqueClassic0orDelvingInto1=initializationTechniqueClassic0orDelvingInto1,
                            useBnFlag=thisLayerUseBn,
                            rollingAverageForBNOverThatManyBatches=rollingAverageForBNOverThatManyBatches,
                            activationFunctionToUseRelu0orPrelu1orMinus1ForLinear=thisLayerActivFunc,
                            dropoutRate=thisLayerDropoutRate)
            self._layersInPathway.append(layer)
            if layer_i not in indicesOfLayersToConnectResidualsInOutputForPathway:  # not a residual connecting here
                inputToNextLayerTrain = layer.outputTrain
                inputToNextLayerVal = layer.outputVal
                inputToNextLayerTest = layer.outputTest
            else:  # make residual connection
                myLogger.print3("\t[Pathway_" + str(self.getStringType()) +
                                "]: making Residual Connection between output of [Layer_" + str(layer_i) +
                                "] to input of previous layer.")
                deeperLayerOutputImagesTrValTest = (layer.outputTrain, layer.outputVal, layer.outputTest)
                deeperLayerOutputImageShapesTrValTest = (layer.outputShapeTrain, layer.outputShapeVal, layer.outputShapeTest)
                # The very first layer (index 0) should never be provided for now,
                # because the connection reaches 2 layers back (previous layer's input).
                assert layer_i > 0
                earlierLayer = self._layersInPathway[layer_i - 1]
                earlierLayerOutputImagesTrValTest = (earlierLayer.inputTrain, earlierLayer.inputVal,
                                                     earlierLayer.inputTest)
                earlierLayerOutputImageShapesTrValTest = (earlierLayer.inputShapeTrain, earlierLayer.inputShapeVal,
                                                          earlierLayer.inputShapeTest)
                (inputToNextLayerTrain,
                 inputToNextLayerVal,
                 inputToNextLayerTest) = makeResidualConnectionBetweenLayersAndReturnOutput(myLogger,
                                                                                            deeperLayerOutputImagesTrValTest,
                                                                                            deeperLayerOutputImageShapesTrValTest,
                                                                                            earlierLayerOutputImagesTrValTest,
                                                                                            earlierLayerOutputImageShapesTrValTest)
                layer.outputAfterResidualConnIfAnyAtOutpTrain = inputToNextLayerTrain
                layer.outputAfterResidualConnIfAnyAtOutpVal = inputToNextLayerVal
                layer.outputAfterResidualConnIfAnyAtOutpTest = inputToNextLayerTest
            # Residual connections preserve both the number of FMs and the dimensions
            # of the FMs, the same as in the later, deeper layer.
            inputToNextLayerShapeTrain = layer.outputShapeTrain
            inputToNextLayerShapeVal = layer.outputShapeVal
            inputToNextLayerShapeTest = layer.outputShapeTest
        self._setOutputAttributes(inputToNextLayerTrain, inputToNextLayerVal, inputToNextLayerTest,
                                  inputToNextLayerShapeTrain, inputToNextLayerShapeVal, inputToNextLayerShapeTest)
        myLogger.print3("\t[Pathway_" + str(self.getStringType()) + "]: Output's Shape: (Train) " + str(self._outputShapeTrain) +
                        ", (Val) " + str(self._outputShapeVal) + ", (Test) " + str(self._outputShapeTest))
        myLogger.print3("[Pathway_" + str(self.getStringType()) + "] done.")

    # Skip connections to end of pathway.
    def makeMultiscaleConnectionsForLayerType(self, convLayersToConnectToFirstFcForMultiscaleFromThisLayerType):
        """Concatenate the (cropped) outputs of the given intermediate conv
        layers onto this pathway's output, along the FM/channel axis."""
        layersInThisPathway = self.getLayers()
        [outputOfPathwayTrain, outputOfPathwayVal, outputOfPathwayTest] = self.getOutput()
        [outputShapeTrain, outputShapeVal, outputShapeTest] = self.getShapeOfOutput()
        # Only the central voxels matching the pathway output's spatial dims are kept.
        numOfCentralVoxelsToGetTrain = outputShapeTrain[2:]; numOfCentralVoxelsToGetVal = outputShapeVal[2:];
        numOfCentralVoxelsToGetTest = outputShapeTest[2:]
        for convLayer_i in convLayersToConnectToFirstFcForMultiscaleFromThisLayerType:
            thisLayer = layersInThisPathway[convLayer_i]
            middlePartOfFmsTrain = getMiddlePartOfFms(thisLayer.outputTrain, numOfCentralVoxelsToGetTrain)
            middlePartOfFmsVal = getMiddlePartOfFms(thisLayer.outputVal, numOfCentralVoxelsToGetVal)
            middlePartOfFmsTest = getMiddlePartOfFms(thisLayer.outputTest, numOfCentralVoxelsToGetTest)
            outputOfPathwayTrain = T.concatenate([outputOfPathwayTrain, middlePartOfFmsTrain], axis=1)
            outputOfPathwayVal = T.concatenate([outputOfPathwayVal, middlePartOfFmsVal], axis=1)
            outputOfPathwayTest = T.concatenate([outputOfPathwayTest, middlePartOfFmsTest], axis=1)
            # Track the grown channel count in the recorded shapes.
            outputShapeTrain[1] += thisLayer.getNumberOfFeatureMaps();
            outputShapeVal[1] += thisLayer.getNumberOfFeatureMaps();
            outputShapeTest[1] += thisLayer.getNumberOfFeatureMaps();
        self._setOutputAttributes(outputOfPathwayTrain, outputOfPathwayVal, outputOfPathwayTest,
                                  outputShapeTrain, outputShapeVal, outputShapeTest)

    # The below should be updated, and calculated in here properly with private function and per layer.
    def calcRecFieldOfPathway(self, kernelDimsPerLayer):
        # Receptive field of the whole pathway, from the per-layer kernel dims.
        return calcReceptiveFieldDims(kernelDimsPerLayer)

    # Joe: renamed from 'calcInputRczDimsToProduceOutputFmsOfCompatibleDims'
    def calcInputRczDims(self, kernelDims, dimsOutputOfPrimaryPW):
        """Compute the (r, c, z) input dims this pathway needs so that its
        output is compatible (after upsampling) with the primary pathway's output."""
        recFieldAtEndOfPathway = self.calcRecFieldOfPathway(kernelDims)  # Joe: receptive field of this subsample PW.
        rczDimsOfInput = [-1, -1, -1]
        rczDimsOfOutput = [-1, -1, -1]
        rczDimsOutputOfPrimaryPW = dimsOutputOfPrimaryPW[2:]
        for rcz_i in xrange(3):
            # Joe: if subsample factor is 3, the output dims of subsample PW should be 1/3 of normal PW.
            rczDimsOfOutput[rcz_i] = \
                int(ceil(rczDimsOutputOfPrimaryPW[rcz_i] / (1.0 * self.subsFactor()[rcz_i])))
            '''
            Joe: if input dims to this subsample PW is receptive field(i.e. recFieldAtEndOfPathway),
            then the output dims would be 1. To ensure it is instead rczDimsOfOutput, the input dims
            should be recFieldAtEndOfPathway + rczDimsOfOutput - 1
            '''
            rczDimsOfInput[rcz_i] = recFieldAtEndOfPathway[rcz_i] + rczDimsOfOutput[rcz_i] - 1
        return rczDimsOfInput

    # Setters
    def _setInputAttributes(self, inputToLayerTrain, inputToLayerVal, inputToLayerTest, inputToLayerShapeTrain,
                            inputToLayerShapeVal, inputToLayerShapeTest):
        # Record the symbolic inputs and their shapes for all three datasets.
        self._inputTrain = inputToLayerTrain; self._inputVal = inputToLayerVal; self._inputTest = inputToLayerTest
        self._inputShapeTrain = inputToLayerShapeTrain; self._inputShapeVal = inputToLayerShapeVal;
        self._inputShapeTest = inputToLayerShapeTest

    def _setOutputAttributes(self, outputTrain, outputVal, outputTest, outputShapeTrain,
                             outputShapeVal, outputShapeTest):
        # Record the symbolic outputs and their shapes for all three datasets.
        self._outputTrain = outputTrain; self._outputVal = outputVal; self._outputTest = outputTest
        self._outputShapeTrain = outputShapeTrain; self._outputShapeVal = outputShapeVal;
        self._outputShapeTest = outputShapeTest

    # Getters
    def pName(self):
        return self._pName

    def pType(self):
        return self._pType

    def getLayers(self):
        return self._layersInPathway

    def getLayer(self, index):
        return self._layersInPathway[index]

    def subsFactor(self):
        return self._subsFactor

    def getOutput(self):
        return [self._outputTrain, self._outputVal, self._outputTest]

    def getShapeOfOutput(self):
        return [self._outputShapeTrain, self._outputShapeVal, self._outputShapeTest]

    def getShapeOfInput(self):
        return [self._inputShapeTrain, self._inputShapeVal, self._inputShapeTest]

    # Other API :
    # Abstract implementation. Children classes should implement this.
    # NOTE(review): NotImplementedMethod is not a builtin — calling this raises a
    # NameError rather than NotImplementedError; it still aborts, but consider fixing.
    def getStringType(self): raise NotImplementedMethod()

    # Will be overriden for lower-resolution pathways.
    def getOutputAtNormalRes(self): return self.getOutput()

    def getShapeOfOutputAtNormalRes(self): return self.getShapeOfOutput()
class NormalPathway(Pathway):
    """Full-resolution pathway: processes the input at its normal resolution."""

    def __init__(self, pName=None):
        super(NormalPathway, self).__init__(pName)
        self._pType = PathwayTypes.NORM

    # Overrides the parent's abstract method.
    def getStringType(self):
        return "NORMAL"
class SubsampledPathway(Pathway):
    """Lower-resolution pathway: operates on a subsampled version of the input
    and can upsample its output back to the normal resolution."""

    def __init__(self, subsamplingFactor, pName=None):
        super(SubsampledPathway, self).__init__(pName)
        self._pType = PathwayTypes.SUBS
        self._subsFactor = subsamplingFactor
        # Outputs after upsampling back to normal resolution
        # (populated by upsampleOutputToNormalRes()).
        self._outputNormResTrain = None
        self._outputNormResVal = None
        self._outputNormResTest = None
        self._outputNormResShapeTrain = None
        self._outputNormResShapeVal = None
        self._outputNormResShapeTest = None

    def upsampleOutputToNormalRes(self, upsamplingScheme="repeat",
                                  shapeToMatchInRczTrain=None, shapeToMatchInRczVal=None, shapeToMatchInRczTest=None):
        # Should be called only once, to build. Afterwards just use the getters
        # to obtain the upsampled outputs again.
        outputsPerDataset = self.getOutput()
        outputShapesPerDataset = self.getShapeOfOutput()
        shapesToMatchPerDataset = [shapeToMatchInRczTrain, shapeToMatchInRczVal, shapeToMatchInRczTest]
        upsampledOutputs = []
        upsampledShapes = []
        for (subsampledOutput, subsampledShape, shapeToMatch) in zip(outputsPerDataset,
                                                                     outputShapesPerDataset,
                                                                     shapesToMatchPerDataset):
            upsampledOutputs.append(upsampleRcz5DimArrayAndOptionalCrop(subsampledOutput,
                                                                        self.subsFactor(),
                                                                        upsamplingScheme,
                                                                        shapeToMatch))
            # Batch and FM dims are unchanged; spatial dims become those to match.
            upsampledShapes.append(subsampledShape[:2] + shapeToMatch[2:])
        self._setOutputAttributesNormRes(upsampledOutputs[0], upsampledOutputs[1], upsampledOutputs[2],
                                         upsampledShapes[0], upsampledShapes[1], upsampledShapes[2])

    def _setOutputAttributesNormRes(self, outputNormResTrain, outputNormResVal, outputNormResTest,
                                    outputNormResShapeTrain, outputNormResShapeVal, outputNormResShapeTest):
        # Essentially this records the state after the upsampling "layer".
        (self._outputNormResTrain,
         self._outputNormResVal,
         self._outputNormResTest) = (outputNormResTrain, outputNormResVal, outputNormResTest)
        (self._outputNormResShapeTrain,
         self._outputNormResShapeVal,
         self._outputNormResShapeTest) = (outputNormResShapeTrain, outputNormResShapeVal, outputNormResShapeTest)

    # Overrides of the parent's methods.
    def getStringType(self):
        return "SUBSAMPLED" + str(self.subsFactor())

    def getOutputAtNormalRes(self):
        # upsampleOutputToNormalRes() must be called first once.
        return [self._outputNormResTrain, self._outputNormResVal, self._outputNormResTest]

    def getShapeOfOutputAtNormalRes(self):
        # upsampleOutputToNormalRes() must be called first once.
        return [self._outputNormResShapeTrain, self._outputNormResShapeVal, self._outputNormResShapeTest]
class FcPathway(Pathway):
    """Fully-connected (classification) pathway at the end of the network."""

    def __init__(self, pName=None):
        super(FcPathway, self).__init__(pName)
        self._pType = PathwayTypes.FC

    # Overrides the parent's abstract method.
    def getStringType(self):
        return "FC"
| [
"joe@joedeMacBook-Pro.local"
] | joe@joedeMacBook-Pro.local |
175b341a56c39c15bc473eabefdea8436aba734f | 09d79c3509252cfccac35bb28de9a0379094823a | /alx/movies/migrations/0002_auto_20201123_1045.py | 1ac4f1ab4103dc7788ff628ea113fe1d93025510 | [] | no_license | marianwitkowski/python2311 | 73ad491016cd6d0010d0203db43aca2c6debe0ad | 9bbeca3fb6d8658a1321ab099ff2102cd7de76e0 | refs/heads/master | 2023-01-22T13:13:56.695680 | 2020-12-02T14:58:15 | 2020-12-02T14:58:15 | 315,350,865 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 3.1.3 on 2020-11-23 09:45
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: sets Polish verbose names for the Movie model.

    AlterModelOptions only changes Django model metadata (Meta options used for
    display, e.g. in the admin); it does not modify the database schema.
    """

    dependencies = [
        ('movies', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='movie',
            options={'verbose_name': 'Film', 'verbose_name_plural': 'Filmy'},
        ),
    ]
| [
"marian.witkowski@gmail.com"
] | marian.witkowski@gmail.com |
30c99dfdd5ca6240f4823e4700937a6180f408ea | 7634e833cac4973375739e814cc9aa755c7b1f77 | /day_2/4_find_the_runner_up_score.py | eaca8df361fcb1e777ea912efc837fc1b60c0bdb | [] | no_license | RonakNandanwar26/Innomatics_Internship | 34a2b75a68eae022734010caeef05c28807a7435 | 15884516f38331ae6f152cac8bfd36af4e195c19 | refs/heads/master | 2023-04-17T16:57:32.409572 | 2021-04-14T03:54:39 | 2021-04-14T03:54:39 | 331,293,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | """
Given the participants' score sheet for your University Sports Day, you are required to find the runner-up score. You are given scores. Store them in a list and find the score of the runner-up.
Input Format
The first line contains n. The second line contains an array A[] of n integers each separated by a space.
Constraints
2<=n<=10
-100<=A[i]<=100
Output Format
Print the runner-up score.
Sample Input 0
5
2 3 6 6 5
Sample Output 0
5
"""
def runner_up(scores):
    """Return the runner-up score: the second highest *distinct* value.

    `scores` may be any iterable of integers with at least two distinct values.
    """
    # set() removes duplicates (e.g. 6 appearing twice still counts once),
    # then the second-to-last element of the sorted distinct values is the answer.
    return sorted(set(scores))[-2]


if __name__ == '__main__':
    n = int(input())  # n (the count) is read per the input format but not needed.
    arr = map(int, input().split())
    # Fix: the original printed the whole list before the answer, which breaks
    # the expected output format (only the runner-up score must be printed).
    print(runner_up(arr))
| [
"ronaknandanwar10@gmail.com"
] | ronaknandanwar10@gmail.com |
b09ea8ae30786f0547c3fc9f3cf5ed86aff6342a | 836114e315f19baaf236153d7ff5aea0762898b6 | /__unported__/product_with_supplier_unit_price/__openerp__.py | 2b42f930bbff57e3bc94c05e9b16a01c6712b7a1 | [] | no_license | acsone/product-attribute | 4fd30f89feeb05c80cdd6c47df4ad27c857216db | 4c053ed71a464e31a3e49c9ec9a968a17b799233 | refs/heads/master | 2023-09-05T01:27:53.076091 | 2014-07-09T10:30:41 | 2014-07-09T10:30:41 | 21,644,928 | 1 | 0 | null | 2023-07-25T13:26:14 | 2014-07-09T08:30:01 | Python | UTF-8 | Python | false | false | 1,835 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Module identity.
    "name": "Supplier unit price",
    "version": "0.1",
    "author": "Savoir-faire Linux",
    "website": "http://www.savoirfairelinux.com",
    "license": "GPL-3",
    "category": "Product",
    "complexity": "easy",
    "description": """
On the product form, in the suppliers tab, you have to click on the
line to get the prices of the product from that supplier.
This module displays the unit price directly on the product form by
adding a function field to store the unit price to the supplierinfo
object and adding it to its tree view.
""",
    # Only depends on the core 'product' module.
    "depends": ['product'],
    "init_xml": [],
    # Views loaded on module install/update.
    "update_xml": [
        'supplierinfo_view.xml'
    ],
    "demo_xml": [],
    "installable": True,
    "certificate": ''
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"guewen.baconnier@camptocamp.com"
] | guewen.baconnier@camptocamp.com |
de7cdeb56ac6a9da4ebdb384575159164684b92d | dfad6d85ddcccee8e582c34b5e0486b10d1b1dca | /wizard/confirm_auto_fulfill.py | d0df28f0588ad94334d80e74dca35dfcb42b5960 | [] | no_license | HichamFkr/fulfillement | 97845ad189de9281951390402fc5c3e76723bc14 | 00eba73aec03a2ddf2e069837936c7551c372f8e | refs/heads/master | 2020-05-26T04:29:02.874740 | 2019-07-08T07:43:01 | 2019-07-08T07:43:01 | 188,106,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,541 | py | # -*- coding: utf-8 -*-
from audioop import reverse
from openerp import fields, models, api
import numpy as np
class sale_order_auto_fulfill(models.TransientModel):
    """Wizard that auto-fulfils sale-order lines.

    It enumerates every combination (subset) of the customers whose lines are
    in the 'fulfillement' state, scores each combination by the lines'
    fulfilment scores, and fulfils the orders of the best-scoring combination.
    NOTE(review): the 'decesion'/'fulfillement' spellings are kept because they
    match existing field and model names elsewhere in the module.
    """
    _name = 'sale_order.auto.fulfill'

    def _decesion_matrix(self, partners):
        """Build a (2**n x n) 0/1 matrix whose rows enumerate every subset of
        the n partners, in a truth-table-like alternating pattern."""
        def fill(row, n, begin, end):
            # Write 1 into row[begin:end] when n == 1, otherwise write 0.
            if n == 1:
                n = 1
            else:
                n = 0
            row[int(begin): int(end)] = n
            return row
        nbr_client = len(partners)
        rows = 2 ** nbr_client
        A = np.zeros((nbr_client, rows))
        iterations = 2
        for row in A:
            begin = 0
            end = rows / iterations
            step = end
            n = 1
            # Alternate 0/1 segments along this row; each row halves the segment width.
            for _ in range(0, iterations):
                n *= -1
                row = fill(row, n, begin, end)
                begin = end
                end += step
            # NOTE(review): indentation reconstructed — iterations is assumed to
            # double once per row (truth-table pattern); confirm against history.
            iterations *= 2
        # print "Decesion Matrix"
        # print A.T
        return A.T

    @api.one
    def _fulfill(self, line):
        # line.ensure_one()
        # Deliver the minimum SLA fraction of the ordered quantity.
        qty = line.product_uom_qty * line.sla_line_min
        # NOTE(review): type(qty) == "Float" compares a type object to a string
        # and is therefore always False — the int(qty) + 1 round-up branch can
        # never execute; presumably isinstance(qty, float) was intended.
        if type(qty) == "Float":
            line.qty_livre = int(qty) + 1
        else:
            line.qty_livre = qty
        self.env['sale.order.fulfill'].confirm_fulfillement()

    @api.one
    def auto_fulfillement(self):
        """Pick the best-scoring partner combination and fulfil their orders."""
        so_ids = self.env.context.get('active_ids')
        lines = self.env['sale.order.line'].browse(so_ids).filtered(lambda line: line.state == 'fulfillement')
        partners = []
        scores = []
        orders = []
        lines_sort_by_score_partner = []
        decesion_scores = []
        # Each list gets exactly one element: the mapped recordset/values of `lines`.
        orders.append(lines.mapped('order_id'))
        scores.append(lines.mapped('fulfillement_score_partner'))
        partners.append(lines.mapped('order_id.partner_id'))
        for line in lines:
            lines_sort_by_score_partner.append(line)
        # `scores` holds a single element (appended once above), so this loop
        # runs once: dedupe the scores and sort them descending.
        for s in scores:
            arr_scores = np.sort(np.array(list(dict.fromkeys(s))))[::-1]  # convert list to numpy array
        # print arr_scores
        lines_sort_by_score_partner.sort(key=lambda l: l.fulfillement_score_partner, reverse=True)
        partners_sorted = []
        for l in lines_sort_by_score_partner:
            partners_sorted.append(l.order_partner_id)
        # Remove duplicate partners (order-preserving), then sort by credit.
        partners_sorted = list(dict.fromkeys(partners_sorted))
        partners_sorted.sort(key=lambda p: p.credit, reverse=True)
        decesion_matrix = self._decesion_matrix(partners_sorted)
        decesion_scores = np.dot(decesion_matrix, arr_scores.T)  # to determine highest scores
        # print decesion_matrix
        # print decesion_scores
        # Row with the highest total score selects the winning partner subset.
        max_score = np.max(decesion_scores)
        index_max_score = np.argmax(decesion_scores)
        decesion_partners = decesion_matrix[index_max_score]
        final_decesion_partners = []
        for index, item in enumerate(decesion_partners, start=0):
            if item == 1.0:
                final_decesion_partners.append(partners_sorted[index])
        # print decesion_matrix
        # print decesion_scores
        # print decesion_partners
        # print final_decesion_partners
        self._check_sla_order(final_decesion_partners)

    @api.multi
    def _check_sla_line(self, line):
        # so_ids = self.env.context.get('active_ids')
        # lines = self.env['sale.order.line'].browse(so_ids).filtered(lambda line: line.state == 'fulfillement')
        # A line satisfies its SLA when the delivered quantity reaches the
        # minimum fraction of the ordered quantity. Implicitly returns None otherwise.
        if line.qty_livre >= (line.product_uom_qty * line.sla_line_min):
            return True

    @api.multi
    def _check_sla_order(self, partners):
        """Fulfil the 'Order percent' SLA for each partner's orders; returns
        True as soon as one order meets its SLA threshold."""
        for r in self:
            count = 0
            for p in partners:
                slas = p.fulfillement_sla_ids
                for sla in slas:
                    if sla:
                        if sla.sla_id.fulfillement_sla_name == "Order percent":
                            orders = r.env['sale.order'].search([('partner_id', '=', p.id)])
                            for o in orders:
                                # First pass: fulfil every line of the order.
                                for l in o.mapped('order_line'):
                                    r._fulfill(l)
                                # Second pass: count lines meeting their SLA
                                # (or already past the fulfilment state).
                                for l in o.mapped('order_line'):
                                    if r._check_sla_line(l) == True or l.state != 'fulfillement':
                                        count += 1
                                # NOTE(review): under Python 2 count/o.nb_lines is
                                # integer division when both are ints, which would
                                # truncate the ratio to 0 or 1 — verify intended.
                                if count / o.nb_lines >= sla.value:
                                    return True
| [
"hichem420@gmail.com"
] | hichem420@gmail.com |
b956bf75c5205f6186423173fbd05e4ad8f4c45d | 92002b325654387604286b92b107f31d7fc2167e | /sina_backup.py | a75801791a31a7ebec7e3a75d5a0b4e35e017539 | [] | no_license | jit2088/sina_blog | 21ccbb16ca3f498ab8d226afe7d1e99f83e11fb6 | d3cadcfc9e573597058a648f2554de8d1b0df52b | refs/heads/main | 2023-02-14T18:23:50.183933 | 2021-01-10T16:58:17 | 2021-01-10T16:58:17 | 327,697,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,363 | py | # -*- coding:UTF-8 -*- #
'''
Created on 2011-12-18
@author: Ahan
'''
import re
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
import socket
import locale
import datetime
import codecs
from urllib import urlopen
# Regular expression definitions.
# Matches the link to the blog-post index page ("博文目录" = post catalogue).
pattern1=u"""<a href="(http:.*?)">博文目录</a>"""
prog1 = re.compile(pattern1)
# Matches a blog-post title link: captures the title and the post URL.
pattern2=u"""<a title="(.*?)" target="_blank" href="(.*?)">.*?</a>"""
prog2=re.compile(pattern2)
# Matches the "next page" ("下一页") link of the index.
pattern3=u"""<a href="([^"]+)" title="[^"]+">下一页"""
prog3=re.compile(pattern3)
# Matches the article body, delimited by Sina's begin/end HTML comments.
pattern4=u"""<!--博文正文 begin -->[\\s\\S]*?<!-- 正文结束 -->"""
prog4=re.compile(pattern4)
# Matches inline images in the body: captures the src attribute and the
# real_src attribute (the actual image URL).
pattern5=u"""(src="[^"]+"( real_src ="([^"]+)"))"""
prog5=re.compile(pattern5)
def read_date_from_url(url):
"""以Unicode形式返回从url上读取的所有数据
"""
try:
data = ""
request = urlopen(url)
while True:
s = request.read(1024)
if not s:
break
data += s
return unicode(data)
except:
print '读取数据时出错'
print "Unexpected error:", sys.exc_info()[0],sys.exc_info()[1]
return None
finally:
if request:
request.close()
def save_to_file(url,filename,blog_address):
    """Download one blog post and save it locally.

    `url` is the post's address, `filename` the target file name (saved with a
    .html suffix) and `blog_address` the output folder. Inline images are
    downloaded next to the page and the HTML is rewritten to reference them.
    """
    # Create the output folder if it does not exist yet.
    if os.path.exists(blog_address)==False:
        os.makedirs(blog_address)
    # Strip characters that are illegal in file names.
    filename=ReplaceBadCharOfFileName(filename)
    # Avoid clobbering an existing file: append (0), (1), ... until free.
    # NOTE(review): the suffixes accumulate, e.g. "name(0)(1)" — verify intended.
    file_no=0
    while os.path.isfile(blog_address+'/'+filename+'.html')==True:
        filename=filename+'('+file_no.__str__()+')'
        file_no+=1
    text = read_date_from_url(url)
    text=_filter(text)
    # Save each inline image locally and rewrite its src in the body.
    result=prog5.findall(text)
    i=1
    for pic in result:
        folder=blog_address+'/'+filename+'/'
        pic_name='image'+i.__str__()+'.gif'
        if os.path.exists(folder)==False:
            os.makedirs(folder)
        try:
            # Stream the image from its real_src URL to the local file.
            url_file = urlopen(pic[2])
            pic_file = codecs.open(folder+pic_name,'wb')
            while True:
                s = url_file.read(1024)
                if not s:
                    break
                pic_file.write(s)
            pic_file.close()
            url_file.close()
        except:
            print '噢,保存图片的时候出现问题了,跳过此张图片...'
            print "Unexpected error:", sys.exc_info()[0],sys.exc_info()[1]
        else:
            print '保存图片成功...'
        # Replace the remote image address in the body with the local path
        # (first occurrence only, to keep subsequent matches aligned).
        text=text.replace(pic[0],unicode("src=\"" + filename + "/" + pic_name + "\"" + pic[1]),1)
        i=i+1
    blog_file = codecs.open(blog_address+'/'+filename+'.html','wb')
    blog_file.write(text)
    blog_file.close()
# Extract the article body part from the full page text.
def _filter(t):
    """Return the article body from page text `t`, wrapped in a minimal HTML
    document (unicode). Raises if the body markers are not found."""
    result=prog4.search(t)
    if result is not None:
        # Fix: the closing tag was misspelled '</dody>', producing malformed HTML.
        return u'<html><head></head><body>' + unicode(result.group()) + u'</body></html>'
    else:
        raise Exception('噢,提取正文出错了……')
# Strip characters that are illegal (or inconvenient) in file names.
def ReplaceBadCharOfFileName(filename):
    """Return `filename` with spaces and file-name-illegal characters removed.

    Fix/generalisation: the double quote (") — forbidden in Windows file
    names — is now stripped as well; the original chain of replace() calls
    missed it.
    """
    for bad_char in ' \\/:*?"<>|&;':
        filename = filename.replace(bad_char, "")
    return filename
# Main entry point.
if __name__ == '__main__':
    # --- Preparation phase ---
    blog_no=1  # Current post number (1-based).
    begin=1    # First post to back up.
    end=0      # Last post to back up; 0 means "until the last one".
    page=0     # Index-page counter.
    saved=0    # Number of posts successfully saved.
    timeout = 60*5  # Timeout set to 5 minutes.
    # Set the timeout for the whole socket layer; later socket users (urllib)
    # inherit it and need no per-call setting.
    socket.setdefaulttimeout(timeout)
    blog_address=raw_input("请输入您的博客地址(输入最后部分即可,比如您的博客地址是http://blog.sina.com.cn/jiangafu,只要输入jiangafu):")
    blog_address=blog_address.replace('\r','')
    # Ask for the first post number (must be positive).
    begin=raw_input('从第几篇开始:')
    begin=locale.atoi(begin)
    while begin<=0:
        begin=raw_input('请输入大于0的数:')
        begin=locale.atoi(begin)
    # Ask for the last post number (0 = back up to the end).
    end=raw_input('到第几篇结束(到最后请输入0):')
    end=locale.atoi(end)
    while end<0:
        end=raw_input('请输入大于等于0的数:')
        end=locale.atoi(end)
    if end==0:
        print '您的博客地址是:http://blog.sina.com.cn/'+blog_address+',保存第'+begin.__str__()+'篇到最后一篇博文'
    else:
        print '您的博客地址是:http://blog.sina.com.cn/'+blog_address+',保存第'+begin.__str__()+'篇到第'\
              +end.__str__()+'篇的博文'
    starttime = datetime.datetime.now()
    text=read_date_from_url('http://blog.sina.com.cn/'+blog_address)
    time.sleep(0.5)
    # Extract the URL of the post catalogue ("博文目录") page.
    result = prog1.search(text)
    if result is not None:
        print '博文目录地址:' , result.group(1)
        text=read_date_from_url(result.group(1))
        time.sleep(0.4)
    else:
        print '提取博文目录地址失败'
        # Abort: without the catalogue there is nothing to back up.
        sys.exit()
    # Walk every catalogue page; analyse, extract and save each post.
    while True:
        page+=1
        print '开始备份第' , page , '页'
        # Match all post links on this page.
        result=prog2.findall(text)
        # Download each post on the current page.
        for blog in result:
            if blog_no < begin:
                # Not yet in the requested range: skip.
                blog_no += 1
            elif end != 0 and blog_no > end:
                # Past the requested range: stop with this page.
                break
            else:
                try:
                    save_to_file(blog[1],unicode(blog[0]),blog_address)
                except:
                    print '噢,保存第',blog_no,'篇博文',blog[0],'的时候出现问题了,跳过...'
                    blog_no += 1
                    print "Unexpected error:", sys.exc_info()[0],sys.exc_info()[1]
                else:
                    print '成功保存了第', blog_no, '篇博文:', blog[0]
                    blog_no += 1
                    saved += 1
                    time.sleep(0.4)
        # Follow the "next page" link, if any.
        result = prog3.search(text)
        if result is not None:
            text = read_date_from_url(result.group(1))
        else:
            print '这是最后一页'
            break
    print '博客备份完成一共备份',saved,'篇博文'
    print '共用时:',datetime.datetime.now() - starttime
    raw_input('按回车键退出...')
| [
"jin.2088@usask.ca"
] | jin.2088@usask.ca |
8c5fb8bc6094cee02d62818ed1fdba969117d0ea | 57235e5fbd29dc5e0b3f24649e15a48935edd65f | /boa3_test/test_sc/built_in_methods_test/IsInstanceListLiteral.py | 11e0d91468bec39a5306a5e643f06dec8b69c858 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | DanPopa46/neo3-boa | ae75543bdc4e0aeadf45578b6b5e4c45b9253557 | e4ef340744b5bd25ade26f847eac50789b97f3e9 | refs/heads/development | 2023-04-01T19:25:08.216180 | 2021-04-15T17:45:38 | 2021-04-15T17:45:38 | 358,663,228 | 0 | 0 | Apache-2.0 | 2021-04-16T16:46:46 | 2021-04-16T16:46:31 | null | UTF-8 | Python | false | false | 94 | py | from boa3.builtin import public
@public
def Main() -> bool:
    # Compiler test fixture: isinstance() applied to a list literal.
    return isinstance([], list)
| [
"mirellamedeiros.09@hotmail.com"
] | mirellamedeiros.09@hotmail.com |
9252178bd560c85b23332610a4299b0ec0f71f57 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /q4bBcq5NET4CH5Rcb_16.py | 5f42fed2a73573979ea8acc56462e2f23301b0ed | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py |
def jay_and_bob(txt):
a={"half":"14 grams","quarter":"7 grams","eighth":"3.5 grams","sixteenth":"1.75 grams"}
return a[txt]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
5ee67e80af40f1fb9ca2e30abc32a0d1d67ae76a | a617d322816e844829fa48b26759a4ef6258f193 | /cobrapy/ijr904/ex0/case2.py | 01f206a8dc64ece77cc9269f274c2ae2f81dd283 | [] | no_license | t6g/genomescalemetabolicmodeling | cb66c5b56fbcb0ad3b391187c2321f4cb3e87ba5 | e02c0817a2958bb4c41970f5887ee1c7a39c4cd6 | refs/heads/master | 2021-01-10T03:11:01.249420 | 2016-02-04T19:38:54 | 2016-02-04T19:38:54 | 51,077,991 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | from __future__ import print_function
import cobra as cobra
model = cobra.io.read_sbml_model('../../../xml/iJR904.xml')
biomass = model.reactions.get_by_id('BiomassEcoli')
model.change_objective(biomass)
glu = model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_')
glu.lower_bound = -18.5
model.optimize()
print('glucose >= -18.5, aerobic growth rate = ', model.solution.f, '1/hr')
print('glucose flux = ', model.solution.x_dict['EX_glc_LPAREN_e_RPAREN_'])
print('glucose shadow price = ', model.solution.y_dict['glc_DASH_D_e'])
print('O2 flux = ', model.solution.x_dict['EX_o2_LPAREN_e_RPAREN_'])
print('O2 shadow price = ', model.solution.y_dict['o2_e'])
print('ATPM flux = ', model.solution.x_dict['ATPM'])
print('ATP flux ATPPRT = ', model.solution.x_dict['ATPPRT'])
print('ATP shadow price = ', model.solution.y_dict['atp_c'])
| [
"tanggp@yahoo.com"
] | tanggp@yahoo.com |
f0f7d898a452de3ce1b9a7940f8dcd61c38c6500 | 18f8abb90efece37949f5b5758c7752b1602fb12 | /py/django_tools/django-haystack/tests/simple_tests/tests/simple_backend.py | d9b5120d942eb0f05a4fcbd1769c58de0da181cd | [
"BSD-3-Clause",
"MIT"
] | permissive | marceltoben/evandrix.github.com | caa7d4c2ef84ba8c5a9a6ace2126e8fd6db1a516 | abc3fbfb34f791f84e9a9d4dc522966421778ab2 | refs/heads/master | 2021-08-02T06:18:12.953567 | 2011-08-23T16:49:33 | 2011-08-23T16:49:33 | 2,267,457 | 3 | 5 | null | 2021-07-28T11:39:25 | 2011-08-25T11:18:56 | C | UTF-8 | Python | false | false | 5,799 | py | from datetime import date
from django.conf import settings
from django.test import TestCase
from haystack import connections, connection_router
from haystack import indexes
from haystack.query import SearchQuerySet
from haystack.utils.loading import UnifiedIndex
from core.models import MockModel
from core.tests.mocks import MockSearchResult
class SimpleMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
name = indexes.CharField(model_attr='author', faceted=True)
pub_date = indexes.DateField(model_attr='pub_date')
def get_model(self):
return MockModel
class SimpleSearchBackendTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(SimpleSearchBackendTestCase, self).setUp()
self.backend = connections['default'].get_backend()
self.index = connections['default'].get_unified_index().get_index(MockModel)
self.sample_objs = MockModel.objects.all()
def test_update(self):
self.backend.update(self.index, self.sample_objs)
def test_remove(self):
self.backend.remove(self.sample_objs[0])
def test_clear(self):
self.backend.clear()
def test_search(self):
# No query string should always yield zero results.
self.assertEqual(self.backend.search(u''), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'*')['hits'], 23)
self.assertEqual([result.pk for result in self.backend.search(u'*')['results']], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(self.backend.search(u'daniel')['hits'], 23)
self.assertEqual([result.pk for result in self.backend.search(u'daniel')['results']], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(self.backend.search(u'should be a string')['hits'], 1)
self.assertEqual([result.pk for result in self.backend.search(u'should be a string')['results']], [8])
# Ensure the results are ``SearchResult`` instances...
self.assertEqual(self.backend.search(u'should be a string')['results'][0].score, 0)
self.assertEqual(self.backend.search(u'index document')['hits'], 6)
self.assertEqual([result.pk for result in self.backend.search(u'index document')['results']], [2, 3, 15, 16, 17, 18])
# Regression-ville
self.assertEqual([result.object.id for result in self.backend.search(u'index document')['results']], [2, 3, 15, 16, 17, 18])
self.assertEqual(self.backend.search(u'index document')['results'][0].model, MockModel)
# No support for spelling suggestions
self.assertEqual(self.backend.search(u'Indx')['hits'], 0)
self.assertFalse(self.backend.search(u'Indx').get('spelling_suggestion'))
# No support for facets
self.assertEqual(self.backend.search(u'', facets=['name']), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', facets=['name'])['hits'], 23)
self.assertEqual(self.backend.search(u'', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}}), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', date_facets={'pub_date': {'start_date': date(2008, 2, 26), 'end_date': date(2008, 2, 26), 'gap': '/MONTH'}})['hits'], 23)
self.assertEqual(self.backend.search(u'', query_facets={'name': '[* TO e]'}), {'hits': 0, 'results': []})
self.assertEqual(self.backend.search(u'daniel', query_facets={'name': '[* TO e]'})['hits'], 23)
self.assertFalse(self.backend.search(u'').get('facets'))
self.assertFalse(self.backend.search(u'daniel').get('facets'))
# Note that only textual-fields are supported.
self.assertEqual(self.backend.search(u'2009-06-18')['hits'], 0)
# Ensure that swapping the ``result_class`` works.
self.assertTrue(isinstance(self.backend.search(u'index document', result_class=MockSearchResult)['results'][0], MockSearchResult))
def test_more_like_this(self):
self.backend.update(self.index, self.sample_objs)
self.assertEqual(self.backend.search(u'*')['hits'], 23)
# Unsupported by 'simple'. Should see empty results.
self.assertEqual(self.backend.more_like_this(self.sample_objs[0])['hits'], 0)
class LiveSimpleSearchQuerySetTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(LiveSimpleSearchQuerySetTestCase, self).setUp()
# Stow.
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.old_ui = connections['default'].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = SimpleMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections['default']._index = self.ui
self.sample_objs = MockModel.objects.all()
self.sqs = SearchQuerySet()
def tearDown(self):
# Restore.
connections['default']._index = self.old_ui
settings.DEBUG = self.old_debug
super(LiveSimpleSearchQuerySetTestCase, self).tearDown()
def test_general_queries(self):
# For now, just make sure these don't throw an exception.
# They won't work until the simple backend is improved.
self.assertTrue(len(self.sqs.auto_query('daniel')) > 0)
self.assertTrue(len(self.sqs.filter(text='index')) > 0)
self.assertTrue(len(self.sqs.exclude(name='daniel')) > 0)
self.assertTrue(len(self.sqs.order_by('-pub_date')) > 0)
| [
"evandrix@gmail.com"
] | evandrix@gmail.com |
d549ba467c97d5c0f1e6bbbe09468611fa76e397 | 8517f9ee07964eabef065a4ebc13ff6dbdb0dc5e | /purchase_analysis/__manifest__.py | 5aa2c925d6e4227818344988a0dba610e35bed32 | [] | no_license | sendalpegat/promotionv12 | 01cb3289862d96ed8d7df58fff65af618ee76724 | d0659ebd3ce46ffe9440e21287c4fea2599e2644 | refs/heads/master | 2022-07-15T04:13:34.766504 | 2020-05-09T05:19:39 | 2020-05-09T05:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | # -*- coding: utf-8 -*-
{
"name": "Purchase Analysis",
"category": 'purchase',
'summary': '',
"description": """
""",
"author": "Odox SoftHub",
"website": "http://odoxsofthub.com/",
"depends": ['base', 'purchase', 'product','product_brand'],
"data": [
'security/ir.model.access.csv',
'views/purchase_analysis_view.xml',
'views/res_partner_inherit_view.xml'
],
"installable": True,
"application": True,
"auto_install": False,
} | [
"ashifpk1@gmail.com"
] | ashifpk1@gmail.com |
a227ccbe053d5ce1d3c9cad90399739b78d1a5a2 | f1c77ea87628e8cf1c5020ac74107c97a12ecbb2 | /old/push2ES_batch.py | 8fedd0620f2a50d19ad10a61143c84d1ad1c7764 | [] | no_license | czxxjtu/youTube-8m | 8d1f339e0637d7eabca1c05d13f18457add42c2a | 41f43fbf2817b22c2669a7c46db9341098be5bbe | refs/heads/master | 2021-01-12T09:11:12.966588 | 2016-12-13T03:01:21 | 2016-12-13T03:01:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | # 2. Push data into ElasticSearch
# What do I need?
import json
import pyes # Package to dump YouTube data to ElasticSearch (https://pyes.readthedocs.io/en/latest/references/pyes.es.html)
import sys
from retrieveData_batch import retrieveAPIData
reload(sys)
sys.setdefaultencoding('utf-8') # For displaying foreign characters
class push2ES:
def __init__(self, index):
self.apiCall = retrieveAPIData.main(index)
self.tFrame = self.apiCall.tFrame
def pushToES(self):
ttComplete = self.tFrame
# Add a id for looping into ElasticSearch index
#ttComplete["no_index"] = range(1,len(ttComplete)+1)
# Convert DataFrame into json
tmp = ttComplete.reset_index().to_json(orient="records")
# Load each record into json format before bulk
tmp_json = json.loads(tmp)
#print(tmp_json[1:3])
index_name = 'youtube'
type_name = 'pyelastic'
es = pyes.ES('169.53.152.5:9200')
i = 1
for doc in tmp_json:
#print "Document in tmp_json: " + str(i)
try:
es.index(doc, index_name, type_name, bulk=True)
i=i+1
except:
e = sys.exc_info()[0]
print e
pass
es.force_bulk()
print "Total Number of doc in the batch: " + str(i-1) + " . Successful doc: " + str(len(tmp_json))
#print(i-1)
#pushCall = push2ES()
#pushCall.pushToES()
#i = 2635
starti=int(sys.argv[1])
endi=int(sys.argv[2])
if endi > 4800:
endi = 4800
while starti <= endi:
print("Current Document: " + str(starti))
pushCall = push2ES(starti)
pushCall.pushToES()
starti+=1
else:
print("Done")
| [
"noreply@github.com"
] | noreply@github.com |
d72e85e9155e145a4086c011b7fc6725abf1cbec | d6c9f45d1369089f05b2479e531407f5e06649b6 | /kitti/writeDescriptors.py | 36b9e2ba8b99e7189e19f30da7efd112b2987a6a | [] | no_license | jingdao/PointCloudApp | acc2136c5eea7e1f2dc547c09617e67e5f07e4aa | 862972f1ac376ef25dd425f556b5e80a30486201 | refs/heads/master | 2021-01-25T14:56:50.822350 | 2017-09-11T16:59:02 | 2017-09-11T16:59:02 | 38,130,300 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | #!/usr/bin/python
import sys
import glob
import os
dir = sys.argv[1]
labels = None
if os.path.isfile(dir+'/labels.txt'):
labels = []
fd = open(dir+'/labels.txt')
for line in fd:
labels.append(int(line))
print 'Read '+str(len(labels))+' labels'
outfile = open(dir+'/svmdata.txt','w')
if labels is None:
maxN = 100
else:
maxN = len(labels)
descriptorFile = open(dir+'/descriptor.pcd')
while not descriptorFile.readline().startswith("DATA ascii"):
pass
for n in range(maxN):
if labels is None:
outfile.write('0 ')
else:
outfile.write(str(labels[n])+' ')
features = descriptorFile.readline().split()
n = 1
for t in features:
outfile.write(str(n)+':'+str(float(t))+' ')
n += 1
outfile.write('\n')
print 'Wrote labels to '+outfile.name
outfile.close()
| [
"chenjingdao@wustl.edu"
] | chenjingdao@wustl.edu |
55bacf98f915eaac80ad81e7cd9541e1d1d103f4 | fb82b947f230d790e02aa931bede474bd2838a1d | /Maquinas.py | 751addb2d37812f788e736bcdcbdca40d04d3cc4 | [] | no_license | josejacomeb/Maquinas | 6dc201da3d27fbbb3b2342186c210e5dadb289ec | a3ce5de4f1da2b2c63c383f88e7d285e56a018a3 | refs/heads/master | 2021-01-20T11:06:32.012727 | 2014-07-12T16:34:14 | 2014-07-12T16:34:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Maquinas.ui'
#
# Created: Sat Jul 12 09:50:50 2014
# by: PyQt5 UI code generator 5.3.1
#
# WARNING! All changes made in this file will be lost!
| [
"josejacomeb@openmailbox.org"
] | josejacomeb@openmailbox.org |
5183ef70a198156e115c1a6cfbf495b63f38c77f | c6a8aa51d489358381c9ba51ad162169243de16c | /10_1-10_3/def__UpToDateShapeLengthField.py | fde44cb61d007f10a5c5401fd0c365d422394f34 | [] | no_license | aantonio99/FCToolbox | e6fa78bb2f3a5fb0eb3174e9f80baa832b174bcc | 4c76e5923dfff1833fd944c8af4cc5ddb3c91f08 | refs/heads/master | 2021-07-21T07:04:17.671545 | 2017-10-31T12:38:14 | 2017-10-31T12:38:14 | 108,995,073 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,861 | py | # -*- coding: utf-8 -*-
'''
Created on 21 fev. 2013
Last update on 07 fev. 2014
@author: Clement Roux
@contact: clement.roux@ens-lyon.fr
CNRS - UMR5600 Environnement Ville Societe
15 Parvis René Descartes, BP 7000, 69342 Lyon Cedex 07, France
@note: For each use of the FluvialCorridor toolbox leading to a publication, report, presentation or any other
document, please refer the following article :
Roux, C., Alber, A., Bertrand, M., Vaudor, L., Piegay, H., submitted. "FluvialCorridor": A new ArcGIS
package for multiscale riverscape exploration. Geomorphology
@summary: def__UpToDateShapeLengthField is an open-source python and arcPy code.
Some GIS operations modifies the Shape_Length field names, preventing further generic functions.
Thus, this script is called in most of the FluvialCorridor modules to update the Shape_Length field.
'''
# Import of required librairies
import arcpy
from arcpy import env
import math
import os
# Allow the temporary outputs overwrite
arcpy.env.overwriteOutput = True
#===============================================================================
# CODING
#===============================================================================
def UpToDateShapeLengthField (a):
x = 0
fieldnames = [f.name for f in arcpy.ListFields(a)]
for i in range(0, len(fieldnames)) :
if fieldnames[i] == "Shape_Length" :
x = 1
if x == 0 :
arcpy.AddField_management(a, "Shape_Length", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
try :
arcpy.CalculateField_management(a, "Shape_Length", "!shape.length!", "PYTHON_9.3", "")
except :
arcpy.CalculateField_management(a, "Shape_Length", "!forme.length!", "PYTHON_9.3", "")
return a
| [
"aurelie.antonio@ens-lyon.fr"
] | aurelie.antonio@ens-lyon.fr |
5e29a1322e48474262d25fec16e26d81b47188a4 | f81e9a81338c56bcca2c1900d6a189f9633ec406 | /store/urls.py | be7e8e7ab89db81e01b1295f20ef379ea90e6787 | [] | no_license | allahgabo/greatkart-django | b2127c9227e28a7d9856a6c99162eeaaf328fa92 | 38f8b07f060fd14a7bf85dd9a9b7217fcccee1af | refs/heads/main | 2023-04-30T01:42:45.962041 | 2021-05-20T19:48:46 | 2021-05-20T19:48:46 | 368,124,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from django.urls import path
from . import views
urlpatterns = [
path('',views.store,name='store'),
path('home/',views.home,name='home'),
path('category/<slug:category_slug>/',views.store,name='products_by_category'),
path('category/<slug:category_slug>/<slug:product_slug>/',views.product_detail,name='product_detail'),
path('search/',views.search,name='search'),
] | [
"suliman_allahgabo@yahoo.com"
] | suliman_allahgabo@yahoo.com |
398495e9fdcb0aefeea06c8c7e1c34515a197940 | 63d1fd387d5fce97a17edcd0170088f51aef106b | /Dynamic_Programming/knspack/0-1knapsackbottomup.py | a99440d27e09afc7a2064b4eefab30123b46b10c | [] | no_license | ayushkedia357/interview_practice | 46355dd8b18c9e497a9495bac9056c37d628ab0a | fb409c4a8f579581424bff26d56196ba3c2ece4c | refs/heads/master | 2021-05-19T04:18:34.918714 | 2021-04-29T05:36:03 | 2021-04-29T05:36:03 | 251,525,504 | 1 | 1 | null | 2020-04-28T04:42:05 | 2020-03-31T07:02:45 | Python | UTF-8 | Python | false | false | 346 | py | wt = [1,2,3]
val = [6,10,12]
W = 5
n=len(wt)
memo = [[0 for i in range(W+1)] for j in range(n+1)]
for i in range(1,n+1):
for j in range(1,W+1):
if wt[i-1] <= j:
memo[i][j] = max(val[i-1] + memo[i-1][j-wt[i-1]],memo[i-1][j])
else:
memo[i][j] = memo[i-1][j]
print(memo[n][W])
| [
"noreply@github.com"
] | noreply@github.com |
fd030c84a3083c494b2b5cc68e2f65fceb0e1585 | 767280307aeb00b70b3326a22586a514e57bf8d4 | /middlebox.py | 976c2b37a7762ba2edede8fea0e68fea117a2cfc | [] | no_license | vithirun/Reliable-Communication | 3dacf5178e4b4f2888ebb7277aeb545e8e9bf533 | 1dca93dd10379dfaa00b18a40c1b6dd9b35b6b50 | refs/heads/master | 2021-09-01T05:10:13.677144 | 2017-12-25T01:35:47 | 2017-12-25T01:35:47 | 115,296,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,453 | py | #!/usr/bin/env python3
from switchyard.lib.address import *
from switchyard.lib.packet import *
from switchyard.lib.userlib import *
from threading import *
from random import *
import time
BLASTER_IP = "192.168.100.1"
BLASTER_MAC = "10:00:00:00:00:01"
MIDDLEBOX_BLASTER_IP = "192.168.100.2"
MIDDLEBOX_BLASTER_MAC = "40:00:00:00:00:01"
MIDDLEBOX_BLASTEE_IP = "192.168.200.2"
MIDDLEBOX_BLASTEE_MAC = "40:00:00:00:00:02"
BLASTEE_IP = "192.168.200.1"
BLASTEE_MAC = "20:00:00:00:00:01"
def set_ip_layer_middlebox_blastee(pkt):
pkt[IPv4].src = MIDDLEBOX_BLASTEE_IP
pkt[IPv4].dst = BLASTEE_IP
return pkt
def set_ethernet_layer_middlebox_blastee(pkt):
pkt[Ethernet].src = MIDDLEBOX_BLASTEE_MAC
pkt[Ethernet].dst = BLASTEE_MAC
return pkt
def set_transport_layer_middlebox_blastee(pkt):
pkt[UDP].src = 4444
pkt[UDP].dst = 5555
return pkt
def set_ip_layer_middlebox_blaster(pkt):
pkt[IPv4].src = MIDDLEBOX_BLASTER_IP
pkt[IPv4].dst = BLASTER_IP
return pkt
def set_ethernet_layer_middlebox_blaster(pkt):
pkt[Ethernet].src = MIDDLEBOX_BLASTER_MAC
pkt[Ethernet].dst = BLASTER_MAC
return pkt
def set_transport_layer_middlebox_blaster(pkt):
pkt[UDP].src = 4444
pkt[UDP].dst = 5555
return pkt
def read_parameters_from_file(param_file):
with open(param_file) as data:
substrings = data.read().split(" ")
range_value = float(substrings[1])
return range_value
def generate_random(range_value):
drop_packet = False
random_value = uniform(0, 1)
if random_value < range_value:
drop_packet = True
return drop_packet
def switchy_main(net):
my_intf = net.interfaces()
mymacs = [intf.ethaddr for intf in my_intf]
myips = [intf.ipaddr for intf in my_intf]
param_file = "middlebox_params.txt"
while True:
gotpkt = True
try:
timestamp, dev, pkt = net.recv_packet()
log_debug("Device is {}".format(dev))
except NoPackets:
log_debug("No packets available in recv_packet")
gotpkt = False
except Shutdown:
log_debug("Got shutdown signal")
break
if gotpkt:
log_debug("I got a packet {}".format(pkt))
if dev == "middlebox-eth0":
log_debug("Received from blaster")
'''
Received data packet
Should I drop it?
If not, modify headers & send to blastee
'''
pkt = set_ethernet_layer_middlebox_blastee(pkt)
pkt = set_ip_layer_middlebox_blastee(pkt)
pkt = set_transport_layer_middlebox_blastee(pkt)
range_value = read_parameters_from_file(param_file)
drop_packet = generate_random(range_value)
if drop_packet is True:
log_debug("Dropping Packet")
else:
net.send_packet("middlebox-eth1", pkt)
elif dev == "middlebox-eth1":
log_debug("Received from blastee")
'''
Received ACK
Modify headers & send to blaster. Not dropping ACK packets!
'''
pkt = set_ethernet_layer_middlebox_blaster(pkt)
pkt = set_ip_layer_middlebox_blaster(pkt)
pkt = set_transport_layer_middlebox_blaster(pkt)
net.send_packet("middlebox-eth0", pkt)
else:
log_debug("Oops :))")
net.shutdown()
| [
"viki@DESKTOP-G5DH82V.localdomain"
] | viki@DESKTOP-G5DH82V.localdomain |
15cdef80de74576e1921e64605edf2172182c0c0 | 59ccc333c74b64cd3160ff66e5594cf0d94c6a4e | /learn_python/work3_test_pytest/core/calc.py | 8b27050d342fccff0cedb4dc40f76ba5be9887d4 | [] | no_license | pengketong-pkt/pengketong_lg4 | 86933c324d0cc85267035c63887de8449f275534 | 79460f837b1f94da58ddba091487d9a670d1de33 | refs/heads/master | 2023-04-28T17:57:56.665724 | 2021-05-17T09:15:28 | 2021-05-17T09:15:28 | 293,811,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | class Calc:
def div(self, a, b):
return a/b
def mul(self, a, b):
return a*b
return a / b
def mul(self, a, b):
return a * b
| [
"13680586193@139.com"
] | 13680586193@139.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.