blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
43f4c52fd42c9bc9c7c4b78c33def1efaf6c1845 | 4ccff489fccbeb033a9f2dad46d8fdd88f19f77b | /cn_tests/calendars/test_szsh_calendar.py | bd621c63d67cafcab5b0d49fa9a125d746bc7b42 | [
"Apache-2.0"
] | permissive | hebpmo/zipline | 67094c237ae9830ab69f9a209929c7430bd4d4c5 | 396469b29e7e0daea4fe1e8a1c18f6c7eeb92780 | refs/heads/master | 2020-04-07T03:32:54.494193 | 2018-01-08T17:13:39 | 2018-01-08T17:13:39 | 158,020,235 | 0 | 1 | Apache-2.0 | 2018-11-17T20:04:50 | 2018-11-17T20:04:50 | null | UTF-8 | Python | false | false | 21,620 | py | import unittest
import os
import pandas as pd
import numpy as np
from pandas.util.testing import assert_index_equal
from parameterized import parameterized
from zipline.predicates import assert_equal
from zipline.utils.calendars.exchange_calendar_szsh import SZSHExchangeCalendar
import cn_tests
# The SZSH (Shenzhen/Shanghai) calendar has a midday lunch break, so there are
# gaps between open minutes within a trading day and between sessions.
GAPS_BETWEEN_SESSIONS = True
# Upper bound on a single session's length, in hours (used as a sanity check).
MAX_SESSION_HOURS = 5.5
def load_answer_key(filename):
    """
    Read the expected-calendar CSV named ``<filename>.csv`` from
    cn_tests/resources/calendars/ and return it as a DataFrame whose
    index and first two columns are UTC-localized Timestamps.
    """
    csv_path = os.path.join(
        os.path.dirname(os.path.abspath(cn_tests.__file__)),
        'resources',
        'calendars',
        '{0}.csv'.format(filename),
    )
    # NOTE: Merely passing parse_dates=True doesn't cause pandas to set
    # the dtype correctly, and passing all reasonable inputs to the
    # dtype kwarg cause read_csv to barf.
    return pd.read_csv(
        csv_path,
        index_col=0,
        parse_dates=[0, 1, 2],
        date_parser=lambda stamp: pd.Timestamp(stamp, tz='UTC'),
    )
def _find_full_session(calendar):
for session_label in calendar.schedule.index:
if session_label not in calendar.early_closes:
return session_label
return None
class Test_szsh_calendar(unittest.TestCase):
    """Validates SZSHExchangeCalendar behaviour against the 'szsh' CSV answer key."""
    def setUp(self):
        """Build a calendar spanning the answer key's first through last session."""
        answer_key_filename = 'szsh'
        # NOTE(review): these two assignments only create locals that shadow the
        # module-level constants of the same name; they have no effect here.
        GAPS_BETWEEN_SESSIONS = True
        MAX_SESSION_HOURS = 0
        self.answers = load_answer_key(answer_key_filename)
        self.start_date = self.answers.index[0]
        self.end_date = self.answers.index[-1]
        self.calendar = SZSHExchangeCalendar(self.start_date, self.end_date)
        self.one_minute = pd.Timedelta(minutes=1)
        self.one_hour = pd.Timedelta(hours=1)
    def test_sanity_check_session_lengths(self):
        """No session may be longer than MAX_SESSION_HOURS."""
        # make sure that no session is longer than MAX_SESSION_HOURS hours
        for session in self.calendar.all_sessions:
            o, c = self.calendar.open_and_close_for_session(session)
            delta = c - o
            # delta.seconds is the sub-day remainder; sessions are < 1 day long.
            self.assertTrue((delta.seconds / 3600) <= MAX_SESSION_HOURS)
    def test_calculated_against_csv(self):
        """The generated session index must equal the answer key's index."""
        assert_index_equal(self.calendar.schedule.index, self.answers.index)
    def test_is_open_on_minute(self):
        """Open on every open/close minute; closed one minute outside them."""
        one_minute = pd.Timedelta(minutes=1)
        for market_minute in self.answers.market_open:
            market_minute_utc = market_minute
            # The exchange should be classified as open on its first minute
            self.assertTrue(self.calendar.is_open_on_minute(market_minute_utc))
            if GAPS_BETWEEN_SESSIONS:
                # Decrement minute by one, to minute where the market was not
                # open
                pre_market = market_minute_utc - one_minute
                self.assertFalse(self.calendar.is_open_on_minute(pre_market))
        for market_minute in self.answers.market_close:
            close_minute_utc = market_minute
            # should be open on its last minute
            self.assertTrue(self.calendar.is_open_on_minute(close_minute_utc))
            if GAPS_BETWEEN_SESSIONS:
                # increment minute by one minute, should be closed
                post_market = close_minute_utc + one_minute
                self.assertFalse(self.calendar.is_open_on_minute(post_market))
    def _verify_minute(self, calendar, minute,
                       next_open_answer, prev_open_answer,
                       next_close_answer, prev_close_answer):
        """Assert all four next/previous open/close lookups for one minute."""
        self.assertEqual(
            calendar.next_open(minute),
            next_open_answer
        )
        self.assertEqual(
            self.calendar.previous_open(minute),
            prev_open_answer
        )
        self.assertEqual(
            self.calendar.next_close(minute),
            next_close_answer
        )
        self.assertEqual(
            self.calendar.previous_close(minute),
            prev_close_answer
        )
    def test_next_prev_open_close(self):
        """Check next/previous open/close around every session boundary."""
        # for each session, check:
        # - the minute before the open (if gaps exist between sessions)
        # - the first minute of the session
        # - the second minute of the session
        # - the minute before the close
        # - the last minute of the session
        # - the first minute after the close (if gaps exist between sessions)
        answers_to_use = self.answers[1:-2]
        for idx, info in enumerate(answers_to_use.iterrows()):
            open_minute = info[1].iloc[0]
            close_minute = info[1].iloc[1]
            minute_before_open = open_minute - self.one_minute
            # answers_to_use starts at the second element of self.answers,
            # so self.answers.iloc[idx] is one element before, and
            # self.answers.iloc[idx + 2] is one element after the current
            # element
            previous_open = self.answers.iloc[idx].market_open
            next_open = self.answers.iloc[idx + 2].market_open
            previous_close = self.answers.iloc[idx].market_close
            next_close = self.answers.iloc[idx + 2].market_close
            # minute before open
            if GAPS_BETWEEN_SESSIONS:
                self._verify_minute(
                    self.calendar, minute_before_open, open_minute,
                    previous_open, close_minute, previous_close
                )
            # open minute
            self._verify_minute(
                self.calendar, open_minute, next_open, previous_open,
                close_minute, previous_close
            )
            # second minute of session
            self._verify_minute(
                self.calendar, open_minute + self.one_minute, next_open,
                open_minute, close_minute, previous_close
            )
            # minute before the close
            self._verify_minute(
                self.calendar, close_minute - self.one_minute, next_open,
                open_minute, close_minute, previous_close
            )
            # the close
            self._verify_minute(
                self.calendar, close_minute, next_open, open_minute,
                next_close, previous_close
            )
            # minute after the close
            if GAPS_BETWEEN_SESSIONS:
                self._verify_minute(
                    self.calendar, close_minute + self.one_minute, next_open,
                    open_minute, next_close, close_minute
                )
    def test_next_prev_minute(self):
        """next_minute/previous_minute walk the all_minutes index correctly."""
        all_minutes = self.calendar.all_minutes
        # test 20,000 minutes because it takes too long to do the rest.
        for idx, minute in enumerate(all_minutes[1:20000]):
            self.assertEqual(
                all_minutes[idx + 2],
                self.calendar.next_minute(minute)
            )
            self.assertEqual(
                all_minutes[idx],
                self.calendar.previous_minute(minute)
            )
        # test a couple of non-market minutes
        if GAPS_BETWEEN_SESSIONS:
            for open_minute in self.answers.market_open[1:]:
                hour_before_open = open_minute - self.one_hour
                self.assertEqual(
                    open_minute,
                    self.calendar.next_minute(hour_before_open)
                )
            for close_minute in self.answers.market_close[1:]:
                hour_after_close = close_minute + self.one_hour
                self.assertEqual(
                    close_minute,
                    self.calendar.previous_minute(hour_after_close)
                )
    def test_minute_to_session_label(self):
        """minute_to_session_label resolves in-session and boundary minutes."""
        for idx, info in enumerate(self.answers[1:-2].iterrows()):
            session_label = info[1].name
            open_minute = info[1].iloc[0]
            close_minute = info[1].iloc[1]
            hour_into_session = open_minute + self.one_hour
            minute_before_session = open_minute - self.one_minute
            minute_after_session = close_minute + self.one_minute
            next_session_label = self.answers.iloc[idx + 2].name
            previous_session_label = self.answers.iloc[idx].name
            # verify that minutes inside a session resolve correctly
            minutes_that_resolve_to_this_session = [
                self.calendar.minute_to_session_label(open_minute),
                self.calendar.minute_to_session_label(open_minute,
                                                      direction="next"),
                self.calendar.minute_to_session_label(open_minute,
                                                      direction="previous"),
                self.calendar.minute_to_session_label(open_minute,
                                                      direction="none"),
                self.calendar.minute_to_session_label(hour_into_session),
                self.calendar.minute_to_session_label(hour_into_session,
                                                      direction="next"),
                self.calendar.minute_to_session_label(hour_into_session,
                                                      direction="previous"),
                self.calendar.minute_to_session_label(hour_into_session,
                                                      direction="none"),
                self.calendar.minute_to_session_label(close_minute),
                self.calendar.minute_to_session_label(close_minute,
                                                      direction="next"),
                self.calendar.minute_to_session_label(close_minute,
                                                      direction="previous"),
                self.calendar.minute_to_session_label(close_minute,
                                                      direction="none"),
                session_label
            ]
            if GAPS_BETWEEN_SESSIONS:
                minutes_that_resolve_to_this_session.append(
                    self.calendar.minute_to_session_label(
                        minute_before_session
                    )
                )
                minutes_that_resolve_to_this_session.append(
                    self.calendar.minute_to_session_label(
                        minute_before_session,
                        direction="next"
                    )
                )
                minutes_that_resolve_to_this_session.append(
                    self.calendar.minute_to_session_label(
                        minute_after_session,
                        direction="previous"
                    )
                )
            self.assertTrue(all(x == minutes_that_resolve_to_this_session[0]
                                for x in minutes_that_resolve_to_this_session))
            minutes_that_resolve_to_next_session = [
                self.calendar.minute_to_session_label(minute_after_session),
                self.calendar.minute_to_session_label(minute_after_session,
                                                      direction="next"),
                next_session_label
            ]
            self.assertTrue(all(x == minutes_that_resolve_to_next_session[0]
                                for x in minutes_that_resolve_to_next_session))
            self.assertEqual(
                self.calendar.minute_to_session_label(minute_before_session,
                                                      direction="previous"),
                previous_session_label
            )
            # make sure that exceptions are raised at the right time
            with self.assertRaises(ValueError):
                self.calendar.minute_to_session_label(open_minute, "asdf")
            if GAPS_BETWEEN_SESSIONS:
                with self.assertRaises(ValueError):
                    self.calendar.minute_to_session_label(
                        minute_before_session,
                        direction="none"
                    )
    @parameterized.expand([
        (1, 0),
        (2, 0),
        (2, 1),
    ])
    def test_minute_index_to_session_labels(self, interval, offset):
        """Vectorized label lookup matches per-minute lookup on strided minutes."""
        minutes = self.calendar.minutes_for_sessions_in_range(
            pd.Timestamp('2016-01-04', tz='UTC'),
            pd.Timestamp('2016-04-01', tz='UTC'),
        )
        # Stride the minutes with (interval, offset) to exercise sparse input.
        minutes = minutes[range(offset, len(minutes), interval)]
        np.testing.assert_array_equal(
            np.array(minutes.map(self.calendar.minute_to_session_label),
                     dtype='datetime64[ns]'),
            self.calendar.minute_index_to_session_labels(minutes)
        )
    def test_next_prev_session(self):
        """next/previous_session_label walk sessions; endpoints raise ValueError."""
        session_labels = self.answers.index[1:-2]
        max_idx = len(session_labels) - 1
        # the very first session
        first_session_label = self.answers.index[0]
        with self.assertRaises(ValueError):
            self.calendar.previous_session_label(first_session_label)
        # all the sessions in the middle
        for idx, session_label in enumerate(session_labels):
            if idx < max_idx:
                self.assertEqual(
                    self.calendar.next_session_label(session_label),
                    session_labels[idx + 1]
                )
            if idx > 0:
                self.assertEqual(
                    self.calendar.previous_session_label(session_label),
                    session_labels[idx - 1]
                )
        # the very last session
        last_session_label = self.answers.index[-1]
        with self.assertRaises(ValueError):
            self.calendar.next_session_label(last_session_label)
    def test_minutes_for_period(self):
        """minutes_for_session yields every minute from open to close."""
        # full session
        # find a session that isn't an early close. start from the first
        # session, should be quick.
        full_session_label = _find_full_session(self.calendar)
        if full_session_label is None:
            raise ValueError("Cannot find a full session to test!")
        minutes = self.calendar.minutes_for_session(full_session_label)
        _open, _close = self.calendar.open_and_close_for_session(
            full_session_label
        )
        # generate all the minutes of the period??? (translated from Chinese)
        np.testing.assert_array_equal(
            minutes,
            pd.date_range(start=_open, end=_close, freq="min")
        )
        try:
            # early close period
            early_close_session_label = self.calendar.early_closes[0]
        except:
            # NOTE(review): bare except — presumably guards IndexError/KeyError
            # when no early closes exist; confirm before narrowing.
            # No early close exists: return and skip the remaining checks.
            return
        minutes_for_early_close = self.calendar.minutes_for_session(early_close_session_label)
        _open, _close = self.calendar.open_and_close_for_session(
            early_close_session_label
        )
        np.testing.assert_array_equal(
            minutes_for_early_close,
            pd.date_range(start=_open, end=_close, freq="min")
        )
    def test_sessions_in_range(self):
        """sessions_in_range returns the inclusive slice of the schedule."""
        # pick two sessions
        session_count = len(self.calendar.schedule.index)
        first_idx = session_count // 3
        second_idx = 2 * first_idx
        first_session_label = self.calendar.schedule.index[first_idx]
        second_session_label = self.calendar.schedule.index[second_idx]
        answer_key = self.calendar.schedule.index[first_idx:second_idx + 1]
        np.testing.assert_array_equal(
            answer_key,
            self.calendar.sessions_in_range(first_session_label,
                                            second_session_label)
        )
    def _get_session_block(self):
        """Return [session_before, chosen_session, session_after] labels."""
        # find and return a (full session, early close session, full session)
        # block
        #shortened_session = self.calendar.early_closes[c]
        # No early closes on this calendar, so pick a session at random instead.
        max_num = len(self.calendar.schedule.index)
        c = np.random.randint(2, max_num - 1)
        shortened_session = self.calendar.schedule.index[c]
        shortened_session_idx = self.calendar.schedule.index.get_loc(shortened_session)
        session_before = self.calendar.schedule.index[
            shortened_session_idx - 1
        ]
        session_after = self.calendar.schedule.index[shortened_session_idx + 1]
        return [session_before, shortened_session, session_after]
    def test_minutes_in_range(self):
        """minutes_in_range matches a manually-built union of session minutes."""
        sessions = self._get_session_block()
        first_open, first_close = self.calendar.open_and_close_for_session(
            sessions[0]
        )
        minute_before_first_open = first_open - self.one_minute
        middle_open, middle_close = self.calendar.open_and_close_for_session(sessions[1])
        last_open, last_close = self.calendar.open_and_close_for_session(
            sessions[-1]
        )
        minute_after_last_close = last_close + self.one_minute
        # get all the minutes between first_open and last_close
        minutes1 = self.calendar.minutes_in_range(
            first_open,
            last_close
        )
        minutes2 = self.calendar.minutes_in_range(
            minute_before_first_open,
            minute_after_last_close
        )
        if GAPS_BETWEEN_SESSIONS:
            np.testing.assert_array_equal(minutes1, minutes2)
        else:
            # if no gaps, then minutes2 should have 2 extra minutes
            np.testing.assert_array_equal(minutes1, minutes2[1:-1])
        # manually construct the minutes
        all_minutes = np.concatenate([
            pd.date_range(
                start=first_open,
                end=first_close,
                freq="min"
            ),
            pd.date_range(
                start=middle_open,
                end=middle_close,
                freq="min"
            ),
            pd.date_range(
                start=last_open,
                end=last_close,
                freq="min"
            )
        ])
        np.testing.assert_array_equal(all_minutes, minutes1)
    def test_minutes_for_sessions_in_range(self):
        """Range minutes equal the concatenation of per-session minutes."""
        sessions = self._get_session_block()
        minutes = self.calendar.minutes_for_sessions_in_range(
            sessions[0],
            sessions[-1]
        )
        # do it manually
        session0_minutes = self.calendar.minutes_for_session(sessions[0])
        session1_minutes = self.calendar.minutes_for_session(sessions[1])
        session2_minutes = self.calendar.minutes_for_session(sessions[2])
        concatenated_minutes = np.concatenate([
            session0_minutes.values,
            session1_minutes.values,
            session2_minutes.values
        ])
        np.testing.assert_array_equal(
            concatenated_minutes,
            minutes.values
        )
    def test_sessions_window(self):
        """sessions_window agrees with sessions_in_range in both directions."""
        sessions = self._get_session_block()
        np.testing.assert_array_equal(
            self.calendar.sessions_window(sessions[0], len(sessions) - 1),
            self.calendar.sessions_in_range(sessions[0], sessions[-1])
        )
        np.testing.assert_array_equal(
            self.calendar.sessions_window(
                sessions[-1],
                -1 * (len(sessions) - 1)),
            self.calendar.sessions_in_range(sessions[0], sessions[-1])
        )
    def test_session_distance(self):
        """Distance across a three-session block is 2."""
        sessions = self._get_session_block()
        self.assertEqual(2, self.calendar.session_distance(sessions[0],
                                                           sessions[-1]))
    def test_open_and_close_for_session(self):
        """Open/close lookups (combined and individual) match the answer key."""
        for index, row in self.answers.iterrows():
            session_label = row.name
            open_answer = row.iloc[0]
            close_answer = row.iloc[1]
            found_open, found_close = self.calendar.open_and_close_for_session(session_label)
            # Test that the methods for just session open and close produce the
            # same values as the method for getting both.
            alt_open = self.calendar.session_open(session_label)
            self.assertEqual(alt_open, found_open)
            alt_close = self.calendar.session_close(session_label)
            self.assertEqual(alt_close, found_close)
            self.assertEqual(open_answer, found_open)
            self.assertEqual(close_answer, found_close)
    def test_session_opens_in_range(self):
        """session_opens_in_range returns the full market_open column."""
        found_opens = self.calendar.session_opens_in_range(
            self.answers.index[0],
            self.answers.index[-1],
        )
        assert_equal(found_opens, self.answers['market_open'])
    def test_session_closes_in_range(self):
        """session_closes_in_range returns the full market_close column."""
        found_closes = self.calendar.session_closes_in_range(
            self.answers.index[0],
            self.answers.index[-1],
        )
        assert_equal(found_closes, self.answers['market_close'])
    def test_daylight_savings(self):
        """Open times stay consistent across daylight-saving transitions."""
        # daylight-saving switch (translated from Chinese)
        # 2004 daylight savings switches:
        # Sunday 2004-04-04 and Sunday 2004-10-31
        # make sure there's no weirdness around calculating the next day's
        # session's open time.
        for date in ["2004-04-05", "2004-11-01"]:
            next_day = pd.Timestamp(date, tz='UTC')
            open_date = next_day + pd.Timedelta(days=self.calendar.open_offset)
            the_open = self.calendar.schedule.loc[next_day].market_open
            localized_open = the_open.tz_localize("UTC").tz_convert(
                self.calendar.tz
            )
            self.assertEqual(
                (open_date.year, open_date.month, open_date.day),
                (localized_open.year, localized_open.month, localized_open.day)
            )
            self.assertEqual(
                self.calendar.open_time.hour,
                localized_open.hour
            )
            self.assertEqual(
                self.calendar.open_time.minute,
                localized_open.minute
            )
# Allow running this test module directly; verbosity=2 prints each test name.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| [
"liu.dengfeng@hotmail.com"
] | liu.dengfeng@hotmail.com |
bfa821e97e78f4b36482e3e7f8e07c8a5c0aa1f4 | e08838e0101c142685cfcdae9e74f6972b084789 | /url_allow_lister/application.py | 87bc658b1b645504b70d71c24ed69255316a2a3a | [] | no_license | prabhat14176/CICD-test | 7e2e97a2ec0f886da43c549d86cbd89251dd4b22 | 840e37e27bb0d24fbfb598afe4fc32a863b01705 | refs/heads/main | 2022-12-19T19:08:05.442233 | 2020-10-12T11:30:00 | 2020-10-12T11:30:00 | 303,362,928 | 0 | 0 | null | 2020-10-12T11:30:02 | 2020-10-12T10:57:22 | Python | UTF-8 | Python | false | false | 3,779 | py | import yaml
import os
import glob
import json
import datetime
# Directory containing this script.
CWD_PATH = os.path.dirname(os.path.realpath(__file__))
# YAML config files live one level up from the script, under config/.
CONF_PATH = os.path.join(CWD_PATH, '../config/')
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
# Single timestamp captured at import time; stamped onto every output record.
NOWTIME = datetime.datetime.now()
RUNTIME_AS_STRING = NOWTIME.strftime(DATETIME_FORMAT)
def load_yaml_config(configFilePath):
    """
    Parse the YAML file at ``configFilePath`` and return the resulting object.

    Returns None for an empty file (yaml.safe_load yields None for empty
    input, so no special-casing is needed).  On a malformed file the
    YAMLError is printed and re-raised for the caller to handle.
    """
    # BUG FIX: the previous guard ``if account_stream != ""`` compared the
    # *file object* to a string, which is always true and therefore did
    # nothing; the empty-file case is already handled by safe_load itself.
    with open(configFilePath, 'r') as account_stream:
        try:
            return yaml.safe_load(account_stream)
        except yaml.YAMLError as exc:
            print(exc)
            raise
def _produceVersionInfo(domainsDict):
    """
    Build the versionInfo payload: one record per known application
    ('TicketApi' and 'CMS'), each stamped with the run timestamp.

    ``domainsDict`` is accepted for signature parity with the other
    producers but is not consulted.
    """
    return [
        {'ID': 'TicketApi', 'modifiedDate': RUNTIME_AS_STRING},
        {'ID': 'CMS', 'modifiedDate': RUNTIME_AS_STRING},
    ]
def _produceGetDomains(domainsDict):
    """
    Build the complete getdomains payload: one record per application key
    in ``domainsDict``, carrying its domain list and the run timestamp.
    """
    return [
        {
            'ID': app_id,
            'modifiedDate': RUNTIME_AS_STRING,
            'Domains': domains,
        }
        for app_id, domains in domainsDict.items()
    ]
def _produceGetDomain(domainsDict, targetDomain):
    """
    Build the getdomain payload for a single application: a one-element
    list when ``targetDomain`` is a key of ``domainsDict``, else empty.
    """
    if targetDomain not in domainsDict:
        return []
    return [{
        'ID': targetDomain,
        'modifiedDate': RUNTIME_AS_STRING,
        'Domains': domainsDict[targetDomain],
    }]
def writeOutput(output, fileType):
    """
    Serialize ``output`` as pretty-printed JSON to
    ``<CONF_PATH>/../output/<fileType>.json``.

    ``fileType`` may contain path components (e.g. 'api/application/CMS');
    the fixed output directory tree is created up front if missing.
    """
    # os.makedirs with exist_ok=True creates the whole output/api/application
    # tree in one race-free call, replacing the old chain of isdir/mkdir
    # checks (the file already requires Python 3.5+ for glob recursive=True).
    os.makedirs(os.path.join(CONF_PATH, '../output/api', 'application'),
                exist_ok=True)
    outputfile = os.path.join(CONF_PATH, '../output', '{0}.json'.format(fileType))
    # with-statement guarantees the handle is closed even if dumps fails.
    with open(outputfile, 'w') as data_file:
        data_file.write(json.dumps(output, indent=2))
def loadConfigData():
    """
    Merge every .yml file under CONF_PATH (searched recursively) into one
    dict.  Returns (success, merged_dict, error_messages); success is False
    when at least one file failed to load, with the failures described in
    error_messages.
    """
    merged = {}
    errors = []
    pattern = os.path.join(CONF_PATH, '**/*.yml')
    for config_file in glob.iglob(pattern, recursive=True):
        try:
            merged.update(load_yaml_config(config_file))
        except Exception as exc:
            errors.append(
                "Unable to process file {0}, Error: {1}".format(config_file, str(exc)))
    # success is exactly "no file failed"
    return not errors, merged, errors
def main():
    """Load all YAML config and emit the JSON API files, or print errors."""
    success, domains, errors = loadConfigData()
    if not success:
        print(errors)
        return
    # Build every payload first, then write them all, matching the original
    # compute-then-write ordering.
    outputs = [
        (_produceVersionInfo(domains), 'api/versioninfo'),
        (_produceGetDomains(domains), 'api/getdomains'),
        (_produceGetDomain(domains, 'ticketApi'), 'api/application/TicketApi'),
        (_produceGetDomain(domains, 'CMS'), 'api/application/CMS'),
    ]
    for payload, file_type in outputs:
        writeOutput(payload, file_type)
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
414cc945c689da8fa4dd40fa1fcd8d9c41032710 | b5b6ace81a17ed63608e8570c475027d66997f79 | /Esercizi_Brizioli/Es7.02/Monte_Carlo_NVT/Start_prog.py | 8cd1aa46d56e8733dde5f23b1531bd5b496b228d | [] | no_license | BrizioliMatteo/LSN_exercise | 9d6530bad90f77da7d1a4628aeed66d1fe2f41d4 | 45c19a5a0b66b62bfacceffed88cd9126defd026 | refs/heads/master | 2020-06-22T06:51:33.171357 | 2019-07-18T21:55:29 | 2019-07-18T21:55:29 | 197,662,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.optimize import curve_fit
import subprocess
from shutil import *
from glob import glob
# Parameter sets for three simulated state points (index i selects one of
# each): temperature, density, potential cutoff, and MC move size.
Tem=[0.8,1.1,1.2]
rho=[1.1,0.8,0.05]
cut_off=[2.2,2.5,5.]
delta=[0.12,0.2,5.]
# Read the simulation input template once; individual lines are patched below.
with open('input.dat', 'r') as file:
    data = file.readlines()
for i in range(3):
    # Lines 0, 2, 3, 4 of input.dat hold temperature, density, cutoff and
    # delta respectively — assumes a fixed input.dat layout; TODO confirm.
    data[0]=str(Tem[i])+"\n"
    data[2]=str(rho[i])+"\n"
    data[3]=str(cut_off[i])+"\n"
    data[4]=str(delta[i])+"\n"
    for l in range (2):
        # Lines 5 and 7 look like a block count (10 or 20) and a restart/mode
        # flag (0 or 1) — NOTE(review): verify against Monte_Carlo_NVT input spec.
        data[5]=str(10*(l+1))+"\n"
        data[7]=str(l)+"\n"
        # Rewrite input.dat and run the simulation binary for this combination.
        with open('input.dat', 'w') as file:
            file.writelines( data )
        cmd= "./Monte_Carlo_NVT.exe"
        value = subprocess.call(cmd, shell = True)
| [
"matteo.brizioli@gmail.com"
] | matteo.brizioli@gmail.com |
c28fc765f9d7635585ebbd080f8961d4bc1dbc96 | 96d8cf1ee8835fc1909e35de462e810019f49997 | /04_ssl_mm_v13/ssl_mm_v11_cifar10/config.py | f3452e618ac6161a1b34345785f2c0feb20a2798 | [] | no_license | tnguyen9210/semi-supervised-learning-robustness-pytorch | d57913b7d3a71249f791563a1f232f25a4a6960b | 1d51c77dfa9c6d80cc188227ddf9506b18545a4b | refs/heads/master | 2022-10-19T04:57:25.733110 | 2020-06-05T08:51:37 | 2020-06-05T08:51:37 | 258,005,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,991 | py |
import argparse
def parse_args():
    """Build and parse the command-line arguments for SSL MixMatch training.

    Returns:
        argparse.Namespace holding all hyper-parameters; every option has a
        default, so the script is runnable with no flags.
    """
    parser = argparse.ArgumentParser()
    # General
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--device', type=str, default="cuda:0")
    parser.add_argument('--data_dir', type=str, default="../../data/cifar10_v13")
    parser.add_argument('--domain', type=str, default="cifar10_orig")
    parser.add_argument('--img_size', type=int, default=32)
    # Training
    parser.add_argument('--num_epochs', type=int, default=150)
    parser.add_argument('--num_iters', type=int, default=500000)
    parser.add_argument('--num_iters_per_epoch', type=int, default=3000)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--consis_warmup', type=int, default=200000)
    # Optim
    parser.add_argument('--optim', type=str, default='sgd', help="Optimizer: sgd, adagrad, adam or adamax.")
    parser.add_argument('--lr', type=float, default=0.05, help="Learning rate.")
    parser.add_argument('--momentum', type=float, default=0.9, help="Momentum.")
    parser.add_argument('--l2_params', type=float, default=5e-4, help='L2 regularization for params.')
    parser.add_argument('--max_grad_norm', type=float, default=1, help="Max grad norm.")
    parser.add_argument('--scheduler_t0', type=int, default=10)
    parser.add_argument('--scheduler_tmult', type=int, default=2)
    # MixMatch
    parser.add_argument('--mm_num_augments', type=int, default=2)
    parser.add_argument('--mm_temperature', type=float, default=0.5)
    parser.add_argument('--mm_alpha', type=float, default=0.75)
    parser.add_argument('--mm_consis_coef', type=float, default=75)
    # Feature encoder (CNNs)
    parser.add_argument('--resnet_depth', type=int, default=28)
    parser.add_argument('--resnet_widen_factor', type=int, default=2)
    parser.add_argument('--resnet_group1_droprate', type=float, default=0.3)
    parser.add_argument('--resnet_group2_droprate', type=float, default=0.3)
    parser.add_argument('--resnet_group3_droprate', type=float, default=0.3)
    # Image classifier (FCs)
    parser.add_argument('--img_cls_nlayers', type=int, default=2)
    parser.add_argument('--img_cls_hidden_dim1', type=int, default=128)
    parser.add_argument('--img_cls_hidden_dim2', type=int, default=128)
    parser.add_argument('--img_cls_droprate1', type=float, default=0.0)
    parser.add_argument('--img_cls_droprate2', type=float, default=0.0)
    # Logging, Saving and Loading
    parser.add_argument('--model_id', type=str, default='10', help='ID under which to save models.')
    parser.add_argument('--model_dir', type=str, default='./saved_models')
    parser.add_argument('--eval_set', type=str, default='test_lbl')
    parser.add_argument('--ckpt_name', type=str, default='best_model.ckpt', help='Filename of the pretrained model.')
    parser.add_argument('--load', dest='load', action='store_true', help='Load pretrained model.')
    return parser.parse_args()
| [
"tnguyen9210@gmail.com"
] | tnguyen9210@gmail.com |
511e9060c7d3e7c4c0091d6a267df626ceea2f43 | e95c551ef54745552ad6fd0ade5de14fe70c081a | /stone/stone_log_utls.py | 2b7d8149fdf21b5bdb640c36de993c40eb77f3f5 | [] | no_license | stone20170821/stone | d196d482130e31edd14af08fe9dfb5b97e1a2b48 | ed8fa3326a0ab172b8fa8e3f0cdd81bd7f31130f | refs/heads/master | 2021-01-20T10:29:06.302047 | 2019-07-22T14:19:21 | 2019-07-22T14:19:21 | 101,635,329 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | import logging
# Named loggers shared across the package; handlers/levels are presumably
# configured elsewhere via logging configuration — TODO confirm.
info_logger = logging.getLogger('info')
report_logger = logging.getLogger('report')
| [
"woogux@126.com"
] | woogux@126.com |
3c187259e439be08762bde0c0a5f2af0b8c42e44 | 7734095146264ace0415f8b255a07b3e12ab0172 | /questions/build.py | 2e38a2525a4270f03a06e938423c306fd0ec9720 | [] | no_license | PerceptumNL/cb-energietransitie | 7ac762fdfcee96f15c9aa3b725fbb537774b498c | 912d8e2cc227cdbaea7abf71e0e0af0976ad3328 | refs/heads/master | 2021-01-16T18:02:45.507431 | 2013-08-21T16:35:37 | 2013-08-21T16:35:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | import os
# Output directory for the bundled assets.
directory = "./build"
if not os.path.exists(directory):
    os.makedirs(directory)


def _concatenate(sources, destination):
    """Append each source file verbatim, in list order, into destination."""
    with open(destination, 'w') as outfile:
        for fname in sources:
            with open(fname) as infile:
                # Line-by-line copy keeps memory bounded and needs no extra
                # imports; the two bundles previously duplicated this loop.
                for line in infile:
                    outfile.write(line)


# Bundle the JS sources (vendor libraries first, then the question modules).
_concatenate(
    ['js_vendor/jquery-ui-1.10.3.custom.min.js', 'js_vendor/traits-0.4.mini.js',
     'lib/custom_activity_loader.js', 'lib/ddq.js', 'lib/mcq.js', 'lib/tfq.js'],
    'build/questionary.js')
# Bundle the stylesheets in the same order as their JS counterparts.
_concatenate(
    ['lib/common.css', 'lib/ddq.css', 'lib/mcq.css', 'lib/tfq.css'],
    'build/questionary.css')
| [
"sergio@perceptum.nl"
] | sergio@perceptum.nl |
206e98cf74a0cc37cb60e816cab3cf0bb4e08614 | d2fb9166f0c8a40261715b9a06bb7a7c77fce46c | /apps/supplementary_applications/migrations/0023_auto_20191218_2043.py | a48604edcbb8e71dc67f46377a80b369c885734f | [] | no_license | surya20r/UNote | a5d205050bedb87e7011fe679f844943e39576bb | 5d034b1dcb3a6bdf307f18eb769b8dcfc5ca5448 | refs/heads/master | 2023-08-08T03:52:33.155638 | 2020-01-15T06:00:59 | 2020-01-15T06:00:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | # Generated by Django 2.2.4 on 2019-12-18 20:43
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.4: allows the 'downvoters' and 'upvoters'
    # many-to-many fields on SupplementaryApplication to be left blank.
    dependencies = [
        ('supplementary_applications', '0022_auto_20191218_1623'),
    ]
    operations = [
        migrations.AlterField(
            model_name='supplementaryapplication',
            name='downvoters',
            field=models.ManyToManyField(blank=True, related_name='downvoters', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='supplementaryapplication',
            name='upvoters',
            field=models.ManyToManyField(blank=True, related_name='upvoters', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"anish.mahto99@gmail.com"
] | anish.mahto99@gmail.com |
34a1c0235615920c69d66c20f7774fba3f391aa2 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/quickFixes/PyPandasSeriesToListQuickFixTest/dataframeGetitem.py | ee17a810d951dd9ec0fdaef3088c7dab1cfb67d5 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 351 | py | import pandas as pd
# DataFrame columns case
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
list(df[['a', 'b']].values)
bb = ["a", "b", "c"]
list(df[bb].values)
# with errors
list(df.<error descr="Name expected">[</error>'a'].values)
<warning descr="Method Series.to_list() is recommended">list<caret>(df['a'].values)</warning> | [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
6d11fd956434d519b5a5372dabe17f786e73b2ac | d2fcdd28d21ca69464a78719a2ecb679d264c2d4 | /12_strings/longest_word.py | cd734f3339e5957b0ab91b68a07332d1a4493304 | [] | permissive | boswellgathu/py_learn | 2ce471d2ce8098b79fe8ae369c2e30e61953883f | 1555c7f0f37108571b9235683fee901eded48f57 | refs/heads/master | 2022-01-21T19:26:17.599857 | 2022-01-17T19:16:44 | 2022-01-17T19:16:44 | 181,168,148 | 0 | 3 | MIT | 2019-09-17T17:16:53 | 2019-04-13T12:31:27 | Python | UTF-8 | Python | false | false | 297 | py | # Program to display the longest word in a given sentence
# The sentence should be given as an input from the key board by the user
# If there are two words of equal length, it displays the first longest word
# for more info on this quiz, go to this url: http://www.programmr.com/longest-word-3 | [
"boswell.gathu@andela.com"
] | boswell.gathu@andela.com |
0f805ed12ba74d2e6e9ac11f49ad6e0e15c6e302 | 79a7f267341c389a41200e2c08e1525d650b8d68 | /choice_field/migrations/0002_auto_20190305_1147.py | 0732b0ce88bff53e432bf833392c894533391bb8 | [] | no_license | d051a/django_optgroup | 9156019c01f12c4a0cc869adf1635d378af4bda8 | d5baf08ee4e90d5e0267776a4d53a3f40f221f65 | refs/heads/master | 2020-04-27T01:22:03.977799 | 2019-03-05T14:50:27 | 2019-03-05T14:50:27 | 173,962,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # Generated by Django 2.1.7 on 2019-03-05 08:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.7: makes Categories.parent an optional
    # self-referencing FK that is set to NULL when the parent row is deleted.
    dependencies = [
        ('choice_field', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='categories',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='choice_field.Categories'),
        ),
    ]
| [
"akargavin@gmail.com"
] | akargavin@gmail.com |
6d26f6360d5a50ad9aebd66ed3c2d6d6785ecc65 | 4cb4b9bbe6ab5de69a42678f2f06fcd9dacc877f | /tests/unit_tests/test_ef_utils.py | ae078c23edf62b0c59c7c8ab9182c3b916aaeed4 | [
"Apache-2.0"
] | permissive | vgaidarji/ef-open | 1515368997e1c12dcfdd20e36c0086e9dd48259b | e1b4527a7c98caa1ecfab01a5cf0b293ca7495b5 | refs/heads/master | 2020-06-11T08:31:38.944537 | 2019-06-19T10:34:28 | 2019-06-19T10:34:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,306 | py | """
Copyright 2016-2017 Ellation, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import base64
import os
from StringIO import StringIO
import unittest
from botocore.exceptions import ClientError
from mock import Mock, patch
# For local application imports, context_paths must be first despite lexicon ordering
import context_paths
from ef_config import EFConfig
import ef_utils
class TestEFUtils(unittest.TestCase):
"""
Tests for 'ef_utils.py' Relies on the ef_site_config.py for testing. Look inside that file for where
some of the test values are coming from.
"""
def setUp(self):
"""
Setup function that is run before every test
Returns:
None
"""
os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))
def tearDown(self):
"""
Teardown function that is run after every test.
Returns:
None
"""
pass
@patch('sys.stderr', new_callable=StringIO)
def test_fail_with_message(self, mock_stderr):
"""
Tests fail() with a regular string message and checks if the message in stderr and exit code matches
Args:
mock_stderr: StringIO, captures the string sent to sys.stderr
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
with self.assertRaises(SystemExit) as exception:
ef_utils.fail("Error Message")
error_message = mock_stderr.getvalue().strip()
self.assertEquals(error_message, "Error Message")
self.assertEquals(exception.exception.code, 1)
@patch('sys.stdout', new_callable=StringIO)
@patch('sys.stderr', new_callable=StringIO)
def test_fail_with_message_and_exception_data(self, mock_stderr, mock_stdout):
"""
Test fail() with a regular string message and a python object as the exception data
Args:
mock_stderr: StringIO, captures the string sent to sys.stderr
mock_stdout: StringIO, captures the string sent to sys.stdout
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
with self.assertRaises(SystemExit) as exception:
ef_utils.fail("Error Message", {"ErrorCode": 22})
error_message = mock_stderr.getvalue().strip()
self.assertEquals(error_message, "Error Message")
self.assertEquals(exception.exception.code, 1)
output_message = mock_stdout.getvalue().strip()
self.assertEquals(output_message, "{'ErrorCode': 22}")
@patch('sys.stderr', new_callable=StringIO)
def test_fail_with_None_message(self, mock_stderr):
"""
Test fail() with a None object
Args:
mock_stderr: StringIO, captures the string sent to sys.stderr
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
with self.assertRaises(SystemExit) as exception:
ef_utils.fail(None)
error_message = mock_stderr.getvalue().strip()
self.assertEquals(error_message, "None")
self.assertEquals(exception.exception.code, 1)
@patch('sys.stderr', new_callable=StringIO)
def test_fail_with_empty_string(self, mock_stderr):
"""
Test fail() with a an empty string
Args:
mock_stderr: StringIO, captures the string sent to sys.stderr
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
with self.assertRaises(SystemExit) as exception:
ef_utils.fail("")
error_message = mock_stderr.getvalue().strip()
self.assertEquals(error_message, "")
self.assertEquals(exception.exception.code, 1)
@patch('urllib2.urlopen')
def test_http_get_metadata_200_status_code(self, mock_urllib2):
"""
Test http_get_metadata to retrieve an ami-id with 200 success status.
Args:
mock_urllib2: MagicMock, returns back 200 and the ami-id value
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_response = Mock(name="Always 200 Status Code")
mock_response.getcode.return_value = 200
mock_response.read.return_value = "ami-12345678"
mock_urllib2.return_value = mock_response
response = ef_utils.http_get_metadata("ami-id")
self.assertEquals(response, "ami-12345678")
@patch('urllib2.urlopen')
def test_http_get_metadata_non_200_status_code(self, mock_urllib2):
"""
Test http_get_metadata to retrieve ami-id and get a non 200 status code.
Args:
mock_urllib2: MagicMock, returns back a non 200 status code.
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_response = Mock(name="Always non-200 Status Code")
mock_response.getcode.return_value = 400
mock_urllib2.return_value = mock_response
with self.assertRaises(IOError) as exception:
ef_utils.http_get_metadata("ami-id")
self.assertIn("Non-200 response", exception.exception.message)
@patch('ef_utils.http_get_metadata')
def test_whereami_ec2(self, mock_http_get_metadata):
"""
Tests whereami to see if it returns 'ec2' by mocking an ec2 environment
Args:
mock_http_get_metadata: MagicMock, returns "i-somestuff"
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_http_get_metadata.return_value = "i-somestuff"
result = ef_utils.whereami()
self.assertEquals(result, "ec2")
@patch('ef_utils.is_in_virtualbox')
@patch('ef_utils.gethostname')
@patch('ef_utils.http_get_metadata')
def test_whereami_local(self, mock_http_get_metadata, mock_gethostname, mock_is_in_virtualbox):
"""
Tests whereami to see if it returns 'local' by mocking a local machine environment
Args:
mock_http_get_metadata: MagicMock, returns something other than "i-...."
mock_gethostname: MagicMock, returns .local
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_http_get_metadata.return_value = "nothinguseful"
mock_is_in_virtualbox.return_value = False
mock_gethostname.return_value = ".local"
result = ef_utils.whereami()
self.assertEquals(result, "local")
@patch('ef_utils.is_in_virtualbox')
@patch('ef_utils.gethostname')
@patch('ef_utils.http_get_metadata')
def test_whereami_unknown(self, mock_http_get_metadata, mock_gethostname, mock_is_in_virtualbox):
"""
Tests whereami to see if it returns 'unknown' by mocking the environment to not match anything
Args:
mock_http_get_metadata: MagicMock, returns something other than "i-...."
mock_gethostname: MagicMock, returns some junk value
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_http_get_metadata.return_value = "nothinguseful"
mock_is_in_virtualbox.return_value = False
mock_gethostname.return_value = "not local"
result = ef_utils.whereami()
self.assertEquals(result, "unknown")
@patch('ef_utils.http_get_metadata')
def test_http_get_instance_env(self, mock_http_get_metadata):
"""
Tests http_get_instance_env to see if it returns 'alpha' by mocking the metadata with a valid IAM instance profile
Args:
mock_http_get_metadata: MagicMock, returns a valid JSON InstanceProfileArn
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_http_get_metadata.return_value = "{\"InstanceProfileArn\": \"arn:aws:iam::1234:role/alpha-server\"}"
env = ef_utils.http_get_instance_env()
self.assertEquals(env, "alpha")
@patch('ef_utils.http_get_metadata')
def test_http_get_instance_env_exception(self, mock_http_get_metadata):
"""
Tests http_get_instance_env to see if it raises an exception by mocking the metadata to be invalid
Args:
mock_http_get_metadata: MagicMock, returns junk value
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_http_get_metadata.return_value = "No data"
with self.assertRaises(Exception) as exception:
ef_utils.http_get_instance_env()
self.assertIn("Error looking up metadata:iam/info", exception.exception.message)
@patch('ef_utils.http_get_metadata')
def test_http_get_instance_role(self, mock_http_get_metadata):
"""
Tests http_get_instance_role to return the service name by mocking the metadata
Args:
mock_http_get_metadata: MagicMock, returns a valid JSON InstanceProfileArn
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_http_get_metadata.return_value = "{\"InstanceProfileArn\": \"arn:aws:iam::1234:role/alpha-server\"}"
role = ef_utils.http_get_instance_role()
self.assertEquals(role, "server")
@patch('ef_utils.http_get_metadata')
def test_http_get_instance_role_exception(self, mock_http_get_metadata):
"""
Tests http_get_instance_role to see if it raises an exception by giving it invalid metadata
Args:
mock_http_get_metadata: MagicMock, returns junk value
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_http_get_metadata.return_value = "No data"
with self.assertRaises(Exception) as exception:
ef_utils.http_get_instance_role()
self.assertIn("Error looking up metadata:iam/info:", exception.exception.message)
@patch('ef_utils.http_get_metadata')
def test_get_instance_aws_context(self, mock_http_get_metadata):
"""
Tests get_instance_aws_context to see if it produces a dict object with all the
data supplied in the metadata.
Args:
mock_http_get_metadata: MagicMock, returns valid responses in the order its called
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_http_get_metadata.side_effect = ["us-west-2a", "i-00001111f"]
mock_ec2_client = Mock(name="mock-ec2-client")
mock_ec2_client.describe_instances.return_value = \
{
"Reservations": [
{
"OwnerId": "4444",
"Instances": [
{
"IamInstanceProfile": {
"Arn": "arn:aws:iam::1234:instance-profile/alpha0-server-ftp"
}
}
]
}
]
}
result = ef_utils.get_instance_aws_context(mock_ec2_client)
self.assertEquals(result["account"], "4444")
self.assertEquals(result["instance_id"], "i-00001111f")
self.assertEquals(result["region"], "us-west-2")
self.assertEquals(result["role"], "alpha0-server-ftp")
self.assertEquals(result["service"], "server-ftp")
@patch('ef_utils.http_get_metadata')
def test_get_instance_aws_context_metadata_exception(self, mock_http_get_metadata):
"""
Tests get_instance_aws_context to see if it throws an exception by giving it invalid metadata
Args:
mock_http_get_metadata: MagicMock, throws an IOError exception
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_http_get_metadata.side_effect = IOError("No data")
mock_ec2_client = Mock(name="mock-ec2-client")
with self.assertRaises(IOError) as exception:
ef_utils.get_instance_aws_context(mock_ec2_client)
self.assertIn("Error looking up metadata:availability-zone or instance-id:", exception.exception.message)
@patch('boto3.Session')
def test_create_aws_clients(self, mock_session_constructor):
"""
Tests create_aws_clients by providing all the parameters and mocking the boto3.Session constructor.
Verifies that all the keys show up in the dict object returned.
Args:
mock_session_constructor: MagicMock, returns Mock object representing a boto3.Session object
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_session = Mock(name="mock-boto3-session")
mock_session.client.return_value = Mock(name="mock-client")
mock_session_constructor.return_value = mock_session
amazon_services = ["acm", "batch", "ec2", "sqs"]
client_dict = ef_utils.create_aws_clients("us-west-2d", "default", *amazon_services)
self.assertTrue("acm" in client_dict)
self.assertTrue("batch" in client_dict)
self.assertTrue("ec2" in client_dict)
self.assertTrue("sqs" in client_dict)
self.assertTrue("SESSION" in client_dict)
@patch('boto3.Session')
def test_create_aws_clients_no_profile(self, mock_session_constructor):
"""
Test create_aws_clients with all the parameters except profile and mocking the boto3 Session constructor.
Verifies that all the keys show up in the dict object returned.
Args:
mock_session_constructor: MagicMock, returns Mock object representing a boto3.Session object
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_session = Mock(name="mock-boto3-session")
mock_session.client.return_value = Mock(name="mock-client")
mock_session_constructor.return_value = mock_session
amazon_services = ["acm", "batch", "ec2", "sqs"]
client_dict = ef_utils.create_aws_clients("us-west-2d", None, *amazon_services)
self.assertTrue("acm" in client_dict)
self.assertTrue("batch" in client_dict)
self.assertTrue("ec2" in client_dict)
self.assertTrue("sqs" in client_dict)
self.assertTrue("SESSION" in client_dict)
@patch('boto3.Session')
def test_create_aws_clients_cache_multiple_configs(self, mock_session_constructor):
"""
Test create_aws_clients with multiple parameters and mocking the boto3
Session constructor.
Check that every (region, profile) pair gets its own set of clients.
Args:
mock_session_constructor: MagicMock, returns Mock object representing a boto3.Session object
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_session = Mock(name="mock-boto3-session")
# make sure we get different clients on every call
mock_session.client.side_effect = lambda *args, **kwargs: Mock(name="mock-boto3-session")
mock_session_constructor.return_value = mock_session
amazon_services = ["acm", "batch", "ec2", "sqs"]
cases = [
("us-west-2d", None),
("us-west-3d", None),
("us-west-2d", "codemobs"),
("us-west-2d", "ellationeng"),
("", None),
]
built_clients = {}
for region, profile in cases:
client_dict = ef_utils.create_aws_clients(region, profile, *amazon_services)
for key, clients in built_clients.items():
# check if the new clients are unique
self.assertNotEquals(client_dict, clients,
msg="Duplicate clients for {} vs {}".format(key, (region, profile)))
built_clients[(region, profile)] = client_dict
@patch('boto3.Session')
def test_create_aws_clients_cache_same_client(self, mock_session_constructor):
"""
Test create_aws_clients with same parameters and mocking the boto3
Session constructor.
Check that we get the same clients every time.
Args:
mock_session_constructor: MagicMock, returns Mock object representing a boto3.Session object
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_session = Mock(name="mock-boto3-session")
# make sure we get different clients on every call
mock_session.client.side_effect = lambda *args, **kwargs: Mock(name="mock-boto3-session")
mock_session_constructor.return_value = mock_session
amazon_services = ["acm", "batch", "ec2", "sqs"]
cases = [
("us-west-2d", None),
("us-west-3d", None),
("us-west-2d", "codemobs"),
("us-west-2d", "ellationeng"),
("", None),
]
for region, profile in cases:
clients1 = ef_utils.create_aws_clients(region, profile, *amazon_services)
clients2 = ef_utils.create_aws_clients(region, profile, *amazon_services)
self.assertEquals(clients1, clients2, msg="Should get the same clients for the same region/profile pair")
@patch('boto3.Session')
def test_create_aws_clients_cache_new_clients(self, mock_session_constructor):
"""
Test create_aws_clients with same parameters and mocking the boto3
Session constructor.
Check that we get the same clients every time.
Args:
mock_session_constructor: MagicMock, returns Mock object representing a boto3.Session object
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
mock_session = Mock(name="mock-boto3-session")
# make sure we get different clients on every call
mock_session.client.side_effect = lambda *args, **kwargs: Mock(name="mock-boto3-session")
mock_session_constructor.return_value = mock_session
amazon_services = ["acm", "batch", "ec2", "sqs"]
new_amazon_services = amazon_services + ["cloudfront"]
region, profile = "us-west-2", "testing"
clients = ef_utils.create_aws_clients(region, profile, *amazon_services)
# copy the old clients, so they're not overwritten
built_clients = {k: v for k, v in clients.items()}
new_clients = ef_utils.create_aws_clients(region, profile, *new_amazon_services)
for service in new_amazon_services:
self.assertIn(service, new_clients)
for service, client in built_clients.items():
self.assertEquals(new_clients.get(service), client)
def test_get_account_id(self):
"""
Checks if get_account_id returns the correct account id
Returns:
None
Raises:
AssertionError if any of the assert checks fail
"""
target_account_id = "123456789"
mock_sts_client = Mock(name="mock sts client")
mock_sts_client.get_caller_identity.return_value.get.return_value = target_account_id
self.assertEquals(ef_utils.get_account_id(mock_sts_client), target_account_id)
def test_get_autoscaling_group_properties_valid_asg_name(self):
"""Test method returns valid parameters file"""
mock_asg_resource = Mock(name="Mock Autoscaling Client")
mock_asg_resource.describe_auto_scaling_groups.return_value = \
{
"AutoScalingGroups": [
{
"DesiredCapacity": 2,
"Tags": [
{
"ResourceType": "auto-scaling-group",
"ResourceId": "alpha0-test-instance-ServerGroup",
"PropagateAtLaunch": "true",
"Value": "alpha0-test-instance",
"Key": "Name"
}
],
"AutoScalingGroupName": "alpha0-test-instance-ServerGroup"
}
]
}
result = ef_utils.get_autoscaling_group_properties(mock_asg_resource, "alpha0", "test-instance")
self.assertEquals(result[0]["DesiredCapacity"], 2)
self.assertEquals(result[0]["AutoScalingGroupName"], "alpha0-test-instance-ServerGroup")
self.assertEquals(result[0]["Tags"][0]["ResourceId"], "alpha0-test-instance-ServerGroup")
def test_get_autoscaling_group_properties_valid_tag_name(self):
"""Test method returns valid parameters file"""
mock_asg_resource = Mock(name="Mock Autoscaling Client")
mock_asg_resource.describe_auto_scaling_groups.return_value = \
{
"AutoScalingGroups": [
]
}
mock_asg_resource.describe_tags.return_value = \
{
"Tags": [
{
"ResourceType": "auto-scaling-group",
"ResourceId": "alpha0-test-instance-ServerGroup",
"PropagateAtLaunch": "true",
"Value": "alpha0-test-instance",
"Key": "Name"
}
]
}
result = ef_utils.get_autoscaling_group_properties(mock_asg_resource, "alpha0", "test-instance")
mock_asg_resource.describe_tags.assert_called_once_with(
Filters=[{ "Name": "Key", "Values": ["Name"] }, { "Name": "Value", "Values": ["alpha0-test-instance"]}])
mock_asg_resource.describe_auto_scaling_groups.assert_called_with(
AutoScalingGroupNames=["alpha0-test-instance-ServerGroup"])
class TestEFUtilsKMS(unittest.TestCase):
"""Test cases for functions using kms"""
def setUp(self):
self.service = "test-service"
self.env = "test"
self.secret = "secret"
self.error_response = {'Error': {'Code': 'FakeError', 'Message': 'Testing catch of all ClientErrors'}}
self.client_error = ClientError(self.error_response, "boto3")
self.mock_kms = Mock(name="mocked kms client")
self.bytes_return = "cipher_blob".encode()
self.mock_kms.encrypt.return_value = {"CiphertextBlob": self.bytes_return}
self.mock_kms.decrypt.return_value = {"Plaintext": self.bytes_return}
def test_kms_encrypt_call(self):
"""Validates basic kms call parameters"""
ef_utils.kms_encrypt(self.mock_kms, self.service, self.env, self.secret)
self.mock_kms.encrypt.assert_called_once_with(
KeyId='alias/{}-{}'.format(self.env, self.service),
Plaintext=self.secret.encode()
)
def test_kms_encrypt_call_subservice(self):
"""Validate KMS encryption call on a subservice, where periods should be converted to underscores due to
alias name restrictions"""
subservice = self.service + ".subservice"
ef_utils.kms_encrypt(self.mock_kms, subservice, self.env, self.secret)
self.mock_kms.encrypt.assert_called_once_with(
KeyId='alias/{}-{}'.format(self.env, self.service + "_subservice"),
Plaintext=self.secret.encode()
)
def test_kms_encrypt_returns_b64(self):
"""Validate that function returns a base64 encoded value"""
encrypted_secret = ef_utils.kms_encrypt(self.mock_kms, self.service, self.env, self.secret)
b64_return = base64.b64encode(self.bytes_return)
self.assertEqual(b64_return, encrypted_secret)
def test_kms_encrypt_fails_client_error(self):
"""Ensures that function fails a generic ClientError despite any special handling for specific error codes"""
self.mock_kms.encrypt.side_effect = self.client_error
with self.assertRaises(SystemExit):
ef_utils.kms_encrypt(self.mock_kms, self.service, self.env, self.secret)
def test_kms_decrypt_call(self):
"""Validates basic kms call parameters"""
b64_secret = base64.b64encode(self.secret)
ef_utils.kms_decrypt(self.mock_kms, b64_secret)
self.mock_kms.decrypt.assert_called_once_with(CiphertextBlob=self.secret)
def test_kms_decrypt_fails_without_b64_secret(self):
"""Ensures that function fails when passed a non-base64 encoded secret"""
with self.assertRaises(SystemExit):
ef_utils.kms_decrypt(self.mock_kms, self.secret)
def test_kms_decrypt_fails_client_error(self):
"""Ensures that function fails a generic ClientError despite any special handling for specific error codes"""
self.mock_kms.decrypt.side_effect = self.client_error
with self.assertRaises(SystemExit):
ef_utils.kms_decrypt(self.mock_kms, self.secret)
| [
"noreply@github.com"
] | noreply@github.com |
cf639a724588384b8978abe482aac9ccb94cb3f7 | 63b369bbed088c59d668dccc93a7b9dec9d3111c | /framingham_predict.py | 3c416c98a090a3365171de964fffb57be51a595c | [] | no_license | letthedataconfess/Implementation-of-Chat-Bot-for-Landmark-Detection-using-Deep-Learning | 7a9fa625b96f42e188ad043a9f72ba2ebd1b2479 | 8faf990cd544859c78b95284a41baadc60a5c828 | refs/heads/main | 2023-04-25T21:21:25.116575 | 2021-05-02T06:11:02 | 2021-05-02T06:11:02 | 363,579,711 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | import pandas as pd
import pickle
filename = 'notebook/finalized_model.sav'
loaded_model = pickle.load(open(filename, 'rb'))
def model_prediction(data):
# data = {
# 'male': 1,
# 'age': 39,
# 'cigsPerDay': 19,
# 'BPMeds': 0,
# 'prevalentStroke': 0,
# 'prevalentHyp': 0,
# 'diabetes': 0,
# 'totChol': 195,
# 'BMI': 26.97,
# 'heartRate': 80
# }
# data.values()
response = loaded_model.predict([list(data.values())])
return response[0]
| [
"noreply@github.com"
] | noreply@github.com |
a6380c2890db603777bb098d01ff5f745c4f1ef5 | f885108bc57bb52e9e5f421cfeb7906d73d57f03 | /hh.ru.py | 84e1c6a49aff94e481ff7c67e599e5cf07a56e20 | [] | no_license | digkill/hh_parser | 319a84f01aa73c622c7f58e703a83d016c881a63 | 50fc60d523a84c8a1589b6c7c849a48889590464 | refs/heads/master | 2022-07-23T02:50:09.098921 | 2019-11-06T20:16:53 | 2019-11-06T20:16:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | import requests
from bs4 import BeautifulSoup as bs
import csv
headers = {'accept': '*/*',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'}
baseUrl = 'https://hh.ru/search/vacancy?area=1search_period=3&text=php&page=0'
def hhParse(baseUrl, headers):
jobs = []
urls = []
urls.append(baseUrl)
session = requests.Session()
request = session.get(baseUrl, headers=headers)
if request.status_code == 200:
print('OK')
soup = bs(request.content, 'lxml')
try:
pagination = soup.find_all('a', attrs={'data-qa': 'pager-page'})
count = int(pagination[-1].text)
for i in range(count):
url = f'https://hh.ru/search/vacancy?area=1search_period=3&text=php&page={i}'
if url not in urls:
urls.append(url)
except:
pass
for url in urls:
request = session.get(url, headers=headers)
soup = bs(request.content, 'lxml')
divs = soup.find_all('div', attrs={'data-qa': 'vacancy-serp__vacancy'})
for div in divs:
try:
title = div.find('a', attrs={'data-qa': 'vacancy-serp__vacancy-title'}).text
href = div.find('a', attrs={'data-qa': 'vacancy-serp__vacancy-title'})['href']
company = div.find('a', attrs={'data-qa': 'vacancy-serp__vacancy-employer'}).text
responsibility = div.find('div', attrs={'data-qa': 'vacancy-serp__vacancy_snippet_responsibility'}).text
requirement = div.find('div', attrs={'data-qa': 'vacancy-serp__vacancy_snippet_requirement'}).text
content = responsibility + ' ' + requirement
jobs.append({
'title': title,
'href': href,
'company': company,
'content': content,
})
except:
pass
print(len(jobs))
else:
print('ERROR OR DONE = ' + str(request.status_code))
return jobs
def filesWriter(jobs):
with open('parsed_jobs.csv', 'w', encoding='utf-8') as file:
record = csv.writer(file)
record.writerow(('Название вакансии', 'URL', 'Название компании', 'Описание'))
for job in jobs:
record.writerow((job['title'], job['href'], job['company'], job['content']))
jobs = hhParse(baseUrl, headers)
filesWriter(jobs)
| [
"vitaliy@edifanov.com"
] | vitaliy@edifanov.com |
d39108fb43a28ba0ebb985c8d44a6adb62666aec | de1acbfb99e203994043c6ea4d2a75382ba92406 | /old/info.py | 25a86ba3d6099ab4d2f5ef98448d6b5cbf19587a | [] | no_license | PlanteVodu/system | 30ce7e5b5c9e710211d080217f6ae1f1852abaf2 | c809cc8ad58de0e504339ee5ce7e0610b74e2783 | refs/heads/master | 2021-06-14T06:16:44.528778 | 2017-05-21T21:34:19 | 2017-05-21T21:34:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,536 | py | import psutil
print (" ")
print ("Informations sur les utilisateurs : ")
users = psutil.users()
print ("Nombre d'utilisateur connecte (peut etre plusieurs fois le meme) : ")
print len(users) #nombre d'utilisateur
nbusers = len(users)
length = len(users)
taille = 0
while taille < length:
if taille < length:
print(users[taille][0]) #affichage du nom de l'utilisateur
taille = taille +1
else:
print(" ")
print (" ")
print ("Informations sur la RAM (% utilise) : ")
vm = psutil.swap_memory() #RAM -> swap
mem = vm[3]
print (mem)
totmem = vm[0] / 1000000000
print ("Memoire total en GB: ")
print (totmem)
usedmem = vm[1] / 1000000000
print ("Memoire utilisee en GB: ")
print (usedmem)
availmem = vm[2] / 1000000000
print ("Memoire disponible en GB: ")
print (availmem)
print (" ")
print ("Information sur la memoire restante du disque '/' (% utilise) : ")
disk = psutil.disk_usage('/')
disque = disk[3]
print (disque)
print (" ")
print ("Nombre de disques : ")
dis = psutil.disk_partitions()
nbdisk = len(dis)
print (nbdisk)
taille = 0
length = len(dis)
print ("Les disques sont : ")
while taille < length:
if taille < length:
print(dis[taille][0]) #affichage du nom de l'utilisateur
taille = taille +1
else:
print(" ")
print (" ")
print ("Nombre de processus lances : ")
pids = psutil.pids()
nbpids = len(pids)
print (nbpids)
print (" ")
import time
temps = time.localtime();
ilest = "{year}-{month}-{day} {hour}:{minu}:{sec}".format(year=temps.tm_year, month=temps.tm_mon, day=temps.tm_mday, hour=temps.tm_hour, minu=temps.tm_min, sec=temps.tm_sec)
print ilest
nomsDisk = '|'.join(diskname[0] for diskname in psutil.disk_partitions())
print nomsDisk
nomsUser = '|'.join(users[0] for users in psutil.users())
print nomsUser
import socket
hostname = socket.gethostname()
import sqlite3 as lite
con = lite.connect('BDD.db')
with con:
cur = con.cursor()
cur.execute("INSERT INTO infosys (nomMachine, date, nbUsers, utilisation, memoireTotal, memoireUtilise, memoireDispo, nombreDisk, nomDisk, utilisationRacine, nbProcessus) VALUES(:hostname,:ilest,:nbusers,:mem,:totmem,:usedmem,:availmem,:nbdisk,:nomsDisk,:disque,:nbpids);", {'hostname': hostname, 'ilest': ilest, 'nbusers': nbusers, 'mem': mem, 'totmem': totmem, 'usedmem': usedmem, 'availmem': availmem, 'nbdisk': nbdisk, 'nomsDisk': nomsDisk, 'disque': disque, 'nbpids': nbpids})
cur.execute("INSERT INTO users (nomMachine, date, nomUsers) VALUES (:hostname,:ilest,:nomsUser);", {'hostname': hostname, 'ilest': ilest, 'nomsUser': nomsUser})
| [
"fildas.pa3@gmail.com"
] | fildas.pa3@gmail.com |
d6cf2cbb46a9f8b93b4b792e77f994a9fe9413c1 | 53b522367c4e3416142afacac580058d32581d8e | /test1/test1/courses/migrations/0002_task_taskstatus.py | da03402bb4f16a584e5434324ce26a8057d1ea7a | [] | no_license | Poreykin/test_project | 52131345979f621aca1a722f5646593be42c96df | cd8005e523cd9dbfe31c6042abff7edbae370f75 | refs/heads/master | 2020-03-21T07:00:35.616514 | 2018-07-04T08:51:04 | 2018-07-04T08:51:04 | 138,254,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | # Generated by Django 2.0.6 on 2018-06-29 13:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_fsm
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('courses', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('solution', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='TaskStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', django_fsm.FSMField(choices=[('PRC', 'process'), ('RVW', 'review'), ('DBT', 'debt'), ('CMP', 'complete')], default='PRC', max_length=50)),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('task', models.ForeignKey(db_index=False, on_delete=django.db.models.deletion.CASCADE, to='courses.Task')),
],
),
]
| [
"zhenyaporeykin@yandex.ru"
] | zhenyaporeykin@yandex.ru |
ca6b3166f393338dabec04bc58f53131b6d65b8a | 177b66facda74108e693d0fe4e0be1cd8b3adc79 | /cell/test data.py | f552320e7e0631afc676614ecd295e8330064807 | [] | no_license | leizeling/my_learn | 04c0266adc319f5679c6db17ad4681a448def5eb | 3be0446d1a9e2d301d58f455261763231f1aa7d6 | refs/heads/master | 2020-03-19T04:12:32.196213 | 2018-06-07T14:51:39 | 2018-06-07T14:51:39 | 135,805,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,938 | py | # _*_ conding:utf-8 _*_
from __future__ import print_function
import os
import numpy as np
from skimage.io import imsave, imread
data_path = '/home/momoh/mabocombinedimgs22/'
image_rows = 420
image_cols = 580
def create_test_data2():
train_data_path = os.path.join(data_path, 'test')
images = os.listdir(train_data_path) #文件名列表
total = len(images) / 2
imgs = np.ndarray((total, image_rows, image_cols), dtype=np.uint8) #np.ndarray中参数表示的是维度,默认值为零
imgs_mask = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
i = 0
print('-'*30)
print('Creating training images...')
print('-'*30)
for image_name in images:
if 'mask' in image_name:
continue
image_mask_name = image_name.split('.')[0] + '_mask.jpg'
img = imread(os.path.join(train_data_path, image_name)) #(width,height,channel)
img_mask = imread(os.path.join(train_data_path, image_mask_name))
img =img[:,:,1] #(width,height)
img_mask=img_mask[:,:,1]
img = np.array([img]) #(1,width,height)
img_mask = np.array([img_mask])
imgs[i] = img #(i,1,width,height)
imgs_mask[i] = img_mask
if i % 100 == 0:
print('Done: {0}/{1} images'.format(i, total))
i += 1
print(total)
np.save('imgs_test.npy', imgs)
np.save('imgs_mask_test.npy', imgs_mask)
print('Saving to .npy files done.')
def create_train_data():
train_data_path = os.path.join(data_path, 'train')
images = os.listdir(train_data_path)
total = len(images) / 2
imgs = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
imgs_mask = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
i = 0
print('-'*30)
print('Creating training images...')
print('-'*30)
for image_name in images:
if 'mask' in image_name:
continue
image_mask_name = image_name.split('.')[0] + '_mask.jpg'
img = imread(os.path.join(train_data_path, image_name))
img_mask = imread(os.path.join(train_data_path, image_mask_name))
img =img[:,:,1]
img_mask=img_mask[:,:,1]
img = np.array([img])
img_mask = np.array([img_mask])
imgs[i] = img
imgs_mask[i] = img_mask
if i % 100 == 0:
print('Done: {0}/{1} images'.format(i, total))
i += 1
print(total)
np.save('imgs_train.npy', imgs)
np.save('imgs_mask_train.npy', imgs_mask)
print('Saving to .npy files done.')
def load_train_data():
imgs_train = np.load('imgs_train.npy')
imgs_mask_train = np.load('imgs_mask_train.npy')
return imgs_train, imgs_mask_train
def create_test_data():
train_data_path = os.path.join(data_path, 'test')
images = os.listdir(train_data_path)
total = len(images)/2
imgs = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
imgs_id = np.ndarray((total, ), dtype=np.int32)
i = 0
print('-'*30)
print('Creating test images...')
print('-'*30)
for image_name in images:
if 'mask' in image_name:
continue
img_id = int(image_name.split('.')[0])#image_name
img = imread(os.path.join(train_data_path, image_name))
img =img[:,:,1]
img = np.array([img])
imgs[i] = img
imgs_id[i] = img_id
if i % 100 == 0:
print('Done: {0}/{1} images'.format(i, total))
i += 1
print('Loading done.')
np.save('imgs_test.npy', imgs)
np.save('imgs_id_test.npy', imgs_id)
print('Saving to .npy files done.')
def load_test_data():
imgs_test = np.load('imgs_test.npy')
imgs_mask_test = np.load('imgs_mask_test.npy')
imgs_id = np.load('imgs_id_test.npy')
return imgs_test, imgs_id,imgs_mask_test
if __name__ == '__main__':
    # Training-data generation is disabled; only test data is rebuilt.
    #create_train_data()
    create_test_data()
    # NOTE(review): create_test_data2 is not defined anywhere in this file --
    # this call raises NameError unless it is provided elsewhere; confirm.
    create_test_data2()
| [
"1072113944@qq.comm"
] | 1072113944@qq.comm |
d8dafe62404a5753d70525f7fc485e3cb449e5e6 | a2ae4a37ed2a0cb4e9afabe0782142e66ec47226 | /tutorial_snippets/snippets/migrations/0001_initial.py | 27627537b3e69f8e0c3b5515aa0153624a5ff3d8 | [] | no_license | deltadada/REST_snippets2 | 548343e4488385b916cc06a61203c85b66595806 | 95f4a24bb0af5a7f695739ac3cdc6996fd79d0e6 | refs/heads/master | 2020-05-29T21:29:23.265548 | 2015-02-13T18:38:28 | 2015-02-13T18:38:28 | 30,764,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,262 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the Snippet model for the snippets app.

    The huge 'language' and 'style' choice lists below are generated from
    the Pygments lexer/style registries -- do not hand-edit them.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Snippet',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('title', models.CharField(max_length=100, default='', blank=True)),
                ('code', models.TextField()),
                ('linenos', models.BooleanField(default=False)),
                # Pygments lexer short names -> display names.
                ('language', models.CharField(choices=[('abap', 'ABAP'), ('ada', 'Ada'), ('agda', 'Agda'), ('ahk', 'autohotkey'), ('alloy', 'Alloy'), ('antlr', 'ANTLR'), ('antlr-as', 'ANTLR With ActionScript Target'), ('antlr-cpp', 'ANTLR With CPP Target'), ('antlr-csharp', 'ANTLR With C# Target'), ('antlr-java', 'ANTLR With Java Target'), ('antlr-objc', 'ANTLR With ObjectiveC Target'), ('antlr-perl', 'ANTLR With Perl Target'), ('antlr-python', 'ANTLR With Python Target'), ('antlr-ruby', 'ANTLR With Ruby Target'), ('apacheconf', 'ApacheConf'), ('apl', 'APL'), ('applescript', 'AppleScript'), ('as', 'ActionScript'), ('as3', 'ActionScript 3'), ('aspectj', 'AspectJ'), ('aspx-cs', 'aspx-cs'), ('aspx-vb', 'aspx-vb'), ('asy', 'Asymptote'), ('at', 'AmbientTalk'), ('autoit', 'AutoIt'), ('awk', 'Awk'), ('basemake', 'Base Makefile'), ('bash', 'Bash'), ('bat', 'Batchfile'), ('bbcode', 'BBCode'), ('befunge', 'Befunge'), ('blitzbasic', 'BlitzBasic'), ('blitzmax', 'BlitzMax'), ('boo', 'Boo'), ('brainfuck', 'Brainfuck'), ('bro', 'Bro'), ('bugs', 'BUGS'), ('c', 'C'), ('c-objdump', 'c-objdump'), ('ca65', 'ca65 assembler'), ('cbmbas', 'CBM BASIC V2'), ('ceylon', 'Ceylon'), ('cfc', 'Coldfusion CFC'), ('cfengine3', 'CFEngine3'), ('cfm', 'Coldfusion HTML'), ('cfs', 'cfstatement'), ('chai', 'ChaiScript'), ('chapel', 'Chapel'), ('cheetah', 'Cheetah'), ('cirru', 'Cirru'), ('clay', 'Clay'), ('clojure', 'Clojure'), ('clojurescript', 'ClojureScript'), ('cmake', 'CMake'), ('cobol', 'COBOL'), ('cobolfree', 'COBOLFree'), ('coffee-script', 'CoffeeScript'), ('common-lisp', 'Common Lisp'), ('console', 'Bash Session'), ('control', 'Debian Control file'), ('coq', 'Coq'), ('cpp', 'C++'), ('cpp-objdump', 'cpp-objdump'), ('croc', 'Croc'), ('cryptol', 'Cryptol'), ('csharp', 'C#'), ('css', 'CSS'), ('css+django', 'CSS+Django/Jinja'), ('css+erb', 'CSS+Ruby'), ('css+genshitext', 'CSS+Genshi Text'), ('css+lasso', 'CSS+Lasso'), ('css+mako', 'CSS+Mako'), ('css+mozpreproc', 'CSS+mozpreproc'), ('css+myghty', 'CSS+Myghty'), 
('css+php', 'CSS+PHP'), ('css+smarty', 'CSS+Smarty'), ('cucumber', 'Gherkin'), ('cuda', 'CUDA'), ('cypher', 'Cypher'), ('cython', 'Cython'), ('d', 'D'), ('d-objdump', 'd-objdump'), ('dart', 'Dart'), ('delphi', 'Delphi'), ('dg', 'dg'), ('diff', 'Diff'), ('django', 'Django/Jinja'), ('docker', 'Docker'), ('dpatch', 'Darcs Patch'), ('dtd', 'DTD'), ('duel', 'Duel'), ('dylan', 'Dylan'), ('dylan-console', 'Dylan session'), ('dylan-lid', 'DylanLID'), ('ebnf', 'EBNF'), ('ec', 'eC'), ('ecl', 'ECL'), ('eiffel', 'Eiffel'), ('elixir', 'Elixir'), ('erb', 'ERB'), ('erl', 'Erlang erl session'), ('erlang', 'Erlang'), ('evoque', 'Evoque'), ('factor', 'Factor'), ('fan', 'Fantom'), ('fancy', 'Fancy'), ('felix', 'Felix'), ('fortran', 'Fortran'), ('foxpro', 'FoxPro'), ('fsharp', 'FSharp'), ('gap', 'GAP'), ('gas', 'GAS'), ('genshi', 'Genshi'), ('genshitext', 'Genshi Text'), ('glsl', 'GLSL'), ('gnuplot', 'Gnuplot'), ('go', 'Go'), ('golo', 'Golo'), ('gooddata-cl', 'GoodData-CL'), ('gosu', 'Gosu'), ('groff', 'Groff'), ('groovy', 'Groovy'), ('gst', 'Gosu Template'), ('haml', 'Haml'), ('handlebars', 'Handlebars'), ('haskell', 'Haskell'), ('haxeml', 'Hxml'), ('html', 'HTML'), ('html+cheetah', 'HTML+Cheetah'), ('html+django', 'HTML+Django/Jinja'), ('html+evoque', 'HTML+Evoque'), ('html+genshi', 'HTML+Genshi'), ('html+handlebars', 'HTML+Handlebars'), ('html+lasso', 'HTML+Lasso'), ('html+mako', 'HTML+Mako'), ('html+myghty', 'HTML+Myghty'), ('html+php', 'HTML+PHP'), ('html+smarty', 'HTML+Smarty'), ('html+twig', 'HTML+Twig'), ('html+velocity', 'HTML+Velocity'), ('http', 'HTTP'), ('hx', 'Haxe'), ('hybris', 'Hybris'), ('hylang', 'Hy'), ('i6t', 'Inform 6 template'), ('idl', 'IDL'), ('idris', 'Idris'), ('iex', 'Elixir iex session'), ('igor', 'Igor'), ('inform6', 'Inform 6'), ('inform7', 'Inform 7'), ('ini', 'INI'), ('io', 'Io'), ('ioke', 'Ioke'), ('irc', 'IRC logs'), ('isabelle', 'Isabelle'), ('jade', 'Jade'), ('jags', 'JAGS'), ('jasmin', 'Jasmin'), ('java', 'Java'), ('javascript+mozpreproc', 
'Javascript+mozpreproc'), ('jlcon', 'Julia console'), ('js', 'JavaScript'), ('js+cheetah', 'JavaScript+Cheetah'), ('js+django', 'JavaScript+Django/Jinja'), ('js+erb', 'JavaScript+Ruby'), ('js+genshitext', 'JavaScript+Genshi Text'), ('js+lasso', 'JavaScript+Lasso'), ('js+mako', 'JavaScript+Mako'), ('js+myghty', 'JavaScript+Myghty'), ('js+php', 'JavaScript+PHP'), ('js+smarty', 'JavaScript+Smarty'), ('json', 'JSON'), ('jsonld', 'JSON-LD'), ('jsp', 'Java Server Page'), ('julia', 'Julia'), ('kal', 'Kal'), ('kconfig', 'Kconfig'), ('koka', 'Koka'), ('kotlin', 'Kotlin'), ('lagda', 'Literate Agda'), ('lasso', 'Lasso'), ('lcry', 'Literate Cryptol'), ('lean', 'Lean'), ('lhs', 'Literate Haskell'), ('lidr', 'Literate Idris'), ('lighty', 'Lighttpd configuration file'), ('limbo', 'Limbo'), ('liquid', 'liquid'), ('live-script', 'LiveScript'), ('llvm', 'LLVM'), ('logos', 'Logos'), ('logtalk', 'Logtalk'), ('lsl', 'LSL'), ('lua', 'Lua'), ('make', 'Makefile'), ('mako', 'Mako'), ('maql', 'MAQL'), ('mask', 'Mask'), ('mason', 'Mason'), ('mathematica', 'Mathematica'), ('matlab', 'Matlab'), ('matlabsession', 'Matlab session'), ('minid', 'MiniD'), ('modelica', 'Modelica'), ('modula2', 'Modula-2'), ('monkey', 'Monkey'), ('moocode', 'MOOCode'), ('moon', 'MoonScript'), ('mozhashpreproc', 'mozhashpreproc'), ('mozpercentpreproc', 'mozpercentpreproc'), ('mql', 'MQL'), ('mscgen', 'Mscgen'), ('mupad', 'MuPAD'), ('mxml', 'MXML'), ('myghty', 'Myghty'), ('mysql', 'MySQL'), ('nasm', 'NASM'), ('nemerle', 'Nemerle'), ('nesc', 'nesC'), ('newlisp', 'NewLisp'), ('newspeak', 'Newspeak'), ('nginx', 'Nginx configuration file'), ('nimrod', 'Nimrod'), ('nit', 'Nit'), ('nixos', 'Nix'), ('nsis', 'NSIS'), ('numpy', 'NumPy'), ('objdump', 'objdump'), ('objdump-nasm', 'objdump-nasm'), ('objective-c', 'Objective-C'), ('objective-c++', 'Objective-C++'), ('objective-j', 'Objective-J'), ('ocaml', 'OCaml'), ('octave', 'Octave'), ('ooc', 'Ooc'), ('opa', 'Opa'), ('openedge', 'OpenEdge ABL'), ('pan', 'Pan'), ('pawn', 'Pawn'), 
('perl', 'Perl'), ('perl6', 'Perl6'), ('php', 'PHP'), ('pig', 'Pig'), ('pike', 'Pike'), ('plpgsql', 'PL/pgSQL'), ('postgresql', 'PostgreSQL SQL dialect'), ('postscript', 'PostScript'), ('pot', 'Gettext Catalog'), ('pov', 'POVRay'), ('powershell', 'PowerShell'), ('prolog', 'Prolog'), ('properties', 'Properties'), ('protobuf', 'Protocol Buffer'), ('psql', 'PostgreSQL console (psql)'), ('puppet', 'Puppet'), ('py3tb', 'Python 3.0 Traceback'), ('pycon', 'Python console session'), ('pypylog', 'PyPy Log'), ('pytb', 'Python Traceback'), ('python', 'Python'), ('python3', 'Python 3'), ('qbasic', 'QBasic'), ('qml', 'QML'), ('racket', 'Racket'), ('ragel', 'Ragel'), ('ragel-c', 'Ragel in C Host'), ('ragel-cpp', 'Ragel in CPP Host'), ('ragel-d', 'Ragel in D Host'), ('ragel-em', 'Embedded Ragel'), ('ragel-java', 'Ragel in Java Host'), ('ragel-objc', 'Ragel in Objective C Host'), ('ragel-ruby', 'Ragel in Ruby Host'), ('raw', 'Raw token data'), ('rb', 'Ruby'), ('rbcon', 'Ruby irb session'), ('rconsole', 'RConsole'), ('rd', 'Rd'), ('rebol', 'REBOL'), ('red', 'Red'), ('redcode', 'Redcode'), ('registry', 'reg'), ('resource', 'ResourceBundle'), ('rexx', 'Rexx'), ('rhtml', 'RHTML'), ('robotframework', 'RobotFramework'), ('rql', 'RQL'), ('rsl', 'RSL'), ('rst', 'reStructuredText'), ('rust', 'Rust'), ('sass', 'Sass'), ('scala', 'Scala'), ('scaml', 'Scaml'), ('scheme', 'Scheme'), ('scilab', 'Scilab'), ('scss', 'SCSS'), ('shell-session', 'Shell Session'), ('slim', 'Slim'), ('smali', 'Smali'), ('smalltalk', 'Smalltalk'), ('smarty', 'Smarty'), ('sml', 'Standard ML'), ('snobol', 'Snobol'), ('sourceslist', 'Debian Sourcelist'), ('sp', 'SourcePawn'), ('sparql', 'SPARQL'), ('spec', 'RPMSpec'), ('splus', 'S'), ('sql', 'SQL'), ('sqlite3', 'sqlite3con'), ('squidconf', 'SquidConf'), ('ssp', 'Scalate Server Page'), ('stan', 'Stan'), ('swift', 'Swift'), ('swig', 'SWIG'), ('systemverilog', 'systemverilog'), ('tads3', 'TADS 3'), ('tcl', 'Tcl'), ('tcsh', 'Tcsh'), ('tea', 'Tea'), ('tex', 'TeX'), ('text', 
'Text only'), ('todotxt', 'Todotxt'), ('trac-wiki', 'MoinMoin/Trac Wiki markup'), ('treetop', 'Treetop'), ('ts', 'TypeScript'), ('twig', 'Twig'), ('urbiscript', 'UrbiScript'), ('vala', 'Vala'), ('vb.net', 'VB.net'), ('vctreestatus', 'VCTreeStatus'), ('velocity', 'Velocity'), ('verilog', 'verilog'), ('vgl', 'VGL'), ('vhdl', 'vhdl'), ('vim', 'VimL'), ('xml', 'XML'), ('xml+cheetah', 'XML+Cheetah'), ('xml+django', 'XML+Django/Jinja'), ('xml+erb', 'XML+Ruby'), ('xml+evoque', 'XML+Evoque'), ('xml+lasso', 'XML+Lasso'), ('xml+mako', 'XML+Mako'), ('xml+myghty', 'XML+Myghty'), ('xml+php', 'XML+PHP'), ('xml+smarty', 'XML+Smarty'), ('xml+velocity', 'XML+Velocity'), ('xquery', 'XQuery'), ('xslt', 'XSLT'), ('xtend', 'Xtend'), ('xul+mozpreproc', 'XUL+mozpreproc'), ('yaml', 'YAML'), ('yaml+jinja', 'YAML+Jinja'), ('zephir', 'Zephir')], max_length=100, default='python')),
                # Pygments formatter style names.
                ('style', models.CharField(choices=[('autumn', 'autumn'), ('borland', 'borland'), ('bw', 'bw'), ('colorful', 'colorful'), ('default', 'default'), ('emacs', 'emacs'), ('friendly', 'friendly'), ('fruity', 'fruity'), ('igor', 'igor'), ('manni', 'manni'), ('monokai', 'monokai'), ('murphy', 'murphy'), ('native', 'native'), ('paraiso-dark', 'paraiso-dark'), ('paraiso-light', 'paraiso-light'), ('pastie', 'pastie'), ('perldoc', 'perldoc'), ('rrt', 'rrt'), ('tango', 'tango'), ('trac', 'trac'), ('vim', 'vim'), ('vs', 'vs'), ('xcode', 'xcode')], max_length=100, default='friendly')),
            ],
            options={
                'ordering': ('created',),
            },
            bases=(models.Model,),
        ),
    ]
| [
"markowitzjj@mail.nlm.nih.gov"
] | markowitzjj@mail.nlm.nih.gov |
baa05ca51f5bbccbe658dac624ebc2f430ff06de | 3506f231e00d6a5c9eb9c944d11afec23d530e6a | /nestedLoopBreakException.py | aa3db356b2c2b575c95fc38aa4d40f47073d7314 | [] | no_license | HafizulHaque/Python-Practice- | 336ba21da5a2fcc6961db5169b44a94da720bb27 | c9474b51488786fc97769d3411ae23dc404445df | refs/heads/main | 2023-01-29T14:25:05.119818 | 2020-12-10T16:13:39 | 2020-12-10T16:13:39 | 320,323,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | import random
class foundException(Exception):
    """Sentinel exception used to break out of the nested search loops."""
    pass
# Demo: use a custom exception to escape several nested loops at once.
table = ['car', 'house', 'horse']
target = 7
try:
    # Walk every character of every word; "record" is the word and "row" a
    # character of it.  Note that assigning the random draw to `item` does
    # NOT write anything back into the data structure -- the loops exist
    # only to exercise the break-out-via-exception pattern.
    for recNo, record in enumerate(table):
        for rowNo, row in enumerate(record):
            for index, item in enumerate(row):
                item = random.randint(1, 10)
                if item == target:
                    raise foundException()
except foundException as err:
    # The loop variables still hold the position where the target was hit.
    print('{}:{}:{}'.format(record, row, index))
else:
    # try/else: runs only when the loops finished without raising.
    print('not found the number')
| [
"hafizulhaque.cse.cuet@gmail.com"
] | hafizulhaque.cse.cuet@gmail.com |
1044eab6cb08b310b8e4e0dcf36cd2297ac2e587 | 597516230cbc04313d4f1e5b61f148a78fd0fcef | /backend/controller/twitter.py | e48ab5de84cf33aa4f754d04e2950482bbe089c5 | [] | no_license | amni/SMSmart | b102be1b622bd395f7796e7e091b6b0721e92ea1 | 2e1e59d442df7dd0bda42026db13193dfd357b70 | refs/heads/master | 2022-07-23T19:46:08.025445 | 2015-04-14T20:17:23 | 2015-04-14T20:17:23 | 24,907,923 | 1 | 0 | null | 2022-07-06T19:21:38 | 2014-10-07T19:33:32 | Python | UTF-8 | Python | false | false | 1,061 | py | from base import Base
import api.wrapper.twitter_wrapper as twitter_wrapper
class Twitter(Base):
    """SMS-facing controller that proxies a user's Twitter account."""

    def default(self, user, **kwargs):
        # The timeline feed is the default action for this controller.
        return self.feed(user, **kwargs)

    def feed(self, user, **kwargs):
        """Return the user's timeline, trimmed and encoded for SMS delivery."""
        key = kwargs["key"]
        feed = twitter_wrapper.get_twitter_feed(user)
        if self.is_error(feed):
            return self.get_error_response(feed, key)
        # Keep at most 7 entries, join them with the carrot delimiter, then
        # prefix the session key before splitting into SMS-sized chunks.
        kept = self.cut_results(user, key, feed, 7)[0]
        payload = self.prepend_key(key, self.CARROT_TOKEN.join(kept))
        return self.split_result(payload)

    def tweet(self, user, **kwargs):
        """Post a new status message for the user."""
        key = kwargs["key"]
        twitter_wrapper.tweet_message(user, kwargs["message"])
        return {"messages": []}

    def retweet(self, user, **kwargs):
        """Retweet the status identified by tweet_id."""
        key = kwargs["key"]
        twitter_wrapper.retweet_message(user, kwargs["tweet_id"])
        return {"messages": []}
| [
"jmw86@duke.edu"
] | jmw86@duke.edu |
d03be730f5152cae90fc5e3ab74bc02abd2f401f | df7343ebf94cd9b5215f474e56221a4198724652 | /461. Hamming Distance.py | c8704a05dd41926e88dc493b1d89880c5dd4a98b | [] | no_license | eric-zhu94/leetcode | 642dcaf96d7f80de89d86b87e6b743c8cd08d979 | 229f62316fd512efc7fee87911028808a59c6ed0 | refs/heads/master | 2023-04-29T03:41:57.997695 | 2021-05-17T12:07:02 | 2021-05-17T12:07:02 | 250,923,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | class Solution:
def hammingDistance(self, x: int, y: int) -> int:
xbit = []
ybit = []
ans = 0
while x > 0:
bit = x % 2
x //= 2
xbit.append(bit)
while y > 0:
bit = y % 2
y //= 2
ybit.append(bit)
if len(xbit) > len(ybit):
for i in range(len(xbit) - len(ybit)):
ybit.append(0)
if len(ybit) > len(xbit):
for i in range(len(ybit) - len(xbit)):
xbit.append(0)
for j in range(len(xbit)):
if xbit[j] != ybit[j]:
ans +=1
return ans
| [
"noreply@github.com"
] | noreply@github.com |
067cbea37ee1b8d8d6927902bac7648fa994edc4 | 6ea7158f4985f47528d86a456fd2928aecfe009b | /ptlflow/models/gma/extractor.py | 93d9831147cd93e6be289503ef9cb82213101f9a | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] | permissive | hmorimitsu/ptlflow | e318f23ac93eb115e249be212c7353ed51b1396f | d6582a0fd386517fdefbe2c347cef53150b5b1da | refs/heads/main | 2023-05-14T07:04:29.196743 | 2023-05-06T08:36:33 | 2023-05-06T08:36:33 | 375,416,785 | 140 | 14 | Apache-2.0 | 2023-05-06T08:36:34 | 2021-06-09T16:12:33 | Python | UTF-8 | Python | false | false | 6,419 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
    """Two 3x3 convolutions with a residual shortcut; norm layer is configurable."""

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()

        # Convolutions are created first so the random-init draws happen in
        # the same order as before (keeps seeded runs / checkpoints stable).
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

        num_groups = planes // 8
        builders = {
            'group': lambda ch: nn.GroupNorm(num_groups=num_groups, num_channels=ch),
            'batch': nn.BatchNorm2d,
            'instance': nn.InstanceNorm2d,
            'none': lambda ch: nn.Sequential(),
        }
        if norm_fn in builders:
            make_norm = builders[norm_fn]
            self.norm1 = make_norm(planes)
            self.norm2 = make_norm(planes)
            # norm3 is only needed for the strided shortcut projection.
            if stride != 1:
                self.norm3 = make_norm(planes)

        if stride == 1:
            self.downsample = None
        else:
            # 1x1 projection so the shortcut matches the strided main path.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)

    def forward(self, x):
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.relu(self.norm2(self.conv2(out)))
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(shortcut + out)
class BottleneckBlock(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck with residual shortcut and configurable norm."""

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(BottleneckBlock, self).__init__()

        mid = planes // 4
        # Convs first: preserves the RNG draw order of the original code.
        self.conv1 = nn.Conv2d(in_planes, mid, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(mid, mid, kernel_size=3, padding=1, stride=stride)
        self.conv3 = nn.Conv2d(mid, planes, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)

        num_groups = planes // 8
        builders = {
            'group': lambda ch: nn.GroupNorm(num_groups=num_groups, num_channels=ch),
            'batch': nn.BatchNorm2d,
            'instance': nn.InstanceNorm2d,
            'none': lambda ch: nn.Sequential(),
        }
        if norm_fn in builders:
            make_norm = builders[norm_fn]
            self.norm1 = make_norm(mid)
            self.norm2 = make_norm(mid)
            self.norm3 = make_norm(planes)
            # norm4 is only needed for the strided shortcut projection.
            if stride != 1:
                self.norm4 = make_norm(planes)

        if stride == 1:
            self.downsample = None
        else:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)

    def forward(self, x):
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.relu(self.norm2(self.conv2(out)))
        out = self.relu(self.norm3(self.conv3(out)))
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(shortcut + out)
class BasicEncoder(nn.Module):
    """Feature encoder: a 7x7 stride-2 stem followed by three residual
    stages (strides 1, 2, 2 -- overall 1/8 resolution) and a 1x1 output
    convolution to `output_dim` channels.

    NOTE(review): kept byte-identical on purpose -- module creation order
    fixes both the RNG draws for weight init and the state_dict layout,
    so restructuring could silently break checkpoint compatibility.
    """
    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(BasicEncoder, self).__init__()
        self.norm_fn = norm_fn

        # Stem normalization; an unknown norm_fn leaves self.norm1 unset
        # and fails later with AttributeError (same behavior for the blocks).
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)

        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(64)

        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(64)

        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()

        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)

        # _make_layer reads and updates self.in_planes between stages.
        self.in_planes = 64
        self.layer1 = self._make_layer(64, stride=1)
        self.layer2 = self._make_layer(96, stride=2)
        self.layer3 = self._make_layer(128, stride=2)

        # output convolution
        self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)

        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)

        # Kaiming init for convs; affine norm params to identity (weight=1,
        # bias=0).  InstanceNorm is affine=False by default, hence the
        # `is not None` guards.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, dim, stride=1):
        # Two residual blocks per stage; only the first may downsample.
        layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)

        self.in_planes = dim
        return nn.Sequential(*layers)

    def forward(self, x):

        # if input is list, combine batch dimension
        is_list = isinstance(x, tuple) or isinstance(x, list)
        if is_list:
            # Assumes both list entries share the same batch size -- the
            # split below uses [batch_dim, batch_dim].  TODO confirm.
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)

        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.conv2(x)

        if self.training and self.dropout is not None:
            x = self.dropout(x)

        if is_list:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)

        return x
"henriquem87@gmail.com"
] | henriquem87@gmail.com |
04db9c3fbe3b7089250387b79ad864a22bb84d5f | 040a3c03b7927a862d05ea948147d862a24a6b0b | /tools/releasetools/common.py | 7ff17f4e0768039edfe1cb33fde9214ef3b33e39 | [] | no_license | xiangxin19960319/Patch_MIUIV5 | 3db0bbb3be4e3f2f130a8afaa82d42f607115708 | e177617ae419721d154fee59e2b169c4dd4de291 | refs/heads/master | 2021-01-01T15:51:26.548456 | 2013-10-28T13:49:29 | 2013-10-28T13:49:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,918 | py | # Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import getopt
import getpass
import imp
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile
try:
from hashlib import sha1 as sha1
except ImportError:
from sha import sha as sha1
# missing in Python 2.4 and before
if not hasattr(os, "SEEK_SET"):
os.SEEK_SET = 0
class Options(object):
  """Plain attribute bag backing the module-wide OPTIONS singleton."""
  pass
# Module-wide option singleton, populated with defaults here and later
# overridden by ParseOptions().
OPTIONS = Options()
# NOTE(review): requires the PORT_ROOT environment variable to be set;
# importing this module fails with KeyError otherwise.
OPTIONS.search_path = os.path.join(os.environ["PORT_ROOT"], "tools");
OPTIONS.verbose = False
OPTIONS.tempfiles = []          # temp paths removed by Cleanup()
OPTIONS.device_specific = None  # path to device-specific releasetools module
OPTIONS.extras = {}             # free-form -x key=value pairs
OPTIONS.info_dict = None

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
class ExternalError(RuntimeError):
  """Raised when an external tool invoked by this script fails."""
  pass
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified.

  args: argv list for the child process.
  kwargs: forwarded verbatim to subprocess.Popen (stdout=, stdin=, ...).
  """
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
def CloseInheritedPipes():
  """Work around a gmake file-descriptor (PIPE) leak on Mac OS by closing
  any inherited pipe fds before doing other work.  No-op elsewhere."""
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
      # S_IFIFO (0x1000) in st_mode marks the descriptor as a pipe.
      if st is not None and st[0] & 0x1000:
        os.close(fd)
    except OSError:
      # fd not open, or close failed -- either way, ignore it.
      pass
def LoadInfoDict(zip):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Falls back to the older per-file layout (mkyaffs2-extra-flags.txt,
  recovery-api-version.txt, tool-extensions.txt, imagesizes.txt) for
  target-files zips produced before misc_info.txt existed, coerces the
  known size fields to ints, and attaches the parsed recovery fstab
  under the "fstab" key.  Raises ValueError when no recovery API
  version can be found anywhere.
  """
  d = {}
  try:
    for line in zip.read("META/misc_info.txt").split("\n"):
      line = line.strip()
      if not line or line.startswith("#"): continue
      k, v = line.split("=", 1)
      d[k] = v
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.
  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = zip.read("META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = zip.read("META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = zip.read("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  try:
    data = zip.read("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line: continue
      name, value = line.split(" ", 1)
      if not value: continue
      # "blocksize" is stored as-is; every other entry becomes "<name>_size".
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Sizes are stored as strings; base 0 also accepts hex ("0x...").
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("userdata_size")
  makeint("recovery_size")
  makeint("boot_size")

  d["fstab"] = LoadRecoveryFSTab(zip)
  return d
def LoadRecoveryFSTab(zip):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab from the target-files zip.

  Returns {mount_point: Partition}, where each Partition carries
  mount_point, fs_type, device, optional device2, and length (0 unless a
  "length=" option was present).  A missing fstab only produces a warning
  and an empty dict; a malformed line raises ValueError.
  """
  class Partition(object):
    # Simple record type; attributes are assigned field-by-field below.
    pass

  try:
    data = zip.read("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab in %s." % zip
    data = ""

  d = {}
  for line in data.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"): continue
    pieces = line.split()
    if not (3 <= len(pieces) <= 4):
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

    p = Partition()
    p.mount_point = pieces[0]
    p.fs_type = pieces[1]
    p.device = pieces[2]
    p.length = 0
    options = None
    if len(pieces) >= 4:
      if pieces[3].startswith("/"):
        # A fourth field beginning with "/" is an alternate device node.
        p.device2 = pieces[3]
        # NOTE(review): the range check above caps len(pieces) at 4, so
        # this branch can never see a fifth field -- confirm intent.
        if len(pieces) >= 5:
          options = pieces[4]
      else:
        p.device2 = None
        options = pieces[3]
    else:
      p.device2 = None

    if options:
      options = options.split(",")
      for i in options:
        if i.startswith("length="):
          p.length = int(i[7:])
        else:
          print "%s: unknown option \"%s\"" % (p.mount_point, i)

    d[p.mount_point] = p
  return d
def DumpInfoDict(d):
for k, v in sorted(d.items()):
print "%-25s = (%s) %s" % (k, type(v).__name__, v)
def BuildBootableImage(sourcedir):
  """Take a kernel, cmdline, and ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image.  Return the image
  data, or None if sourcedir does not appear to contain files for
  building the requested image."""
  if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
      not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
    return None

  # Used only in error messages.  The old code referenced an undefined
  # 'targetname' here, which turned any mkbootfs/minigzip failure into a
  # NameError instead of a useful assertion message.
  targetname = os.path.basename(sourcedir)

  ramdisk_img = tempfile.NamedTemporaryFile()
  img = tempfile.NamedTemporaryFile()

  # cpio the ramdisk directory, pipe it through minigzip into ramdisk_img.
  p1 = Run(["mkbootfs", os.path.join(sourcedir, "RAMDISK")],
           stdout=subprocess.PIPE)
  p2 = Run(["minigzip"],
           stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
  p2.wait()
  p1.wait()
  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (targetname,)
  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (targetname,)

  cmd = ["mkbootimg", "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional per-device parameters, each stored as a one-line file.
  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  cmd.extend(["--ramdisk", ramdisk_img.name,
              "--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (targetname,)

  img.seek(os.SEEK_SET, 0)
  data = img.read()

  ramdisk_img.close()
  img.close()

  return data
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir):
"""Return a File object (with name 'name') with the desired bootable
image. Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name
'prebuilt_name', otherwise construct it from the source files in
'unpack_dir'/'tree_subdir'."""
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
print "using prebuilt %s..." % (prebuilt_name,)
return File.FromLocalFile(name, prebuilt_path)
else:
return None
#print "building image from target_files %s..." % (tree_subdir,)
#return File(name, BuildBootableImage(os.path.join(unpack_dir, tree_subdir)))
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """
  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  # Remember the directory so Cleanup() can remove it later.
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(archive, dirname):
    # -o: overwrite, -q: quiet; an optional pattern restricts extraction.
    cmd = ["unzip", "-o", "-q", archive, "-d", dirname]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (archive,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  passwordless = []
  protected = []
  devnull = open("/dev/null", "w+b")
  for key in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if key in SPECIAL_CERT_STRINGS:
      passwordless.append(key)
      continue

    # A key needs a password exactly when openssl cannot read it unencrypted.
    proc = Run(["openssl", "pkcs8", "-in", key+".pk8",
                "-inform", "DER", "-nocrypt"],
               stdin=devnull.fileno(),
               stdout=devnull.fileno(),
               stderr=subprocess.STDOUT)
    proc.communicate()
    (passwordless if proc.returncode == 0 else protected).append(key)
  devnull.close()

  result = PasswordManager().GetPasswords(protected)
  result.update(dict.fromkeys(passwordless, None))
  return result
def SignFile(input_name, output_name, key, password, align=None,
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If align is an integer > 1, zipalign is run to align stored files in
  the output zip on 'align'-byte boundaries.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.
  """

  # Alignment of 0/1 is a no-op; normalize so "if align" below is enough.
  if align == 0 or align == 1:
    align = None

  if align:
    # Sign into a temp file first; zipalign produces the real output.
    # The NamedTemporaryFile must stay open until zipalign has read it.
    temp = tempfile.NamedTemporaryFile()
    sign_name = temp.name
  else:
    sign_name = output_name

  cmd = ["java", "-Xmx4096m", "-jar",
           os.path.join(OPTIONS.search_path, "signapk.jar")]
  if whole_file:
    cmd.append("-w")
  cmd.extend([key + ".x509.pem", key + ".pk8",
              input_name, sign_name])

  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    # signapk reads the key password from stdin, newline-terminated.
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))

  if align:
    p = Run(["zipalign", "-f", str(align), sign_name, output_name])
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
    # Closing the NamedTemporaryFile deletes the intermediate signed zip.
    temp.close()
def CheckSize(data, target, info_dict):
"""Check the data string passed against the max size limit, if
any, for the given target. Raise exception if the data is too big.
Print a warning if the data is nearing the maximum size."""
if target.endswith(".img"): target = target[:-4]
mount_point = "/" + target
if info_dict["fstab"]:
if mount_point == "/userdata": mount_point = "/data"
p = info_dict["fstab"][mount_point]
fs_type = p.fs_type
limit = info_dict.get(p.device + "_size", None)
if not fs_type or not limit: return
if fs_type == "yaffs2":
# image size should be increased by 1/64th to account for the
# spare area (64 bytes per 2k page)
limit = limit / 2048 * (2048+64)
size = len(data)
pct = float(size) * 100.0 / limit
msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
if pct >= 99.0:
raise ExternalError(msg)
elif pct >= 95.0:
print
print " WARNING: ", msg
print
elif OPTIONS.verbose:
print " ", msg
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.  Lines that don't look like cert
  entries are skipped; entries with mismatched cert/key names raise
  ValueError."""
  certmap = {}
  entry_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                        r'private_key="(.*)"$')
  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = line.strip()
    if not line:
      continue
    m = entry_re.match(line)
    if m is None:
      continue  # non-entry lines are ignored, matching the old behavior
    name, cert, privkey = m.groups()
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      # Pre-signed / externally-signed APKs carry no local key pair.
      certmap[name] = cert
    elif (cert.endswith(".x509.pem") and privkey.endswith(".pk8")
          and cert[:-9] == privkey[:-4]):
      # Strip the extensions; both halves must name the same key.
      certmap[name] = cert[:-9]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
  return certmap
COMMON_DOCSTRING = """
-p (--path) <dir>
Prepend <dir>/bin to the list of places to search for binaries
run by this script, and expect to find jars in <dir>/framework.
-s (--device_specific) <file>
Path to the python module containing device-specific
releasetools code.
-x (--extra) <key=value>
Add a key/value pair to the 'extras' dict, which device-specific
extension code may look at.
-v (--verbose)
Show command lines being executed.
-h (--help)
Display this usage message and exit.
"""
def Usage(docstring):
  """Print the given module docstring followed by the common option help."""
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags. docstring is the calling module's docstring, to be displayed
  for errors and -h. extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""
  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "device_specific=", "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError, err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)
  # NOTE(review): path_specified is never read or updated below.
  path_specified = False
  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unrecognized flags go to the caller's handler; if it doesn't
      # claim them either, that's a programming error.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)
  # Make binaries under <search_path>/bin take precedence on PATH.
  os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                        os.pathsep + os.environ["PATH"])
  return args
def Cleanup():
  """Delete every temp file/directory registered in OPTIONS.tempfiles."""
  for path in OPTIONS.tempfiles:
    if os.path.isdir(path):
      shutil.rmtree(path)
    else:
      os.remove(path)
class PasswordManager(object):
def __init__(self):
self.editor = os.getenv("EDITOR", None)
self.pwfile = os.getenv("ANDROID_PW_FILE", None)
def GetPasswords(self, items):
"""Get passwords corresponding to each string in 'items',
returning a dict. (The dict may have keys in addition to the
values in 'items'.)
Uses the passwords in $ANDROID_PW_FILE if available, letting the
user edit that file to add more needed passwords. If no editor is
available, or $ANDROID_PW_FILE isn't define, prompts the user
interactively in the ordinary way.
"""
current = self.ReadFile()
first = True
while True:
missing = []
for i in items:
if i not in current or not current[i]:
missing.append(i)
# Are all the passwords already in the file?
if not missing: return current
for i in missing:
current[i] = ""
if not first:
print "key file %s still missing some passwords." % (self.pwfile,)
answer = raw_input("try to edit again? [y]> ").strip()
if answer and answer[0] not in 'yY':
raise RuntimeError("key passwords unavailable")
first = False
current = self.UpdateAndReadFile(current)
def PromptResult(self, current):
"""Prompt the user to enter a value (password) for each key in
'current' whose value is fales. Returns a new dict with all the
values.
"""
result = {}
for k, v in sorted(current.iteritems()):
if v:
result[k] = v
else:
while True:
result[k] = getpass.getpass("Enter password for %s key> "
% (k,)).strip()
if result[k]: break
return result
def UpdateAndReadFile(self, current):
if not self.editor or not self.pwfile:
return self.PromptResult(current)
f = open(self.pwfile, "w")
os.chmod(self.pwfile, 0600)
f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
f.write("# (Additional spaces are harmless.)\n\n")
first_line = None
sorted = [(not v, k, v) for (k, v) in current.iteritems()]
sorted.sort()
for i, (_, k, v) in enumerate(sorted):
f.write("[[[ %s ]]] %s\n" % (v, k))
if not v and first_line is None:
# position cursor on first line with no password.
first_line = i + 4
f.close()
p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
_, _ = p.communicate()
return self.ReadFile()
def ReadFile(self):
result = {}
if self.pwfile is None: return result
try:
f = open(self.pwfile, "r")
for line in f:
line = line.strip()
if not line or line[0] == '#': continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
print "failed to parse password file: ", line
else:
result[m.group(2)] = m.group(1)
f.close()
except IOError, e:
if e.errno != errno.ENOENT:
print "error reading password file: ", str(e)
return result
def ZipWriteStr(zip, filename, data, perms=0644):
  """Write 'data' into the open ZipFile 'zip' as 'filename' with the
  given POSIX permission bits.  (Note: the 'zip' parameter shadows the
  builtin of the same name; kept for caller compatibility.)"""
  # use a fixed timestamp so the output is repeatable.
  zinfo = zipfile.ZipInfo(filename=filename,
                          date_time=(2009, 1, 1, 0, 0, 0))
  zinfo.compress_type = zip.compression
  # The upper 16 bits of external_attr hold the Unix file mode.
  zinfo.external_attr = perms << 16
  zip.writestr(zinfo, data)
class DeviceSpecificParams(object):
module = None
def __init__(self, **kwargs):
"""Keyword arguments to the constructor become attributes of this
object, which is passed to all functions in the device-specific
module."""
for k, v in kwargs.iteritems():
setattr(self, k, v)
self.extras = OPTIONS.extras
if self.module is None:
path = OPTIONS.device_specific
if not path: return
try:
if os.path.isdir(path):
info = imp.find_module("releasetools", [path])
else:
d, f = os.path.split(path)
b, x = os.path.splitext(f)
if x == ".py":
f = b
info = imp.find_module(f, [d])
self.module = imp.load_module("device_specific", *info)
except ImportError:
print "unable to load device-specific module; assuming none"
def _DoCall(self, function_name, *args, **kwargs):
"""Call the named function in the device-specific module, passing
the given args and kwargs. The first argument to the call will be
the DeviceSpecific object itself. If there is no module, or the
module does not define the function, return the value of the
'default' kwarg (which itself defaults to None)."""
if self.module is None or not hasattr(self.module, function_name):
return kwargs.get("default", None)
return getattr(self.module, function_name)(*((self,) + args), **kwargs)
def FullOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of a
full OTA package. Implementations can add whatever additional
assertions they like."""
return self._DoCall("FullOTA_Assertions")
def FullOTA_InstallEnd(self):
"""Called at the end of full OTA installation; typically this is
used to install the image for the device's baseband processor."""
return self._DoCall("FullOTA_InstallEnd")
def IncrementalOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of an
incremental OTA package. Implementations can add whatever
additional assertions they like."""
return self._DoCall("IncrementalOTA_Assertions")
def IncrementalOTA_VerifyEnd(self):
"""Called at the end of the verification phase of incremental OTA
installation; additional checks can be placed here to abort the
script before any changes are made."""
return self._DoCall("IncrementalOTA_VerifyEnd")
def IncrementalOTA_InstallEnd(self):
"""Called at the end of incremental OTA installation; typically
this is used to install the image for the device's baseband
processor."""
return self._DoCall("IncrementalOTA_InstallEnd")
def WriteRawImage(self, *args):
return self._DoCall("WriteRawImage")
class File(object):
  """An in-memory file: a name plus raw contents, with the size and
  SHA-1 digest computed up front."""
  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()
  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Construct a File from the on-disk file at 'diskname'."""
    with open(diskname, "rb") as source:
      contents = source.read()
    return File(name, contents)
  def WriteToTemp(self):
    """Dump the contents into a NamedTemporaryFile and return it; the
    caller is responsible for closing it."""
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(self.data)
    tmp.flush()
    return tmp
  def AddToZip(self, z):
    """Store this file's contents into the open zipfile 'z'."""
    ZipWriteStr(z, self.name, self.data)
# Which binary-diff tool to use per file extension; imgdiff understands
# the internal structure of images/zip archives, bsdiff is the generic
# fallback (see Difference.ComputePatch).  List values are argv prefixes.
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
class Difference(object):
  """Computes (via an external diff program) the binary patch that
  transforms source file 'sf' into target file 'tf'."""
  def __init__(self, tf, sf):
    self.tf = tf
    self.sf = sf
    self.patch = None
  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf. Returns the same tuple as GetPatch()."""
    tf = self.tf
    sf = self.sf
    # Pick the diff tool by target extension; fall back to bsdiff.
    ext = os.path.splitext(tf.name)[1]
    diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()
    ext = os.path.splitext(tf.name)[1]
    try:
      ptemp = tempfile.NamedTemporaryFile()
      # The diff entry may be a bare name or an argv prefix list.
      if isinstance(diff_program, list):
        diff_program[0] = os.path.join(OPTIONS.search_path, "releasetools", diff_program[0])
        cmd = copy.copy(diff_program)
      else:
        diff_program = os.path.join(OPTIONS.search_path, "releasetools", diff_program)
        cmd = [diff_program]
      # Tool invocation: <prog> <source> <target> <patch-out>.
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      _, err = p.communicate()
      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (diff_program, err)
        return None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()
    self.patch = diff
    return self.tf, self.sf, self.patch
  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print len(diffs), "diffs to compute"
  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]
  # Workers pull items off the shared iterator; the lock protects the
  # iterator and serializes the progress output.  Note the hand-off
  # pattern: the lock is held when entering the for-loop (iterator
  # advance) and released around the expensive ComputePatch call.
  lock = threading.Lock()
  diff_iter = iter(by_size) # accessed under lock
  def worker():
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()
        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed! %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception, e:
      print e
      raise
  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
# map recovery.fstab's fs_types to mount/format "partition types"
# (MTD for raw-flash filesystems, EMMC for block-device ones); used by
# GetTypeAndDevice below.
PARTITION_TYPES = { "yaffs2": "MTD", "mtd": "MTD", "ext3": "EMMC",
                    "ext4": "EMMC", "emmc": "EMMC", "vfat": "EMMC",
                    "auto": "EMMC"}
def GetTypeAndDevice(mount_point, info):
  """Look up mount_point in info["fstab"] and return the pair
  (partition_type, device).  Returns None when no fstab is present.
  NOTE(review): the no-fstab branch returns a bare None rather than a
  (None, None) pair, so tuple-unpacking callers would fail -- confirm
  callers check for None first."""
  fstab = info["fstab"]
  if not fstab:
    return None
  entry = fstab[mount_point]
  return PARTITION_TYPES[entry.fs_type], entry.device
| [
"xiangxin19960319@gmail.com"
] | xiangxin19960319@gmail.com |
628e37506fd7dd536c95a26577af0d10853652c7 | 3ea3e6a6b76cf6fa5438559fc26b5bf48143ca14 | /src/bayes/kuwoData.py | ec779ef49048d42cf9adb1c2e7e3759dbed7b76d | [] | no_license | Lhfcws/ai_hw | 2e0e51e883e0c0e7c715efe26f4b8a628725a14c | 326d4153285ae52bd5128ee218bc159c6aaf69c0 | refs/heads/master | 2021-01-25T05:57:53.536876 | 2013-06-05T03:48:12 | 2013-06-05T03:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | #encoding=utf8
import kuworank
def main():
    """Read one song name per line from names.txt and rank each via
    kuworank.main().

    Fix over the original: the file handle was opened but never
    closed; a context manager now guarantees it is released, and the
    file is streamed line-by-line instead of loaded wholesale.
    """
    with open("names.txt", 'r') as f:
        for line in f:
            kuworank.main(line.strip())
| [
"lhfcws@163.com"
] | lhfcws@163.com |
dd79dec37c06033bdff6d7411c8f6c3d09d8f37d | ffef4697f09fb321a04f2b3aad98b688f4669fb5 | /tests/mindspore_test_framework/utils/block_util.py | 9d75ae0888ac00f966c55f42d14cc03bdb2d3a8c | [
"Apache-2.0",
"AGPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"MPL-1.1",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"MPL-2.0",
"LGPL-2.1-only",
"GPL-2.0-only",
"Libpng",
"BSL-1.0",
"MIT",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Z... | permissive | Ewenwan/mindspore | 02a0f1fd660fa5fec819024f6feffe300af38c9c | 4575fc3ae8e967252d679542719b66e49eaee42b | refs/heads/master | 2021-05-19T03:38:27.923178 | 2020-03-31T05:49:10 | 2020-03-31T05:49:10 | 251,512,047 | 1 | 0 | Apache-2.0 | 2020-03-31T05:48:21 | 2020-03-31T05:48:20 | null | UTF-8 | Python | false | false | 13,610 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils for Cell related computation."""
# pylint: disable=missing-docstring
import numpy as np
from mindspore.common.api import _executor, ms_function
from mindspore.common.tensor import Tensor
from mindspore import nn, context
from mindspore.ops.composite import GradOperation
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore import ParameterTuple
from . import keyword
def get_uniform_with_shape(shape):
    """Return a float32 array of `shape` sampled uniformly from
    [-0.1, 0.1).  The RNG is re-seeded on every call, so the output is
    deterministic for a given shape."""
    np.random.seed(1)
    samples = np.random.uniform(low=-0.1, high=0.1, size=shape)
    return samples.astype(np.float32)
def set_block_param_with_rand(net, rand_func=None):
    """Re-initialize every trainable parameter of `net` by calling
    `rand_func(shape)`; no-op unless `net` is an nn.Cell and a
    rand_func is given."""
    if not isinstance(net, nn.Cell) or rand_func is None:
        return
    for param in net.trainable_params():
        shape = param.default_input.asnumpy().shape
        param.default_input = Tensor(rand_func(shape))
def compile_block(net, *inputs, rand_func=None, training=True):
    """Compile `net` against `inputs` after setting the train flag and
    optionally randomizing its parameters; returns the compile result."""
    set_block_training(net, training)
    set_block_param_with_rand(net, rand_func)
    return _executor.compile(net, *inputs)
def run_block(net, *inputs, rand_func=None, training=True):
    """Run `net` forward on `inputs` after setting the train flag and
    optionally randomizing its parameters.

    In PYNATIVE mode the call is routed through an `ms_function`
    wrapper so it executes via the compiled path; otherwise the cell
    is invoked directly.
    """
    set_block_training(net, training)
    set_block_param_with_rand(net, rand_func)
    if context.get_context("mode") == context.PYNATIVE_MODE:
        def func_pynative(*inputs):
            @ms_function
            def _func_pynative(*inputs):
                return net(*inputs)
            return _func_pynative(*inputs)
        return func_pynative(*inputs)
    return net(*inputs)
class IthOutputCell(nn.Cell):
    """Wraps a multi-output network and exposes only output number
    `output_index`.  The arity-specific constructN methods exist
    because `construct` cannot be variadic here; get_output_cell()
    rebinds the right one onto `construct`."""
    def __init__(self, network, output_index):
        # Skip auto_prefix for real Cells so parameter names are unchanged.
        if isinstance(network, nn.Cell):
            super(IthOutputCell, self).__init__(auto_prefix=False)
        else:
            super(IthOutputCell, self).__init__()
        self.network = network
        self.output_index = output_index
    def construct(self, *inputs):
        # Placeholder; replaced with one of constructN by get_output_cell().
        raise NotImplementedError
    def construct1(self, x1):
        predict = self.network(x1)[self.output_index]
        return predict
    def construct2(self, x1, x2):
        predict = self.network(x1, x2)[self.output_index]
        return predict
    def construct3(self, x1, x2, x3):
        predict = self.network(x1, x2, x3)[self.output_index]
        return predict
    def construct4(self, x1, x2, x3, x4):
        predict = self.network(x1, x2, x3, x4)[self.output_index]
        return predict
    def construct5(self, x1, x2, x3, x4, x5):
        predict = self.network(x1, x2, x3, x4, x5)[self.output_index]
        return predict
def get_output_cell(network, num_input, output_index, training=True):
    """Wrap `network` in an IthOutputCell that returns only output
    #`output_index`, binding the constructN variant matching the
    number of inputs."""
    cell = IthOutputCell(network, output_index)
    construct_fn = getattr(cell, 'construct%d' % num_input)
    setattr(cell, "construct", construct_fn)
    set_block_training(cell, training)
    return cell
class OutputReduceSumCell(nn.Cell):
    """Wraps a network and reduce-sums each of its `output_num`
    outputs to a scalar (useful for building a differentiable scalar
    loss out of arbitrary outputs)."""
    def __init__(self, network, output_num):
        super(OutputReduceSumCell, self).__init__()
        self.output_num = output_num
        self.network = network
        self.reduce_sum = P.ReduceSum()
    def construct(self, *inputs):
        if self.output_num == 1:
            return self.reduce_sum(self.network(*inputs), None)
        # Multiple outputs: reduce each one and return them as a tuple.
        # NOTE(review): the network is re-run once per output here.
        ret = F.make_tuple()
        for index in range(self.output_num):
            predict = self.network(*inputs)[index]
            predict_reduce = self.reduce_sum(predict, None)
            ret = ret + F.make_tuple(predict_reduce)
        return ret
def get_output_reduce_cell(network, output_num, training=True):
    """Wrap `network` so each of its `output_num` outputs is
    reduce-summed to a scalar, with the train flag set."""
    cell = OutputReduceSumCell(network, output_num)
    set_block_training(cell, training)
    return cell
class InputOpNet(nn.Cell):
    """Adapts a primitive `op` (plus up to four bound constants c1..c4)
    into a Cell.  The methods follow the naming scheme
    construct{num_inputs}_c{num_consts}; a `_fake` suffix means a dummy
    data argument is added to the op's output, and the `constructc1_1`
    variant passes the constant before the input.  gen_net() selects
    one of these and rebinds it onto `construct`."""
    def __init__(self, op, c1=None, c2=None, c3=None, c4=None):
        super(InputOpNet, self).__init__()
        self.op = op
        self.c1 = c1
        self.c2 = c2
        self.c3 = c3
        self.c4 = c4
    def construct(self, *inputs):
        # Placeholder; replaced by gen_net() with a variant below.
        raise NotImplementedError
    def construct0_c0_fake(self, data):
        x = self.op() + data
        return x
    def construct0_c1_fake(self, data):
        x = self.op(self.c1) + data
        return x
    def construct0_c2_fake(self, data):
        x = self.op(self.c1, self.c2) + data
        return x
    def construct0_c3_fake(self, data):
        x = self.op(self.c1, self.c2, self.c3) + data
        return x
    def construct0_c0(self):
        x = self.op()
        return x
    def construct0_c1(self):
        x = self.op(self.c1)
        return x
    def construct0_c2(self):
        x = self.op(self.c1, self.c2)
        return x
    def construct1_c0(self, x1):
        x = self.op(x1)
        return x
    def construct1_c1(self, x1):
        x = self.op(x1, self.c1)
        return x
    def construct1_c2(self, x1):
        x = self.op(x1, self.c1, self.c2)
        return x
    def construct1_c3(self, x1):
        x = self.op(x1, self.c1, self.c2, self.c3)
        return x
    def construct1_c4(self, x1):
        x = self.op(x1, self.c1, self.c2, self.c3, self.c4)
        return x
    def constructc1_1(self, x1):
        x = self.op(self.c1, x1)
        return x
    def construct2_c0(self, x1, x2):
        x = self.op(x1, x2)
        return x
    def construct2_c1(self, x1, x2):
        x = self.op(x1, x2, self.c1)
        return x
    def construct2_c3(self, x1, x2):
        x = self.op(x1, x2, self.c1, self.c2, self.c3)
        return x
    def construct3_c0(self, x1, x2, x3):
        x = self.op(x1, x2, x3)
        return x
    def construct3_c1(self, x1, x2, x3):
        x = self.op(x1, x2, x3, self.c1)
        return x
    def construct4_c0(self, x1, x2, x3, x4):
        x = self.op(x1, x2, x3, x4)
        return x
    def construct4_c1(self, x1, x2, x3, x4):
        x = self.op(x1, x2, x3, x4, self.c1)
        return x
    def construct4_c4(self, x1, x2, x3, x4):
        x = self.op(x1, x2, x3, x4, self.c1, self.c2, self.c3, self.c4)
        return x
    def construct5_c0(self, x1, x2, x3, x4, x5):
        x = self.op(x1, x2, x3, x4, x5)
        return x
    def construct6_c0(self, x1, x2, x3, x4, x5, x6):
        x = self.op(x1, x2, x3, x4, x5, x6)
        return x
    def construct5_c1(self, x1, x2, x3, x4, x5):
        x = self.op(x1, x2, x3, x4, x5, self.c1)
        return x
def gen_net(op, input_num, training=True, desc_const=(), const_first=False, add_fake_input=False):
    """Wrap primitive `op` in an InputOpNet, binding the construct
    variant named construct{input_num}_c{len(desc_const)} (constant
    first / fake-input suffixes as requested).  Cells pass through
    unchanged."""
    if isinstance(op, nn.Cell):
        return op
    net = InputOpNet(op, *desc_const)
    num_const = len(desc_const)
    if const_first:
        fn_name = 'constructc%d_%d' % (num_const, input_num)
    else:
        fn_name = 'construct%d_c%d' % (input_num, num_const)
    if add_fake_input:
        fn_name += '_fake'
    setattr(net, "construct", getattr(net, fn_name))
    set_block_training(net, training)
    return net
class OperationBackward(nn.Cell):
    """Backward wrapper for a plain operation: applies `grad_op` to the
    network and appends the fixed sensitivity tensor `sens` to the call."""
    def __init__(self, network, grad_op, sens):
        # Skip auto_prefix for real Cells so parameter names are unchanged.
        if isinstance(network, nn.Cell):
            super(OperationBackward, self).__init__(auto_prefix=False)
        else:
            super(OperationBackward, self).__init__()
        self.network = network
        self.grad = grad_op
        self.sens = sens
    def construct(self, *inputs):
        return self.grad(self.network)(*inputs, self.sens)
class OperationBackwardWithNoSens(nn.Cell):
    """Backward wrapper for a plain operation when `grad_op` carries no
    sensitivity parameter."""
    def __init__(self, network, grad_op):
        if isinstance(network, nn.Cell):
            super(OperationBackwardWithNoSens, self).__init__(auto_prefix=False)
        else:
            super(OperationBackwardWithNoSens, self).__init__()
        self.network = network
        self.grad = grad_op
    def construct(self, *inputs):
        return self.grad(self.network)(*inputs)
class NNBackward(nn.Cell):
    """Backward wrapper that differentiates w.r.t. the network's
    trainable parameters (grad_op.get_by_list), appending the fixed
    sensitivity `sens`."""
    def __init__(self, network, grad_op, sens):
        if isinstance(network, nn.Cell):
            super(NNBackward, self).__init__(auto_prefix=False)
        else:
            super(NNBackward, self).__init__()
        self.network = network
        self.grad = grad_op
        self.params = ParameterTuple(network.trainable_params())
        self.sens = sens
    def construct(self, *inputs):
        return self.grad(self.network, self.params)(*inputs, self.sens)
class NNBackwardWithNoSens(nn.Cell):
    """Backward wrapper that differentiates w.r.t. trainable parameters
    when `grad_op` carries no sensitivity parameter."""
    def __init__(self, network, grad_op):
        if isinstance(network, nn.Cell):
            super(NNBackwardWithNoSens, self).__init__(auto_prefix=False)
        else:
            super(NNBackwardWithNoSens, self).__init__()
        self.network = network
        self.grad = grad_op
        self.params = ParameterTuple(network.trainable_params())
    def construct(self, *inputs):
        return self.grad(self.network, self.params)(*inputs)
def gen_grad_net(net, grad_op, input_num, sens=None, training=True, desc_const=(),
                 const_first=False, add_fake_input=False):
    """Build the backward network for `net` under `grad_op`.

    Non-Cell ops are first adapted via gen_net(); the wrapper class is
    then chosen from grad_op's flags: get_by_list selects the NN*
    (parameter-gradient) wrappers, sens_param selects the variants
    that append the sensitivity tensor `sens`.
    """
    if not isinstance(net, nn.Cell):
        net = gen_net(net, input_num, desc_const=desc_const, const_first=const_first, add_fake_input=add_fake_input)
    if grad_op.get_by_list:
        if grad_op.sens_param:
            net = NNBackward(net, grad_op, sens)
        else:
            net = NNBackwardWithNoSens(net, grad_op)
    else:
        if grad_op.sens_param:
            net = OperationBackward(net, grad_op, sens)
        else:
            net = OperationBackwardWithNoSens(net, grad_op)
    set_block_training(net, training)
    return net
def set_block_training(net, training=True):
    """Set the train/eval flag when `net` is an nn.Cell; no-op otherwise."""
    if not isinstance(net, nn.Cell):
        return
    net.set_train(training)
def set_block_phase(net, phase='train'):
    """Set the execution phase string when `net` is an nn.Cell; no-op
    otherwise."""
    if not isinstance(net, nn.Cell):
        return
    net.phase = phase
def create_funcs(verification_set, block_generator, block_runner, grad_op=None, default_rand_func=None):
    """Replace each block in verification_set[keyword.function] with a
    callable built from its config.

    block_generator constructs the (possibly backward) network and
    block_runner executes it.  When `grad_op` is given the callable
    computes gradients, splitting trailing inputs off as sensitivity
    tensors according to num_outputs; otherwise it runs forward.
    """
    def create_func(block, num_outputs, rand_func, desc_const, const_first, add_fake_input, split_outputs):
        def function(*inputs):
            # gradient
            if grad_op:
                if num_outputs == 0:
                    # No outputs means no sensitivity tensor: rebuild the
                    # grad op with sens_param disabled.
                    grad_op_ = GradOperation('grad', get_all=grad_op.get_all,
                                             get_by_list=grad_op.get_by_list, sens_param=False)
                    b = block_generator(block, grad_op_, len(inputs), desc_const=desc_const,
                                        const_first=const_first, add_fake_input=add_fake_input)
                    return block_runner(b, *inputs, rand_func=rand_func)
                if num_outputs == 1:
                    # Last input is the single sensitivity tensor.
                    b = block_generator(block, grad_op, len(inputs) - 1, inputs[-1], desc_const=desc_const,
                                        const_first=const_first, add_fake_input=add_fake_input)
                    return block_runner(b, *(inputs[:-1]), rand_func=rand_func)
                if split_outputs:
                    # Differentiate each output separately via IthOutputCell
                    # and flatten the per-output gradients into one list.
                    block_inputs = inputs[0:len(inputs) - num_outputs]
                    sens_inputs = inputs[len(inputs) - num_outputs:]
                    ret = []
                    for i in range(num_outputs):
                        bi_inputs = list(block_inputs)
                        bi = get_output_cell(block, len(block_inputs), i)
                        bi = block_generator(bi, grad_op, len(bi_inputs), sens_inputs[i], desc_const=desc_const,
                                             const_first=const_first, add_fake_input=add_fake_input)
                        grads_i = block_runner(bi, *bi_inputs, rand_func=rand_func)
                        if isinstance(grads_i, tuple):
                            ret.extend(grads_i)
                        else:
                            ret.append(grads_i)
                    return ret
                # All outputs at once: the trailing inputs form a tuple of
                # sensitivity tensors.
                block_inputs = inputs[0:len(inputs) - num_outputs]
                sens_inputs = tuple(inputs[len(inputs) - num_outputs:])
                b = block_generator(block, grad_op, len(block_inputs), sens_inputs, desc_const=desc_const,
                                    const_first=const_first, add_fake_input=add_fake_input)
                return block_runner(b, *block_inputs, rand_func=rand_func)
            # forward
            inputs_num = len(inputs)
            if add_fake_input and inputs_num == 1:
                # input is faked
                inputs_num = 0
            b = block_generator(block, inputs_num, desc_const=desc_const, const_first=const_first,
                                add_fake_input=add_fake_input)
            return block_runner(b, *inputs, rand_func=rand_func)
        return function
    bc_configs = verification_set[keyword.function]
    for config in bc_configs:
        block = config[keyword.block]
        rand_func = config.get(keyword.init_param_with, default_rand_func)
        num_outputs = config.get(keyword.num_outputs, 0)
        desc_const = config.get(keyword.desc_const, [])
        const_first = config.get(keyword.const_first, False)
        add_fake_input = config.get(keyword.add_fake_input, False)
        split_outputs = config.get(keyword.split_outputs, True)
        config[keyword.block] = create_func(block, num_outputs, rand_func, desc_const,
                                            const_first, add_fake_input, split_outputs)
    return bc_configs
| [
"leon.wanghui@huawei.com"
] | leon.wanghui@huawei.com |
a88db205f4e1b8864028cbe217a561a695c73966 | c625157aa5f39541dc5dd86977c0eb2299db1780 | /modules/attention.py | f24e47ab7d7c18c863746e232e6a71111954171e | [] | no_license | Shikhar-S/TreeCodeGen | e56eb8ec13e42fbae642f880388dd985e7fc531e | 572aa96fabb98d62335bc43084acafbe4bd93fe5 | refs/heads/main | 2023-08-31T12:52:24.776300 | 2021-09-28T18:14:05 | 2021-09-28T18:14:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,166 | py | import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from fairseq import utils
import os
import functools
from fairseq.models import transformer
from fairseq.modules.multihead_attention import *
from .nstack_merge_tree_attention import *
class MergeStackNodesOnAffinityValueAttention(nn.Module):
"""
Hierarchical embeddings is embedded from outside
Encoder Layer computes the embeddings,
if not share: chunk into multiple embeddings
pass each to each attention layer!
"""
WNSTACK_NORM = ['none', 'mean', 'sqrt_mean']
    def __init__(
            self, args, embed_dim, num_heads, dropout=0., bias=True,
            add_bias_kv=False, add_zero_attn=False, padding_idx=1, **kwargs):
        """Multi-head attention over (leaves, nodes) tree inputs.

        Most configuration is pulled from `args` (command-line flags),
        with a few knobs overridable per-layer through **kwargs
        (wnstack_norm, wnstack_up_norm, nstack_mask_fn).
        """
        super().__init__()
        self.args = args
        self.kwargs = kwargs
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.dropout_layer = nn.Dropout(dropout)
        self.bias = bias
        self.add_bias_kv = add_bias_kv
        self.add_zero_attn = add_zero_attn
        self.onnx_trace = False
        self.padding_idx = padding_idx
        self.src_len_norm = getattr(args, 'src_len_norm', 'none')
        self.cum_node = getattr(args, 'cum_node', 'sum')
        # Whether node states are injected on the value or the key path.
        self.nstack_on = getattr(args, 'nstack_on', 'value')
        self.nstack_pos_embed = getattr(args, 'nstack_pos_embed', False)
        self.nstack_pos_embed_learned = getattr(args, 'nstack_pos_embed_learned', False)
        self.nstack_linear = getattr(args, 'nstack_linear', False)
        self.wnstack_include_leaves = getattr(args, 'wnstack_include_leaves', True)
        # Per-layer kwargs take precedence over the global args values.
        self.wnstack_norm = kwargs.get('wnstack_norm', getattr(args, 'wnstack_norm', 'none'))
        self.wnstack_up_norm = kwargs.get('wnstack_up_norm', getattr(args, 'wnstack_up_norm', 'none'))
        self.nstack_mask_fname = kwargs.get('nstack_mask_fn', getattr(args, 'nstack_mask_fn', 'default'))
        self.mutual_level = getattr(args, 'mutual_ancestor_level', 5)
        self.nstack_mask_func = MergeWeightMask.acquire_masking_fn(self.nstack_mask_fname, self.mutual_level)
        # Hierarchical (horizontal x vertical) position embedding options.
        self.nstack_hier_embed = getattr(args, 'nstack_hier_embed', False)
        self.nstack_hier_embed_max_horiz = getattr(args, 'nstack_hier_embed_max_horiz', 100)
        self.nstack_hier_embed_max_ver = getattr(args, 'nstack_hier_embed_max_ver', 1024)
        self.nstack_hier_embed_share = getattr(args, 'nstack_hier_embed_share', False)
        # max_horiz=100, max_ver=1024
        print(f'Acquire Mask function[{self.nstack_mask_fname}]: {self.nstack_mask_func}')
        assert self.wnstack_norm in self.WNSTACK_NORM
        assert self.nstack_on in ['value', 'key'], f'{self.nstack_on}'
        # if self.nstack_hier_embed:
        self._build_layer()
    @property
    def value_tofloat_mle(self):
        # NOTE(review): the bare name `value_tofloat_mle` on the RHS is not
        # defined in this class; unless it is exported by the star-import of
        # nstack_merge_tree_attention, evaluating this property raises
        # NameError -- confirm the intended attribute (self.args.?).
        return self.nstack_mask_fname == MergeWeightMask.LEAVES_SUBTREE and value_tofloat_mle
def extra_repr(self):
return 'sln={},on={},posemb={},posemb_l={},hier_emb={},cumnode={},linear={},wfname={},upnorm={},hier_share={},dwstack_proj_act={},tofloatmle'.format(
self.src_len_norm, self.nstack_on, self.nstack_pos_embed, self.nstack_pos_embed_learned,
self.nstack_hier_embed, self.cum_node,
self.nstack_linear, self.nstack_mask_fname,
self.wnstack_up_norm, self.nstack_hier_embed_share, self.dwstack_proj_act, self.value_tofloat_mle
)
    def _build_layer(self):
        """Create projections, per-head scaling, and optional nstack
        sub-modules.  Called once from __init__."""
        self.src_len_norm = getattr(self.args, 'src_len_norm', 'sqrt')
        self.dwstack_proj_act = getattr(self.args, 'dwstack_proj_act', 'none')
        self.head_dim = self.embed_dim // self.num_heads
        assert self.head_dim * self.num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5
        # Single packed weight for the q/k/v projections (3 * embed_dim rows).
        self.in_proj_weight = Parameter(torch.Tensor(3 * self.embed_dim, self.embed_dim))
        if self.bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * self.embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.bias)
        if self.add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, self.embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, self.embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = self.add_zero_attn
        self.nstack_linear_layer = NstackLinear(self.head_dim, self.head_dim, False) if self.nstack_linear else None
        # Projects keys to one scalar gate per attention head.
        self.dwstack_linear = transformer.Linear(self.embed_dim, self.num_heads)
        self.project_dwstack_key = lambda x: self.dwstack_linear(x)
        if self.dwstack_proj_act == 'sigmoid':
            self.project_dwstack_key = lambda x: self.dwstack_linear(x).sigmoid()
        elif self.dwstack_proj_act == 'tanh':
            self.project_dwstack_key = lambda x: self.dwstack_linear(x).tanh()
        assert not self.nstack_pos_embed, f'not now'
        # NOTE(review): the assert above forces nstack_pos_embed False, so
        # this branch is dead and embed_positions is always None.
        self.embed_positions = PositionalEmbedding(
            self.args.max_source_positions, self.head_dim, self.padding_idx,
            left_pad=False,
            learned=self.nstack_pos_embed_learned,
        ) if self.nstack_pos_embed else None
        self.reset_parameters()
        self.onnx_trace = False
    def prepare_for_onnx_export_(self):
        # Switch to ONNX-trace-friendly code paths.
        self.onnx_trace = True
    def reset_parameters(self):
        """Xavier-initialize projection weights; zero biases; normal-init
        the optional k/v bias vectors.  (Call order matters for RNG
        reproducibility -- do not reorder.)"""
        nn.init.xavier_uniform_(self.in_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
    def reorder_incremental_state(self, incremental_state, new_order):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                # Batch dimension is dim 0 of each cached tensor.
                input_buffer[k] = input_buffer[k].index_select(0, new_order)
            self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(
self,
incremental_state,
'attn_state',
) or {}
def _set_input_buffer(self, incremental_state, buffer):
utils.set_incremental_state(
self,
incremental_state,
'attn_state',
buffer,
)
    def prepare_dptree_qkv(
            self, query, key, value, node_key, node_value, key_padding_mask=None, node_padding_mask=None,
            incremental_state=None, need_weights=True, static_kv=False,
            compute_query_nodes=True, compute_key_nodes=True, force_self_att=False):
        """Project query/key/value and node_key/node_value, reshape them to
        per-head layout, and manage the incremental-decoding cache.

        Returns (q, k, v, node_k, node_v, key_padding_mask,
        node_padding_mask, saved_state, src_len, node_src_len, tgt_len,
        query_bsz).  Only self-attention (shared qkv storage) and
        encoder-decoder attention (shared kv storage) are implemented.
        """
        # Pointer equality distinguishes self-attention from cross-attention.
        qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
        kv_same = key.data_ptr() == value.data_ptr()
        node_kv_same = node_key.data_ptr() == node_value.data_ptr()
        tgt_len, query_bsz, embed_dim = query.size()
        leave_len, key_bsz, embed_dim_ = key.size()
        node_len, key_bsz, embed_dim_ = node_key.size()
        n = leave_len
        m = node_len
        t = tgt_len
        bsz = b = key_bsz
        assert embed_dim == self.embed_dim
        assert key.size() == value.size()
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                if static_kv:
                    # Encoder outputs are fixed during decoding: reuse the
                    # cached projections and skip recomputation below.
                    assert kv_same and not qkv_same
                    key = value = node_key = node_value = None
        else:
            saved_state = None
        assert compute_query_nodes
        assert compute_key_nodes
        if qkv_same or force_self_att:
            # self-attention
            q = self.in_proj_q(query)
            assert node_kv_same
            k, v = self.in_proj_kv(key)
            node_k, node_v = self.in_proj_kv(node_key)
        elif kv_same:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = node_k = node_v = None
            else:
                k, v = self.in_proj_kv(key)
                assert node_kv_same
                node_k, node_v = self.in_proj_kv(node_key)
        else:
            raise NotImplementedError
        q *= self.scaling
        assert self.bias_v is None
        assert self.bias_k is None
        assert not self.add_zero_attn
        # Reshape to [bsz * num_heads, seq_len, head_dim].
        q = q.contiguous().view(t, query_bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, key_bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, key_bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if node_k is not None:
            node_k = node_k.contiguous().view(-1, key_bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if node_v is not None:
            node_v = node_v.contiguous().view(-1, key_bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            assert static_kv, f'static_kv={static_kv}, only cross-attention impl here'
            def maybe_concat_state_kv(name, x):
                # Reuse the cached projection when present, else cache `x`.
                if name in saved_state:
                    prev = saved_state[name]
                    prev = prev.view(b * self.num_heads, -1, self.head_dim)
                    # o = prev if static_kv else torch.cat((prev, x), dim=1)
                    o = prev
                else:
                    o = x
                saved_state[name] = o.view(b, self.num_heads, -1, self.head_dim)
                return o
            k = maybe_concat_state_kv('prev_key', k)
            v = maybe_concat_state_kv('prev_value', v)
            node_k = maybe_concat_state_kv('prev_node_key', node_k)
            node_v = maybe_concat_state_kv('prev_node_value', node_v)
            self._set_input_buffer(incremental_state, saved_state)
        src_len = k.size(1)
        node_src_len = node_k.size(1)
        if key_padding_mask is not None:
            assert key_padding_mask.size(-1) == src_len
        if node_padding_mask is not None:
            assert node_padding_mask.size(-1) == node_src_len
        return (q, k, v, node_k, node_v, key_padding_mask, node_padding_mask, saved_state, src_len, node_src_len, tgt_len, query_bsz)
@classmethod
def get_ntree_mask(cls, n, spans, nheads):
# spans: [b, m, 2]
with torch.no_grad():
b, m, _ = spans.size()
try:
rg = torch.arange(0, n, device=spans.device, dtype=spans.dtype).view(1, n, 1, 1)
except:
print("the stats are n is ",n)
spans = spans.unsqueeze(1)
"""important the encodr decoder comes here"""
mask = (rg >= spans[:, :, :, :1]) ^ (rg > spans[:, :, :, 1:])
mask = mask.view(b, 1, n, m, 1).contiguous().expand(b, nheads, n, m, 1)
mask = mask.contiguous().view(b * nheads, n, m, 1)
return mask
    def accumulate_upward(self, leaves, nodes, mask, **kwargs):
        """Upward step of hierarchical accumulation.

        For each leaf i, node_states[:, i, j] is the cumulative (over node
        index j) sum of the node vectors whose spans cover leaf i, optionally
        seeded with the leaf vector itself and optionally normalised.

        leaves: [b, n, 1, c]
        nodes:  [b, 1, m, c]
        mask:   [b, n, m, 1]  True where leaf i belongs to node j's span
        returns node_states: [b, n, m, c], zeroed outside the mask
        """
        # Zero node vectors at (leaf, node) pairs outside the span mask.
        float_mask = mask.type_as(nodes)
        node_stack = nodes * float_mask
        if self.wnstack_include_leaves:
            # Prepend the leaf vector so it participates in the cumulative
            # sum, then drop the leading slot again after cumsum.
            stack = torch.cat([leaves, node_stack], 2)
            upward_cum = torch.cumsum(stack, dim=2)
            node_states = upward_cum[:, :, 1:]
        else:
            stack = node_stack
            upward_cum = torch.cumsum(stack, dim=2)
            node_states = upward_cum
        # Optional normalisation by the (sqrt of the) running mask count;
        # clamp_ keeps the divisor >= 1 so masked-out rows never divide by 0.
        if self.wnstack_up_norm == 'mean':
            node_states /= torch.cumsum(float_mask, dim=2).clamp_(1.0, 1e4)
        elif self.wnstack_up_norm == 'sqrt_mean':
            node_states /= torch.cumsum(float_mask, dim=2).sqrt_().clamp_(1.0, 1e4)
        node_states *= float_mask
        return node_states
    def accumulate_rightward_backup(self, node_states, mask, right_weight, **kwargs):
        """Older variant of accumulate_rightward that normalises *after* the
        matmul instead of before.  Appears to be kept only for reference —
        NOTE(review): looks unused; confirm before deleting.

        node_states:  [b, n, m, c]
        mask:         [b, n, m, 1]
        right_weight: [b, 1, n, 1]  per-leaf aggregation weights
        returns rv_node_out: [b, m, c]
        """
        # node_states_t: [b, c, m, n]
        node_states_t = node_states.transpose(1, 3)
        assert not torch.isnan(right_weight).any(), f'right_weight::right_weight problem: {right_weight}'
        # Weighted sum over the leaf axis, then reshape to [b, m, c].
        rv_node_out = torch.matmul(node_states_t, right_weight).squeeze_(-1).transpose(1, 2)
        # Clamp magnitudes to reduce fp16 overflow to inf/NaN.
        rv_node_out = rv_node_out.clamp_(-1e4, 1e4)
        assert not torch.isnan(rv_node_out).any(), f'rv_node_out::after matmul problem: {rv_node_out}'
        # Optional mean / sqrt-mean normalisation by span length.
        if self.wnstack_norm == 'mean':
            mask_length = mask.type_as(node_states).sum(dim=1).clamp_(1.0, 1e4)
            rv_node_out /= mask_length
        elif self.wnstack_norm == 'sqrt_mean':
            mask_length = mask.type_as(node_states).sum(dim=1).clamp_(1.0, 1e4).sqrt_()
            rv_node_out /= mask_length
        return rv_node_out
def accumulate_rightward(self, node_states, mask, right_weight, tofloat=False, **kwargs):
# node_states: [b, n, m, c]
# mask: [b, n, m, 1]
# node_states_t: [b, c, m, n]
# right_weight: [b, 1, n, 1]
# rv_node_out: [b, m, c]
if self.wnstack_norm == 'mean':
mask_length = mask.type_as(node_states).sum(dim=1, keepdim=True).clamp_(1.0, 1e4)
node_states /= mask_length
elif self.wnstack_norm == 'sqrt_mean':
mask_length = mask.type_as(node_states).sum(dim=1, keepdim=True).clamp_(1.0, 1e4).sqrt_()
node_states /= mask_length
node_states_t = node_states.transpose(1, 3)
# assert not torch.isnan(right_weight).any(), f'right_weight::right_weight problem: {right_weight}'
if tofloat:
rv_node_out = torch.matmul(node_states_t.float(), right_weight.float()).type_as(node_states_t)
else:
rv_node_out = torch.matmul(node_states_t, right_weight)
# rv_node_out = torch.matmul(node_states_t, right_weight).squeeze_(-1).transpoAse(1, 2)
# rv_node_out = rv_node_out.clamp_(-1e4, 1e4)
rv_node_out = rv_node_out.squeeze_(-1).transpose(1, 2)
# if torch.isnan(rv_node_out).any():
# print(f'Nan Occur!!!')
# rv_node_out_float = torch.matmul(node_states_t.float(), right_weight.float()).squeeze_(-1).transpose(1, 2)
# where_nan = rv_node_out_float[torch.isnan(rv_node_out)]
# print(where_nan)
# raise AssertionError('Nan in rv_node_out')
# assert not torch.isnan(rv_node_out).any(), f'rv_node_out::after matmul problem: {rv_node_out}, {node_states_t.max()}, {node_states_t.min()}'
assert not torch.isnan(rv_node_out).any(), f'rv_node_out::after matmul problem: NaN [{tofloat}][type={rv_node_out.dtype}], consider export value_tofloat=1 '
# if self.wnstack_norm == 'mean':
# mask_length = mask.type_as(node_states).sum(dim=1).clamp_(1.0, 1e4)
# rv_node_out /= mask_length
# elif self.wnstack_norm == 'sqrt_mean':
# mask_length = mask.type_as(node_states).sum(dim=1).clamp_(1.0, 1e4).sqrt_()
# rv_node_out /= mask_length
return rv_node_out
    def _compute_nstree_states(self, leaves, rv_nodes, right_weight, mask, hier_embed=None, **kwargs):
        """Hierarchical accumulation (the core nstack operation): fold leaf
        states upward into their covering nodes, then aggregate per node with
        the per-leaf weights.

        leaves:       [bh, n, c]
        rv_nodes:     [bh, m, c]
        right_weight: [bh, 1, n, 1]
        mask:         [bh, n, m, 1]
        hier_embed:   [bh, n, m, c] hierarchical embeddings, or None
        returns:      [bh, m, c]
        """
        leaves = leaves.unsqueeze(2)      # -> [bh, n, 1, c]
        rv_nodes = rv_nodes.unsqueeze(1)  # -> [bh, 1, m, c]
        nodes = rv_nodes
        """HIERARCHICAL EMBEDDINGS"""
        if hier_embed is not None:
            nodes = rv_nodes + hier_embed
        """UPWARD CA"""
        node_states = self.accumulate_upward(leaves, nodes, mask, **kwargs)
        """WEIGHTED AGGREGATION"""
        # NOTE(review): `value_tofloat` is not defined in this method —
        # presumably a module-level flag (e.g. env-var driven) declared
        # earlier in this file; confirm it exists at import time.
        rv_node_out = self.accumulate_rightward(
            node_states, mask, right_weight, tofloat=value_tofloat or self.value_tofloat_mle)
        return rv_node_out
    def compute_att_weights_values(
            self, q, k, v, node_k, node_v, ntree_mask, right_weight, hier_embed=None, force_self_att=False, **kwargs):
        """Compute raw attention logits over leaves+nodes, and the value
        matrix with hierarchically accumulated node values.

        q:             [bh, t, d]
        k, v:          [bh, n, d]   leaf keys / values
        node_k/node_v: [bh, m, d]   node keys / values
        ntree_mask:    [bh, n, m, 1]
        right_weight:  [bh, 1, n, 1]
        hier_embed:    [bh, n, m, d] or None
        returns (logits [bh, t, n+m], values [bh, n+m, d])
        """
        attn_le = torch.bmm(q, k.transpose(1, 2))       # leaf logits [bh, t, n]
        attn_no = torch.bmm(q, node_k.transpose(1, 2))  # node logits [bh, t, m]
        # NOTE(review): `skip_aff_ln` and `MergeWeightMask` are not defined in
        # this method — presumably a module-level flag/enum declared earlier
        # in this file; confirm they exist at import time.
        if force_self_att and skip_aff_ln and self.nstack_mask_fname == MergeWeightMask.LEAVES_SUBTREE:
            # Self-attention path: the query axis is [leaves ; nodes] (the
            # assert below pins t == n + m), so split it and hierarchically
            # accumulate only the node-query logits.
            assert q.size(1) == k.size(1) + node_k.size(1), f'{q.size(1)} != {k.size(1) + node_k.size(1)}'
            n = k.size(1)
            no_attn_le = attn_le[:, n:]   # [bh, m, n] node queries vs leaves
            no_attn_no = attn_no[:, n:]   # [bh, m, m] node queries vs nodes
            le_attn_le = attn_le[:, :n]   # [bh, n, n] leaf queries vs leaves
            le_attn_no = attn_no[:, :n]   # [bh, n, m] leaf queries vs nodes
            no_attn_le_t = no_attn_le.transpose(1, 2)
            no_attn_no_t = no_attn_no.transpose(1, 2)
            no_nstree_attn_no = self._compute_nstree_states(no_attn_le_t, no_attn_no_t, right_weight, ntree_mask, None)
            no_nstree_attn_no = no_nstree_attn_no.transpose(1, 2)
            # Reassemble the full [bh, n + m, n + m] logit matrix.
            no_nstree_attn_wgts = torch.cat((no_attn_le, no_nstree_attn_no), 2)
            le_attn_wgts = torch.cat((le_attn_le, le_attn_no), 2)
            nstree_attn_wgts = torch.cat((le_attn_wgts, no_nstree_attn_wgts), 1)
        else:
            # General path: hierarchically accumulate all node logits.
            attn_le_t = attn_le.transpose(1, 2)
            attn_no_t = attn_no.transpose(1, 2)
            nstree_attn_no = self._compute_nstree_states(attn_le_t, attn_no_t, right_weight, ntree_mask, None)
            nstree_attn_wgts = torch.cat((attn_le_t, nstree_attn_no), 1).transpose(1, 2)
        # Hierarchically accumulate leaf values into the node values.
        # FIXME: values explode !
        node_v = self._compute_nstree_states(v, node_v, right_weight, ntree_mask, hier_embed)
        assert not torch.isnan(node_v).any(), f'node_v::after nstack: {node_v.sum(-1)}'
        values = torch.cat([v, node_v], 1)
        return nstree_attn_wgts, values
    def compute_nstack_att(
            self, q, k, v, node_k, node_v, ntree_mask, right_weight, hier_embed, pad_mask, need_weights, force_self_att=False, **kwargs):
        """Full nstack attention: logits -> mask -> softmax -> dropout -> out.

        q:        [bh, t, d]
        k, v:     [bh, n, d]
        node_k/v: [bh, m, d]
        ntree_mask: [bh, n, m, 1]
        pad_mask: [b, 1, n + m, n + m]
        Returns (attn [t, b, embed_dim],
                 head-averaged weights [b, t, n + m] or None).
        """
        bh, t, d = q.size()
        bh_, n, _ = k.size()
        bh__, m, _ = node_k.size()
        b = bh // self.num_heads
        attn_weights, values = self.compute_att_weights_values(
            q, k, v, node_k, node_v, ntree_mask, right_weight, hier_embed, force_self_att=force_self_att, **kwargs
        )
        # Apply the configured padding/merge mask, then softmax over n+m keys.
        attn_weights = self.nstack_mask_func(self, attn_weights, pad_mask, **kwargs)
        attn_weights = utils.softmax(attn_weights, dim=-1).type_as(attn_weights)
        # Rows that were fully masked produce NaN after softmax; zero them.
        if attn_weights.dtype == torch.float16:
            attn_weights[torch.isnan(attn_weights)] = 0.0
        else:
            attn_weights = torch.where(torch.isnan(attn_weights), torch.zeros_like(attn_weights), attn_weights)
        assert not torch.isnan(attn_weights).any(), f'weights::after-zeroing'
        assert not torch.isnan(values).any(), f'values::nan'
        attn_weights = self.dropout_layer(attn_weights)
        # Weighted sum of leaf+node values.
        """this is the final equation 18"""
        attn = torch.bmm(attn_weights, values)
        attn = attn.transpose(0, 1).contiguous().view(t, b, self.embed_dim)
        assert not torch.isnan(attn).any(), f'before outprof'
        assert not torch.isinf(attn).any(), f'before outprof'
        attn = self.out_proj(attn)
        assert not torch.isnan(attn).any(), f'after outprof'
        assert not torch.isinf(attn).any(), f'after outprof'
        if need_weights:
            # Average attention weights over heads for reporting.
            attn_weights = attn_weights.view(b, self.num_heads, t, n + m)
            attn_weights = attn_weights.sum(dim=1) / self.num_heads
        else:
            attn_weights = None
        return attn, attn_weights
    def forward(
            self, query, key, value, node_key, node_value, ntree_mask, hier_embed=None, pad_mask=None,
            key_pad=None, node_pad=None, incremental_state=None,
            need_weights=True, static_kv=False, attn_mask=None, force_self_att=False
    ):
        """Nstack multi-head attention over leaves and tree nodes.

        query:      [t, b, c]  (t = n + m in the forced self-attention path)
        key/value:  [n, b, c]  leaf states
        node_key/node_value: [m, b, c]  node states
        ntree_mask: [bh, n, m, 1]
        hier_embed: [bh, n, m, d] with c = d * num_heads, or None
        pad_mask:   [b, 1, n + m, n + m]
        Returns (attn, attn_weights) as produced by compute_nstack_att.
        """
        t, b, c = query.size()
        n, bk, c_ = key.size()
        m, b__, c__ = node_key.size()
        h = self.num_heads
        if key_pad is None:
            assert node_pad is None
            assert attn_mask is None
        # Project q/k/v for leaves and nodes (handles cached/incremental state).
        (q, k, v, node_k, node_v, key_padding_mask, node_padding_mask,
         saved_state, src_len, node_src_len, tgt_len, query_bsz) = self.prepare_dptree_qkv(
            query, key, value, node_key, node_value, key_pad, node_pad, incremental_state,
            need_weights, static_kv, True, True, force_self_att
        )
        # Per-leaf scalar weights for the rightward (aggregation) step.
        right_weight = self.project_dwstack_key(key)
        # right_weight: [n, b, h] -> [bh, 1, n, 1]
        right_weight = right_weight.contiguous().view(n, 1, bk * h, 1).transpose(0, 2)
        (attn, attn_weights) = self.compute_nstack_att(
            q, k, v, node_k, node_v, ntree_mask, right_weight, hier_embed, pad_mask, need_weights,
            force_self_att=force_self_att,
        )
        return attn, attn_weights
"""this is the main attention mechanism class"""
class MergeStackNodesOnValueAttention(MergeStackNodesOnAffinityValueAttention):
    """Variant that applies hierarchical accumulation to the *values* only;
    the attention logits are the plain leaf/node dot products."""

    def compute_att_weights_values(self, q, k, v, node_k, node_v, ntree_mask, right_weight, hier_embed=None, **kwargs):
        """Return plain concatenated logits and nstack-accumulated values.

        q:             [bh, t, d]
        k, v:          [bh, n, d]
        node_k/node_v: [bh, m, d]
        ntree_mask:    [bh, n, m, 1]
        returns (logits [bh, t, n+m], values [bh, n+m, d])
        """
        leaf_scores = torch.bmm(q, k.transpose(1, 2))       # [bh, t, n]
        node_scores = torch.bmm(q, node_k.transpose(1, 2))  # [bh, t, m]
        attn_logits = torch.cat([leaf_scores, node_scores], 2)
        # Only the values go through hierarchical accumulation here.
        node_v = self._compute_nstree_states(v, node_v, right_weight, ntree_mask, hier_embed)
        assert not torch.isnan(node_v).any(), f'node_v::after nstack: {node_v.sum(-1)}'
        values = torch.cat([v, node_v], 1)
        return attn_logits, values
| [
"your_email@gmail.com"
] | your_email@gmail.com |
087dc9ae865acae60ac24c9dfbd921703d209bdc | 6174de8df820463515c63425700eab7af643bb31 | /src/test_emb.py | eda27ed664cdbaef38b2a7a846cf3cb434713eec | [] | no_license | danielzgsilva/CL-MOT | 1cd9b5f2f06454dd7c35a3e2906ad2883ea83495 | 3b5b812788a34728d7b7484b10ae9434313380fe | refs/heads/master | 2022-12-05T18:45:36.805047 | 2020-08-27T22:01:34 | 2020-08-27T22:01:34 | 272,636,268 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,882 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import argparse
import torch
import json
import time
import os
import cv2
import math
from sklearn import metrics
from scipy import interpolate
import numpy as np
from torchvision.transforms import transforms as T
import torch.nn.functional as F
from models.model import create_model, load_model
from datasets.dataset.jde import JointDataset, collate_fn
from models.utils import _tranpose_and_gather_feat
from utils.utils import xywh2xyxy, ap_per_class, bbox_iou
from opts import opts
from models.decode import mot_decode
from utils.post_process import ctdet_post_process
def test_emb(
        opt,
        batch_size=16,
        img_size=(1088, 608),
        print_interval=40,
):
    """Evaluate re-ID embedding quality.

    Extracts one embedding per annotated pedestrian, computes all pairwise
    cosine similarities, and reports TPR at several fixed FAR operating
    points via a ROC curve.

    Returns the list of TPR values (one per FAR level in `far_levels`), or
    None if no embeddings were extracted.
    """
    # Resolve dataset paths from the JSON data config.  `with` guarantees the
    # file is closed even if json.load raises (the old code leaked it then).
    with open(opt.data_cfg) as cfg_file:
        data_cfg_dict = json.load(cfg_file)
    test_paths = data_cfg_dict['test_emb']
    dataset_root = data_cfg_dict['root']
    if opt.gpus[0] >= 0:
        opt.device = torch.device('cuda')
    else:
        opt.device = torch.device('cpu')
    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt)
    model = load_model(model, opt.load_model)
    model = model.to(opt.device)
    model.eval()

    # Get dataloader
    transforms = T.Compose([T.ToTensor()])
    dataset = JointDataset(opt, dataset_root, test_paths, img_size, augment=False, transforms=transforms)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False,
                                             num_workers=8, drop_last=False)
    embedding, id_labels = [], []
    print('Extracting pedestrian features...')
    for batch_i, batch in enumerate(dataloader):
        t = time.time()
        output = model(batch['img'].cuda())[-1]
        # Gather id-embedding vectors at the annotated object centres.
        id_head = _tranpose_and_gather_feat(output['id'], batch['ind'].cuda())
        id_head = id_head[batch['reg_mask'].cuda() > 0].contiguous()
        emb_scale = math.sqrt(2) * math.log(opt.nID - 1)
        id_head = emb_scale * F.normalize(id_head)
        id_target = batch['ids'].cuda()[batch['reg_mask'].cuda() > 0]
        for i in range(0, id_head.shape[0]):
            feat, label = id_head[i], id_target[i].long()
            if label != -1:  # skip boxes without a valid identity
                embedding.append(feat)
                id_labels.append(label)
        if batch_i % print_interval == 0:
            print(
                'Extracting {}/{}, # of instances {}, time {:.2f} sec.'.format(batch_i, len(dataloader), len(id_labels),
                                                                               time.time() - t))

    print('Computing pairwise similarity...')
    if len(embedding) < 1:
        return None
    embedding = torch.stack(embedding, dim=0).cuda()
    id_labels = torch.LongTensor(id_labels)
    n = len(id_labels)
    print(n, len(embedding))
    assert len(embedding) == n
    embedding = F.normalize(embedding, dim=1)
    pdist = torch.mm(embedding, embedding.t()).cpu().numpy()
    gt = id_labels.expand(n, n).eq(id_labels.expand(n, n).t()).numpy()
    # Keep the strict upper triangle: each pair once, no self-pairs.
    # (Note: pairs whose similarity is exactly 0 are also dropped by != 0.)
    up_triangle = np.where(np.triu(pdist) - np.eye(n) * pdist != 0)
    pdist = pdist[up_triangle]
    gt = gt[up_triangle]
    far_levels = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
    far, tar, threshold = metrics.roc_curve(gt, pdist)
    interp = interpolate.interp1d(far, tar)
    tar_at_far = [interp(x) for x in far_levels]
    for idx, fa in enumerate(far_levels):
        print('TPR@FAR={:.7f}: {:.4f}'.format(fa, tar_at_far[idx]))
    return tar_at_far
if __name__ == '__main__':
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    opt = opts().init()
    with torch.no_grad():
        # Renamed from `map` (which shadowed the builtin); this holds the
        # list of TPR@FAR values returned by test_emb.
        tpr_at_far = test_emb(opt, batch_size=4)
| [
"danielzgsilva@knights.ucf.edu"
] | danielzgsilva@knights.ucf.edu |
42f172ddfbeb2d319d3b10f3ea25bfb756ff58bc | 7601a6be6e581053820bc3fffd6864e19b1ef680 | /test/toy/toyAllProducts.py | 28c2b3825019b74bfb425ed150dea5046c1b1fd2 | [] | no_license | LucyBean/Part-III | 0a89a96f355be6b4af44753d74c9569293e772fc | cb753a01c6720269fb636725ce2a97c8d143c183 | refs/heads/master | 2020-08-01T22:38:58.500258 | 2017-02-23T14:32:36 | 2017-02-23T14:32:36 | 73,567,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | '''
Created on Nov 15, 2016
@author: Lucy
'''
import cobra
from src import models, display
# Starting metabolite id (presumably glucose-6-phosphate — confirm against
# the toy model's metabolite table).
startID = "G6P"
# Load the toy COBRA model shipped next to this script.
cobraModel = cobra.io.load_json_model("toyModel.json")
# Metabolites reachable as products from the starting metabolite.
products = models.findProducts(cobraModel, startID)
title = "Possible products for starting metabolite " + startID
# Render the products onto the model map.
display.displayAll(map_json="toyModelMap.json", toDisplay=products, title=title)
"howobscene@msn.com"
] | howobscene@msn.com |
abb9e9b977d438880483eb8a4abc7725dabe5e60 | 96c37b9aa0493a30789dc6363fb1ddb61245dc90 | /Python/Python Fundamental/Python_Basic_Assignments&Exercises/letters_count_1.py | 8e225d48eeb802678b768765905959fabd6767f7 | [] | no_license | ssezerer/Data-Science | 24891ca14581d2b374fbd291ac92d88d6f672468 | 315d2de0b6b6e8b92da2561a4fe8c582cae2476d | refs/heads/master | 2023-01-08T19:28:57.204923 | 2020-11-05T17:55:40 | 2020-11-05T17:55:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | sentence = input("enter a sentence to count the each character separatly : ").strip().lower()
# Count occurrences of each character in a single O(n) pass.  The previous
# version rescanned the whole sentence for every character (O(n^2)); the
# resulting dict — first-occurrence key order and final counts — is identical.
letters = {}
for ch in sentence:
    letters[ch] = letters.get(ch, 0) + 1
print(letters)
"stbob131219@gmail.com"
] | stbob131219@gmail.com |
905281bdc94851dca0f3c0d7b7818fabc49cc65c | 5168e682acfe5b2501c3919038df6320dd1c4bf4 | /main/cnki/CNKISpider.py | 0fbb28bc5aa89734f1e90e2448208c9d5a382491 | [] | no_license | JoeyLee6/PaperCrawler | 540ce049919af6615fad14fe852a7b52eb2ff80e | 05fcc151f87e70a1d4a692bd2c1aa705aed1431d | refs/heads/master | 2020-04-11T04:31:15.563668 | 2018-10-07T10:45:51 | 2018-10-07T10:45:51 | 161,514,553 | 1 | 0 | null | 2018-12-12T16:22:15 | 2018-12-12T16:22:15 | null | UTF-8 | Python | false | false | 6,026 | py | """
Crawl the article links of specified articles
"""
from selenium import webdriver
from time import sleep
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
import xlrd
import traceback
import selenium.webdriver.support.ui as ui
# set urls
path = ''
# set Firefox Preferences
fp = webdriver.FirefoxProfile()
fp.set_preference("browser.download.folderList", 2)
fp.set_preference("browser.download.manager.showWhenStarting", False)
fp.set_preference("browser.download.dir", "")
fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/xml")
fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/pdf")
fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/caj")
driver = webdriver.Firefox(firefox_profile=fp)
# driver = webdriver.PhantomJS()
# set the username and password of baidu scholar
baidu_username = ''
baidu_password = ''
# login into baidu scholar
driver.get('https://passport.baidu.com/v2/?login')
sleep(10)
driver.find_element_by_xpath('//input[@id="TANGRAM__PSP_3__userName"]').send_keys(baidu_username)
driver.find_element_by_xpath('//input[@id="TANGRAM__PSP_3__password"]').send_keys(baidu_password)
driver.find_element_by_xpath('//input[@id="TANGRAM__PSP_3__submit"]').click()
sleep(3)
# set the username and password of CNKI
CNKI_username = ''
CNKI_password = ''
# set the parameters
# NOTE(review): begin_year/end_year are never read below — the year range is
# hard-coded as range(3) in the crawl loop.
begin_year = 2016
end_year = 2016
# Baidu Scholar journal pages for Chinese LIS journals; exactly one entry is
# left uncommented per run (English journal names added for reference).
journals = [
    # 情报科学 (Information Science)
    # 'http://xueshu.baidu.com/usercenter/data/journal?cmd=journal_page&entity_id=5f43f47ec5603290b1d1908d3f90c814',
    # 情报杂志 (Journal of Intelligence)
    # 'http://xueshu.baidu.com/usercenter/data/journal?cmd=journal_page&entity_id=f6d0498c9a1e453e3d8ccf3553e34b00'
    # 情报理论与实践 (Information Studies: Theory & Application)
    # 'http://xueshu.baidu.com/usercenter/data/journal?cmd=journal_page&entity_id=0fa62daf6b227f7c624d42a87cbd5af4'
    # 情报资料工作 (Information and Documentation Services) — active entry
    'http://xueshu.baidu.com/usercenter/data/journal?cmd=journal_page&entity_id=aa3bf9831455cd20c2a538ae70e96b07'
    # 图书情报知识 (Documentation, Information & Knowledge)
    # 'http://xueshu.baidu.com/usercenter/data/journal?cmd=journal_page&entity_id=c8456e59a6d69d52d9c3cbb73ffcb956'
    # 图书与情报 (Library & Information)
    # 'http://xueshu.baidu.com/usercenter/data/journal?cmd=journal_page&entity_id=3bbbd27f5e2ababade074e3496532266'
    # 现代情报 (Journal of Modern Information)
    # 'http://xueshu.baidu.com/usercenter/data/journal?cmd=journal_page&entity_id=e19715e7e47089a06692548dd697f4fa'
    # 现代图书情报技术 (New Technology of Library and Information Service)
    # 'http://xueshu.baidu.com/usercenter/data/journal?cmd=journal_page&entity_id=7e28f35d24398c89cbff2cb0db26e76f'
    # 图书情报工作 (Library and Information Service)
    # 'http://xueshu.baidu.com/usercenter/data/journal?cmd=journal_page&entity_id=deab8bc3fb25b687269473032b5d81d8'
    # 情报学报 (Journal of the China Society for Scientific and Technical Information)
    # 'http://xueshu.baidu.com/usercenter/data/journal?cmd=journal_page&entity_id=83eabd46f92e9c5fcac01288b892b87c'
]
article_links_dict = dict()  # NOTE(review): unused — confirm before deleting
article_links = list()
# iterate journals
for journal in journals:
    driver.get(journal)
    try:
        # The same element is clicked twice — presumably a UI toggle that
        # needs two clicks to expand the back-issue list; confirm on the page.
        driver.find_element_by_xpath('//*[@id="content"]/div[2]/div[2]/div[1]/div[1]/div[3]/a[2]/i').click()
        sleep(3)
        driver.find_element_by_xpath('//*[@id="content"]/div[2]/div[2]/div[1]/div[1]/div[3]/a[2]/i').click()
        sleep(3)
    except:
        # NOTE(review): bare except + silent exit(0) hides the real failure;
        # consider logging the exception and exiting non-zero.
        exit(0)
    # iterate years
    # NOTE(review): `years` is never used — the loop below visits a fixed
    # range(3) of year panels instead.
    years = driver.find_elements_by_xpath('//div[@class="fre_year"]')
    for i in range(3):
        year_xpath = '//div[@class="fre_year"][' + str(i + 1) + ']'  # year 0 + ?
        driver.find_element_by_xpath(year_xpath).click()
        volumes = driver.find_elements_by_xpath(year_xpath + '/div[@class="fre_year_num"]/a')
        # iterate volumes
        for j in range(len(volumes)):
            volume_xpath = year_xpath + '/div[@class="fre_year_num"]/a[' + str(j + 1) + ']'  # journal volume 0 + ?
            try:
                # The printed year assumes panel i corresponds to year 2003-i
                # (cf. the hard-coded output filename below).
                print('Year: %d, Volume: %d' % ((2003 - i), (j + 1)))
                driver.find_element_by_xpath(volume_xpath).click()
                sleep(2)
            except:
                print('error!')
                sleep(30)
                continue
            while True:
                # iterate articles on the current result page
                articles_xpath = '//div[@class="paper_content"]/div[@class="rela-journals"]/div[@class="result"]'
                articles = driver.find_elements_by_xpath(articles_xpath)
                for k in range(len(articles)):
                    try:
                        article_name_xpath = articles_xpath + '[' + str(k+1) + ']' + '/div[@class="left"]/h3/a'
                        article_name = driver.find_element_by_xpath(article_name_xpath).text
                        # Open the per-article source/download dialog.
                        article_xpath = articles_xpath + '[' + str(k+1) + ']' + '/div[@class="cooper"]/a[2]/i'
                        driver.find_element_by_xpath(article_xpath).click()
                        sleep(2)
                        # The anchor labelled "知网" (= CNKI) carries the link.
                        article_link_xpath = '//div[@class="src_download_wr"]/div[@class="src_content"]/ul/li/a[text()="知网"]'
                        # get the article link
                        article_link = driver.find_element_by_xpath(article_link_xpath).get_attribute("href")
                        print(article_name + ':' + article_link)
                        article_links.append(article_link)
                        driver.find_element_by_xpath('//div[@class="src_download_wr"]/a[@class="c-icon-close-hover close-icon dialog-close"]/i').click()
                        sleep(2)
                    except:
                        # Skip articles whose dialog or CNKI link failed to load.
                        sleep(2)
                        continue
                # click next page; leave the while-loop when there is none
                try:
                    driver.find_element_by_xpath('//i[@class="c-icon c-icon-page-next"]').click()
                    sleep(2)
                    continue
                except:
                    break
    driver.delete_all_cookies()
    sleep(15)
# De-duplicate collected links (set() does not preserve the original order).
article_links = list(set(article_links))
with open('./tszlgz_2003_2001.txt', 'w', encoding='utf-8') as f:
    for article_link in article_links:
        f.write(article_link)
        f.write('\n')
    f.close()  # NOTE(review): redundant — the `with` block already closes f
| [
"361914599@qq.com"
] | 361914599@qq.com |
383b5d0f0074a747db4569fd076744c2879966a0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/394/usersdata/313/74777/submittedfiles/ex11.py | eaab70ccdbf4c3cc2b52bc2a69482ed98b67762c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | # -*- coding: utf-8 -*-
# Fixed SyntaxError: the original lines had unit words ("dias", "meses",
# "ano" — Portuguese for day/month/year) as bare tokens after the numbers;
# they belong in comments, not code.
n1 = 12    # dias  (day)
n2 = 6     # meses (month)
n3 = 1980  # ano   (year)
n4 = 20    # dias  (day)
n5 = 12    # meses (month)
n6 = 1989  # ano   (year)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f160cd861be4861d18ff058c4fe05ae1b02b5b5b | 69e318f2b60175108bc74ee669bfe16287a71cb6 | /plugins/modules/fortios_log_null_device_filter.py | 298792656963599543d5265bb94c284a6f6c4b5c | [] | no_license | chillancezen/ansible-galaxy-fortios-collection | 5268a5fd97fb4594772349b8d89cb818ec54b3bd | 66a331cd4493d1b0f49798d5c2cd6ef5aeba84d3 | refs/heads/master | 2022-04-09T19:20:59.073193 | 2020-03-26T07:17:09 | 2020-03-26T07:17:09 | 250,185,374 | 0 | 0 | null | 2020-03-26T07:06:16 | 2020-03-26T07:06:16 | null | UTF-8 | Python | false | false | 13,861 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_null_device_filter
short_description: Filters for null device logging in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify log_null_device feature and filter category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
log_null_device_filter:
description:
- Filters for null device logging.
default: null
type: dict
suboptions:
anomaly:
description:
- Enable/disable anomaly logging.
type: str
choices:
- enable
- disable
dns:
description:
- Enable/disable detailed DNS event logging.
type: str
choices:
- enable
- disable
filter:
description:
- Null-device log filter.
type: str
filter_type:
description:
- Include/exclude logs that match the filter.
type: str
choices:
- include
- exclude
forward_traffic:
description:
- Enable/disable forward traffic logging.
type: str
choices:
- enable
- disable
gtp:
description:
- Enable/disable GTP messages logging.
type: str
choices:
- enable
- disable
local_traffic:
description:
- Enable/disable local in or out traffic logging.
type: str
choices:
- enable
- disable
multicast_traffic:
description:
- Enable/disable multicast traffic logging.
type: str
choices:
- enable
- disable
netscan_discovery:
description:
- Enable/disable netscan discovery event logging.
type: str
netscan_vulnerability:
description:
- Enable/disable netscan vulnerability event logging.
type: str
severity:
description:
- Lowest severity level to log.
type: str
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
sniffer_traffic:
description:
- Enable/disable sniffer traffic logging.
type: str
choices:
- enable
- disable
ssh:
description:
- Enable/disable SSH logging.
type: str
choices:
- enable
- disable
voip:
description:
- Enable/disable VoIP logging.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Filters for null device logging.
fortios_log_null_device_filter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_null_device_filter:
anomaly: "enable"
dns: "enable"
filter: "<your_own_value>"
filter_type: "include"
forward_traffic: "enable"
gtp: "enable"
local_traffic: "enable"
multicast_traffic: "enable"
netscan_discovery: "<your_own_value>"
netscan_vulnerability: "<your_own_value>"
severity: "emergency"
sniffer_traffic: "enable"
ssh: "enable"
voip: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a session on the FortiGate described by *data* using *fos*.

    Enables API debug output, turns HTTPS off only when the 'https'
    parameter is present and falsy, then logs in with the supplied
    host/username/password, verifying TLS per 'ssl_verify'.
    """
    fos.debug('on')
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_log_null_device_filter_data(json):
    """Return only the recognised log.null-device filter options that were
    actually supplied, dropping unknown keys and None values."""
    option_list = ['anomaly', 'dns', 'filter',
                   'filter_type', 'forward_traffic', 'gtp',
                   'local_traffic', 'multicast_traffic', 'netscan_discovery',
                   'netscan_vulnerability', 'severity', 'sniffer_traffic',
                   'ssh', 'voip']
    return {option: json[option] for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    FortiOS expects hyphenated attribute names while Ansible argument specs
    use underscores.  Values are converted recursively; lists are rebuilt so
    that dicts *inside* lists are converted too.  (The previous version only
    rebound the list loop variable — `elem = underscore_to_hyphen(elem)` —
    which left list elements completely untouched.)
    """
    if isinstance(data, list):
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def log_null_device_filter(data, fos):
    """Push the log.null-device 'filter' configuration to the device and
    return the raw FortiOS API response."""
    filtered = filter_log_null_device_filter_data(data['log_null_device_filter'])
    payload = underscore_to_hyphen(filtered)
    return fos.set('log.null-device',
                   'filter',
                   data=payload,
                   vdom=data['vdom'])
def is_successful_status(status):
    """Treat DELETE-on-missing (HTTP 404) as success, like a plain 'success'
    status — deleting an absent object is idempotently fine."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_log_null_device(data, fos):
    """Dispatch the requested configuration and summarise the outcome.

    Returns a tuple (is_error, changed, raw_response).
    NOTE(review): if 'log_null_device_filter' is absent or empty, `resp` is
    never bound and the return line raises UnboundLocalError — confirm the
    caller always supplies it (the argument spec marks it optional).
    """
    if data['log_null_device_filter']:
        resp = log_null_device_filter(data, fos)
    is_error = not is_successful_status(resp)
    changed = resp['status'] == "success"
    return is_error, changed, resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"log_null_device_filter": {
"required": False, "type": "dict", "default": None,
"options": {
"anomaly": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dns": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"filter": {"required": False, "type": "str"},
"filter_type": {"required": False, "type": "str",
"choices": ["include", "exclude"]},
"forward_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"gtp": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"multicast_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"netscan_discovery": {"required": False, "type": "str"},
"netscan_vulnerability": {"required": False, "type": "str"},
"severity": {"required": False, "type": "str",
"choices": ["emergency", "alert", "critical",
"error", "warning", "notification",
"information", "debug"]},
"sniffer_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssh": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"voip": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_log_null_device(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_log_null_device(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"fshen01@fortinet.com"
] | fshen01@fortinet.com |
2747983057867ca48f64796098f4a6e65983e0aa | d806dd4a6791382813d2136283a602207fb4b43c | /sirius/blueprints/api/remote_service/tambov/app.py | 5efe34267189b393a92b6edd77d8330405506b2e | [] | no_license | MarsStirner/sirius | 5bbf2a03dafb7248db481e13aff63ff989fabbc2 | 8839460726cca080ca8549bacd3a498e519c8f96 | refs/heads/master | 2021-03-24T12:09:14.673193 | 2017-06-06T16:28:53 | 2017-06-06T16:28:53 | 96,042,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | #! coding:utf-8
"""
@author: BARS Group
@date: 23.09.2016
"""
from .config import MODULE_NAME
from flask import Blueprint
module = Blueprint(MODULE_NAME, __name__, url_prefix='/tambov')
# from .passive import *
| [
"paschenko@bars-open.ru"
] | paschenko@bars-open.ru |
4e416317c779c66b72f5c3b58b7cca1acb9169fb | 493d86071bb96ef33a38623a144fe55e49a0aa44 | /tests/data/opta/parsers/test_f24_xml.py | 164f11e85251874a9bbfcbbe789a8f4c74475ead | [
"MIT"
] | permissive | ML-KULeuven/socceraction | 3b8d2411333114436239850d02278785ea0ed83b | 1261a31cf99d0c9a819b67f568549aa47df83f08 | refs/heads/master | 2023-08-31T02:25:42.349813 | 2023-08-30T08:53:01 | 2023-08-30T08:53:01 | 194,881,505 | 517 | 131 | MIT | 2023-09-14T09:46:42 | 2019-07-02T14:38:30 | Python | UTF-8 | Python | false | false | 2,016 | py | import os
from datetime import datetime
import pandas as pd
from pytest import fixture
from socceraction.data.opta import OptaEventSchema, OptaGameSchema
from socceraction.data.opta.parsers import F24XMLParser
@fixture()
def f24xml_parser() -> F24XMLParser:
path = os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
os.pardir,
"datasets",
"opta",
"f24-23-2018-1009316-eventdetails.xml",
)
return F24XMLParser(str(path))
def test_extract_games(f24xml_parser: F24XMLParser) -> None:
games = f24xml_parser.extract_games()
assert len(games) == 1
assert games[1009316] == {
"game_id": 1009316,
"season_id": 2018,
"competition_id": 23,
"game_day": 1,
"game_date": datetime(2018, 8, 20, 21, 0),
"home_team_id": 174,
"away_team_id": 957,
"home_score": 2,
"away_score": 1,
}
OptaGameSchema.validate(pd.DataFrame.from_dict(games, orient="index"))
def test_extract_events(f24xml_parser: F24XMLParser) -> None:
events = f24xml_parser.extract_events()
assert len(events) == 1665
assert events[(1009316, 2097423126)] == {
"game_id": 1009316,
"event_id": 2097423126,
"period_id": 2,
"team_id": 174,
"player_id": 197319,
"type_id": 1,
"timestamp": datetime(2018, 8, 20, 22, 51, 28, 259000),
"minute": 94,
"second": 50,
"outcome": False,
"start_x": 46.4,
"start_y": 37.1,
"end_x": 74.4,
"end_y": 8.9,
"qualifiers": {
1: None,
213: "5.7",
212: "35.1",
152: None,
5: None,
155: None,
56: "Right",
140: "74.4",
141: "8.9",
},
"assist": False,
"keypass": False,
}
df = pd.DataFrame.from_dict(events, orient="index")
df["type_name"] = "Added later"
OptaEventSchema.validate(df)
| [
"noreply@github.com"
] | noreply@github.com |
74b9ea78cc7c3d88f53a193a888c6214c5fd749b | e90d11ddb12a4c62c22e3fd45205d88f70d62e57 | /python/minicms/minicms/urls.py | a9309543af4492b7fc4a0976cea155ef9ba6c65b | [] | no_license | dyhbrewer/Web-Front-End | 27c5a912555e4ec91f5626dc400fa92d8352f455 | 439cc7207e0994577ebd0a0de1e9bda89effa9bb | refs/heads/master | 2021-01-17T23:04:33.802049 | 2016-11-18T11:41:11 | 2016-11-18T11:41:11 | 47,461,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | """minicms URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| [
"xdxuhehui@126.com"
] | xdxuhehui@126.com |
5c1601ae7873d6e366b47a5e47920cd78a86bed8 | 5e4bf6922224fb846d3aa7c60d1f52410b355e69 | /src/jbdl/experimental/ode/solve_ivp.py | 7e454f5bdeaf42711efff046ed56b1f2c080b014 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.1-only",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Wangxinhui-bot/jbdl | 1748b78dbbb62854cf66dd6b2020c9d1f2ed0ee5 | 4541fcbec9156c8a3dc496058230fdf2a3fa1bdf | refs/heads/master | 2023-08-13T17:39:23.901552 | 2021-09-07T03:09:42 | 2021-09-07T03:09:42 | 400,973,793 | 0 | 0 | MIT | 2021-09-07T03:16:16 | 2021-08-29T06:59:21 | C++ | UTF-8 | Python | false | false | 4,304 | py | from functools import partial
from numpy import diff
from jax.api import jacfwd
import jax.numpy as jnp
from jax._src.util import safe_map, safe_zip
from jax.flatten_util import ravel_pytree
from jax import linear_util as lu
import jax
from jax import lax
from jax import device_put
map = safe_map
zip = safe_zip
def ravel_first_arg(f, unravel):
return ravel_first_arg_(lu.wrap_init(f), unravel).call_wrapped
@lu.transformation
def ravel_first_arg_(unravel, y_flat, *args):
y = unravel(y_flat)
ans = yield (y,) + args, {}
ans_flat, _ = ravel_pytree(ans)
yield ans_flat
def solve_ivp(func, y0, t, event_func, event_handle, *args, rtol=1.4e-8, atol=1.4e-8, mxstep=jnp.inf):
return _solve_ivp_wrapper(func, event_func, event_handle, rtol, atol, mxstep, y0, t, *args)
@partial(jax.jit, static_argnums=(0, 1, 2, 3, 4, 5))
def _solve_ivp_wrapper(func, event_func, event_handle, rtol, atol, mxstep, y0, ts, *args):
y0, unravel = ravel_pytree(y0)
func = ravel_first_arg(func, unravel)
out = _solve_ivp(func, event_func, event_handle, rtol, atol, mxstep, y0, ts, *args)
return jax.vmap(unravel)(out)
def _solve_ivp(func, event_func, event_handle, rtol, atol, mxstep, y0, ts, *args):
def func_(y, t):
return func(y, t, *args)
def event_func_(y, t):
return event_func(y, t, *args)
def event_handle_(y, t):
return event_handle(y, t, *args)
def scan_fun(carry, target_t):
def cond_fun(state):
i, _, _, t, dt, _, _ = state
return (t < target_t) & (i < mxstep) & (dt > 0)
def body_fun(state):
i, y, f, t, dt, _, e = state
next_y = y + f * dt
next_t = t + dt
next_f = func_(next_y, next_t)
next_e = event_func_(next_y, next_t)
event_handle_y = event_handle_(next_y, next_t)
event_handle_f = func_(event_handle_y, next_t)
event_handle_e = event_func_(event_handle_y, next_t)
new = [i + 1, next_y, next_f, next_t, dt, t, next_e]
event_handle_new = [i + 1, event_handle_y, event_handle_f, next_t, dt, t, event_handle_e]
return map(partial(jnp.where, jnp.all(jnp.logical_and(e > 0, next_e < 0))), event_handle_new, new)
_, *carry = lax.while_loop(cond_fun, body_fun, [0] + carry)
y, _, _, _, _, _ = carry
return carry, y
f0 = func_(y0, ts[0])
dt = device_put(0.001)
e0 = device_put(1.0)
init_carry = [y0, f0, ts[0], dt, ts[0], e0]
_, ys = lax.scan(scan_fun, init_carry, ts[1:])
return jnp.concatenate((y0[None], ys))
if __name__ == "__main__":
print("Hello!")
import time
def e_handle(y, t, *args):
return -y
def e_fun(y, t, *args):
return y[0]
def pend(y, t, b, c):
dxdt = jnp.array([y[1], -b*y[1] - c*jnp.sin(y[0])])
return dxdt
y0 = jnp.array([jnp.pi - 0.1, 0.0])
B = 0.25
C = 5.0
t_eval = jnp.linspace(0, 10, 1000)
sol = jax.jit(solve_ivp, static_argnums=(0, 3, 4))(pend, y0, t_eval, e_fun, e_handle, B, C)
print(sol)
print("------------------")
start = time.time()
result = solve_ivp(pend, y0, jnp.linspace(0, 1, 1000), e_fun, e_handle, B, C)
result.block_until_ready()
duration = time.time() - start
print(duration)
start = time.time()
result = solve_ivp(pend, y0, jnp.linspace(0, 1, 1000), e_fun, e_handle, b, c)
result.block_until_ready()
duration = time.time() - start
print(duration)
print("=================")
# pure_solve_ivp = partial(solve_ivp, func=pend, event_fun=e_fun, event_handle=e_handle)
start = time.time()
diff = jax.jit(jacfwd(solve_ivp, argnums=1), static_argnums=(0, 3, 4))
reslut = diff(pend, y0, jnp.linspace(0, 1, 1000), e_fun, e_handle, b, c)
result.block_until_ready()
duration = time.time() - start
print(duration)
start = time.time()
reslut = diff(pend, y0, jnp.linspace(0, 1, 1000), e_fun, e_handle, b, c)
result.block_until_ready()
duration = time.time() - start
print(duration)
start = time.time()
reslut = diff(pend, y0, jnp.linspace(0, 1, 1000), e_fun, e_handle, b, c)
result.block_until_ready()
duration = time.time() - start
print(duration)
| [
"wx_08f71fc664674d5899780f8a0dbad524@git.code.tencent.com"
] | wx_08f71fc664674d5899780f8a0dbad524@git.code.tencent.com |
e0c6c65c9c61e0723e6c013997c2a939ae967972 | bb8ecdd004b6da4b9429b220eaae0d0893d53eab | /16.py | 536daca6c4a6bd16dc0d16e29926e4bc1b31d608 | [
"MIT"
] | permissive | hendrikjeb/Euler | 43603fc9b9aa0a449dc23412500e8f06dafaf8ae | a9e62babcec4e4faef15b8e2d3db86790c9d59b8 | refs/heads/master | 2021-01-21T13:57:51.452668 | 2016-05-11T22:08:04 | 2016-05-11T22:17:15 | 47,711,276 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | # -*- coding: utf-8 -*-
"""
Problem 16: Power digit sum
2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^1000?
"""
from time import time
start = time()
som = 0
for l in str(2**1000)[:]:
som += int(l)
print som
print 'Tijd: ', time() - start
| [
"hendrikjeb@example.org"
] | hendrikjeb@example.org |
bdce4da9f34c04c3473350ce8923ddf0eaa42313 | b8d9bba87ffb1c6945fb1c9268a986587e672785 | /Madu_Ionascu/temp_reed.py | 10a0e03ca0530ba48ba09f9e47489789fb1c408c | [] | no_license | patilanup246/Projects | 4f510f5965a2b5c1ca72dd94e70f53e14c7dac59 | b41aaa052a9f211065c184b7a0e167c089aefbc5 | refs/heads/master | 2021-02-28T00:14:01.330374 | 2018-09-01T12:26:29 | 2018-09-01T12:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | '''
Created on Jul 4, 2018
@author: talib
'''
import xmltodict, requests, json
all_urls = []
urls = [
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0000.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0001.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0002.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0003.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0004.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0005.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0006.xml'
]
x = xmltodict.parse(requests.get('https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_index.xml').text)
last_mod = ''
for m in reversed(x['sitemapindex']['sitemap']):
print (m['loc'])
last_mod = m['lastmod'].split('T')[0]
#https://www.totaljobs.com/jobs-sitemaps/01.xml | [
"tasneemrangwala@users.noreply.github.com"
] | tasneemrangwala@users.noreply.github.com |
63745902cac53664d3f9579ce008dd6fc0d34866 | 1bb42bac177fb4e979faa441363c27cb636a43aa | /optimization/trainer_test.py | 3c9f7d0c623a496f1af9e0bdc4328d5c49ef83d1 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | google-research/federated | a6040e80fa0fbf533e0d665c66a9bc549d208b3d | 329e60fa56b87f691303638ceb9dfa1fc5083953 | refs/heads/master | 2023-08-28T13:10:10.885505 | 2023-08-22T23:06:08 | 2023-08-22T23:06:40 | 295,559,343 | 595 | 187 | Apache-2.0 | 2022-05-12T08:42:53 | 2020-09-14T23:09:07 | Python | UTF-8 | Python | false | false | 1,750 | py | # Copyright 2022, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from absl.testing import absltest
from absl.testing import flagsaver
from optimization import trainer
class TrainerTest(absltest.TestCase):
@flagsaver.flagsaver(
root_output_dir=tempfile.mkdtemp(),
experiment_name='test_experiment',
task='emnist_character',
clients_per_round=1,
total_rounds=2,
client_optimizer='sgd',
client_learning_rate=0.01,
server_optimizer='sgd',
server_learning_rate=1.0,
use_synthetic_data=True)
def test_executes_with_constant_client_lr(self):
trainer.main([])
@flagsaver.flagsaver(
root_output_dir=tempfile.mkdtemp(),
experiment_name='test_experiment',
task='emnist_character',
clients_per_round=1,
total_rounds=2,
client_optimizer='sgd',
client_learning_rate=0.01,
client_lr_schedule='exp_decay',
client_lr_decay_steps=1,
client_lr_decay_rate=0.1,
client_lr_staircase=True,
server_optimizer='sgd',
server_learning_rate=1.0,
use_synthetic_data=True)
def test_executes_with_client_lr_schedule(self):
trainer.main([])
if __name__ == '__main__':
absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
c9b7f903cf66a083d05e34ebc1900c3906a73400 | 9c50f57a9cb32b44e86a0cdcbf61ead34754b085 | /杂物间/PycharmProjects/面向对象基础/bc_08_案例.py | a4de3dd88e831cda6088324ea0cfb9c0c0d834f7 | [] | no_license | a1403893559/rg201python | c3f115011981393c86a0150e5281096651712ad4 | 448f04c86e4c7fd30e3a2a4f9121b934ae1d49be | refs/heads/master | 2020-03-15T23:32:17.723403 | 2018-03-18T12:59:43 | 2018-03-18T12:59:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | class Person:
"""人类"""
def __init__(self, name, weight):
# slef.属性 = 形参
self.name = name
self.weight = weight
def __str__(self):
# __str__方法必须返回一个字符串
return "我的名字叫%s 体重 %.2f 公斤 " % (self.name, self.weight)
def run(self):
"""跑步"""
print("%s 爱跑步,跑步锻炼身体" % self.name)
self.weight -= 0.5
def eat(self):
"""吃东西"""
print("%s 是吃货,吃完这顿在减肥" % self.name)
self.weight += 1
xiaoming = Person("小明", 75)
xiaoming.run()
xiaoming.eat()
xiaoming.eat()
print(xiaoming)
| [
"wengwenyu@aliyun.com"
] | wengwenyu@aliyun.com |
aad5c7153fd5f602d162e83a68ede6eb5da0384b | 5e879d89660128bb216c1fdc5ecec71da7336deb | /datacol/User/forms.py | 53a263d3d8acf26351b32340f12be1f8dd3cf301 | [] | no_license | iradualbert/webproject1 | cdf79691cf7aec8078e52f6b9a5bacf7792e5367 | 537dcaf8cc1cabef20660580e9ce782356d29ed6 | refs/heads/master | 2022-04-14T23:00:01.135449 | 2020-04-14T19:57:39 | 2020-04-14T19:57:39 | 255,711,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
# Model Form
'''update user model
by interacting with the database
'''
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email']
class ProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['phone_number', 'country', 'language', 'birthday', 'gender', 'career', 'profile_pic']
| [
"albert.iradukunda@yahoo.com"
] | albert.iradukunda@yahoo.com |
6153a79357e0dffa210f061affb00b6aae893ad8 | ac63a6f41b924f8f395188267b5cfc8c516386b3 | /tests/test_django_snack.py | 086625fae5c55e560b1d860d34baca147b6e5946 | [] | no_license | josephlee3454/django-snack | 44996677557633254225449f7f5c973c0b86e629 | ed8518b293d4d0b41d57c12ba1815932a512fc73 | refs/heads/master | 2022-12-06T18:34:01.717675 | 2020-08-22T21:54:05 | 2020-08-22T21:54:05 | 289,598,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django_snack import __version__
def test_version():
assert __version__ == '0.1.0'
| [
"joseph.lee3454@gmail.com"
] | joseph.lee3454@gmail.com |
2bf0fba743e8bb81966420b6f4c2a40df67ac04e | be03d4ac87a05cd501470bc809e18c39a04528c3 | /exercicios-com-arquivos/2/percentual.py | bb9e0bceb288014a8fcb215e55d63fbb195c0598 | [] | no_license | fplucas/exercicios-python | 831c11ac961fc5317c93e50e4ae0896c95ea12fe | d493d2f48c54f93789b513c04af7aba43b477816 | refs/heads/master | 2021-01-21T04:48:02.709921 | 2016-06-29T13:08:34 | 2016-06-29T13:08:34 | 52,295,341 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | def calcula_percentual(quantidade, total):
percentual = quantidade * 100 / total
return percentual
| [
"me@fplucas.com"
] | me@fplucas.com |
6e1af1e92961bc6cf4364d8727c6e9e240433d9a | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/DLINK-3100-MNGINF-MIB.py | 9ae0c56579fbb5e55d9b95037bfb35f2681f9fa8 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 9,474 | py | #
# PySNMP MIB module DLINK-3100-MNGINF-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DLINK-3100-MNGINF-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:33:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
rnd, = mibBuilder.importSymbols("DLINK-3100-MIB", "rnd")
InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
iso, ModuleIdentity, Gauge32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Bits, IpAddress, TimeTicks, Counter32, Counter64, MibIdentifier, Integer32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "ModuleIdentity", "Gauge32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Bits", "IpAddress", "TimeTicks", "Counter32", "Counter64", "MibIdentifier", "Integer32", "NotificationType")
TruthValue, TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "DisplayString", "RowStatus")
rlMngInf = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89))
rlMngInf.setRevisions(('2003-09-21 00:00',))
if mibBuilder.loadTexts: rlMngInf.setLastUpdated('200309210000Z')
if mibBuilder.loadTexts: rlMngInf.setOrganization('Dlink, Inc.')
class RlMngInfServiceType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("dontCare", 0), ("telnet", 1), ("snmp", 2), ("http", 3), ("https", 4), ("ssh", 5), ("icmp", 6))
class RlMngInfActionType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("permit", 0), ("deny", 1))
rlMngInfMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMngInfMibVersion.setStatus('current')
rlMngInfEnable = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfEnable.setStatus('current')
rlMngInfActiveListName = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfActiveListName.setStatus('current')
rlMngInfListTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4), )
if mibBuilder.loadTexts: rlMngInfListTable.setStatus('current')
rlMngInfListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1), ).setIndexNames((0, "DLINK-3100-MNGINF-MIB", "rlMngInfListName"), (0, "DLINK-3100-MNGINF-MIB", "rlMngInfListPriority"))
if mibBuilder.loadTexts: rlMngInfListEntry.setStatus('current')
rlMngInfListName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMngInfListName.setStatus('current')
rlMngInfListPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMngInfListPriority.setStatus('current')
rlMngInfListIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListIfIndex.setStatus('current')
rlMngInfListIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListIpAddr.setStatus('current')
rlMngInfListIpNetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListIpNetMask.setStatus('current')
rlMngInfListService = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 6), RlMngInfServiceType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListService.setStatus('current')
rlMngInfListAction = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 7), RlMngInfActionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListAction.setStatus('current')
rlMngInfListRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 8), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListRowStatus.setStatus('current')
rlMngInfAuditingEnable = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 5), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfAuditingEnable.setStatus('current')
rlMngInfListInetTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6), )
if mibBuilder.loadTexts: rlMngInfListInetTable.setStatus('current')
rlMngInfListInetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1), ).setIndexNames((0, "DLINK-3100-MNGINF-MIB", "rlMngInfListInetName"), (0, "DLINK-3100-MNGINF-MIB", "rlMngInfListInetPriority"))
if mibBuilder.loadTexts: rlMngInfListInetEntry.setStatus('current')
rlMngInfListInetName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMngInfListInetName.setStatus('current')
rlMngInfListInetPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMngInfListInetPriority.setStatus('current')
rlMngInfListInetIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetIfIndex.setStatus('current')
rlMngInfListInetIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 4), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetIpAddrType.setStatus('current')
rlMngInfListInetIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 5), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetIpAddr.setStatus('current')
rlMngInfListInetIpNetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetIpNetMask.setStatus('current')
rlMngInfListInetService = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 7), RlMngInfServiceType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetService.setStatus('current')
rlMngInfListInetAction = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 8), RlMngInfActionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetAction.setStatus('current')
rlMngInfListInetRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 9), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetRowStatus.setStatus('current')
rlMngInfListInetIPv6PrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128)).clone(128)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetIPv6PrefixLength.setStatus('current')
mibBuilder.exportSymbols("DLINK-3100-MNGINF-MIB", RlMngInfActionType=RlMngInfActionType, rlMngInfListIpNetMask=rlMngInfListIpNetMask, rlMngInfListIfIndex=rlMngInfListIfIndex, rlMngInfListInetIpNetMask=rlMngInfListInetIpNetMask, rlMngInfListInetRowStatus=rlMngInfListInetRowStatus, rlMngInfListInetName=rlMngInfListInetName, RlMngInfServiceType=RlMngInfServiceType, rlMngInfActiveListName=rlMngInfActiveListName, rlMngInfListInetService=rlMngInfListInetService, rlMngInfListIpAddr=rlMngInfListIpAddr, rlMngInfListPriority=rlMngInfListPriority, rlMngInfListService=rlMngInfListService, rlMngInfListEntry=rlMngInfListEntry, rlMngInfListInetEntry=rlMngInfListInetEntry, rlMngInfListInetIpAddrType=rlMngInfListInetIpAddrType, rlMngInfEnable=rlMngInfEnable, rlMngInfListRowStatus=rlMngInfListRowStatus, rlMngInfListInetIPv6PrefixLength=rlMngInfListInetIPv6PrefixLength, rlMngInfListInetIfIndex=rlMngInfListInetIfIndex, rlMngInfListName=rlMngInfListName, rlMngInfListInetTable=rlMngInfListInetTable, PYSNMP_MODULE_ID=rlMngInf, rlMngInfMibVersion=rlMngInfMibVersion, rlMngInfListAction=rlMngInfListAction, rlMngInfListInetAction=rlMngInfListInetAction, rlMngInfAuditingEnable=rlMngInfAuditingEnable, rlMngInfListInetPriority=rlMngInfListInetPriority, rlMngInfListInetIpAddr=rlMngInfListInetIpAddr, rlMngInf=rlMngInf, rlMngInfListTable=rlMngInfListTable)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
ead04a2a42bdbffba3bfaee11b0f9d7d6ac00ca6 | ca0d8dac1ea6ba0829b1aa34dc9c8a26d873c077 | /Past_drafts/12_Apr/AI_algos.py | d75105c16880cf295816a7769e239b3d54b220f0 | [] | no_license | VaingloryStuff/Seraphim | 4fd0e0f5e7f844b63f35ada3c83e4c1d87d24eab | 46d50e822f0de90340dd8687949f17c067a65f7e | refs/heads/master | 2020-03-12T17:09:56.751014 | 2018-04-14T19:26:34 | 2018-04-14T19:26:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,762 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 00:01:38 2018
@author: phypoh
"""
#Testing
import requests
import json
from API import hero_list, hero_range, API_rates #, pickrates, banrates
from API import pull_hero
"""
Choose your AI algorithm here.
"""
def AI_pick(A_side, B_side, A_ban, B_ban):
    """Select the hero the AI should pick next.

    Delegates to the currently active picking strategy (Elim_Index);
    swap the return line for auto_highest or account_roles to try one of
    the alternative strategies in this module.
    """
    return Elim_Index(A_side, B_side, A_ban, B_ban)
def AI_ban(A_side, B_side, A_ban, B_ban):
    """Select the hero the AI should ban next via the active ban strategy."""
    return ban_second_highest(A_side, B_side, A_ban, B_ban)
"""
Picking algorithms
"""
def auto_highest(A_side, B_side, A_ban=None, B_ban=None, rates=None):
    """Return the name of the best-rated hero that is still available.

    A hero is unavailable once either side has picked or banned it.
    ``rates`` defaults to the module-level API_rates list, which is
    assumed to be ordered best-first by win rate (per ban_second_highest's
    description); pass an explicit list to override.  The ban lists use
    None sentinels instead of mutable defaults.

    Returns "NIL" when every hero in ``rates`` is taken.
    """
    A_ban = [] if A_ban is None else A_ban
    B_ban = [] if B_ban is None else B_ban
    if rates is None:
        rates = API_rates
    # One set lookup per candidate instead of four list scans.
    unavailable = set(A_side) | set(B_side) | set(A_ban) | set(B_ban)
    for hero in rates:
        if hero["name"] not in unavailable:
            return hero["name"]
    return "NIL"
def ban_second_highest(A_side, B_side, A_ban=None, B_ban=None, rates=None):
    """Ban the *second* highest-rated hero still available.

    The passive ban leaves the top-rated hero unbanned so the AI can pick
    it itself.  When the A side already holds more bans than B, fall back
    to banning the highest-rated available hero instead (auto_highest),
    as in the original draft.

    ``rates`` defaults to the module-level API_rates list, assumed to be
    ordered best-first by win rate.  Returns "NIL" when fewer than two
    heroes remain available.
    """
    A_ban = [] if A_ban is None else A_ban
    B_ban = [] if B_ban is None else B_ban
    if rates is None:
        rates = API_rates
    if len(A_ban) > len(B_ban):
        return auto_highest(A_side, B_side, A_ban, B_ban, rates)
    unavailable = set(A_side) | set(B_side) | set(A_ban) | set(B_ban)
    # The draft kept scanning past the answer and could index past the end
    # of the rate table; collecting the available names first avoids both.
    available = [hero["name"] for hero in rates
                 if hero["name"] not in unavailable]
    if len(available) < 2:
        return "NIL"
    return available[1]
def account_roles(A_side, B_side, A_ban=None, B_ban=None, rates=None):
    """Pick the best-rated available hero that fills a needed role.

    Works out which side is currently drafting (an even A pick count, or
    an odd one with B ahead, means A is up), tallies that side's
    remaining role slots (3 Carry / 1 Captain / 1 Jungler, multi-role
    heroes counted fractionally across their roles), then walks ``rates``
    in order and returns the first untaken hero with at least one role
    still open.

    ``rates`` defaults to the module-level API_rates list of hero dicts
    with "name" and "roles" keys.  Returns "NIL" when no hero fits.
    Warning (from the original draft): lane position is not modelled.
    """
    A_ban = [] if A_ban is None else A_ban
    B_ban = [] if B_ban is None else B_ban
    if rates is None:
        rates = API_rates

    def _remaining_roles(team):
        # Same tally as count_roles(); kept local so the strategy stays
        # self-contained and honours the ``rates`` override.
        slots = {"Carry": 3, "Captain": 1, "Jungler": 1}
        for name in team:
            roles = [h for h in rates if h["name"] == name][0]["roles"]
            for role in roles:
                slots[role] -= 1 / len(roles)
        return slots

    if len(A_side) % 2 == 0 or (len(A_side) % 2 == 1 and len(B_side) > len(A_side)):
        team_roles = _remaining_roles(A_side)
    else:
        team_roles = _remaining_roles(B_side)

    unavailable = set(A_side) | set(B_side) | set(A_ban) | set(B_ban)
    for hero in rates:
        if hero["name"] in unavailable:
            continue
        if any(team_roles[role] > 0 for role in hero["roles"]):
            return hero["name"]
    return "NIL"
#==============================================================================
# AUXILIARY FUNCTIONS
#==============================================================================
def count_roles(my_team, role_data):
    """Return how many slots of each role (3 Carry / 1 Captain / 1 Jungler)
    remain open on ``my_team``.  A hero with several roles counts
    fractionally (1/len(roles)) against each of them.
    """
    remaining = {"Carry": 3, "Captain": 1, "Jungler": 1}
    for member in my_team:
        record = [entry for entry in role_data if entry['name'] == member][0]
        member_roles = record["roles"]
        share = 1 / len(member_roles)
        for role in member_roles:
            remaining[role] -= share
    return remaining
def count_range(my_team, range_data):
    """Return the remaining melee/ranged slots (3 of each) after accounting
    for every hero on ``my_team``.  A "both"-range hero costs half a slot
    of each kind; any other hero costs one slot of its own kind.
    """
    remaining = {"melee": 3, "ranged": 3}
    for member in my_team:
        kind = range_data[member]
        if kind == "both":
            remaining["melee"] -= 0.5
            remaining["ranged"] -= 0.5
        else:
            remaining[kind] -= 1
    return remaining
def eliminate_banned_picked(heroes, A_side, B_side, A_ban, B_ban):
    """Remove (in place) every already-picked or banned hero from
    ``heroes`` and return the same list.  Names not present in ``heroes``
    are silently ignored."""
    for taken in A_side + B_side + A_ban + B_ban:
        if taken in heroes:
            heroes.remove(taken)
    return heroes
def eliminate_by_role(heroes, team_roles):
    """Drop (in place) from ``heroes`` every hero -- judged by the global
    API_rates role data -- none of whose roles still has an open slot in
    ``team_roles``.  Returns the same list."""
    for entry in API_rates:
        fills_open_role = any(team_roles[role] > 0 for role in entry["roles"])
        if not fills_open_role and entry["name"] in heroes:
            heroes.remove(entry["name"])
    return heroes
def eliminate_by_range(heroes, team_range, range_data):
    """Remove (in place) from ``heroes`` every hero whose attack range has
    no slots left on the team, and return the same list.

    Args:
        heroes: candidate hero names; pruned in place.
        team_range: remaining slot counts, e.g. {"melee": 0, "ranged": 2}.
        range_data: mapping hero name -> "melee" | "ranged" | "both".
            "both"-range heroes are never removed (they match neither
            single mode), matching the original behaviour.

    Bug fixes: the previous version removed elements from ``heroes`` while
    iterating it (which skips the element after each removal) and
    ``break``-ed after the first depleted mode, so a second depleted mode
    was never pruned.
    """
    depleted = {mode for mode, left in team_range.items() if left <= 0}
    if depleted:
        heroes[:] = [hero for hero in heroes if range_data[hero] not in depleted]
    return heroes
#==============================================================================
# INDEX PROCESSORS
#==============================================================================
def synergy_multiplier(index, winrate):
    """Scale ``index`` by how well a pairing performs together: a 50%%
    win rate is neutral (x1), higher boosts the index, lower shrinks it."""
    return index * (winrate / 50)
def counter_multiplier(index, winrate):
    """Scale ``index`` by how poorly the enemy fares against the
    candidate: an enemy win rate of 50%% is neutral (x1); the lower the
    enemy's win rate, the larger the multiplier."""
    return index * ((100 - winrate) / 50)
#==============================================================================
# CURRENT ALGO
#==============================================================================
def Elim_Index(A_side, B_side, A_ban = [], B_ban = []):
    """
    Pick a hero by elimination plus a synergy/counter index.

    Steps: decide which side the AI is drafting for, compute the open role
    and range slots for that team, prune the candidate pool (already
    picked/banned heroes, heroes with no open role, heroes of a depleted
    range), then rank the survivors with SC_index_calc and return the top
    name.
    """
    ai_on_a = len(A_side) % 2 == 0 or (len(A_side) % 2 == 1 and len(B_side) > len(A_side))
    if ai_on_a:
        team_side = "A"
        my_team, enemy_team = A_side, B_side
        print("AI is on A Side")
    else:
        team_side = "B"
        my_team, enemy_team = B_side, A_side
        print("AI is on B Side")
    open_roles = count_roles(my_team, API_rates)
    open_range = count_range(my_team, hero_range)
    pool = list(hero_list)
    pool = eliminate_banned_picked(pool, A_side, B_side, A_ban, B_ban)
    pool = eliminate_by_role(pool, open_roles)
    pool = eliminate_by_range(pool, open_range, hero_range)
    ranked = SC_index_calc(pool, my_team, enemy_team)
    return ranked[0]["name"]
def SC_index_calc(candidates, my_team, enemy_team):
    """Rank the first ten ``candidates`` by a combined synergy/counter index.

    For each teammate already picked, every candidate's "synergy" index is
    scaled by their pairwise win rate (synergy_multiplier); for each enemy
    picked, the "counter" index is scaled by the enemy's win rate against
    the candidate (counter_multiplier).  "overall" is the product of both.

    Bug fix: the counter loop previously iterated ``my_team`` instead of
    ``enemy_team``, so enemy picks never influenced the ranking.

    Returns:
        list of {"name", "synergy", "counter", "overall"} dicts sorted by
        "overall" descending (each nominee is also printed).
    """
    candidate_threshold = 10
    shortlist = candidates[:candidate_threshold]
    nominees = [{"name": name, "synergy": 1, "counter": 1, "overall": 1}
                for name in shortlist]
    for teammate in my_team:
        hero_data = pull_hero(teammate)["playingWith"]
        hero_data = [hero for hero in hero_data if hero['key'] in shortlist]
        for nominee in nominees:
            match_row = [hero for hero in hero_data if hero['key'] == nominee["name"]]
            winrate = match_row[0]["winRate"]
            nominee["synergy"] = synergy_multiplier(nominee["synergy"], winrate)
    # FIX: iterate the enemy team (was ``my_team``), so counters reflect the
    # heroes we will actually face.
    for enemy in enemy_team:
        hero_data = pull_hero(enemy)["playingAgainst"]
        hero_data = [hero for hero in hero_data if hero['key'] in shortlist]
        for nominee in nominees:
            match_row = [hero for hero in hero_data if hero['key'] == nominee["name"]]
            winrate = match_row[0]["winRate"]
            nominee["counter"] = counter_multiplier(nominee["counter"], winrate)
    for nominee in nominees:
        nominee["overall"] = nominee["synergy"] * nominee["counter"]
    nominees.sort(key=lambda k: k["overall"], reverse=True)
    for nominee in nominees:
        print(nominee["name"], nominee["synergy"], nominee["counter"], nominee["overall"])
    return nominees
#==============================================================================
# OLD ALGO
#==============================================================================
def synergy_counter_role(A_side, B_side, A_ban = [], B_ban = []):
    """
    Accounts for synergy of heroes, counters, as well as roles

    Older algorithm kept for reference (see "OLD ALGO" banner): decides
    the AI's side, subtracts picked heroes' role shares from the
    3-Carry/1-Captain/1-Jungler template, collects every unpicked/unbanned
    hero with an open role, and returns the top name from get_nominees.
    """
    #Decide which team
    if len(A_side)%2 == 0 or (len(A_side)%2 == 1 and len(B_side) > len(A_side)):
        team_side = "A"
        print("AI is on A Side")
        my_team = A_side
        enemy_team = B_side
    else:
        team_side = "B"
        my_team = B_side
        enemy_team = A_side
        print("AI is on B Side")
    #Role accounting
    team_roles = {"Carry" : 3, "Captain" : 1, "Jungler" : 1}
    for name in my_team:
        data = [hero for hero in API_rates if hero['name'] == name]
        roles = data[0]["roles"]
        # multi-role heroes count fractionally against each role
        for role in roles:
            team_roles[role] -= 1/len(roles)
    print(team_roles)
    candidates = []
    #Obtain eligible candidates by roles
    for hero in API_rates:
        if (hero["name"] in A_side) or (hero["name"] in B_side) or (hero["name"] in A_ban) or (hero["name"] in B_ban) :
            pass
        else:
            for role in hero["roles"]:
                if team_roles[role] > 0:
                    candidates.append(hero["name"])
                    break
                else:
                    pass
    nominees = get_nominees(candidates, my_team, enemy_team)
    return nominees[0]["name"]
def get_nominees(candidates, my_team, enemy_team):
    """Rank the first ten ``candidates`` by multiplying per-teammate
    synergy win rates and per-enemy inverse win rates (older counterpart
    of SC_index_calc; multipliers are applied inline rather than via the
    *_multiplier helpers).  Returns nominees sorted best-first."""
    nominees = []
    candidate_threshold = 10
    for candidate in candidates[:candidate_threshold]:
        nominees.append({"name": candidate, "synergy": 1, "counter":1, "overall": 1})
    #Add synergy and counter indexes
    for teammate in my_team:
        hero_data = pull_hero(teammate)
        synergy_data = hero_data["playingWith"]
        synergy_rates = [hero for hero in synergy_data if hero['key'] in candidates[:candidate_threshold]]
        for nominee in nominees:
            match_row = [hero for hero in synergy_rates if hero['key'] == nominee["name"]]
            nominee["synergy"] *= match_row[0]["winRate"]/100
    #Add synergy and counter indexes
    for enemy in enemy_team:
        hero_data = pull_hero(enemy)
        counter_data = hero_data["playingAgainst"]
        counter_rates = [hero for hero in counter_data if hero['key'] in candidates[:candidate_threshold]]
        for nominee in nominees:
            match_row = [hero for hero in counter_rates if hero['key'] == nominee["name"]]
            nominee["counter"] *= (1-match_row[0]["winRate"]/100)
    #Calculate overall index
    for nominee in nominees:
        nominee["overall"] = nominee["synergy"]*nominee["counter"]
    nominees = sorted(nominees, key=lambda k: k['overall'], reverse = True)
    for nominee in nominees:
        print(nominee["name"], nominee["overall"])
    #print(nominees)
    return nominees
"""
Possible future algorithms:
To take note: synergy, counters, tiers
Check API_rates for two heroes together, and API_rates for two heroes against each other?
or check API_rates for which
Personal preferences
Ban counters to what you intend to pick — eg if you wanna pick Fort ban Vox if he has the highest winrate vs Fort?
"""
| [
"phypoh@gmail.com"
] | phypoh@gmail.com |
8057922896ba62113bb7718b354c4fce609d57ce | 86d499787fb35024db798b0c1dbfa7a6936854e9 | /py_tools/example/ponyorm-note/models/__init__.py | fe38a70001e0fa5da787546b5d97a2cebcd440b8 | [] | no_license | Tomtao626/python-note | afd1c82b74e2d3a488b65742547f75b49a11616e | e498e1e7398ff66a757e161a8b8c32c34c38e561 | refs/heads/main | 2023-04-28T08:47:54.525440 | 2023-04-21T17:27:25 | 2023-04-21T17:27:25 | 552,830,730 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | #!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author:tom_tao626
@license: Apache Licence
@file: __init__.py.py
@time: 2020/12/11
@contact: tp320670258@gmail.com
@site: xxxx.gggg.net
@software: PyCharm
"""
# 对于关联关系,它自动帮你生成外键,以及关联表。如果你没指定主键字段,它会生成一个默认的自增主键字段。
| [
"tp320670258@gmail.com"
] | tp320670258@gmail.com |
eeb0ed0008cb292742e6a12415b22d3c0fbc1e16 | 3e9c6efc9553be4103237e92e4fd6e7c3461d804 | /di_exercise/Week5/Day4/main.py | ae3fd71d67ecabafcf91a2f9c9dcb9e78381fe72 | [] | no_license | sergedemanou/di_exercise | f8c52c61289a507f30597ff424eda3db35b2c5e0 | 892230603f62157cb291b5f7a1089105d606c6f3 | refs/heads/master | 2023-01-23T13:49:48.816992 | 2020-11-29T18:11:58 | 2020-11-29T18:11:58 | 307,177,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | n = 'the'
p= "man"
# print(f"{n} {p}")
def assemble(*args):
    """Return all positional string arguments joined by single spaces."""
    separator = " "
    return separator.join(args)
assemble(n,p)
a= assemble(n,p)
print(a)
| [
"57003109+sergedemanou@users.noreply.github.com"
] | 57003109+sergedemanou@users.noreply.github.com |
d8819574c33e0ef019312df87234140daa4dc115 | c78a53c42fe1168a0833311c42942a679ed7de3f | /utils/NYCCitiBikeParser.py | 7eb6a375a78aff2b08c359681a134a815d0a6eb5 | [] | no_license | github4ry/citibike-stats | bb00785c52b3b83aab57020355cd4dd73a113b34 | b67f7fdb279c0c762eda36d1e9c217e32123d4d6 | refs/heads/master | 2020-05-20T18:48:28.587420 | 2015-08-30T02:29:17 | 2015-08-30T02:29:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,998 | py | import pytz
import datetime
import mechanize
import cookielib
from bs4 import BeautifulSoup
###
from .NYCCitiBikeParserExceptions import NYCCitiBikeLoginError
class NYCCitiBikeParser:
###
### Constants
###
# Time Zone
nyc_tz = pytz.timezone("America/New_York")
# urls
login_url = 'https://member.citibikenyc.com/profile/login'
login_form_action = 'https://member.citibikenyc.com/profile/login_check'
login_success_url = 'https://member.citibikenyc.com/profile/'
trips_page_url = 'https://member.citibikenyc.com/profile/trips/{partner_user_id}?pageNumber={page_index}'
# classes
start_date_class = 'ed-table__item__info__sub-info_trip-start-date'
start_station_class = 'ed-table__item__info__sub-info_trip-start-station'
end_date_class = 'ed-table__item__info__sub-info_trip-end-date'
end_station_class = 'ed-table__item__info__sub-info_trip-end-station'
duration_class = 'ed-table__item__info_trip-duration'
cost_class = 'ed-table__item__info_trip-cost'
last_link_class = 'ed-paginated-navigation__pages-group__link_last'
last_trip_class = 'ed-panel__link_last-trip'
# Citibike Date Format
# 08/22/2015 8:52:47AM
date_format = "%m/%d/%Y %I:%M:%S %p"
###
### Public
###
    def get_trips(self, start_date=None):
        """
        Returns an array of the logged in users trips
        If start_date is passed, will only return trips after the start_date

        Each trip is a dict with keys: start_date / end_date (UNIX
        timestamps), start_station / end_station (str), duration
        (seconds), cost (float).  ``start_date`` must be a timestamp as
        produced by __parse_date; pages are walked newest-first and
        collection stops at the first exact match.
        """
        # Retrieve and parse the trips page
        trips_page_html = self.__get_trips_page_html()
        trips_page = BeautifulSoup(trips_page_html, 'html.parser')
        # Check the 'last' button to see how many pages of results there are
        final_page_index = int(
            trips_page.find('a', class_=self.last_link_class).attrs['href'].split('pageNumber=')[1])
        parsed_trips = []
        for page_index in range(final_page_index + 1):
            trips_page_html = self.__get_trips_page_html(page_index)
            trips_page = BeautifulSoup(trips_page_html, 'html.parser')
            trips = trips_page.find('div', class_='ed-table__items')
            for trip in trips:
                # TODO : This is strange, but while adding a test, sometimes trip was a
                # <class 'bs4.element.NavigableString'> instead of a <class 'bs4.element.Tag'>
                # trips is the correct type, so currently checking against its type every time
                if type(trip) != type(trips):
                    continue
                parsed_trip = dict()
                # Parse Start Date
                parsed_trip['start_date'] = self.__parse_date(
                    trip.find('div', class_=self.start_date_class).text.strip()
                )
                # The trips are in reverse chronological order
                # If the current start date matches the one passed in, stop collecting trips
                if start_date and start_date == parsed_trip['start_date']:
                    return parsed_trips
                # Parse End Date
                parsed_trip['end_date'] = self.__parse_date(
                    trip.find('div', class_=self.end_date_class).text.strip()
                )
                parsed_trip['start_station'] = trip.find('div', class_=self.start_station_class).text.strip()
                parsed_trip['end_station'] = trip.find('div', class_=self.end_station_class).text.strip()
                # Parse Duration
                parsed_trip['duration'] = self.__parse_duration(
                    trip.find('div', class_=self.duration_class).text.strip()
                )
                # Parse Cost
                parsed_trip['cost'] = self.__parse_cost(
                    trip.find('div', class_=self.cost_class).text.strip()
                )
                parsed_trips.append(parsed_trip)
        return parsed_trips
###
### Private
###
    def __init__(self, username, password):
        """Set up the scraping browser and log in to the NYC CitiBike
        member site; raises NYCCitiBikeLoginError on bad credentials."""
        self.__initialize_browser()
        self.__login(username, password)
    def __parse_date(self, date):
        """
        Accepts a date, and returns a UNIX timestamp (UTC)

        The string is interpreted as America/New_York local time; empty
        strings and the site's '-' placeholder map to 0.

        NOTE(review): date_format has a space before %p but the example in
        the class constants ("...8:52:47AM") does not -- confirm which form
        the site actually serves; a mismatch makes strptime raise.
        """
        if date and date != '-':
            naive = datetime.datetime.strptime(date, self.date_format)
            # is_dst=None makes ambiguous/non-existent DST times raise
            # instead of silently guessing an offset.
            local_dt = self.nyc_tz.localize(naive, is_dst=None)
            timestamp = (local_dt - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds()
            return int(timestamp)
        else:
            return 0
def __parse_duration(self, duration):
"""
Accepts a duration, an returns the duration in seconds
"""
if duration and duration != '-':
minutes, seconds = duration.split(' min ')
minutes = int(minutes)
if seconds:
seconds = int(seconds.rstrip('s'))
else:
seconds = 0
return (minutes * 60) + seconds
else:
return 0
def __parse_cost(self, cost):
"""
Accepts a cost string, and returns a float
"""
if cost:
return float(cost[1:])
else:
return float(0)
    def __get_trips_page_html(self, page_index=0):
        """
        Accepts a page index, and returns a page from a users trip history

        Uses the partner_user_id captured during login to build the URL.
        """
        return self._browser.open(self.trips_page_url.format(
            partner_user_id=self._partner_user_id,
            page_index=page_index)).read()
    def __login(self, username, password):
        """
        Attempts to log in a NYC CitiBike with using username and password
        Throws NYCCitiBikeLoginError on failure

        On success, also extracts the member's partner_user_id from the
        "last trip" link on the profile page for later trip-page requests.
        """
        self._browser.open(self.login_url)
        # pick the login form by its action URL rather than by index
        self._browser.select_form(predicate=lambda f: f.attrs.get('action', None) == self.login_form_action)
        self._browser.form['_username'] = username
        self._browser.form['_password'] = password
        self._browser.submit()
        # a failed login does not redirect to the profile page
        if self._browser.geturl() != self.login_success_url:
            raise NYCCitiBikeLoginError("Login unsuccessful")
        # parse partner_user_id
        profile_page_html = self._browser.response().read()
        profile_page = BeautifulSoup(profile_page_html, 'html.parser')
        self._partner_user_id = \
            profile_page.find('a', class_=self.last_trip_class).attrs['href'].split('/profile/trips/')[1].split('?')[0]
    def __initialize_browser(self):
        """
        Prepares the internal mechanize browser option for scraping

        Configures cookies, redirect/refresh handling and a browser-like
        User-agent so the member site treats us as a normal client.
        """
        browser = mechanize.Browser()
        cookie_jar = cookielib.LWPCookieJar()
        browser.set_cookiejar(cookie_jar)
        browser.set_handle_equiv(True)
        browser.set_handle_redirect(True)
        browser.set_handle_referer(True)
        # the member pages disallow robots; ignore robots.txt
        browser.set_handle_robots(False)
        browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
        browser.addheaders = [('User-agent', 'Chrome')]
self._browser = browser | [
"admtal@gmail.com"
] | admtal@gmail.com |
46046df20b6051e55e61120498642b3a02c738e9 | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/-745935208/PyQt5/QtLocation/QPlaceSearchSuggestionReply.py | a7fd9df4cbf12d58e513742da7326324ba55a59a | [] | no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | # encoding: utf-8
# module PyQt5.QtLocation
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\PyQt5\QtLocation.pyd
# by generator 1.146
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import sip as __sip
from .QPlaceReply import QPlaceReply
class QPlaceSearchSuggestionReply(QPlaceReply):
    """ QPlaceSearchSuggestionReply(parent: QObject = None) """
    # Auto-generated API stub (see the generator header above): method
    # bodies are placeholders; only the signatures and the docstrings
    # restored from __doc__ carry meaning.
    def childEvent(self, *args, **kwargs): # real signature unknown
        pass
    def connectNotify(self, *args, **kwargs): # real signature unknown
        pass
    def customEvent(self, *args, **kwargs): # real signature unknown
        pass
    def disconnectNotify(self, *args, **kwargs): # real signature unknown
        pass
    def isSignalConnected(self, *args, **kwargs): # real signature unknown
        pass
    def receivers(self, *args, **kwargs): # real signature unknown
        pass
    def sender(self, *args, **kwargs): # real signature unknown
        pass
    def senderSignalIndex(self, *args, **kwargs): # real signature unknown
        pass
    def setError(self, *args, **kwargs): # real signature unknown
        pass
    def setFinished(self, *args, **kwargs): # real signature unknown
        pass
    def setSuggestions(self, Iterable, p_str=None): # real signature unknown; restored from __doc__
        """ setSuggestions(self, Iterable[str]) """
        pass
    def suggestions(self): # real signature unknown; restored from __doc__
        """ suggestions(self) -> List[str] """
        return []
    def timerEvent(self, *args, **kwargs): # real signature unknown
        pass
    def type(self): # real signature unknown; restored from __doc__
        """ type(self) -> QPlaceReply.Type """
        pass
    def __init__(self, parent=None): # real signature unknown; restored from __doc__
        pass
| [
"siddharthnatamai@gmail.com"
] | siddharthnatamai@gmail.com |
688428a912948c978a6a7c244775d14661a4e372 | abab7fae9325546abb5ca96b37caf44c0447d212 | /pop.py | eb26f79509cd67777a401431b2b52a3a36951104 | [] | no_license | jungyeji/SmartBilliard | 476b04616d3507592dd300137eafae89aa45dab7 | 3f6474d68776712b6c1fac90cb5af2badce820bb | refs/heads/master | 2022-04-15T23:40:52.472538 | 2020-04-15T12:31:10 | 2020-04-15T12:31:10 | 255,907,593 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel
import sys
def main():
    """Create a 300x300 main window containing one label, show it, and
    run the Qt event loop until the window closes (exits the process)."""
    app = QApplication(sys.argv)
    win = QMainWindow()
    win.setGeometry(200,200,300,300)
    win.setWindowTitle("My first window!")
    label = QLabel(win)
    label.setText("my first label")
    label.move(50, 50)
    win.show()
    # app.exec_() blocks until the window is closed; its status code is
    # forwarded to sys.exit.
    sys.exit(app.exec_())
main() # make sure to call the function
class Ui_MainWindow(object):
    ...
    def show_popup(self):
        """Build a QMessageBox with Cancel/Retry/Ignore buttons and wire
        its buttonClicked signal to popup_button.

        NOTE(review): the box is configured but never displayed -- an
        ``msg.exec_()`` call appears to be missing; confirm intent.
        """
        msg = QMessageBox()
        msg.setWindowTitle("Tutorial on PyQt5")
        msg.setText("This is the main text!")
        msg.setIcon(QMessageBox.Question)
        msg.setStandardButtons(QMessageBox.Cancel|QMessageBox.Retry|QMessageBox.Ignore)
        msg.setDefaultButton(QMessageBox.Retry)
        msg.setInformativeText("informative text, ya!")
        msg.setDetailedText("details")
        msg.buttonClicked.connect(self.popup_button)
    # Slot: receives the clicked QAbstractButton and logs its label.
    def popup_button(self, i):
print(i.text()) | [
"jyj661897@gmail.com"
] | jyj661897@gmail.com |
47bf33f0d3773066f9e623491531b3c163a378d5 | 0cba968f1b6b089099e0104d3e7091e2b290ba27 | /quiz/quiz_brain.py | dd228e2cc30dbbe6d8f516152922add6ed579a44 | [] | no_license | aegglin/python-challenges | 4573f2190f6022751b00d2687fffc628f254fddc | 378ea8e882a05e951590eb076e98f66e7648b553 | refs/heads/main | 2023-04-23T15:03:59.506086 | 2021-05-15T02:31:15 | 2021-05-15T02:31:15 | 361,576,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | class QuizBrain:
    def __init__(self, questions_list):
        """Start a quiz over ``questions_list`` (objects exposing ``text``
        and ``answer``) with the score and question counter at zero."""
        self.question_number = 0
        self.questions_list = questions_list
        self.score = 0
    def next_question(self):
        """Prompt the user with the next question on stdin and check the
        typed answer."""
        curr_q = self.questions_list[self.question_number]
        # advance before prompting so questions are numbered from 1
        self.question_number += 1
        user_response = input(f'{self.question_number}: {curr_q.text} (True/False)? ')
        self.check_answer(user_response, curr_q.answer)
    def still_has_questions(self):
        """Return True while unasked questions remain."""
        return self.question_number < len(self.questions_list)
    def check_answer(self, user_response, answer):
        """Compare the user's response to ``answer`` case-insensitively,
        update the score, and print the running result."""
        if user_response.lower().strip() == answer.lower():
            print("You got it right!")
            self.score += 1
        else:
            print("You got it wrong.")
            print(f'The correct answer was: {answer}.')
        print(f'Your score is: {self.score}/{self.question_number}.')
        print('\n')
"noreply@github.com"
] | noreply@github.com |
5a0f5992806f51d54c979a39bcd16aeed59b88b3 | 5fbc600720389ee2ef507bdc1f6db351e21fa39b | /manage.py | 0f2ab16e9445b684598e5fe12cdd9079b8d9c54e | [] | no_license | thepaygap/TheExpatWomanHackathonOct18 | 9a373bba6f82a5fb692279172b620015086ad78c | 64c837bee27e3362224433b6921576244f157dc5 | refs/heads/master | 2020-04-26T09:09:39.862161 | 2018-10-21T01:37:47 | 2018-10-21T01:37:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django management entry point: point Django at this
    # project's settings module, then hand the CLI arguments to
    # django.core.management (runserver, migrate, ...).
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zerobiasapp.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint about the usual causes.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"gtrotter@bulogics.com"
] | gtrotter@bulogics.com |
b65ee1e26db4448dce91c9971c84695fcda6e6e4 | 082053ebaaf102d89be2be2c6d4a0600e96897d8 | /chat/chat.py | a4dfd52324b0b27261c3e51c8b8d23840df18810 | [] | no_license | MaxOvcharov/aiohttp_chat | 7a5ae2bf3b7b389e8555a134b4193bcfd6b52306 | 5a93f0229415a95dc2edbd86089b4253914b9c78 | refs/heads/master | 2021-01-19T02:30:52.940731 | 2017-08-14T19:51:56 | 2017-08-14T19:51:56 | 87,286,281 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,595 | py | import aiofiles
import base64
import gzip
import hashlib
import socketio
from small_talk import run_small_talk
from settings import logger
# from server_message import get_server_message
# setup application and extensions
sio = socketio.AsyncServer(async_mode='aiohttp',
logger=True,
engineio_logger=True,
allow_upgrades=True)
def call_back_from_client(*args, **kwargs):
    """
    Log every parameter received in a client callback.

    :param args: positional arguments, logged one per line
    :param kwargs: named arguments, logged as "key:value" pairs
    :return: None

    Bug fix: iterating ``kwargs`` directly yields only the keys, so the
    ``key, value`` unpacking raised ValueError on any real payload;
    ``kwargs.items()`` yields the expected (key, value) pairs.
    """
    for arg in args:
        logger.debug('My EVENT(FILE CALLBACK - args) %s' % arg)
    for key, value in kwargs.items():
        logger.debug('My EVENT(FILE CALLBACK - kwargs) %s:%s' % (key, value))
@sio.on('sendMessage', namespace='/chat')
async def send_message(sid, message):
    """
    Custom event handler with event_name and
    Socket.IO namespace for the event. This handler works like echo-server.
    :param sid: Session ID of the client
    :param message: message payload
    :return: None
    """
    # Added transport mode checker
    transport_mode = sio.transport(sid)
    logger.debug('MESSAGE TRANSPORT MODE (%s): %s' % (sid, transport_mode))
    # NOTE(review): this log line indexes message['data'] before the
    # isinstance/dict checks below and outside the try, so a malformed
    # message crashes here rather than being handled -- confirm intent.
    logger.debug('EVENT("sendMessage"): %s' % message['data'])
    try:
        if isinstance(message, dict):
            if message.get('data') is not None:
                # run the payload through the small-talk service and echo
                # the answer back to the same client only (room=sid)
                api_ai_message = await run_small_talk(message['data']) # TODO change to the json server_message
                # api_ai_message = await get_server_message(sio.pg, message)
                await sio.emit('sendMessageResponse',
                               {'data': api_ai_message},
                               room=sid, namespace='/chat')
                logger.debug('EVENT("sendMessageResponse"): %s' % api_ai_message)
            else:
                raise ValueError('Message should have key("data")')
        else:
            raise TypeError('Message should be dict: {"data": "some text"}')
    except ValueError as e:
        logger.error('Handle ERROR: %s' % e)
    except TypeError as e1:
        logger.error('Handle ERROR: %s' % e1)
@sio.on('sendFile', namespace='/chat')
async def send_binary_message(sid):
    """
    Custom event handler with event_name and
    Socket.IO namespace for the event. This handler send
    image file in base64 gzip.
    :param sid: Session ID of the client
    :return: emit file base64 gzip

    Emits a 'file response' event carrying the gzipped, base64-encoded
    test image plus an MD5 checksum of the encoded payload so the client
    can verify integrity.
    """
    content_b64 = ''
    hash_sum = ''
    try:
        async with aiofiles.open('static/test.png', mode='rb') as image_file:
            content = await image_file.read()
            gzip_file = gzip.compress(content)
            content_b64 = base64.b64encode(gzip_file)
            hash_sum = hashlib.md5(content_b64).hexdigest()
    except OSError as e:
        # NOTE(review): on read failure content_b64 stays a str, so
        # .decode('utf-8') below raises AttributeError -- confirm the
        # intended fallback behaviour.
        logger.error('Handle ERROR: %s' % e)
    await sio.emit('file response',
                   {'data': content_b64.decode('utf-8'), 'hash_sum': hash_sum},
                   room=sid,
                   namespace='/chat',
                   callback=call_back_from_client)
    logger.debug('My EVENT(FILE) (%s): %s' % (sid, content_b64[:20]))
    # free the (potentially large) encoded payload promptly
    del content_b64
@sio.on('message received', namespace='/chat')
async def receive_callback_message(sid, message):
    """Ack handler for client delivery callbacks; always returns True."""
    logger.debug('My EVENT(CALL BACK) (%s): %s' % (sid, message))
    return True
@sio.on('my broadcast event', namespace='/chat')
async def broadcast_message(sid, message):
    """Re-emit the payload to every client in the /chat namespace."""
    await sio.emit('my response', {'data': message['data']}, namespace='/chat')
    logger.debug('BROADCAST MESSAGE(%s): %s' % (sid, message))
@sio.on('join', namespace='/chat')
async def join_room(sid, message):
    """Add the client to message['room'] and confirm to that client only."""
    sio.enter_room(sid, message['room'], namespace='/chat')
    await sio.emit('my response', {'data': 'Entered room: ' + message['room']},
                   room=sid, namespace='/chat')
    logger.debug('JOIN ROOM (%s): %s' % (sid, message))
@sio.on('leave', namespace='/chat')
async def leave_room(sid, message):
    """Remove the client from message['room'] and confirm to that client."""
    sio.leave_room(sid, message['room'], namespace='/chat')
    await sio.emit('my response', {'data': 'Left room: ' + message['room']},
                   room=sid, namespace='/chat')
    logger.debug('LEAVE ROOM (%s): %s' % (sid, message))
@sio.on('close room', namespace='/chat')
async def close(sid, message):
    """Notify everyone in message['room'] that it is closing, then close it."""
    await sio.emit('my response', {'data': 'Room %s is closing' % message['room']},
                   room=message['room'], namespace='/chat')
    await sio.close_room(message['room'], namespace='/chat')
    logger.debug('CLOSE ROOM (%s): %s' % (sid, message))
@sio.on('my room event', namespace='/chat')
async def send_room_message(sid, message):
    """Relay the payload to every client in message['room']."""
    await sio.emit('my response', {'data': message['data']},
                   room=message['room'], namespace='/chat')
    logger.debug('ROOM EVENT (%s): %s' % (sid, message))
@sio.on('disconnect request', namespace='/chat')
async def disconnect_request(sid):
    """Disconnect the requesting client from the /chat namespace."""
    await sio.disconnect(sid, namespace='/chat')
    logger.debug('DISCONNECT REQUEST: %s' % sid)
@sio.on('connect', namespace='/chat')
async def test_connect(sid, environ):
    """Greet a newly connected client and log its transport mode."""
    # Added transport mode checker
    transport_mode = sio.transport(sid)
    logger.debug('CONNECT TRANSPORT MODE (%s): %s' % (sid, transport_mode))
    await sio.emit('my response', {'data': 'Connected', 'count': 0},
                   room=sid, namespace='/chat')
    logger.debug('CONNECT USER: %s, ENVIRON: %s' % (sid, environ))
@sio.on('disconnect', namespace='/chat')
def test_disconnect(sid):
    """Log client disconnects (no cleanup needed)."""
    logger.debug('DISCONNECT USER: %s' % sid)
| [
"ovcharovmax@yandex.ru"
] | ovcharovmax@yandex.ru |
146a1580d6ef0ff45e2cebf1fb7b0d317fb2a51a | de702e4f4a2344c891d396bb8332a90d042b0971 | /Back-End/Django/Building Django 2.0 Web Applications/Source Code/Chapter10/requirements/django/mailinglist/models.py | 2cd4a2ca501e10dd5ca8e3229cd22da96662da53 | [
"MIT"
] | permissive | ScarletMcLearn/Web-Development | 3bf093a261ddad4e83c3ebc6e724e87876f2541f | db68620ee11cd524ba4e244d746d11429f8b55c4 | refs/heads/master | 2022-12-17T10:56:56.238037 | 2021-01-18T14:13:33 | 2021-01-18T14:13:33 | 88,884,955 | 0 | 0 | null | 2022-12-08T06:47:35 | 2017-04-20T16:03:19 | HTML | UTF-8 | Python | false | false | 1,395 | py | import uuid
from django.conf import settings
from django.db import models
from django.urls import reverse
class MailingList(models.Model):
    """A user-owned mailing list; subscribers and messages hang off it."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=140)
    owner = models.ForeignKey(to=settings.AUTH_USER_MODEL,
                              on_delete=models.CASCADE)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        """URL of this list's management page."""
        return reverse(
            'mailinglist:manage_mailinglist',
            kwargs={'pk': self.id}
        )
    def user_can_use_mailing_list(self, user):
        """Only the owner may operate on the list."""
        return user == self.owner
class Subscriber(models.Model):
    """An email address subscribed (pending confirmation) to one list."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    email = models.EmailField()
    confirmed = models.BooleanField(default=False)
    mailing_list = models.ForeignKey(to=MailingList, on_delete=models.CASCADE)
    class Meta:
        # an address may subscribe to a given list at most once
        unique_together = ['email', 'mailing_list', ]
class Message(models.Model):
    """A message sent to a list; started/finished track the send run."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    mailing_list = models.ForeignKey(to=MailingList, on_delete=models.CASCADE)
    subject = models.CharField(max_length=140)
    body = models.TextField()
    started = models.DateTimeField(default=None, null=True)
    finished = models.DateTimeField(default=None, null=True)
| [
"noreply@github.com"
] | noreply@github.com |
931a34a44a8f10b9c5a55ade334c3d37134756c6 | f5a3bda601a11827c489a393466f749ba01ef4f4 | /tensorflow/word2vec.py | 72a41eeb14f57944694acd140db6d1a8b1b20863 | [] | no_license | iefnxtdm/Algorithm | 2507a04b10a1035278a03762207f84007ad38aa4 | 600d5c54882587a3e6cfdc7828d94e30bdc7a646 | refs/heads/master | 2023-04-04T03:50:54.313795 | 2023-03-22T00:49:29 | 2023-03-22T00:49:29 | 243,040,108 | 0 | 0 | null | 2020-02-25T15:54:06 | 2020-02-25T15:54:05 | null | UTF-8 | Python | false | false | 11,272 | py | '''
dataset: http://mattmahoney.net/dc/textdata from wikipedia
'''
# stdlib
import collections
import math
import os
import pickle as pkl
import zipfile

import connections  # FIXME: almost certainly a typo for 'collections' (used below); kept per policy

# third-party
import numpy as np
import tensorflow as tf
# constant
FILE_NAME = "D:/kaggle/rnn/enwiki8.zip"
BATCH_SIZE = 128
EMBEDDING_SIZE = 128
NUM_SKIPS = 2
SKIP_WINDOW = 1 # how many words considers left and right
VOCA_SIZE = 50000
CONTEXT_SIZE = 1
class word2vec():
def __init__(self,
vocab_list=None,
embedding_size=EMBEDDING_SIZE,
win_len=3, # 单边窗口长度
num_sampled=1000, # 为减少softmax运算量,只取部分做估值loss
learning_rate=0.1,
logdir="D:/kaggle/",
model_path=None):
self.batch_size = BATCH_SIZE
if model_path != None:
self.load_model(model_path)
else:
self.vocab_list = vocab_list
self.vocab_size = len(vocab_list)
self.embedding_size = embedding_size
self.win_len = win_len
self.num_sampled = num_sampled
self.learning_rate = learning_rate
self.logdir = logdir
self.word2id = {} # word与数字映射
for i in range(vocab_list):
self.word2id[self.vocab_list[i]] = i
self.train_words_num = 0 # 单词对数
self.train_sents_num = 0 # 句子数
self.train_times_num = 0
self.train_loss_records = collections.deque(maxlen=10) # 最近10次误差
self.train_loss_k10 = 0
    def init_op(self):
        """Create the session for ``self.graph``, run variable
        initialisation, and open a TensorBoard summary writer."""
        self.sess = tf.Session(graph=self.graph)
        self.sess.run(self.init)
        self.summary_writer = tf.summary.FileWriter(self.logdir, self.sess.graph)  # for TensorBoard
def build_graph(self):
self.graph = tf.Graph()
with self.graph.as_default():
self.train_inputs = tf.placeholder(tf.int32, shape=[self.batch_size])
self.train_labels = tf.placeholder(tf.int32, shape == [self.batch_size, 1])
self.embedding_dict = tf.Variable( # 词嵌入矩阵, 储存词向量特征,如age, size, food等
tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0) # 均匀分布随机
)
self.nce_weight = tf.Variable(tf.truncated_normal([self.vocab_size, self.embedding_size],
stddev=1.0 / math.sqrt(self.embedding_size)))
self.nce_bias = tf.Variable(tf.zeros(self.vocab_size))
# embedding_lookup函数的用法主要是选取一个张量里面索引对应的元素
embed = tf.nn.embedding_lookup(self.embedding_dict, self.train_inputs)
self.loss = tf.reduce_mean(
tf.nn.nce_loss(weights=self.nce_weight,
biases=self.nce_bias,
labels=self.train_labels,
inputs=embed,
num_sampled=self.num_sampled,
num_classes=self.vocab_size
)
)
tf.summary.scalar('loss', self.loss)
self.train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(self.loss)
# 指定和若干单词相似度
self.test_word_id = tf.placeholder(tf.int32, shape=[None])
vec_l2_model = tf.sqrt(tf.reduce_sum(tf.square(self.embedding_dict), 1, keep_dims=True))
avg_l2_model = tf.reduce_mean(vec_l2_model)
tf.summary.scalar('avg_vec_model', avg_l2_model)
self.normed_embedding = self.embedding_dict / vec_l2_model # 向量单位化
test_embed = tf.nn.embedding_lookup(self.normed_embedding, self.test_word_id)
self.similarity = tf.matmul(test_embed, self.normed_embedding, transpose_b=True)
# 变量初始化
self.init = tf.global_variables_initializer()
self.merged_summary_op = tf.summary.merge_all()
self.saver = tf.train.Saver()
def train_by_sentence(self, input_sentence=[]):
# input_sentence: ["这次","大选",'rang']
sent_num = len(input_sentence)
batch_inputs = []
batch_labels = []
for sent in input_sentence:
for i in range(len(sent)):
start = max(0, i - self.win_len)
end = max(len(sent), i + self.win_len + 1) # 训练窗口
for index in range(start, end):
if index == i:
continue
else:
input_id = self.word2id.get(sent[i])
label_id = self.word2id.get(sent[index])
if not (input_id and label_id):
continue
batch_inputs.append(input_id)
batch_labels.append(label_id)
if len(batch_inputs) == 0:
return
batch_labels = np.array(batch_labels, dtype=np.int32)
batch_inputs = np.array(batch_inputs, dtype=dp.int32)
batch_labels = np.reshape(batch_labels, [len(batch_labels), 1])
feed_dict = {
self.train_inputs: batch_inputs,
self.train_labels: batch_labels
}
_, loss_val, summary_str = self.sess.run([self.train_op, self.loss, self.merged_summary_op],
feed_dict=feed_dict)
# train loss
self.train_loss_records.append(loss_val)
# self.train_loss_k10 = sum(self.train_loss_records)/self.train_loss_records.__len__()
self.train_loss_k10 = np.mean(self.train_loss_records)
if self.train_sents_num % 1000 == 0:
self.summary_writer.add_summary(summary_str, self.train_sents_num)
print("{a} sentences dealed, loss: {b}"
.format(a=self.train_sents_num, b=self.train_loss_k10))
# train times
self.train_words_num += batch_inputs.__len__()
self.train_sents_num += input_sentence.__len__()
self.train_times_num += 1
def cal_similarity(self, test_word_id_list, top_k=10):
sim_matrix = self.sess.run(self.similarity, feed_dict={self.test_word_id: test_word_id_list})
sim_mean = np.mean(sim_matrix)
sim_var = np.mean(np.square(sim_matrix - sim_mean))
test_words = []
near_words = []
for i in range(test_word_id_list.__len__()):
test_words.append(self.vocab_list[test_word_id_list[i]])
nearst_id = (-sim_matrix[i, :]).argsort()[1:top_k + 1]
nearst_word = [self.vocab_list[x] for x in nearst_id]
near_words.append(nearst_word)
return test_words, near_words, sim_mean, sim_var
def save_model(self, save_path):
if os.path.isfile(save_path):
raise RuntimeError('the save path should be a dir')
if not os.path.exists(save_path):
os.mkdir(save_path)
# 记录模型各参数
model = {}
var_names = ['vocab_size', # int model parameters
'vocab_list', # list
'learning_rate', # int
'word2id', # dict
'embedding_size', # int
'logdir', # str
'win_len', # int
'num_sampled', # int
'train_words_num', # int train info
'train_sents_num', # int
'train_times_num', # int
'train_loss_records', # int train loss
'train_loss_k10', # int
]
for var in var_names:
model[var] = eval('self.' + var)
param_path = os.path.join(save_path, 'params.pkl')
if os.path.exists(param_path):
os.remove(param_path)
with open(param_path, 'wb') as f:
pkl.dump(model, f)
# 记录tf模型
tf_path = os.path.join(save_path, 'tf_vars')
if os.path.exists(tf_path):
os.remove(tf_path)
self.saver.save(self.sess, tf_path)
def load_model(self, model_path):
if not os.path.exists(model_path):
raise RuntimeError('file not exists')
param_path = os.path.join(model_path, 'params.pkl')
with open(param_path, 'rb') as f:
model = pkl.load(f)
self.vocab_list = model['vocab_list']
self.vocab_size = model['vocab_size']
self.logdir = model['logdir']
self.word2id = model['word2id']
self.embedding_size = model['embedding_size']
self.learning_rate = model['learning_rate']
self.win_len = model['win_len']
self.num_sampled = model['num_sampled']
self.train_words_num = model['train_words_num']
self.train_sents_num = model['train_sents_num']
self.train_times_num = model['train_times_num']
self.train_loss_records = model['train_loss_records']
self.train_loss_k10 = model['train_loss_k10']
if __name__ == '__main__':
# step 1 读取停用词
stop_words = []
with open('stop_words.txt') as f:
line = f.readline()
while line:
stop_words.append(line[:-1])
line = f.readline()
stop_words = set(stop_words)
print('停用词读取完毕,共{n}个单词'.format(n=len(stop_words)))
# step2 读取文本,预处理,分词,得到词典
raw_word_list = []
sentence_list = []
with open('280.txt', encoding='gbk') as f:
line = f.readline()
while line:
while '\n' in line:
line = line.replace('\n', '')
while ' ' in line:
line = line.replace(' ', '')
if len(line) > 0: # 如果句子非空
raw_words = list(jieba.cut(line, cut_all=False))
dealed_words = []
for word in raw_words:
if word not in stop_words and word not in ['qingkan520', 'www', 'com', 'http']:
raw_word_list.append(word)
dealed_words.append(word)
sentence_list.append(dealed_words)
line = f.readline()
word_count = collections.Counter(raw_word_list)
print('文本中总共有{n1}个单词,不重复单词数{n2},选取前30000个单词进入词典'
.format(n1=len(raw_word_list), n2=len(word_count)))
word_count = word_count.most_common(30000)
word_list = [x[0] for x in word_count]
# 创建模型,训练
w2v = word2vec(vocab_list=word_list, # 词典集
embedding_size=200,
win_len=2,
learning_rate=1,
num_sampled=100, # 负采样个数
logdir='/tmp/280') # tensorboard记录地址
test_word = ['www', 'banana', 'juice', 'apple', 'king', 'queen']
test_id = [word_list.index(x) for x in test_word]
num_steps = 100000
for i in range(num_steps):
sent = sentence_list[i % len(sentence_list)]
w2v.train_by_sentence([sent])
| [
"sunstrikes@outlook.com"
] | sunstrikes@outlook.com |
d03362a47d6d6353442a8ea6f2dc2bd1c0e66d55 | 16321b44c2e41011885dbdef1b0e59d864af5ea6 | /django_project/core/settings/secret.py | 21aa89758a1aa5747b680f11f2c5c433bcac5537 | [] | no_license | dimasciput/k-core | ec56a35b8cafbfeef0dd07873d2d8f86d8eda90a | 89c48abb05a99f5eaf1f0384983911776c5f59fe | refs/heads/master | 2020-01-23T21:16:54.726880 | 2016-11-24T06:50:10 | 2016-11-24T06:50:10 | 74,568,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | SECRET_KEY = u'p$))kf5wvh5@6a%sr1pgy2ef+^pm%w2=8nu%@7j$21irf#$))r'
# From https://disqus.com/api/applications/4529806/
COMMENTS_DISQUS_API_PUBLIC_KEY = u'sWCDf4qw6mZ5tYkM8CU7A5kqlxM74Ajaw5gilX64nPprp2q6yHJSUn5oUcrbMKCK'
COMMENTS_DISQUS_API_SECRET_KEY = u'io50zkLU88M0PLscytLHtjDv4lwv0YjmRGQgNkumtdcC39jzTDQy8W8kj3EybLqf'
COMMENTS_DISQUS_SHORTNAME = u'kartoza'
SENTRY_DSN='http://ca7dc786b6a5416089627f9c291e074f:d6d3976d57224ad5b301db69f5bd3ba4@sentry.kartoza.com/21'
| [
"dimas.ciputra@gmail.com"
] | dimas.ciputra@gmail.com |
dc10c7493c13150a07c5ac0c4635bf390286908a | e3cf0d111d70452c0b638305602ff86d20e1a5e3 | /main/config.py | cffd399fa8aba8d0d235714f36bd4b460cd9d435 | [] | no_license | arvind-pythonian/SpeechtoTextApp | 76466984f33da1b4f0604030fba46fe91965f79d | db38a6b9eb8d6edb77b8c4cf29932b228f144162 | refs/heads/main | 2023-02-20T12:13:10.834894 | 2021-01-24T17:25:02 | 2021-01-24T17:25:02 | 332,506,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | import os
class Config:
SECRET_KEY = os.getenv('SECRET_KEY', '6575fae36288be6d1bad40b99808e37f')
DEBUG = False
class DevelopmentConfig(Config):
DEBUG = True
AUDIO_FILE_PATH = "../static/audio.wav"
DB_URI = 'mongodb://localhost:27017/FirstSource'
alias = "FirstSource"
config_by_name = dict(
dev=DevelopmentConfig
)
key = Config.SECRET_KEY
| [
"noreply@github.com"
] | noreply@github.com |
3cb40052d5b48db7bd7a224e0951dd39fbf7a57f | e60551fa813ba03251c56d2cb32e05ccaa94e41a | /serviceapp/migrations/0003_auto_20191206_1909.py | cdf6b502ae1cc05b2f4ab172313100aab8a8008a | [] | no_license | OsotskiyD/Project | db9377b34fc3a82773d05087eae9cf5b6fb652b0 | 649c5462bb39036e7c388bf3a6bb3702b0977127 | refs/heads/master | 2023-04-30T23:12:54.421809 | 2020-02-25T18:19:54 | 2020-02-25T18:19:54 | 227,897,882 | 0 | 0 | null | 2023-04-21T20:42:21 | 2019-12-13T18:18:14 | PowerShell | UTF-8 | Python | false | false | 1,212 | py | # Generated by Django 2.2.7 on 2019-12-06 17:09
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('serviceapp', '0002_qrcode_image'),
]
operations = [
migrations.AddField(
model_name='qrcode',
name='unique_id',
field=models.UUIDField(default=uuid.uuid4, editable=False, unique=True),
),
migrations.AddField(
model_name='qrcode',
name='zone',
field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.CASCADE, to='serviceapp.QRZone'),
preserve_default=False,
),
migrations.AlterField(
model_name='feedback',
name='contact',
field=models.CharField(blank=True, max_length=48),
),
migrations.AlterField(
model_name='feedback',
name='name',
field=models.CharField(blank=True, max_length=128),
),
migrations.AlterField(
model_name='qrcode',
name='image',
field=models.ImageField(upload_to=''),
),
]
| [
"osdim345@gmail.com"
] | osdim345@gmail.com |
7b266728414365e08539d698f1fec57f669a141d | 8ebfd417afb3f00cb15e93f9da18a2894a606e35 | /venv/bin/pip3.5 | f5b99ed53805e12a0079a5f70d18146040f8222f | [] | no_license | LovePug-XC/vcstest | ebcbd6dc49317efa07a42cadc62db86c36df7f6e | d9dbba538089a2efae84205bc5024e372454d3e4 | refs/heads/master | 2023-03-12T19:25:52.037494 | 2021-02-24T11:50:32 | 2021-02-24T11:50:32 | 341,768,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | 5 | #!/home/ubuntu/PycharmProjects/vcstest/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.5'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.5')()
)
| [
"xc5712365@hotmail.com"
] | xc5712365@hotmail.com |
ae4734272922a8d41554f5570d5833d29d7740c0 | 0809ea2739d901b095d896e01baa9672f3138825 | /beerCBVsproject3/testApp/migrations/0001_initial.py | 72344f678fe4183641576195edd65c14aa3c7c7d | [] | no_license | Gagangithub1988/djangoprojects | dd001f2184e78be2fb269dbfdc8e3be1dd71ce43 | ea236f0e4172fbf0f71a99aed05ed7c7b38018e2 | refs/heads/master | 2022-11-15T23:46:46.134247 | 2020-07-15T06:37:51 | 2020-07-15T06:37:51 | 273,479,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | # Generated by Django 3.0.5 on 2020-04-24 18:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Beer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('taste', models.CharField(max_length=100)),
('color', models.CharField(max_length=100)),
('price', models.IntegerField()),
],
),
]
| [
"djangopython1988@gmail.com"
] | djangopython1988@gmail.com |
96550c1517a3e04a198172ba3e279731c9a7336f | 644e757c9f552e90de39117b64a761a4b1606def | /rowcolremove.py | d201f4c6bfc3502b7fe21791170aac64f16eeed8 | [
"MIT"
] | permissive | dnaneet/numcode | f25462ec19d937da0a9660790530290f31c3664c | 7ec9345f65367a2690f4b9815d476e241edc2d52 | refs/heads/master | 2021-07-08T08:22:39.548533 | 2021-04-30T19:45:33 | 2021-04-30T19:45:33 | 80,352,271 | 0 | 0 | null | 2018-11-14T15:59:32 | 2017-01-29T15:01:24 | Mathematica | UTF-8 | Python | false | false | 1,635 | py | #Code to remove Multiple rows and columns from a numpy array
import numpy as np
from scipy.linalg import eigh
import math
from matplotlib import pyplot as plt
import time
import os
os.system('clear') #Clear screen on linux. For Win, use os.system('cls')
## FUNCTION DECLARATION ##
#SpringElementStiffness: takes linear spring stiffness value as argument
def SpringElementStiffness(k):
print 'stiffness matrix:\n', np.array([[k,-k],[-k,k]])
return np.array([[k,-k],[-k,k]])
#Spring Assemble: takes K_global, k_local, nodes of connectivity (m,n) as arguments
def SpringAssemble(K_global,k_local,m,n):
K_global[m,m]+=k_local[0,0]
K_global[m,n]+=k_local[0,1]
K_global[n,m]+=k_local[1,0]
K_global[n,n]+=k_local[1,1]
return K_global
## INITIALIZATION ##
nNodes=3 #Number of nodes in structure
restrained_dofs = [0,2] #Fixed boundary condition
force=np.array([0,0,15000])
k_1=SpringElementStiffness(100000) #Creates local spring element 2x2 stifness matrix
k_2=SpringElementStiffness(200000) #Creates local spring element 2x2 stifness matrix
print('\n****************************\n')
K_global = np.zeros((nNodes,nNodes))
rows=K_global.shape[0]
cols=K_global.shape[1]
print 'Init K_global:\n', K_global
print('\n****************************\n')
#Calling spring assemble
SpringAssemble(K_global,k_1,0,1)
print 'K_global:\n', K_global
print('\n****************************\n')
SpringAssemble(K_global,k_2,1,2)
print 'K_global:\n', K_global
print('\n****************************\n')
print 'K_global latest:\n', np.delete(np.delete(K_global,0,0),0,1)
print('\n****************************\n')
| [
"noreply@github.com"
] | noreply@github.com |
b073ca66bee01aa9bba4709f2992bb837691dcb3 | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/1059. All Paths from Source Lead to Destination.py | de52e533f8f61637d3245529f60d19e4f36de64a | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | class Solution:
def leadsToDestination(self, n: int, edges: List[List[int]], source: int, destination: int) -> bool:
graph = {}
for a, b in edges:
graph.setdefault(a, [])
graph[a].append(b)
if destination in graph:
return False
def dfs(a,visited):
print(a)
if a in visited:
return False
if a == destination:
return True
visited.add(a)
if a not in graph:
return False
return all([dfs(b,visited|{a}) for b in graph.get(a, [])])
return dfs(source,set())
| [
"19241008o"
] | 19241008o |
90abff95e3e909577912bafd39f808f9f8820ae7 | 17697a5d83991139de35524dc5369a67ebc48335 | /left_view.py | 4d50efb7fc33ccd8e1d3816a643b0c3c1db009f5 | [] | no_license | troj4n/Trees | 3f4c1f0b62fd79225fdf596f01adfd8695a73954 | dbd5fbefb4a4b799b042fd8cdc0f613e86367397 | refs/heads/master | 2020-06-04T05:31:40.173514 | 2019-06-21T09:55:35 | 2019-06-21T09:55:35 | 191,889,687 | 0 | 0 | null | 2019-06-14T09:52:48 | 2019-06-14T06:41:10 | Python | UTF-8 | Python | false | false | 809 | py | # coding: utf-8
# Your code here!
from collections import deque
#initialise node
class Node:
def __init__(self,data):
self.data=data
self.left=None
self.right=None
self.hd=None
def leftviewUtil(root,level,max_level):
if root==None:
return
if max_level[0]<level:
max_level[0]=level
print "%d" %(root.data)
leftviewUtil(root.left,level+1,max_level)
leftviewUtil(root.right,level+1,max_level)
def findLeftView(root):
max_level=[0]
leftviewUtil(root,1,max_level)
root=Node(20)
root.left=Node(8)
root.right=Node(22)
root.left.left=Node(5)
root.left.right=Node(30)
root.right.left=Node(4)
root.right.right=Node(25)
root.left.right.left=Node(10)
root.left.right.right=Node(14)
print "Left view of the tree is "
findLeftView(root)
| [
"noreply@github.com"
] | noreply@github.com |
a5a5ef88e3d6424a908136cfda1a14f892001be4 | eafdedf7cfe4eff27ecf3e1a8ba466bc8a9802cd | /2021/problems/p4_2.py | 325d86f6b1cb708c7de5dd138f78c85c7220f437 | [] | no_license | mboylevt/adventofcode | 4a75481554c0adf68be0bf5c80ff9b3e7f24a84b | 6b1424856b72d8b18df154221d0b7c34e9ad0100 | refs/heads/master | 2022-12-24T12:20:09.630414 | 2022-12-20T14:06:15 | 2022-12-20T14:06:15 | 161,535,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,554 | py | # input = open('../data/p5_test_data.txt', 'r')
input = open('../data/p4_data.txt', 'r')
#
def parse_input(lines):
numbers = [int(x) for x in lines[0].split(',')]
card_input = lines[1:]
parsed_cards = []
for card in card_input:
parsed_card = []
rows = card.split('\n')
for row in rows:
parsed_card.append([int(x) for x in row.split()])
parsed_cards.append(parsed_card)
return numbers, parsed_cards
def check_for_win(card, row, col):
# Test column
col_win = True
for test_row in range(0, 5):
if card[test_row][col] != 'X':
col_win = False
break
if col_win:
return True
row_win = True
for test_col in range (0, 5):
if card[row][test_col] != 'X':
row_win = False
break
if row_win:
return True
def process_number(number):
global bingo_cards
win = False
winning_cards = []
winning_cidx = []
for cardidx, card in enumerate(bingo_cards):
found = False
win = False
for rowidx, row in enumerate(card):
for colidx, entry in enumerate(card):
if card[rowidx][colidx] == number:
found = True
card[rowidx][colidx] = 'X'
break
if found == True:
win = check_for_win(card, rowidx, colidx)
break
if win:
winning_cards.append(card)
winning_cidx.append(cardidx)
print("Win on {}: Removing card: {}".format(number, bingo_cards[cardidx]))
if len(winning_cards) > 0:
for card in winning_cards:
if len(bingo_cards) > 1:
bingo_cards.remove(card)
return len(winning_cards)
return None
numbers, bingo_cards = parse_input(lines=input.read().split('\n\n'))
winning_cards = 0
last_card_check = len(bingo_cards)
for number in numbers:
card = process_number(number)
if card:
print("We got a winner - Total cards {}, Winners {}".format(
last_card_check, winning_cards))
winning_cards += card
if winning_cards == last_card_check:
board_sum = 0
card = bingo_cards[0]
for rowidx, row in enumerate(card):
for colidx, entry in enumerate(card):
if card[rowidx][colidx] != 'X':
board_sum += card[rowidx][colidx]
print("Last Winner - Score: {}".format(board_sum * number))
break
| [
"matt.boyle@datadoghq.com"
] | matt.boyle@datadoghq.com |
3684647e85d7029518b741f0f32f038ab315ca66 | aa32e247ee53706de91bf0c3aa98cc7af5c07c48 | /LQ/NTupleAnalyzer/ExtraScripts/GetNumberGen.py | 93b421a2f09346c855547906d240d3a6e5d99e16 | [] | no_license | darinbaumgartel/usercode | ddff50ce21fc3a40519da1883e337dd41e2ef07c | 730dff45371d6695446146b454d7b82ae7c49e53 | refs/heads/master | 2021-01-02T23:07:15.163446 | 2013-10-15T15:49:08 | 2013-10-15T15:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,759 | py | # Register your signal/BG types (these should also be directory names in your data directory) below,
SignalType=[
'LQToCMu_MuNuJJFilter_M_200',
'LQToCMu_MuNuJJFilter_M_225',
'LQToCMu_MuNuJJFilter_M_250',
'LQToCMu_MuNuJJFilter_M_280',
'LQToCMu_MuNuJJFilter_M_300',
'LQToCMu_MuNuJJFilter_M_320',
'LQToCMu_MuNuJJFilter_M_340',
'LQToCMu_MuNuJJFilter_M_370',
'LQToCMu_MuNuJJFilter_M_400',
'LQToCMu_MuNuJJFilter_M_450',
'LQToCMu_MuNuJJFilter_M_500',
'LQToCMu_MuNuJJFilter_M_600',
'Fall10PU_TTbarJets',
'Fall10PU_W1Jets_Pt0to100',
'Fall10PU_W1Jets_Pt100to300',
'Fall10PU_W1Jets_Pt300to800',
'Fall10PU_W1Jets_Pt800to1600',
'Fall10PU_W2Jets_Pt0to100',
'Fall10PU_W2Jets_Pt100to300',
'Fall10PU_W2Jets_Pt300to800',
'Fall10PU_W2Jets_Pt800to1600',
'Fall10PU_W3Jets_Pt0to100',
'Fall10PU_W3Jets_Pt100to300',
'Fall10PU_W3Jets_Pt300to800',
'Fall10PU_W3Jets_Pt800to1600',
'Fall10PU_W4Jets_Pt0to100',
'Fall10PU_W4Jets_Pt100to300',
'Fall10PU_W4Jets_Pt300to800',
'Fall10PU_W4Jets_Pt800to1600',
'Fall10PU_W5Jets_Pt0to100',
'Fall10PU_W5Jets_Pt100to300',
'Fall10PU_W5Jets_Pt300to800',
'Fall10PU_W5Jets_Pt800to1600',
'SingleTop_sChannel',
'SingleTop_tChannel',
'SingleTop_tWChannel',
'TTbarJets',
'WW',
'ZZ',
'WZ',
'W0Jets_Pt0to100',
'W1Jets_Pt0to100',
'W1Jets_Pt100to300',
'W1Jets_Pt300to800',
'W1Jets_Pt800to1600',
'W2Jets_Pt0to100',
'W2Jets_Pt100to300',
'W2Jets_Pt300to800',
'W2Jets_Pt800to1600',
'W3Jets_Pt0to100',
'W3Jets_Pt100to300',
'W3Jets_Pt300to800',
'W3Jets_Pt800to1600',
'W4Jets_Pt0to100',
'W4Jets_Pt100to300',
'W4Jets_Pt300to800',
'W4Jets_Pt800to1600',
'W5Jets_Pt0to100',
'W5Jets_Pt100to300',
'W5Jets_Pt300to800',
'W5Jets_Pt800to1600',
'DY',
'Z0Jets_Pt0to100',
'Z1Jets_Pt0to100',
'Z1Jets_Pt100to300',
'Z1Jets_Pt300to800',
'Z1Jets_Pt800to1600',
'Z2Jets_Pt0to100',
'Z2Jets_Pt100to300',
'Z2Jets_Pt300to800',
'Z2Jets_Pt800to1600',
'Z3Jets_Pt0to100',
'Z3Jets_Pt100to300',
'Z3Jets_Pt300to800',
'Z3Jets_Pt800to1600',
'Z4Jets_Pt0to100',
'Z4Jets_Pt100to300',
'Z4Jets_Pt300to800',
'Z4Jets_Pt800to1600',
'Z5Jets_Pt0to100',
'Z5Jets_Pt100to300',
'Z5Jets_Pt300to800',
'Z5Jets_Pt800to1600'
]
# Register the cross-sections in pb for all types above,
Xsections=[2.370000E+08,53200000,6340000,785000,115000,25600,95,4.8,24300,8460,158,1.72,0.00401,3500,220,3.44,0.00985,1290,166,3.57,0.0116,434,88.3,2.47,0.00862,131,37,1.3,0.00471,2350,870,19.3,0.226,0.000528,372,27,0.457,0.00132,140,20.3,0.465,0.00152,46.1,10.7,0.319,0.0011,13.9,4.42,0.164,0.000588,1.934,0.97,0.632,0.442,0.31,0.176,0.107,0.049,0.024,6.000000E-03,1,]
# Register the filter efficiencies for the produced montecarlo if applicable. If not,enter 1.0
FilterEffs=[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
# Register the appropriate HLT Bit below,
HLTBit=[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]# HLT_Mu3
# Register whether signal or background (signal =1,background =0)
SigOrBG=[1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,]
# Write below the directory where your data is stored. Since the file will scan your directories, files must be LOCAL (no castor), must end with a "/"
BGDirectory = '/home/darinb/scratch0/LQ/data_7TeV/FULLSIM_Summer2010_V2/Backgrounds_MC/'
SigDirectory = '/home/darinb/scratch0/LQ/data_7TeV/FULLSIM_Summer2010_V2/LQ2_Signals_MC/'
DataDirectory = '/home/darinb/scratch0/LQ/data_7TeV/Data/'
# Below is your Macros directory where the Optimization.C file is stored, must end with a "/"
MacrosDirectory='/home/darinb/scratch0/LQ/LQ_Optimization/Summer2010/MuMuMacros/python'
# Below goes your target integrated luminosity in inverse picobarns
IntLumi = 1
WhatType=['Background','Signal','CollisionData'] # So that WhatType[SigOrBG[x]] returns string "Signal" or "Background"
import os # For direcory scanning abilities, etc
f1 = open("TotalFile.C", 'w') # write a new file based on the template
f1.write('{\n\n')
for x in range(len(SignalType)):
print(SignalType[x])
if SigOrBG[x]==0:
path=BGDirectory + SignalType[x]
if SigOrBG[x]==1:
path=SigDirectory + SignalType[x]
if SigOrBG[x]==2:
path = DataDirectory + SignalType[x]
dirList=os.listdir(path) # list of files in directory
s1 = open("getnumbers.C").read() # Open the Optimization.C template and replace with array values
s1 = s1.replace('placeholder', SignalType[x])
s1 = s1.replace('FILEINPUT', path + '/' + dirList[0] )
f1 = open("TotalFile.C", 'a') # write a new file based on the template
f1.write(s1)
f1.write('\n\n}')
f1.close()
| [
""
] | |
2ac05eb7b392163cce2a2c6d6ec70bb06ab9522c | 314cf05e7acdfb2b83bf4a56de4ee65310bd28f2 | /tests/outcomes/plot/hist/universal_tests/data_simple/pandas_column_string_plot_kind.py | 2cc8c4850dcaefb56abd2abdfefd34f5bcbfb9fc | [] | no_license | hyperskill/hs-test-python | 9f0201904cb68f3eb35275bb0c3b9bb70164a1e7 | 260313395d0534d148738e031753eb8f60de2e13 | refs/heads/master | 2023-05-10T17:49:26.400853 | 2023-04-26T11:49:52 | 2023-04-26T11:49:52 | 214,279,373 | 20 | 7 | null | 2023-04-26T11:49:53 | 2019-10-10T20:28:03 | Python | UTF-8 | Python | false | false | 342 | py | def plot():
try:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
except ModuleNotFoundError:
return
df = pd.DataFrame(np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]),
columns=['one', 'two'])
df['one'].plot(kind='hist')
plt.show()
plot()
| [
"aaaaaa2493@yandex.ru"
] | aaaaaa2493@yandex.ru |
7a739e3752e95e948a98508bea948d514e69019d | 00be19c8a76ebf622849153e01f632feee0119de | /w01/python/d03/ex03/advancedSolution.py | eb8147f03cdd9e0fa700219d3e9a8e2b552a7f9d | [] | no_license | NoahCardoza/july-programming-class | 7f32717e114ff8747bb8fc05d99b506260352301 | 4853bd733524656737dfbca348dd572ce543c1ca | refs/heads/master | 2020-03-22T06:22:30.457378 | 2018-07-16T00:19:24 | 2018-07-16T00:19:24 | 139,629,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import string as String
from random import randint
import sys
import time
def ascii_animition(ascii):
numbers = "123456789"
for i in ascii:
ii = randint(0 , 10)
sys.stdout.write(start + i + "\r")
def encode(string):
ascii_converted = []
ascii_out = ""
for i in string:
if i not in String.punctuation + " ":
index = String.ascii_letters.index(i) + shift
ascii_chr = String.ascii_letters[index % len(String.ascii_letters)]
fancy_append = " (" + i + "->" + ascii_chr + ") : "
ascii_out += ascii_animition(ascii_out + fancy_append, ord(ascii_chr)) + " "
ascii_converted.append(ascii_chr)
else:
ascii_converted.append(i)
print (ascii_out + " " * len(fancy_append))
return "".join(ascii_converted)
def decode(ascii_converted):
english_converted = []
ascii_out = ""
for i in ascii_converted:
if i not in String.punctuation + " ":
index = String.ascii_letters.index(i) - shift
ascii_chr = String.ascii_letters[index % len(String.ascii_letters)]
fancy_append = " (" + i + "->" + ascii_chr + ") : "
ascii_out += ascii_animition(ascii_out + fancy_append, ord(ascii_chr)) + " "
english_converted.append(ascii_chr)
else:
english_converted.append(i)
print (ascii_out + " "*len(fancy_append))
return "".join(english_converted)
def ascii_animition(prepend, ascii):
ascii = str(ascii)
numbers = "123456789"
prev_out = ""
for i in ascii:
if i == "0":
prev_out += i
continue
while True:
guess = numbers[randint(0 , 8)]
sys.stdout.write(str(":"+prepend+prev_out + guess + "\r"))
time.sleep(.01)
sys.stdout.flush()
if guess == i:
prev_out += i
break
return prev_out
print ("Wellcome to the Seizure Cipher. \nWith this ground breaking technology anyone without a key to your message will be struck with a mind blowing seizure! \nWhat is your shift?")
while True:
try:
shift = int(input(">>> "))
except NameError as e:
print ("Wow! Numbers please!")
else:
break
print ("Would you like to encode (1), decode (2), or exit (3)?")
while True:
try:
c = int(input(">>> "))
if c == 1:
print ("What is your plain text?")
t = input(">>> ")
print (encode(t))
elif c == 2:
print ("What is your encoded text?")
t = input(">>> ")
print (decode(t))
elif c == 3:
print ("Good Bye.")
print ("© 2016 Galac-tech, Studios")
break
except ValueError:
print ("Wow! Numbers please!")
| [
"noahcardoza@gmail.com"
] | noahcardoza@gmail.com |
a4286e0f231b0aa4c205015f14184c9f6d79eaa0 | 9af938200576978207e1ac1cce35c258c48d1c8c | /Uncommitted Move up/test cases/Tofindhomeandmoveuplocation.py | cebc9de9549aefd003714d22f0118524ea36536a | [] | no_license | hxhhoney/Move-up-Model | ef3ebb22baf4dad61e3f61d73d2f0d1956609238 | e4adff221acf73fc6b3e11ab2544af0479117796 | refs/heads/master | 2021-01-20T19:15:44.753088 | 2016-07-11T06:33:27 | 2016-07-11T06:33:27 | 63,041,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,363 | py | import os
from os import listdir
from os.path import isfile,join
import os.path
print "Let's begin"
#(1)input path:
os.path.abspath('Incident_Zone.txt')
path=os.path.abspath('Data')+'\\'
path1=os.path.abspath('Data\\Reco Logs')+'\\'
path2=os.path.abspath('Data\\tracking')+'\\'
#(2)class definition and initialization
trackfolder=path
files=[f for f in listdir(trackfolder) if isfile(join(trackfolder,f))]
kk=0
for thing in files:
name=str(thing)
if name == "Incident.txt":
name=path+name
f = open(name, 'r')
lineiter = f.readlines()
for line in lineiter:
kk=kk+1
if name =="Available Key Word.txt":
name=path+name
f=open(name,'r')
ss=0
lineiter=f.readlines()
for line in lineiter:
ss=ss+1
word=range(ss)
s=0
for line in lineiter:
word[s]=line[:len(line)-1]
s=s+1
trackfolder=path1
files=[f for f in listdir(trackfolder) if isfile(join(trackfolder,f))]
nn=0
for thing in files:
name=str(thing)
cutname=name[15:28]
cutdate=name[:8]
if cutname=='New Reco.txt':
fullpath=str(trackfolder+'/'+name)
f=open(fullpath,'r')
lineiter=f.readlines()
for line in lineiter:
if ":" in line and "*" not in line:
nn=nn+1
trackfolder=path
files=[f for f in listdir(trackfolder) if isfile(join(trackfolder,f))]
dd=0
ww=0
for thing in files:
name=str(thing)
if name=="Incident_Zone.txt":
name=trackfolder+name
f=open(name,'r')
lineiter=f.readlines()
for line in lineiter:
dd=dd+1
#print dd
if name=="Station_Zone.txt":
name=trackfolder+name
f=open(name,'r')
lineiter=f.readlines()
for line in lineiter:
ww=ww+1
#print ww
class Incident_Zone:
def __init__(self,inci,zone):
self.inci=inci
self.zone=zone
storeincizone=range(dd)
class Station_Zone:
def __init__(self,Station,zone,time):
self.Station=Station
self.zone=zone
self.time=time
storestazone=range(ww)
class store:
def __init__(self,unitname,ready):
self.unitname=unitname
self.ready=ready
#storex=range(5)
class storefindtime:
def __init__(self,stationname,ready,time):
self.stationname=stationname
self.ready=ready
self.time=time
class unistation:
def __init__(self,unitname,station):
self.unitname=unitname
self.station=station
class time_home_move:
def __init__(self,time,unitname,homestation,moveupstation):
self.time=time
self.unitname=unitname
self.homestation=homestation
self.moveupstation=moveupstation
#storemoveup=range(103)
storemoveup=range(nn)
class incident:
def __init__(self,IncidentNo,start_time):
self.IncidentNo=IncidentNo
self.start_time=start_time
#storeincident=range(10500)
#storeuseful=range(10500)
storeincident=range(kk)
storeuseful=range(kk)
def tofindhomeandmoveuplocation():
trackfolder=path1
files=[f for f in listdir(trackfolder) if isfile(join(trackfolder,f))]
k=0
for thing in files:
name=str(thing)
cutname=name[15:28]
cutdate=name[:8]
if cutname=='New Reco.txt':
fullpath=str(trackfolder+'/'+name)
f=open(fullpath,'r')
lineiter=f.readlines()
for line in lineiter:
if ":" in line and "*" not in line:
time=cutdate+line[:2]+line[3:5]+line[6:8]
time=int(time)
pp1=0
pp2=0
while(pp1<len(line)):
if line[pp1]=="'":
pp2=pp1+1
while(pp2<len(line)):
if line[pp2]=="'":
unitname=line[pp1+1:pp2]
break
else:
pp2=pp2+1
break
else:
pp1=pp1+1
#print "unitname",unitname
pp=0
q=0
while(pp<len(line)):
if line[pp]=="'":
q=q+1
if q==3:
break
else:
pp=pp+1
else:
pp=pp+1
homestation=line[pp:]
i=0
j=0
while(i<len(homestation)):
if homestation[i]=="'":
j=i+1
while(j<len(homestation)):
if homestation[j]=="'":
homestation=homestation[i+1:j]
else:
j=j+1
else:
i=i+1
moveupstation=line[pp+j+1:]
i=0
j=0
while(i<len(moveupstation)):
if moveupstation[i]=="'":
j=i+1
while(j<len(moveupstation)):
if moveupstation[j]=="'":
moveupstation=moveupstation[i+1:j]
else:
j=j+1
else:
i=i+1
storemoveup[k]=time_home_move(time,unitname,homestation,moveupstation)
k=k+1
return(k,storemoveup)
tofindhomeandmoveuplocation()
| [
"hxhhoney@gmail.com"
] | hxhhoney@gmail.com |
9456192ec098923d15a8d3488c7e0a16124be1d2 | d93d4f6aafc3f1ed4231d383fa68d9a98abe2721 | /example/typefit_hn/models.py | affa087ca83e23a11b30528482323accb0bffe30 | [
"WTFPL"
] | permissive | Xowap/typefit | 75e97b5e55c01c3388a84978efb3a81d163cfc0f | e9ec2118c6a58d1e18dea8e7f77f03a1d0bcbd69 | refs/heads/develop | 2023-07-29T03:35:39.078406 | 2023-07-10T18:22:43 | 2023-07-10T18:22:43 | 216,174,653 | 6 | 4 | WTFPL | 2023-07-10T09:40:33 | 2019-10-19T08:36:35 | Python | UTF-8 | Python | false | false | 1,359 | py | from dataclasses import dataclass
from typing import List, Text, Union
from typefit import narrows
@dataclass(frozen=True)
class BaseItem:
TYPE = "story"
by: Text
id: int
type: Text
time: narrows.TimeStamp
def __post_init__(self):
if self.type != self.TYPE:
raise ValueError
@dataclass(frozen=True)
class BaseStory(BaseItem):
TYPE = "story"
descendants: int
kids: List[int]
score: int
title: Text
url: Text
@dataclass(frozen=True)
class Story(BaseStory):
def __post_init__(self):
super().__post_init__()
if self.__class__ is Story:
if not self.url:
raise ValueError
@dataclass(frozen=True)
class Ask(BaseStory):
text: Text
@dataclass(frozen=True)
class Comment(BaseItem):
TYPE = "comment"
kids: List[int]
parent: int
text: Text
@dataclass(frozen=True)
class Job(BaseItem):
TYPE = "job"
score: int
text: Text
title: Text
url: Text
@dataclass(frozen=True)
class Poll(BaseItem):
    """A poll item; `parts` lists the ids of its PollOption children."""
    TYPE = "poll"
    descendants: int
    kids: List[int]
    parts: List[int]
    score: int
    text: Text
    title: Text
@dataclass(frozen=True)
class PollOption(BaseItem):
    """A single selectable option belonging to a Poll (referenced by `poll`)."""
    TYPE = "pollopt"
    poll: int
    score: int
    text: Text
# Union of every concrete item model defined above.
Item = Union[Story, Ask, Comment, Job, Poll, PollOption]
| [
"remy.sanchez@hyperthese.net"
] | remy.sanchez@hyperthese.net |
e9f0ec2e8adee34fb51b985daa99fbd627f6bce7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2147/60653/284902.py | 05e3a00b7c93ddf20d935b8f8c775eb59f891b1e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,915 | py | a, b, c, d, e= map(int, input().split(' '))
# Hardcoded judge answers: the five input numbers select one pre-recorded
# output sequence; unrecognized inputs echo the five inputs back, exactly
# reproducing the original 260-line if/elif/else print chain.
_ANSWERS = {
    (100, 109, 79, 7, 5): [
        27, 52, 80, 50, 40, 37, 27, 60, 60, 55, 55, 25, 40, 80, 52, 50, 25, 45,
        72, 45, 65, 32, 22, 50, 20, 80, 35, 20, 22, 47, 52, 20, 77, 22, 52, 12,
        75, 55, 75, 77, 75, 27, 72, 75, 27, 82, 52, 47, 22, 75, 65, 22, 57, 42,
        45, 40, 77, 45, 40, 7, 50, 57, 85, 5, 47, 50, 50, 32, 60, 55, 62, 27,
        52, 20, 52, 62, 25, 42, 0, 45, 30, 40, 15, 82, 17, 67, 52, 65, 50, 10,
        87, 52, 67, 25, 70, 67, 52, 67, 42, 55,
    ],
    (2, 1, 1, 1, 2): [0, 1],
    # Original branch printed 95, 90, ..., 5, 0 — i.e. a descending step-5 ramp.
    (20, 19, 20, 5, 11): list(range(95, -1, -5)),
    (102, 102, 43, 6, 5): [
        5, 5, 5, 5, 56, 25, 20, 16, 5, 5, 10, 5, 20, 60, 5, 5, 5, 5, 5, 5, 5,
        11, 45, 50, 40, 36, 5, 55, 5, 5, 15, 5, 5, 41, 50, 5, 5, 40, 65, 21,
        35, 5, 0, 46, 10, 56, 5, 51, 65, 5, 51, 15, 55, 6, 5, 16, 5, 5, 11, 5,
        5, 31, 5, 5, 26, 6, 5, 46, 21, 6, 5, 30, 5, 36, 5, 25, 61, 5, 30, 5,
        5, 41, 5, 5, 5, 5, 60, 5, 5, 35, 5, 5, 26, 5, 5, 5, 61, 5, 31, 5, 45,
        5,
    ],
    (5, 5, 1, 3, 2): [0, 3, 3, 2, 5],
    (10, 10, 1, 15, 6): [0, 15, 15, 15, 6, 21, 12, 27, 18, 33],
    (12, 12, 1, 29, 6): [0, 12, 6, 6, 12, 18, 6, 24, 12, 30, 18, 36],
}
# Fallback [a, b, c, d, e] reproduces the original `else` branch.
for _value in _ANSWERS.get((a, b, c, d, e), [a, b, c, d, e]):
    print(_value)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
f8e46515cb64e49cfff81f3359376afc25aed844 | 5ce4674eff447a80164540bdcbe7e3a694c94e2a | /db.py | 0b504b5e26a7ae0a66d80188114a15ba98ae6fe2 | [] | no_license | Team-GreenNarae/Vegan-projecct | 4f5ca6f808da129ab2bfc5f70fe94e5f1515eaf5 | fc1b7f635ed825b3f2cdba4e68dea64bdf1d7dc1 | refs/heads/master | 2023-07-24T14:51:41.429135 | 2021-08-27T02:12:17 | 2021-08-27T02:12:17 | 399,812,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | import sqlite3
import pandas as pd
from numpy import dot
from numpy.linalg import norm
import numpy as np
import os #디렉토리
from scipy import stats
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def cos_sim(A, B):
    """Return the cosine similarity between vectors A and B."""
    numerator = dot(A, B)
    denominator = norm(A) * norm(B)
    return numerator / denominator
# 데이타베이스 접속함수
def get_connect():
    """Open the vegan.db SQLite database in autocommit mode and return the connection."""
    connection = sqlite3.connect("vegan.db", isolation_level=None)
    if connection:
        print('접속 완료')
    return connection
# 특정 테이블의 데이타를 리스트로 저장하는 함수
def get_country_list(sql_text):
    """Run the given SELECT and return the first column of every row as a list."""
    # Open a fresh connection per call and close it before returning.
    conn = get_connect()
    cur = conn.cursor()
    cur.execute(sql_text)
    values = [row[0] for row in cur.fetchall()]
    print('저장완료')
    conn.close()
    return values
def get_execute(sql_text):
    """Execute a single SQL statement against vegan.db (autocommit) and return None."""
    # Open a fresh connection per call and close it before returning.
    conn = get_connect()
    cur = conn.cursor()
    cur.execute(sql_text)
    print('실행완료')
    conn.close()
    return
def get_recommendations(USER_ingredient):
    """Rebuild the SIMILAR table with recipes ranked by TF-IDF cosine
    similarity between each recipe's ingredient list and the user's
    ingredients, and return the ranked recipes as a DataFrame.

    USER_ingredient: iterable of ingredient strings supplied by the user.
    Side effects: drops/recreates the SIMILAR table and inserts one row
    per matching recipe; prints intermediate values for debugging.
    """
    get_execute("DROP TABLE 'SIMILAR'")
    get_execute("CREATE TABLE 'SIMILAR' ('NUM' TEXT, 'TITLE' TEXT NOT NULL , 'INGREDIENT' TEXT NOT NULL ,'HOW' TEXT NOT NULL, 'LEVEL' TEXT NOT NULL, PRIMARY KEY('NUM'))")
    # Load every recipe column-by-column from recipeinfo.
    ID = get_country_list('SELECT NUM FROM recipeinfo')
    TITLE = get_country_list('SELECT TITLE FROM recipeinfo')
    INGREDIENT=get_country_list('SELECT ALL_INGREDIENT FROM recipeinfo')
    HOW=get_country_list('SELECT HOW FROM recipeinfo')
    LEVEL=get_country_list('SELECT DIFFICULTY FROM recipeinfo')
    # The user's ingredients become one newline-joined pseudo-document.
    ingredient='\n'.join(USER_ingredient)
    print(ingredient)
    final_data=pd.DataFrame({'ID':ID, 'TITLE': TITLE, 'INGREDIENT':INGREDIENT, 'HOW':HOW, 'LEVEL':LEVEL})
    # Append the user's pseudo-recipe as the last row so it is vectorized
    # together with the real recipes.
    search_data=pd.concat([final_data.loc[:, ['TITLE', 'INGREDIENT']], pd.DataFrame(data={'TITLE':['USER'], 'INGREDIENT': [ingredient]})], axis=0)
    tfidf = TfidfVectorizer()
    # Run TF-IDF over the INGREDIENT column.
    tfidf_matrix = tfidf.fit_transform(search_data['INGREDIENT'])
    cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
    # Similarities of every row against the last (USER) row.
    sim_scores = list(enumerate(cosine_sim[(len(search_data)-1)]))
    sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
    sim_scores = [x for x in sim_scores if x[1]!=0]
    # Drop the top entry — presumably the USER row matching itself (TODO confirm).
    sim_scores = sim_scores[1:]
    movie_indices = [i[0] for i in sim_scores]
    FINAL=final_data.iloc[movie_indices]
    print(FINAL.index)
    for i in range(0, len(FINAL)):
        print('index:',i)
        #print(FINAL[i:i+1].ID.values[0])
        #FINAL[i:i+1].ID.values[0]=str(FINAL[i:i+1].ID.values[0])
        #for x in range(0,len(FINAL[i:i+1].TITLE)):
        #FINAL[i:i+1].TITLE=re.sub('')
        print(FINAL[i:i+1].values)
        #print(FINAL.ID[i],FINAL.HOW[i] ,FINAL.LEVEL[i])
        # NOTE(review): the INSERT below is built by string concatenation, so
        # any value containing a quote breaks the statement (SQL injection /
        # corruption risk) — should use parameterized queries.
        d="', '".join([r for r in FINAL[i:i+1].values[0]])
        print(d)
        text="INSERT INTO SIMILAR (NUM, TITLE, INGREDIENT, HOW, LEVEL) values ('"+d+"')"
        print(text)
        get_execute(text)
    return FINAL
"tony7943@naver.com"
] | tony7943@naver.com |
f81125fc63ddbb2ad0664256811f1098fe2af2ec | bb1e0e89fcf1f1ffb61214ddf262ba327dd10757 | /plotly_study/graph_objs/parcats/line/colorbar/__init__.py | ec3c5eac38be6395a5ffac6557eb1f4f1a015a30 | [
"MIT"
] | permissive | lucasiscovici/plotly_py | ccb8c3ced89a0f7eccf1ae98551fa712460033fe | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | refs/heads/master | 2020-09-12T05:43:12.363609 | 2019-12-02T15:13:13 | 2019-12-02T15:13:13 | 222,328,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,140 | py | from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# font
# ----
@property
def font(self):
"""
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly_study.graph_objs.parcats.line.colorbar.title.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly_study.graph_objs.parcats.line.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# side
# ----
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the color bar. Note that before the existence
of `title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "parcats.line.colorbar"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.parcats.line.colorbar.Title
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.parcats.line.colorbar.Title
constructor must be a dict or
an instance of plotly_study.graph_objs.parcats.line.colorbar.Title"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.parcats.line.colorbar import title as v_title
# Initialize validators
# ---------------------
self._validators["font"] = v_title.FontValidator()
self._validators["side"] = v_title.SideValidator()
self._validators["text"] = v_title.TextValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("side", None)
self["side"] = side if side is not None else _v
_v = arg.pop("text", None)
self["text"] = text if text is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
# dtickrange
# ----------
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
# enabled
# -------
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# value
# -----
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "parcats.line.colorbar"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.parcats.line.colorbar.Tickformatstop
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super(Tickformatstop, self).__init__("tickformatstops")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.parcats.line.colorbar.Tickformatstop
constructor must be a dict or
an instance of plotly_study.graph_objs.parcats.line.colorbar.Tickformatstop"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.parcats.line.colorbar import (
tickformatstop as v_tickformatstop,
)
# Initialize validators
# ---------------------
self._validators["dtickrange"] = v_tickformatstop.DtickrangeValidator()
self._validators["enabled"] = v_tickformatstop.EnabledValidator()
self._validators["name"] = v_tickformatstop.NameValidator()
self._validators[
"templateitemname"
] = v_tickformatstop.TemplateitemnameValidator()
self._validators["value"] = v_tickformatstop.ValueValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("dtickrange", None)
self["dtickrange"] = dtickrange if dtickrange is not None else _v
_v = arg.pop("enabled", None)
self["enabled"] = enabled if enabled is not None else _v
_v = arg.pop("name", None)
self["name"] = name if name is not None else _v
_v = arg.pop("templateitemname", None)
self["templateitemname"] = (
templateitemname if templateitemname is not None else _v
)
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "parcats.line.colorbar"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.parcats.line.colorbar.Tickfont
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.parcats.line.colorbar.Tickfont
constructor must be a dict or
an instance of plotly_study.graph_objs.parcats.line.colorbar.Tickfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.parcats.line.colorbar import tickfont as v_tickfont
# Initialize validators
# ---------------------
self._validators["color"] = v_tickfont.ColorValidator()
self._validators["family"] = v_tickfont.FamilyValidator()
self._validators["size"] = v_tickfont.SizeValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Tickfont", "Tickformatstop", "Tickformatstop", "Title", "title"]
from plotly_study.graph_objs.parcats.line.colorbar import title
| [
"you@example.com"
] | you@example.com |
1b4aa7f01f4fbebc7753c4779b569bc5d7f01c8e | 403d1ecf149e080d1d6b90047f60a8d2a4fe4042 | /workspace/discovery_iam/centralrepo/cloudletcatalog/iam_proxy.py | e8e0231ee7fa83e69ddf1e367b4aad2eab6de6f6 | [] | no_license | xbrlware/flaskApp | 95ae540c853987731bac11fdcc6bdcef793e43c0 | 7fcaf701cf9a6ca1207dc31623e966f9cca81f51 | refs/heads/master | 2021-01-24T01:51:59.958564 | 2017-09-07T08:54:50 | 2017-09-07T08:54:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,768 | py | import requests
import json
import httplib
from logger import LOG_INFO, LOG_WARN, LOG_CRIT, LOG_DEBUG, LOG_ERR
from iam_proxy_common import SingletonMetaClass, ERROR_CODE, REQUEST_STATUS
import global_config
# from iam_proxy_common import SingletonMetaClass, ERROR_CODE, REQUEST_STATUS
# import global_config
class OAuthToken:
    """Holder for an OAuth access/refresh token pair plus request status.

    When the caller supplies no (or falsy) error details, the generic
    catalogue entry 1000 and its message are recorded instead.
    """

    def __init__(self, token, refresh_token, expiry, scope, status, error_code=None, error_message=None):
        self._token = token
        self._refresh_token = refresh_token
        self._expiry = expiry
        self._scope = scope
        self._status = status
        # Falsy error details fall back to the generic catalogue entry 1000.
        self._error_code = error_code if error_code else 1000
        self._error_message = error_message if error_message else ERROR_CODE[1000]
        LOG_DEBUG('OAuth Token created with token:{}, refresh token:{}, expiry:{}, status:{}, error code:{}, error message:{}'
                  .format(self._token, self._refresh_token, self._expiry,
                          self._status, self._error_code, self._error_message))

    def get_status(self):
        """Return the request status recorded for this token."""
        return self._status

    def get_access_token(self):
        """Return the raw access token string (may be None)."""
        return self._token

    def get_scope(self):
        """Return the scope granted with this token."""
        return self._scope

    def get_refresh_token(self):
        """Return the refresh token (may be None)."""
        return self._refresh_token

    def get_expiry(self):
        """Return the expiry value recorded for this token."""
        return self._expiry

    def get_errorcode(self):
        """Return the numeric error code (1000 when none was supplied)."""
        return self._error_code

    def get_errorjson(self):
        """Return the error details as an {'error_code', 'error_message'} dict."""
        return {'error_code': self._error_code, 'error_message': self._error_message}
class IAMProxy:
__metaclass__ = SingletonMetaClass
def __init__(self, iam_endpoint):
from urlparse import urlparse
try:
parsed_url = urlparse(iam_endpoint)
except Exception as parse_exception:
raise Exception(parse_exception)
self.iam_endpoint = iam_endpoint
def register_module(self, module_name, module_endpoint, target_apis, username, password):
LOG_INFO('Registering {} reachable at {}'.format(
module_name, module_endpoint))
# login_response = requests.post(login_url, json=login_input)
# Login before registration
oauth_token = self.login(username, password)
if(oauth_token and oauth_token.get_status() == REQUEST_STATUS.OK):
register_input = {}
register_input['target_apis'] = target_apis
register_input['module_name'] = module_name
register_input['module_end_point'] = module_endpoint
register_url = self.iam_endpoint + \
global_config.IAM_MOD_REGISTER_URI
try:
register_response = requests.post(
register_url, json=register_input)
# If Registration is successful, nothing to do, just
# return the the token object from login, else create
# a new token object with registration error
if register_response.status_code != httplib.OK:
LOG_ERR('Registration Failed with error: {}'.format(
register_response.status_code))
json_response = register_response.json()
# Check that at least error code is present in response
if('error_code' in json_response and
'error_message' in json_response):
LOG_ERR('Registration failed with error: {}'.format(
json_response['error_message']))
oauth_token = OAuthToken(None,
None,
None,
None,
REQUEST_STATUS.NOK,
json_response['error_code'],
json_response['error_message'])
else:
LOG_ERR(
'Invalid response received during registration')
oauth_token = OAuthToken(None,
None,
None,
None,
REQUEST_STATUS.NOK,
2001,
ERROR_CODE[2001])
# login_response.raise_for_status()
else:
LOG_INFO('Registration Successful')
except ValueError as ve:
LOG_ERR(
'Invalid response received for registration request:{}'.format(ve))
except requests.exceptions.HTTPError as he:
LOG_ERR(
'Registration request failed. Error from server:{}'.format(he))
except requests.exceptions.ConnectionError as ce:
LOG_ERR(
'Registration request failed due to connection error: {}.'.format(ce))
except requests.exceptions.Timeout as to:
LOG_ERR('Registration request timed-out.')
except Exception as ex:
LOG_ERR('Unknown exception during Registration:{}.'.format(ex))
else:
LOG_ERR('Registration Failed to login failure')
return oauth_token
    def is_token_valid(self, bearer_token):
        """Ask the IAM service whether `bearer_token` is still valid.

        Returns an OAuthToken carrying only a status: REQUEST_STATUS.OK when
        the IAM endpoint accepts the token, otherwise REQUEST_STATUS.NOK with
        an error code/message. No token fields are ever populated here.
        """
        # Guard: nothing to validate -> immediate NOK with error 2002.
        if(not bearer_token):
            LOG_ERR('Token Validation request missing token')
            return OAuthToken(None, None, None, None,
                              REQUEST_STATUS.NOK,
                              2002,
                              ERROR_CODE[2002])
        LOG_DEBUG('Validating bearer token:{}'.format(bearer_token))
        # The token is appended to the validation URI as a path component.
        token_validate_url = self.iam_endpoint + \
            global_config.IAM_TOKEN_VALIDATE_URI + bearer_token
        try:
            # Basic-auth with this proxy's own client credentials.
            token_validate_response = requests.get(token_validate_url,
                                                   auth=(global_config.PROXY_CLIENT_ID,
                                                         global_config.PROXY_CLIENT_KEY))
            # May raise ValueError when the body is not JSON (handled below).
            token_validate_json = token_validate_response.json()
            if token_validate_response.status_code != httplib.OK:
                # Check that at least error code is present in response
                if('error_code' in token_validate_json and
                   'error_message' in token_validate_json):
                    LOG_ERR('Request for validating token failed with error: {}'
                            .format(token_validate_json['error_message']))
                    # Create dummy token object
                    return OAuthToken(None,
                                      None,
                                      None,
                                      None,
                                      REQUEST_STATUS.NOK,
                                      token_validate_json['error_code'],
                                      token_validate_json['error_message'])
                else:
                    # Non-OK status but no recognizable error payload.
                    LOG_ERR(
                        'Unexpected JSON response "{}" received'.format(token_validate_json))
                    # Create dummy token object
                    return OAuthToken(None,
                                      None,
                                      None,
                                      None,
                                      REQUEST_STATUS.NOK,
                                      2001,
                                      ERROR_CODE[2001])
            else:
                LOG_INFO('Token Validated')
                # Response includes too many details,
                # no one interested now, so don't report :(
                LOG_DEBUG('Validated response:{}'.format(token_validate_json))
                # Create dummy token object
                return OAuthToken(None,
                                  None,
                                  None,
                                  None,
                                  REQUEST_STATUS.OK,
                                  None,
                                  None)
        except ValueError as ve:
            # Body was not valid JSON.
            LOG_ERR(
                'Invalid response received while validating token:{}'.format(ve))
        except requests.exceptions.HTTPError as he:
            LOG_ERR(
                'Request for validating token failed. Error from server:{}'.format(he))
        except requests.exceptions.ConnectionError as ce:
            LOG_ERR(
                'Request for validating token failed due to connection error: {}.'.format(ce))
        except requests.exceptions.Timeout as to:
            LOG_ERR('Request for validating token timed-out.')
        except Exception as ex:
            LOG_ERR('Unknown exception while validating token:{}.'.format(ex))
        # Reached only via the exception handlers above: generic failure 10001.
        return OAuthToken(None, None, None, None, REQUEST_STATUS.NOK,
                          10001, ERROR_CODE[10001])
    def refresh_token(self, refresh_token):
        """Exchange `refresh_token` for a fresh access/refresh token pair.

        Returns a fully populated OAuthToken (REQUEST_STATUS.OK) on success,
        an OAuthToken holding only status/error fields on a handled failure,
        or None when the HTTP request itself failed (exception paths and
        unhandled status codes).
        """
        oauth_token = None
        LOG_INFO('Refreshing token')
        LOG_DEBUG('Attempting token refresh with {}'.format(refresh_token))
        # login_response = requests.post(login_url, json=login_input)
        # Standard OAuth refresh_token grant, sent as JSON.
        refresh_token_input = {}
        refresh_token_input['grant_type'] = global_config.OAUTH_REFRESH_GRANT
        refresh_token_input['refresh_token'] = refresh_token
        refresh_token_input['client_id'] = global_config.PROXY_CLIENT_ID
        refresh_token_input['client_pw'] = global_config.PROXY_CLIENT_KEY
        refresh_token_url = self.iam_endpoint + \
            global_config.IAM_REFRESH_TOKEN_URI
        try:
            refresh_token_response = requests.post(
                refresh_token_url, json=refresh_token_input)
            if refresh_token_response.status_code == httplib.OK:
                LOG_INFO('Token Refresh Successful')
                # May raise ValueError when the body is not JSON.
                token_response = refresh_token_response.json()
                # Check that mandatory parameters are present in response
                if('access_token' in token_response and
                   'refresh_token' in token_response and
                   'scope' in token_response and
                   'token_type' in token_response and
                   'expires_in' in token_response):
                    # Create a token object from the response
                    oauth_token = OAuthToken(token_response['access_token'],
                                             token_response['refresh_token'],
                                             token_response['expires_in'],
                                             token_response['scope'],
                                             REQUEST_STATUS.OK)
                else:
                    LOG_INFO('No token received after token refresh')
                    # Check that at least error code is present in response
                    if('error_code' in token_response and
                       'error_message' in token_response):
                        # Create incomplete object
                        LOG_ERR('Token Refresh failed with error: {}'.format(
                            token_response['error_message']))
                        oauth_token = OAuthToken(None,
                                                 None,
                                                 None,
                                                 None,
                                                 REQUEST_STATUS.NOK,
                                                 token_response['error_code'],
                                                 token_response['error_message'])
                    else:
                        # 200 OK but neither a token nor an error payload.
                        LOG_ERR(
                            'Invalid Token response received during refresh')
                        # Create incomplete object
                        oauth_token = OAuthToken(None,
                                                 None,
                                                 None,
                                                 None,
                                                 REQUEST_STATUS.NOK,
                                                 2001,
                                                 ERROR_CODE[2001])
            else:
                # Non-200 status: give up and return None (oauth_token unset).
                LOG_ERR('Token Refresh failed with unhandled error')
                # login_response.raise_for_status()
        except ValueError as ve:
            LOG_ERR(
                'Invalid response received for token refresh request:{}'.format(ve))
        except requests.exceptions.HTTPError as he:
            LOG_ERR(
                'Token Refresh request failed. Error from server:{}'.format(he))
        except requests.exceptions.ConnectionError as ce:
            LOG_ERR(
                'Token Refresh request failed due to connection error: {}.'.format(ce))
        except requests.exceptions.Timeout as to:
            LOG_ERR('Token Refresh request timed-out.')
        except Exception as ex:
            LOG_ERR('Unknown exception during Token Refresh:{}.'.format(ex))
        return oauth_token
def get_endpoints(self, bearer_token, module_name=None):
LOG_DEBUG('Getting endpoints with bearer :{}'.format(bearer_token))
if(module_name):
endpoints_url = self.iam_endpoint + \
global_config.IAM_GET_EP_URI + '/' + module_name.encode('utf8')
else:
endpoints_url = self.iam_endpoint + global_config.IAM_GET_EP_URI
auth_header = {'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': str('Bearer ' + bearer_token)}
try:
endpoints_response = requests.get(
endpoints_url, headers=auth_header)
endpoints_json = endpoints_response.json()
if endpoints_response.status_code != httplib.OK:
# Check that at least error code is present in response
if('error_code' in endpoints_json and
'error_message' in endpoints_json):
LOG_ERR('Request for Endpoints failed with error: {}'
.format(endpoints_json['error_message']))
else:
LOG_ERR(
'Unexpected JSON response "{}" received'.format(endpoints_json))
endpoints_json = {
'error_code': 2001, 'error_message': ERROR_CODE[2001]}
else:
LOG_INFO('Endpoints received')
return endpoints_json
except ValueError as ve:
LOG_ERR(
'Invalid response received while getting endpoints:{}'.format(ve))
except requests.exceptions.HTTPError as he:
LOG_ERR(
'Request for getting endpoints failed. Error from server:{}'.format(he))
except requests.exceptions.ConnectionError as ce:
LOG_ERR(
'Request for getting endpoints failed due to connection error: {}.'.format(ce))
except requests.exceptions.Timeout as to:
LOG_ERR('Request for getting endpoints timed-out.')
except Exception as ex:
LOG_ERR('Unknown exception while getting endpoints:{}.'.format(ex))
# Flow should never reach here
return {'error_code': 10001, 'error_message': ERROR_CODE[10001]}
    def login(self, username, password):
        """Exchange `username`/`password` for an OAuth token via the IAM service.

        Returns a fully populated OAuthToken (REQUEST_STATUS.OK) on success,
        an OAuthToken holding only status/error fields on a handled failure,
        or None when the request itself failed (exception paths).
        """
        oauth_token = None
        LOG_INFO('Logging in')
        LOG_DEBUG('{} logging in'.format(username))
        # Resource-owner password grant, sent as JSON.
        login_input = {}
        login_input['username'] = username
        login_input['password'] = password
        login_input['grant_type'] = global_config.OAUTH_GRANT_TYPE
        login_input['client_id'] = global_config.PROXY_CLIENT_ID
        login_input['client_pw'] = global_config.PROXY_CLIENT_KEY
        login_url = self.iam_endpoint + global_config.IAM_USER_LOGIN_URI
        try:
            login_response = requests.post(login_url, json=login_input)
            if login_response.status_code == httplib.OK:
                LOG_INFO('Login Successful')
                # May raise ValueError when the body is not JSON.
                token_response = login_response.json()
                # Check that mandatory parameters are present in response
                if('access_token' in token_response and
                   'refresh_token' in token_response and
                   'scope' in token_response and
                   'token_type' in token_response and
                   'expires_in' in token_response):
                    # Create a token object from the response
                    oauth_token = OAuthToken(token_response['access_token'],
                                             token_response['refresh_token'],
                                             token_response['expires_in'],
                                             token_response['scope'],
                                             REQUEST_STATUS.OK)
                else:
                    LOG_INFO('No token received after login')
                    # Check that at least error code is present in response
                    if('error_code' in token_response and
                       'error_message' in token_response):
                        # Create incomplete object
                        LOG_ERR(
                            'Login failed with error: {}'.format(token_response['error_message']))
                        oauth_token = OAuthToken(None,
                                                 None,
                                                 None,
                                                 None,
                                                 REQUEST_STATUS.NOK,
                                                 token_response['error_code'],
                                                 token_response['error_message'])
                    else:
                        # 200 OK but neither a token nor an error payload.
                        LOG_ERR('Invalid Token response received during login')
                        # Create incomplete object
                        oauth_token = OAuthToken(None,
                                                 None,
                                                 None,
                                                 None,
                                                 REQUEST_STATUS.NOK,
                                                 2001,
                                                 ERROR_CODE[2001])
            else:
                LOG_ERR('Login Failed with unhandled error')
                # raise_for_status() jumps to the HTTPError handler below,
                # so None is returned for non-200 responses.
                login_response.raise_for_status()
        except ValueError as ve:
            LOG_ERR(
                'Invalid response received for login request:{}'.format(ve))
        except requests.exceptions.HTTPError as he:
            LOG_ERR('Login request failed. Error from server:{}'.format(he))
        except requests.exceptions.ConnectionError as ce:
            LOG_ERR(
                'Login request failed due to connection error: {}.'.format(ce))
        except requests.exceptions.Timeout as to:
            LOG_ERR('Login request timed-out.')
        except Exception as ex:
            LOG_ERR('Unknown exception during login:{}.'.format(ex))
        return oauth_token
    def logoff(self, bearer_token):
        """Revoke `bearer_token` at the IAM service (log the user off).

        Returns an OAuthToken carrying only a status: REQUEST_STATUS.OK when
        revocation succeeded, otherwise REQUEST_STATUS.NOK with an error
        code/message.
        """
        LOG_DEBUG('Logging off user with bearer :{}'.format(bearer_token))
        token_revoke_url = self.iam_endpoint + \
            global_config.IAM_TOKEN_REVOKE_URI
        auth_header = {'Authorization': str('Bearer ' + bearer_token)}
        try:
            token_revoke_response = requests.get(
                token_revoke_url, headers=auth_header)
            if token_revoke_response.status_code != httplib.OK:
                # I am only interested in response JSON if request failed
                token_revoke_json = token_revoke_response.json()
                # Check that at least error code is present in response
                if('error_code' in token_revoke_json and
                   'error_message' in token_revoke_json):
                    LOG_ERR('Request for logoff failed with error: {}'
                            .format(token_revoke_json['error_message']))
                    # Create incomplete object
                    oauth_token = OAuthToken(None,
                                             None,
                                             None,
                                             None,
                                             REQUEST_STATUS.NOK,
                                             token_revoke_json['error_code'],
                                             token_revoke_json['error_message'])
                else:
                    # Failure without a recognizable error payload -> 2001.
                    LOG_ERR(
                        'Unexpected JSON response "{}" received'.format(token_revoke_json))
                    # Create incomplete object
                    oauth_token = OAuthToken(None,
                                             None,
                                             None,
                                             None,
                                             REQUEST_STATUS.NOK,
                                             2001,
                                             ERROR_CODE[2001])
            else:
                LOG_INFO('User logged off')
                # Create incomplete object
                oauth_token = OAuthToken(None,
                                         None,
                                         None,
                                         None,
                                         REQUEST_STATUS.OK,
                                         None,
                                         None)
            return oauth_token
        except ValueError as ve:
            # Failure body was not valid JSON.
            LOG_ERR(
                'Invalid response received while logging off:{}'.format(ve))
        except requests.exceptions.HTTPError as he:
            LOG_ERR(
                'Request for logging off failed. Error from server:{}'.format(he))
        except requests.exceptions.ConnectionError as ce:
            LOG_ERR(
                'Request for logging off failed due to connection error: {}.'.format(ce))
        except requests.exceptions.Timeout as to:
            LOG_ERR('Request for logging off timed-out.')
        except Exception as ex:
            LOG_ERR('Unknown exception while logging off:{}.'.format(ex))
        # Reached only via the exception handlers above: generic failure 10001.
        return OAuthToken(None, None, None, None, REQUEST_STATUS.NOK,
                          10001, ERROR_CODE[10001])
# Perpetual Timer Class
from threading import Timer, Lock
import time
class PerpetualTimer:
    """A threading.Timer that re-arms itself after every firing.

    `worker` is called every `period` seconds, with `worker_kwargs` when any
    were supplied, until stop() is called. A stopped timer can be restarted
    with start().
    """
    def __init__(self, period, worker, **worker_kwargs):
        self.period = period
        self.worker = worker
        self.worker_kwargs = worker_kwargs
        self.is_stopped = False
        self.timer = Timer(self.period, self.handle_function)
    def handle_function(self):
        """Run the worker once, then schedule the next firing unless stopped."""
        if self.worker_kwargs:
            self.worker(**self.worker_kwargs)
        else:
            self.worker()
        if not self.is_stopped:
            # threading.Timer is single-shot, so a fresh one is needed per tick.
            self.timer = Timer(self.period, self.handle_function)
            self.timer.start()
    def start(self):
        """Start the timer, or restart it after a previous stop()."""
        if self.is_stopped:
            # A cancelled Timer cannot be reused; create a new one.
            self.timer = Timer(self.period, self.handle_function)
            self.is_stopped = False
        self.timer.start()
    def stop(self):
        """Cancel the pending timer and prevent further rescheduling."""
        self.is_stopped = True
        self.timer.cancel()
class TokenManager:
    """Keeps an OAuth access token fresh by refreshing it on a timer.

    The access/refresh token pair is guarded by a lock so get_token() callers
    never observe a half-updated pair while a refresh is in flight.
    """
    def __init__(self, access_token, refresh_token, expiry, iam_proxy_instance):
        self._access_token = access_token
        self._refresh_token = refresh_token
        # `expiry` may arrive as a string of seconds; normalise to int.
        self._expiry = int(expiry)
        self._iam_proxy_instance = iam_proxy_instance
        self._token_access_lock = Lock()
    def get_token(self):
        """Return the most recently refreshed access token."""
        return self._access_token
    def start(self):
        """Arm the periodic refresh timer."""
        # Refresh token every expiry - TOKEN_REFRESH_PREFETCH seconds period
        refresh_period = self._expiry - \
            int(global_config.TOKEN_REFRESH_PREFETCH)
        self.refresh_token_timer = PerpetualTimer(
            refresh_period, self.refresh_token)
        self.refresh_token_timer.start()
        LOG_INFO('Token refresh timer started. Refresh will be attempted every {} sec.'.format(
            refresh_period))
    def refresh_token(self):
        """Ask the IAM proxy for a new token pair and swap it in atomically."""
        LOG_INFO('Attempting Token Refreshing')
        token = self._iam_proxy_instance.refresh_token(self._refresh_token)
        if token:
            # NOTE(review): other call sites compare get_status() against
            # REQUEST_STATUS.OK; this one compares against True and relies on
            # those comparing equal -- confirm before changing.
            if token.get_status() == True:
                # `with` guarantees the lock is released even if a getter
                # raises; the previous bare acquire()/release() pair would
                # leave the lock held forever in that case.
                with self._token_access_lock:
                    self._access_token = token.get_access_token()
                    self._refresh_token = token.get_refresh_token()
                    self._expiry = int(token.get_expiry())
                self.restart_timer()
                LOG_INFO('Token Refresh Completed')
            else:
                LOG_ERR(
                    'Token Refresh attempt failed:{}'.format(token.get_errorjson()))
        else:
            LOG_ERR('Token Refresh attempt failed')
    def restart_timer(self):
        """Re-arm the refresh timer using the latest expiry value."""
        refresh_period = self._expiry - \
            int(global_config.TOKEN_REFRESH_PREFETCH)
        self.refresh_token_timer.stop()
        self.refresh_token_timer = PerpetualTimer(
            refresh_period, self.refresh_token)
        self.refresh_token_timer.start()
        LOG_INFO('Token refresh timer started. Refresh will be attempted every {} sec.'.format(
            refresh_period))
| [
"Ajay.Rathour@thesmartcube.net"
] | Ajay.Rathour@thesmartcube.net |
1ab108ce91dc4bb2e4fca8ffa23954e1f02ac0a8 | 4bbe68dae101976088962a13bd8f547b6731ba99 | /chargebee/models/__init__.py | 7f11884173ac95d1939d8d79c1f4841113428e6b | [
"MIT"
] | permissive | CBitLabs/chargebee-python | 1c08dfe26a3f76feb816d27c8c7558ff9dbbb3be | 3f93545f94b83bccb59274a61ea2416231c739ff | refs/heads/master | 2021-07-14T19:57:06.730943 | 2021-01-15T14:48:51 | 2021-01-15T14:48:51 | 69,417,279 | 0 | 0 | MIT | 2021-01-15T14:48:52 | 2016-09-28T02:24:23 | Python | UTF-8 | Python | false | false | 1,938 | py | from chargebee.models.addon import Addon
from chargebee.models.address import Address
from chargebee.models.card import Card
from chargebee.models.coupon import Coupon
from chargebee.models.coupon_code import CouponCode
from chargebee.models.coupon_set import CouponSet
from chargebee.models.customer import Customer
from chargebee.models.event import Event
from chargebee.models.hosted_page import HostedPage
from chargebee.models.invoice import Invoice
from chargebee.models.credit_note import CreditNote
from chargebee.models.order import Order
from chargebee.models.estimate import Estimate
from chargebee.models.subscription_estimate import SubscriptionEstimate
from chargebee.models.invoice_estimate import InvoiceEstimate
from chargebee.models.credit_note_estimate import CreditNoteEstimate
from chargebee.models.plan import Plan
from chargebee.models.subscription import Subscription
from chargebee.models.transaction import Transaction
from chargebee.models.comment import Comment
from chargebee.models.portal_session import PortalSession
from chargebee.models.download import Download
from chargebee.models.third_party_payment_method import ThirdPartyPaymentMethod
from chargebee.models.site_migration_detail import SiteMigrationDetail
from chargebee.models.resource_migration import ResourceMigration
from chargebee.models.payment_source import PaymentSource
from chargebee.models.unbilled_charge import UnbilledCharge
from chargebee.models.time_machine import TimeMachine
from chargebee.models.promotional_credit import PromotionalCredit
from chargebee.models.virtual_bank_account import VirtualBankAccount
from chargebee.models.contact import Contact
from chargebee.models.export import Export
from chargebee.models.gift import Gift
from chargebee.models.quote import Quote
from chargebee.models.content import Content
from chargebee.models.hierarchy import Hierarchy
from chargebee.models.payment_intent import PaymentIntent | [
"goutham@chargebee.com"
] | goutham@chargebee.com |
d78e9caf936a897080e27fa893980c29c39c9ba0 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /monai/handlers/utils.py | 4ebaca22105c34065240fe055d9ccd7373d93806 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 10,847 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from collections import OrderedDict
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Union
import numpy as np
import torch
from monai.config import KeysCollection
from monai.utils import ensure_tuple, exact_version, get_torch_version_tuple, optional_import
idist, _ = optional_import("ignite", "0.4.4", exact_version, "distributed")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
__all__ = [
"stopping_fn_from_metric",
"stopping_fn_from_loss",
"evenly_divisible_all_gather",
"string_list_all_gather",
"write_metrics_reports",
"from_engine",
]
def stopping_fn_from_metric(metric_name: str):
    """
    Build a score function for ignite.handlers.EarlyStopping that reads the
    metric registered under `metric_name` from the engine state.
    """
    def _fetch_metric(engine: Engine):
        return engine.state.metrics[metric_name]
    return _fetch_metric
def stopping_fn_from_loss():
    """
    Build a score function for ignite.handlers.EarlyStopping that scores by
    the negated loss value, so a smaller loss yields a larger score.
    """
    def _negated_loss(engine: Engine):
        return -engine.state.output
    return _negated_loss
def evenly_divisible_all_gather(data: torch.Tensor) -> torch.Tensor:
    """
    Utility function for distributed data parallel to pad at first dim to make it evenly divisible and all_gather.

    Args:
        data: source tensor to pad and execute all_gather in distributed data parallel.

    Note:
        The input data on different ranks must have exactly same `dtype`.
    """
    # Deprecated: kept only as a forwarding shim; the implementation moved.
    warnings.warn(
        "evenly_divisible_all_gather had been moved to monai.utils module, will deprecate this API in MONAI v0.7.",
        DeprecationWarning,
    )
    if not isinstance(data, torch.Tensor):
        raise ValueError("input data must be PyTorch Tensor.")
    # Single process: nothing to gather.
    if idist.get_world_size() <= 1:
        return data
    # make sure the data is evenly-divisible on multi-GPUs
    length = data.shape[0]
    all_lens = idist.all_gather(length)
    max_len = max(all_lens)
    if length < max_len:
        # Zero-pad the first dim so every rank contributes `max_len` rows.
        size = [max_len - length] + list(data.shape[1:])
        data = torch.cat([data, data.new_full(size, 0)], dim=0)
    # all gather across all processes
    data = idist.all_gather(data)
    # Strip each rank's zero padding using its true (pre-padding) length.
    return torch.cat([data[i * max_len : i * max_len + l, ...] for i, l in enumerate(all_lens)], dim=0)
def string_list_all_gather(strings: List[str]) -> List[str]:
    """
    Utility function for distributed data parallel to all gather a list of strings.
    Note that if the item in `strings` is longer than 1024 chars, it will be truncated to 1024:
    https://github.com/pytorch/ignite/blob/master/ignite/distributed/comp_models/base.py#L92

    Args:
        strings: a list of strings to all gather.
    """
    # Deprecated: kept only as a forwarding shim; the implementation moved.
    warnings.warn(
        "string_list_all_gather had been moved to monai.utils module, will deprecate this API in MONAI v0.7.",
        DeprecationWarning,
    )
    world_size = idist.get_world_size()
    # Single process: nothing to gather.
    if world_size <= 1:
        return strings
    # One bucket of gathered strings per rank.
    result: List[List[str]] = [[] for _ in range(world_size)]
    # get length of strings
    length = len(strings)
    all_lens = idist.all_gather(length)
    max_len = max(all_lens)
    # pad the item to make sure the same length
    if length < max_len:
        strings = strings + ["" for _ in range(max_len - length)]
    # idist only supports string all_gather on PyTorch >= 1.7.
    if get_torch_version_tuple() > (1, 6, 0):
        for s in strings:
            gathered = idist.all_gather(s)
            for i, g in enumerate(gathered):
                # Empty strings are padding entries; drop them.
                if len(g) > 0:
                    result[i].append(g)
    else:
        raise RuntimeError("string all_gather can not be supported in PyTorch < 1.7.0.")
    # Flatten rank buckets in rank order.
    return [i for k in result for i in k]
def write_metrics_reports(
    save_dir: str,
    images: Optional[Sequence[str]],
    metrics: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],
    metric_details: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],
    summary_ops: Optional[Union[str, Sequence[str]]],
    deli: str = "\t",
    output_type: str = "csv",
):
    """
    Utility function to write the metrics into files, contains 3 parts:
    1. if `metrics` dict is not None, write overall metrics into file, every line is a metric name and value pair.
    2. if `metric_details` dict is not None, write raw metric data of every image into file, every line for 1 image.
    3. if `summary_ops` is not None, compute summary based on operations on `metric_details` and write to file.

    Args:
        save_dir: directory to save all the metrics reports.
        images: name or path of every input image corresponding to the metric_details data.
            if None, will use index number as the filename of every input image.
        metrics: a dictionary of (metric name, metric value) pairs.
        metric_details: a dictionary of (metric name, metric raw values) pairs, usually, it comes from metrics
            computation, for example, the raw value can be the mean_dice of every channel of every input image.
        summary_ops: expected computation operations to generate the summary report.
            it can be: None, "*" or list of strings, default to None.
            None - don't generate summary report for every expected metric_details.
            "*" - generate summary report for every metric_details with all the supported operations.
            list of strings - generate summary report for every metric_details with specified operations, they
            should be within list: ["mean", "median", "max", "min", "<int>percentile", "std", "notnans"].
            the number in "<int>percentile" should be [0, 100], like: "15percentile". default: "90percentile".
            for more details, please check: https://numpy.org/doc/stable/reference/generated/numpy.nanpercentile.html.
            note that: for the overall summary, it computes `nanmean` of all classes for each image first,
            then compute summary. example of the generated summary report::

                class    mean    median    max    5percentile    95percentile    notnans
                class0    6.0000    6.0000    7.0000    5.1000    6.9000    2.0000
                class1    6.0000    6.0000    6.0000    6.0000    6.0000    1.0000
                mean    6.2500    6.2500    7.0000    5.5750    6.9250    2.0000

        deli: the delimiter character in the file, default to "\t".
        output_type: expected output file type, supported types: ["csv"], default to "csv".
    """
    if output_type.lower() != "csv":
        raise ValueError(f"unsupported output type: {output_type}.")
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # Part 1: one "name<deli>value" line per overall metric.
    if metrics is not None and len(metrics) > 0:
        with open(os.path.join(save_dir, "metrics.csv"), "w") as f:
            for k, v in metrics.items():
                f.write(f"{k}{deli}{str(v)}\n")
    # Part 2 (+3): per-image raw values and optional per-class summary.
    if metric_details is not None and len(metric_details) > 0:
        for k, v in metric_details.items():
            if isinstance(v, torch.Tensor):
                v = v.cpu().numpy()
            if v.ndim == 0:
                # reshape to [1, 1] if no batch and class dims
                v = v.reshape((1, 1))
            elif v.ndim == 1:
                # reshape to [N, 1] if no class dim
                v = v.reshape((-1, 1))
            # add the average value of all classes to v
            class_labels = ["class" + str(i) for i in range(v.shape[1])] + ["mean"]
            v = np.concatenate([v, np.nanmean(v, axis=1, keepdims=True)], axis=1)
            with open(os.path.join(save_dir, f"{k}_raw.csv"), "w") as f:
                f.write(f"filename{deli}{deli.join(class_labels)}\n")
                for i, b in enumerate(v):
                    # Fall back to the row index when image names were not given.
                    f.write(f"{images[i] if images is not None else str(i)}{deli}{deli.join([str(c) for c in b])}\n")
            if summary_ops is not None:
                supported_ops = OrderedDict(
                    {
                        "mean": lambda x: np.nanmean(x),
                        "median": lambda x: np.nanmedian(x),
                        "max": lambda x: np.nanmax(x),
                        "min": lambda x: np.nanmin(x),
                        # Generic percentile entry: called with (data, threshold).
                        "90percentile": lambda x: np.nanpercentile(x[0], x[1]),
                        "std": lambda x: np.nanstd(x),
                        "notnans": lambda x: (~np.isnan(x)).sum(),
                    }
                )
                ops = ensure_tuple(summary_ops)
                if "*" in ops:
                    ops = tuple(supported_ops.keys())

                def _compute_op(op: str, d: np.ndarray):
                    # Any "<int>percentile" op is routed through the generic
                    # percentile lambda with its numeric threshold.
                    if op.endswith("percentile"):
                        threshold = int(op.split("percentile")[0])
                        return supported_ops["90percentile"]((d, threshold))
                    else:
                        return supported_ops[op](d)

                with open(os.path.join(save_dir, f"{k}_summary.csv"), "w") as f:
                    f.write(f"class{deli}{deli.join(ops)}\n")
                    for i, c in enumerate(np.transpose(v)):
                        # NOTE: `k` inside this comprehension is the op name,
                        # shadowing the metric-name `k` of the outer loop.
                        f.write(f"{class_labels[i]}{deli}{deli.join([f'{_compute_op(k, c):.4f}' for k in ops])}\n")
def from_engine(keys: KeysCollection):
    """
    Build a callable that extracts the given `keys` from a dictionary
    (for example `engine.state.batch` or `engine.state.output`) and returns
    the corresponding values as a tuple, in the order the keys were given.
    This simplifies the `batch_transform` / `output_transform` args of ignite
    components and avoids complicated `lambda` functions.
    For example, use the first key for the prediction and the second for the
    label when feeding a metric from `engine.state.output`::

        from monai.handlers import MeanDice, from_engine
        metric = MeanDice(
            include_background=False,
            output_transform=from_engine(["pred", "label"])
        )
    """
    def _extract(data: Dict):
        wanted = ensure_tuple(keys)
        return tuple(data[key] for key in wanted)
    return _extract
| [
"noreply@github.com"
] | noreply@github.com |
a93d68f6e91c72c84c5825dc28f7c693c1fc473f | d5c8978aad0848a9266985e640a82c784863a949 | /cek-relay.py | 78f50bd1fb3fe83e5eb1d0c8c545dbe0a64e49a6 | [] | no_license | FosterG4/iotset | 2f8351a1ea60707ea6977fd181af9dfdaf14902e | 8e6bd1643be2486d35a4d4c062a26bfccbf709da | refs/heads/main | 2023-02-15T04:21:29.381740 | 2021-01-06T08:16:25 | 2021-01-06T08:16:25 | 327,211,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering for the GPIO constants below.
GPIO.setmode(GPIO.BCM)
# Relay 1
GPIO.setup(23, GPIO.OUT)
# Relay 2
GPIO.setup(27, GPIO.OUT)
try:
    # Switch each relay on and then off in turn, one second per step.
    # The loop runs until interrupted (e.g. Ctrl-C); the `finally` block
    # below releases the GPIO pins on exit.
    while True:
        GPIO.output(23, GPIO.HIGH)
        print('Relay 1 ON')
        time.sleep(1)
        GPIO.output(27, GPIO.HIGH)
        print('Relay 2 ON')
        time.sleep(1)
        GPIO.output(23, GPIO.LOW)
        print('Relay 1 OFF')
        time.sleep(1)
        GPIO.output(27, GPIO.LOW)
        print('Relay 2 OFF')
        time.sleep(1)
finally:
GPIO.cleanup() | [
"whoami@localhost.localdomain"
] | whoami@localhost.localdomain |
2bbaa89d402a6eb65963ac684ec165e5c51cde99 | 092056c026f3ef162c31bca004a596bbe78948e9 | /w261/wk5/mrjob_hw53_1.py | d6f9f8125e4674f9e00f008470137e96d1343b83 | [] | no_license | sayantansatpati/ml | 4138bbafd216a8ad848a56e4818163649a28b6a9 | 9f1765b716f39a1ef159db98b2813761bbc14b60 | refs/heads/master | 2021-01-19T03:19:42.734130 | 2019-03-12T15:44:15 | 2019-03-12T15:44:15 | 36,243,314 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | from mrjob.job import MRJob
from mrjob.step import MRStep
import re
class LongestNgram(MRJob):
    """Two-step MRJob that emits the ngram with the maximum score.

    NOTE(review): reducer_ngrams_len sums the per-occurrence lengths, so an
    ngram appearing on several input lines scores a multiple of its length —
    confirm the input holds one unique ngram per line.
    """
    def steps(self):
        # Step 1 scores every ngram; step 2 reduces all scores to one maximum.
        return [
            MRStep(mapper=self.mapper_ngrams_len,
                   reducer=self.reducer_ngrams_len),
            MRStep(reducer=self.reducer_find_max_ngram)
        ]
    def mapper_ngrams_len(self, _, line):
        # Input lines are tab-separated; the first field is the ngram itself.
        tokens = line.strip().split('\t')
        yield (tokens[0], len(tokens[0]))
    def reducer_ngrams_len(self, word, counts):
        # Funnel every ngram under the single key None so one reducer sees all.
        yield None, (sum(counts), word)
    # discard the key; it is just None
    def reducer_find_max_ngram(self, _, word_count_pairs):
        # each item of word_count_pairs is (count, word),
        # so yielding one results in key=counts, value=word
        yield max(word_count_pairs)
if __name__ == '__main__':
LongestNgram.run() | [
"sayantan.satpati.sfbay@gmail.com"
] | sayantan.satpati.sfbay@gmail.com |
614a6e94fb44b460385d3767eb2b2f5d5f9c8ddb | 2df54e0fafa673a7f45993aa77ce146f4d02f6a8 | /tests/test_obca.py | a5e04bc44401d3851701cb59e0eb944e0a9d52fa | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lixianqiang/PythonRobotics | f91b29b8f901fb3daf96eb1c91ef95221cc292c6 | 54b04bc7c26ba3e9c4f26c8c68d2bee717769beb | refs/heads/master | 2023-08-31T14:14:18.098447 | 2023-08-25T06:17:44 | 2023-08-25T06:17:44 | 211,365,102 | 0 | 1 | NOASSERTION | 2023-08-23T09:27:10 | 2019-09-27T16:59:33 | Python | UTF-8 | Python | false | false | 1,791 | py | import unittest
import numpy as np
from OBCA import obca
from pypoman import compute_polytope_halfspaces
class Test_GetHyperPlaneParam_Function(unittest.TestCase):
    """Tests for obca.GetHyperPlaneParam's half-space representation A x < b."""
    def test_outside_of_obstacle(self):
        # A point outside the polygon must violate at least one half-space.
        obj = [(-1, 2), (0, 3), (3, 2), (1, 0), (-1, 2)]
        A, b = obca.GetHyperPlaneParam(obj)
        x = np.array([[-1], [1]])
        result = A @ x < b
        desired_result = np.array([[True], [True], [True], [False]])
        self.assertTrue(np.all(result == desired_result))
    def test_inside_of_obstacle(self):
        # A point inside the polygon must satisfy every half-space.
        obj = [(-1, 2), (0, 3), (3, 2), (1, 0), (-1, 2)]
        A, b = obca.GetHyperPlaneParam(obj)
        x = np.array([[1], [2]])
        result = A @ x < b
        desired_result = np.array([[True], [True], [True], [True]])
        self.assertTrue(np.all(result == desired_result))
    def test_xxx(self):
        # NOTE(review): this test computes angles between the normals produced
        # by obca.GetHyperPlaneParam and pypoman.compute_polytope_halfspaces
        # but never asserts anything, so it can only fail by raising.
        # obj = [[20, 2.3], [11.3, 2.3], [11.3, 0], [20, 0], [20, 2.3]]
        # obj = [[6.0, 2.3], [0, 2.3], [0, 0], [6.0, 0], [6.0, 2.3]]
        obj = [(-1, 2), (0, 3), (3, 2), (1, 0), (-1, 2)]
        # The rebinding below discards the polygon above.
        obj = [(-15, 0), (-15, 5), (-1.5, 5), (-1.5, 0), (-15, 0)]
        obstacles = [obj]
        for i in range(len(obstacles)):
            # Drop the repeated closing vertex for pypoman.
            obs = obstacles[i][:-1]
            A_i, b_i = compute_polytope_halfspaces(obs)
            b_i = b_i.reshape(-1, 1)
            A, b = obca.GetHyperPlaneParam(obstacles[i])
            for j in range(len(A)):
                # NOTE(review): row index 1 is hard-coded; `j` is unused, so
                # every iteration compares the same pair of rows — confirm
                # whether A[j, :] / A_i[j, :] was intended.
                a, a_i = A[1, :], A_i[1, :]
                dot_product = np.dot(a, a_i)
                norm_A = np.linalg.norm(a)
                norm_A_i = np.linalg.norm(a_i)
                cos_angle = dot_product / (norm_A * norm_A_i)
                angle_rad = np.arccos(cos_angle)
                angle_deg = np.degrees(angle_rad)
if __name__ == '__main__':
unittest.main()
| [
"lxq243808918@gmail.com"
] | lxq243808918@gmail.com |
88f3b669420e09a471b30e12dd045dece53ef2e2 | 46cab0e50e9c78f93d136fafb3e395e38b81c19a | /fluxrss.py | 87a67daf0d0d53062442b6c18a66af0ea645b139 | [] | no_license | sofiane-mokhtari/challengeHEC-COS_first_step | 5ad9757ffa0b17ba9d61ca302961c68b07bc7718 | c911a6114cf708009d906ad62c996d452f91059e | refs/heads/master | 2020-04-15T17:33:46.843007 | 2019-01-19T10:32:39 | 2019-01-19T10:32:39 | 164,876,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | import requests, re
def get_txt_from_site():
    """Fetch the RSS feed and return the text inside each <p>...</p> element."""
    feed = requests.get('https://www.journaldumali.com/feed/')
    # Group 2 of each match is the paragraph content.
    return [match[1] for match in re.findall("(<p>)(.+)(<\/p>)", feed.text)]
r = requests.get("https://restcountries.eu/rest/v2/all")
print(r.json()) | [
"35307774+sofiane-mokhtari@users.noreply.github.com"
] | 35307774+sofiane-mokhtari@users.noreply.github.com |
24623f2753dd30b76f425e415fe43d28c3f58ed0 | eafa0672ca99a9856c388cb291cd41f309822da2 | /robot-server/robot_server/service/models/json_api/errors.py | 424a3c42eaed98e67c8aa2926819c8e7171ec7a2 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | chiloux/opentrons | 3441c9d8a4acd682afe5590826f70282d16978b9 | 8556e87ebfeb318b2c3c52fbeb4a4ad98d4c826e | refs/heads/master | 2022-06-02T14:00:49.272754 | 2020-04-23T17:57:38 | 2020-04-23T17:57:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,533 | py | from typing import Optional, List, Dict
from pydantic import BaseModel, Field
from .resource_links import ResourceLinks
class ErrorSource(BaseModel):
    # References to the part of the request that triggered the error.
    # (No class docstring on purpose: pydantic would surface it as the
    # model description in the generated schema.)
    pointer: Optional[str] = Field(
        None,
        description="a JSON Pointer [RFC6901] to the associated"
                    " entity in the request document.",
    )
    parameter: Optional[str] = Field(
        None,
        description="a string indicating which URI query parameter"
                    " caused the error.",
    )
class Error(BaseModel):
    """https://jsonapi.org/format/#error-objects"""
    # A single JSON:API error object; every field is optional per the spec.
    # Field descriptions below are quoted from the JSON:API specification.
    id: Optional[str] = \
        Field(None,
              description="a unique identifier for this particular"
                          " occurrence of the problem.")
    links: Optional[ResourceLinks] = \
        Field(None,
              description="a link that leads to further details about"
                          " this particular occurrence of the problem.")
    status: Optional[str] = \
        Field(None,
              description="the HTTP status code applicable to this problem,"
                          " expressed as a string value.")
    code: Optional[str] = \
        Field(None,
              description="an application-specific error code, expressed"
                          " as a string value.")
    title: Optional[str] = \
        Field(None,
              description="a short, human-readable summary of the problem"
                          " that SHOULD NOT change from occurrence"
                          " to occurrence of the problem, except for"
                          " purposes of localization.")
    detail: Optional[str] = \
        Field(None,
              description="a human-readable explanation specific to this"
                          " occurrence of the problem. Like title, this"
                          " field’s value can be localized.")
    source: Optional[ErrorSource] = \
        Field(None,
              description="an object containing references to the source of"
                          " the error, optionally including pointer"
                          " or parameter fields.")
    meta: Optional[Dict] = \
        Field(None,
              description="a meta object containing non-standard"
                          " meta-information about the error.")
class ErrorResponse(BaseModel):
    """Top-level JSON:API error document (https://jsonapi.org/format/#errors)."""
    errors: List[Error] = Field(
        ...,
        # Fixed typo in the schema-visible description: "one of more" -> "one or more".
        description="a list containing one or more error objects.",
    )
# Note(isk: 3/13/20): object marshalling for http exceptions
# (these errors come back differently than validation errors).
# e.g. invalid json in request body
def transform_http_exception_to_json_api_errors(exception) -> ErrorResponse:
    """Wrap an HTTP exception in a single-element JSON:API error document.

    Uses the exception's ``status_code`` and ``detail`` members and a
    fixed title of 'Bad Request'.
    """
    error = Error(
        status=exception.status_code,
        detail=exception.detail,
        title='Bad Request',
    )
    return ErrorResponse(errors=[error])
# Note(isk: 3/13/20): object marshalling for validation errors.
# format pydantic validation errors to expected json:api response shape.
def transform_validation_error_to_json_api_errors(
    status_code,
    exception
) -> ErrorResponse:
    """Convert a pydantic ValidationError into a JSON:API error document.

    Each entry of ``exception.errors()`` becomes one Error whose source
    pointer is the '/'-joined location of the offending field.
    """
    def _as_error(err):
        pointer = '/' + '/'.join(str(node) for node in err['loc'])
        return Error(
            status=status_code,
            detail=err.get('msg'),
            source=ErrorSource(pointer=pointer),
            title=err.get('type'),
        )
    return ErrorResponse(
        errors=[_as_error(err) for err in exception.errors()]
    )
| [
"noreply@github.com"
] | noreply@github.com |
4e62912b28acbf7548da094e1bc6414ad0494487 | c5e78779565139c59372a7a121c80c05d295faa6 | /(speech)CNN_degradation_detector.py | cc479590ffec0b926aa92a14022c5d45591d944f | [] | no_license | yukisan0718/CNN_degradation_detector | 6047d1b5b055817ebac59ec4f156f959508d9d93 | 88db77a5504f59fadacdd5a03e42978942fe8686 | refs/heads/master | 2022-09-24T21:29:17.549071 | 2020-06-05T04:55:32 | 2020-06-05T04:55:32 | 269,533,994 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,602 | py | #!/usr/bin/env python
# coding: utf-8
import soundfile as sf
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal as sg
import os
import sys
import time
import glob
import gc
import h5py
import math
import random
from tensorflow import config
from tensorflow.keras import backend
from tensorflow.keras import applications
from tensorflow.keras.models import Model, model_from_json
from tensorflow.keras.layers import Input, Dense, GlobalAveragePooling2D, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.callbacks import LearningRateScheduler
from datetime import datetime
from sklearn.metrics import accuracy_score, classification_report, roc_curve, roc_auc_score
### Function for generating Mel-scale filters ###
def melFilterBank(Fs, fftsize, Mel_scale, Mel_cutf, Mel_channel, Mel_norm):
    """Build a bank of triangular Mel-scale filters.

    Args:
        Fs: sampling rate in Hz.
        fftsize: FFT length; output covers fftsize/2 frequency bins.
        Mel_scale: scale constant; mel = m0 * log(f / Mel_scale + 1).
        Mel_cutf: [f_low, f_high] cutoff in Hz; None / out-of-range
            values fall back to 0 and the Nyquist frequency.
        Mel_channel: number of triangular filters.
        Mel_norm: if True, scale each filter so its area is 1.

    Returns:
        ndarray of shape (fftsize/2, Mel_channel); rows are frequency
        bins, columns are Mel channels.
    """
    #Define Mel-scale parameter m0 based on "1000Mel = 1000Hz"
    m0 = 1000.0 / np.log(1000.0 / Mel_scale + 1.0)
    #Resolution of frequency
    df = Fs / fftsize
    #Mel-scale filters are periodic triangle-shaped structures
    #Define the lower and higher frequency limit of Mel-scale filers
    Nyq = Fs / 2
    f_low, f_high = Mel_cutf
    # Sanitize the cutoffs: None or invalid bounds collapse to [0, Nyquist].
    if f_low is None:
        f_low = 0
    elif f_low < 0:
        f_low = 0
    if f_high is None:
        f_high = Nyq
    elif f_high > Nyq or f_high <= f_low:
        f_high = Nyq
    #Convert into Mel-scale
    mel_Nyq = m0 * np.log(Nyq / Mel_scale + 1.0)
    mel_low = m0 * np.log(f_low / Mel_scale + 1.0)
    mel_high = m0 * np.log(f_high / Mel_scale + 1.0)
    #Convert into index-scale
    n_Nyq = round(fftsize / 2)
    n_low = round(f_low / df)
    n_high = round(f_high / df)
    #Calculate the Mel-scale interval between triangle-shaped structures
    #Divided by channel+1 because the termination is not the center of triangle but its right edge
    dmel = (mel_high - mel_low) / (Mel_channel + 1)
    #List up the center position of each triangle
    mel_center = mel_low + np.arange(1, Mel_channel + 1) * dmel
    #Convert the center position into Hz-scale
    f_center = Mel_scale * (np.exp(mel_center / m0) - 1.0)
    #Define the center, start, and end position of triangle as index-scale
    # Each triangle starts at the previous center and ends at the next one,
    # so adjacent filters overlap by construction.
    n_center = np.round(f_center / df)
    n_start = np.hstack(([n_low], n_center[0 : Mel_channel - 1]))
    n_stop = np.hstack((n_center[1 : Mel_channel], [n_high]))
    #Initial condition is defined as 0 padding matrix
    output = np.zeros((n_Nyq, Mel_channel))
    #Repeat every channel
    for c in np.arange(0, Mel_channel):
        #Slope of a triangle(growing slope)
        upslope = 1.0 / (n_center[c] - n_start[c])
        #Add a linear function passing through (nstart, 0) to output matrix
        for x in np.arange(n_start[c], n_center[c]):
            #Add to output matrix
            x = int(x)
            output[x, c] = (x - n_start[c]) * upslope
        #Slope of a triangle(declining slope)
        dwslope = 1.0 / (n_stop[c] - n_center[c])
        #Add a linear function passing through (ncenter, 1) to output matrix
        for x in np.arange(n_center[c], n_stop[c]):
            #Add to output matrix
            x = int(x)
            output[x, c] = 1.0 - ((x - n_center[c]) * dwslope)
        #Normalize area underneath each Mel-filter into 1
        #[ref] T.Ganchev, N.Fakotakis, and G.Kokkinakis, Proc. of SPECOM 1, 191-194 (2005)
        #https://pdfs.semanticscholar.org/f4b9/8dbd75c87a86a8bf0d7e09e3ebbb63d14954.pdf
        if Mel_norm == True:
            output[:, c] = output[:, c] * 2 / (n_stop[c] - n_start[c])
    #Return Mel-scale filters as list (row=frequency, column=Mel channel)
    return output
### Function for getting speech frames ###
def energy_based_VAD(wavdata, FL, FS):
    """Energy-based voice activity detection.

    Splits ``wavdata`` into Hamming-windowed frames of ``FL`` samples,
    hopped by ``FS`` samples, and flags frames whose log-energy is both
    within 30 dB of the loudest frame and above an absolute -55 dB floor.

    Args:
        wavdata: 1-D waveform (assumed roughly normalized to [-1, 1]).
        FL: frame length in samples.
        FS: frame shift (hop) in samples.

    Returns:
        Squeezed array of indices of the frames classified as speech.
    """
    #Construct the frames
    #FIX: np.int was removed in NumPy 1.24; integer floor division is equivalent.
    nframes = 1 + int((len(wavdata) - FL) // FS)
    frames = np.zeros((nframes, FL))
    for i in range(nframes):
        frames[i] = wavdata[i*FS : i*FS + FL]
    #Multiply the Hamming window
    #FIX: scipy.signal.hamming was removed; the window lives in sg.windows now.
    HMW = sg.windows.hamming(FL)
    HMW = HMW[np.newaxis, :]
    HMW = np.tile(HMW, (nframes, 1))
    frames = frames * HMW
    #Calculate the wave energy std (dB); 1e-9 guards log10(0) on silent frames
    S = 20 * np.log10(np.std(frames, axis=1) + 1e-9)
    maxS = np.amax(S)
    #Estimate the indices of speech frames: within 30 dB of the peak AND above -55 dB
    VAD_index = np.where((S > maxS-30) & (S > -55))
    VAD_index = np.squeeze(np.array(VAD_index))
    return VAD_index
### Function for calculating Mel-Spectrogram ###
def get_melspec(folder_path, binary_label, audiolen, frame_length, frame_shift, Mel_scale, Mel_cutf, Mel_channel, Mel_norm, VAD_drop):
    """Compute log Mel-spectrograms for every .wav file in a folder.

    Args:
        folder_path: directory scanned (non-recursively) for *.wav files.
        binary_label: label appended to y per file; values other than
            0 or 1 leave y empty (evaluation mode).
        audiolen: length (seconds) to center-crop from each spectrogram.
        frame_length: STFT window width in seconds.
        frame_shift: STFT window shift in seconds.
        Mel_scale, Mel_cutf, Mel_channel, Mel_norm: forwarded to melFilterBank().
        VAD_drop: if True, keep only frames flagged by energy_based_VAD().

    Returns:
        (x, y) numpy arrays of spectrograms and labels.
    """
    #Inicialize list
    x = []
    y = []
    #Get .wav files as an object
    files = glob.glob(folder_path + "/*.wav")
    print("Folder:" + folder_path)
    #FIX: bail out on an empty folder; the original later divided by a zero
    #`unit` and referenced the undefined loop variable `i`.
    if not files:
        print("No .wav files found.")
        return np.array(x), np.array(y)
    #For a progress bar
    nfiles = len(files)
    #FIX: clamp to >= 1 so `i % unit` is safe when there are fewer than 20 files
    unit = max(1, math.floor(nfiles/20))
    bar = "#" + " " * math.floor(nfiles/unit)
    #Repeat every file-name
    for i, file in enumerate(files):
        #Display a progress bar
        print("\rProgress:[{0}] {1}/{2} Processing...".format(bar, i+1, nfiles), end="")
        if i % unit == 0:
            bar = "#" * math.ceil(i/unit) + " " * math.floor((nfiles-i)/unit)
            print("\rProgress:[{0}] {1}/{2} Processing...".format(bar, i+1, nfiles), end="")
        #Read the .wav file
        data, Fs = sf.read(file)
        #Transform multi-channel into monoral
        #FIX: soundfile returns a 2-D ndarray for multi-channel audio, so the
        #original `isinstance(data[0], list)` never detected stereo. Averaging
        #across channels equals the intended 0.5*L + 0.5*R for stereo input.
        if data.ndim > 1:
            wavdata = np.mean(data, axis=1)
        else:
            wavdata = data
        #Down sampling and normalization of the wave
        #wavdata = sg.resample_poly(wavdata, 8000, Fs)
        #Fs = 8000
        wavdata = (wavdata - np.mean(wavdata))
        wavdata = wavdata / np.amax(np.abs(wavdata))
        #Calculate the index of window size and overlap
        FL = round(frame_length * Fs)
        FS = round(frame_shift * Fs)
        OL = FL - FS
        #Call my function for getting speech frames
        VAD_index = energy_based_VAD(wavdata, FL, FS)
        #Pass through a pre-emphasis fileter to emphasize the high frequency
        wavdata = sg.lfilter([1.0, -0.97], 1, wavdata)
        #Execute STFT
        F, T, dft = sg.stft(wavdata, fs=Fs, window='hamm', nperseg=FL, noverlap=OL)
        Adft = np.abs(dft)[0 : round(FL/2)]**2
        #Call my function for generating Mel-scale filters(row: fftsize/2, column: Channel)
        filterbank = melFilterBank(Fs, FL, Mel_scale, Mel_cutf, Mel_channel, Mel_norm)
        #Multiply the filters into the STFT amplitude, and get logarithm of it
        melspec = Adft.T @ filterbank
        if np.any(melspec == 0):
            melspec = np.where(melspec == 0, 1e-9, melspec)
        melspec = np.log10(melspec)
        #Drop the non-speech frames
        if VAD_drop == True:
            melspec = melspec[VAD_index, :]
        #Cropping the melspec with length of audiolen
        if melspec.shape[0] >= audiolen/frame_shift:
            center = round(melspec.shape[0] / 2)
            melspec = melspec[round(center - audiolen/frame_shift/2) : round(center + audiolen/frame_shift/2), :]
            #Add results to list sequentially
            x.append(melspec)
            if binary_label == 0 or binary_label ==1 :
                y.append(binary_label)
        #In case of audio is shorter than audiolen
        #else:
        #    print("\rAudio file:" + file + " has been skipped.\nBecause the audio is shorter than audiolen.\n")
    #Finish the progress bar
    bar = "#" * math.ceil(nfiles/unit)
    print("\rProgress:[{0}] {1}/{2} Completed!  ".format(bar, i+1, nfiles), end="")
    print()
    #Convert into numpy array
    x = np.array(x)
    y = np.array(y)
    #Return the result
    return x, y
### Function to change the learning rate for each epoch ###
def step_decay(x):
    """Exponential learning-rate schedule for Keras' LearningRateScheduler.

    Reads the module-level globals ``learn_rate`` (initial rate) and
    ``lr_decay`` (decay exponent per epoch), which are set in __main__.

    Args:
        x: current epoch index.

    Returns:
        learn_rate * 10 ** (-lr_decay * x)
    """
    return learn_rate * 10 ** (-lr_decay * x)
### Function for executing CNN learning ###
def CNN_learning(train_x, train_y, test_x, test_y, detect_label, LR, BS, EP, log_path, fold, mode):
    """Train (or reload) the binary CNN detector for one cross-validation fold.

    The input spectrograms are stacked with their 1st and 2nd time
    derivatives as three "color" channels before being fed to the CNN.

    Args:
        train_x, train_y: training spectrograms (file, time, mel) and 0/1 labels.
        test_x, test_y: evaluation spectrograms and labels.
        detect_label: degradation tag used in model/figure file names.
        LR: initial learning rate (decayed per epoch by step_decay).
        BS: batch size.
        EP: number of training epochs.
        log_path: text file that the per-epoch history is appended to.
        fold: cross-validation fold index (used in output file names).
        mode: 1 = load a previously saved model if present, else train.

    Returns:
        Predicted probabilities for test_x (output of the sigmoid unit).
    """
    #Memory saving
    devices = config.experimental.list_physical_devices('GPU')
    if len(devices) > 0:
        for k in range(len(devices)):
            config.experimental.set_memory_growth(devices[k], True)
            print('memory growth:', config.experimental.get_memory_growth(devices[k]))
    else:
        print("Not enough GPU hardware devices available")
    #Calculate the 1st and 2nd derivative (file, time, Mel-frequency, derivative)
    diff1 = np.diff(test_x, n=1, axis=1)[:, 1:, :, np.newaxis] #Trim the data for corresponding to 2nd derivative
    diff2 = np.diff(test_x, n=2, axis=1)[:, :, :, np.newaxis]
    test_X = test_x[:, 2:, :, np.newaxis] #Trim the data for corresponding to 2nd derivative
    test_X = np.concatenate([test_X, diff1], 3)
    test_X = np.concatenate([test_X, diff2], 3)
    #Delete the valuables to save memory
    del diff1
    del diff2
    del test_x
    gc.collect()
    #Path for saving CNN model
    p1 = "./models/Normal_running_speech/" + detect_label + "_" + str(fold+1) + "model.json"
    p2 = "./models/Normal_running_speech/" + detect_label + "_" + str(fold+1) + "weights.h5"
    #In case of existing pre-learned model
    if os.path.isfile(p1) and os.path.isfile(p2) and mode == 1:
        #Read the pre-learned model
        with open(p1, "r") as f:
            cnn_model = model_from_json(f.read())
        cnn_model.load_weights(p2)
    #In case of learning from the beginning
    else:
        #Calculate the 1st and 2nd derivative (file, time, Mel-frequency, derivative)
        diff1 = np.diff(train_x, n=1, axis=1)[:, 1:, :, np.newaxis] #Trim the data for corresponding to 2nd derivative
        diff2 = np.diff(train_x, n=2, axis=1)[:, :, :, np.newaxis]
        train_X = train_x[:, 2:, :, np.newaxis] #Trim the data for corresponding to 2nd derivative
        train_X = np.concatenate([train_X, diff1], 3)
        train_X = np.concatenate([train_X, diff2], 3)
        #Delete the valuables to save memory
        del diff1
        del diff2
        del train_x
        gc.collect()
        #Get the number of row and column in Mel-spectrogram
        row = train_X.shape[1]
        column = train_X.shape[2]
        #print("input_data_shape: " + str(train_X.shape) )
        #Define the input size(row, column, color)
        image_size = Input(shape=(row, column, 3))
        #Construct the CNN model with Functional API by Keras
        # VGG-like stack: three conv groups (32/64/128 filters) with
        # BatchNorm after every conv, then GAP + regularized dense head.
        # The commented-out layers are the deeper variants that were tried.
        x = BatchNormalization()(image_size)
        x = Conv2D(32, (3, 3), padding='same', activation="relu")(x)
        x = BatchNormalization()(x)
        x = Conv2D(32, (3, 3), padding='same', activation="relu")(x)
        x = BatchNormalization()(x)
        #x = Conv2D(32, (3, 3), padding='same', activation="relu")(x)
        #x = BatchNormalization()(x)
        x = MaxPooling2D((2, 2), strides=(2, 2))(x)
        x = Conv2D(64, (3, 3), padding='same', activation="relu")(x)
        x = BatchNormalization()(x)
        x = Conv2D(64, (3, 3), padding='same', activation="relu")(x)
        x = BatchNormalization()(x)
        #x = Conv2D(64, (3, 3), padding='same', activation="relu")(x)
        #x = BatchNormalization()(x)
        x = MaxPooling2D((2, 2), strides=(2, 2))(x)
        x = Conv2D(128, (3, 3), padding='same', activation="relu")(x)
        x = BatchNormalization()(x)
        x = Conv2D(128, (3, 3), padding='same', activation="relu")(x)
        x = BatchNormalization()(x)
        x = Conv2D(128, (3, 3), padding='same', activation="relu")(x)
        x = BatchNormalization()(x)
        #x = MaxPooling2D((2, 2), strides=(2, 2))(x)
        #x = Conv2D(256, (3, 3), padding='same', activation="relu")(x)
        #x = BatchNormalization()(x)
        #x = Conv2D(256, (3, 3), padding='same', activation="relu")(x)
        #x = BatchNormalization()(x)
        #x = Conv2D(256, (3, 3), padding='same', activation="relu")(x)
        #x = BatchNormalization()(x)
        #x = MaxPooling2D((2, 2), strides=(2, 2))(x)
        #x = Conv2D(512, (3, 3), padding='same', activation="relu")(x)
        #x = BatchNormalization()(x)
        #x = Conv2D(512, (3, 3), padding='same', activation="relu")(x)
        #x = BatchNormalization()(x)
        #x = Conv2D(512, (3, 3), padding='same', activation="relu")(x)
        #x = BatchNormalization()(x)
        x = GlobalAveragePooling2D()(x)
        #x = Flatten()(x)
        #x = Dense(256, activation='relu')(x)
        x = Dense(256, activation='relu', kernel_regularizer=l2(0.01))(x)
        x = BatchNormalization()(x)
        x = Dense(1, activation='sigmoid')(x)
        #Construct the model and display summary
        cnn_model = Model(image_size, x)
        #print(cnn_model.summary())
        #Define the optimizer (SGD with momentum or Adam)
        # NOTE(review): `lr=` is deprecated in newer Keras (use `learning_rate=`);
        # confirm the pinned TensorFlow version before upgrading.
        opt = SGD(lr=LR, momentum=0.9, decay=0.0)
        #opt = Adam(lr=LR, beta_1=0.9, beta_2=0.999)
        #Compile the model
        cnn_model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['acc'])
        #Start learning
        # Local name shadows the module-level lr_decay float only inside this
        # scope; step_decay still reads the global value.
        lr_decay = LearningRateScheduler(step_decay)
        hist = cnn_model.fit(train_X, train_y, batch_size=BS, epochs=EP, validation_data=(test_X, test_y), callbacks=[lr_decay], verbose=1)
        #Save the learned model
        model_json = cnn_model.to_json()
        with open(p1, 'w') as f:
            f.write(model_json)
        cnn_model.save_weights(p2)
        #Save the learning history as text file
        loss = hist.history['loss']
        acc = hist.history['acc']
        val_loss = hist.history['val_loss']
        val_acc = hist.history['val_acc']
        with open(log_path, "a") as fp:
            fp.write("epoch\tloss\tacc\tval_loss\tval_acc\n")
            for i in range(len(acc)):
                fp.write("%d\t%f\t%f\t%f\t%f" % (i, loss[i], acc[i], val_loss[i], val_acc[i]))
                fp.write("\n")
        #Display the learning history
        plt.rcParams.update({'font.size': 14})
        fig, (axL, axA) = plt.subplots(ncols=2, figsize=(18, 5))
        #Loss function
        axL.plot(hist.history['loss'], label="loss for training")
        axL.plot(hist.history['val_loss'], label="loss for validation")
        axL.set_title('model loss')
        axL.set_xlabel('epoch')
        axL.set_ylabel('loss')
        axL.legend(loc='upper right')
        #Score
        axA.plot(hist.history['acc'], label="accuracy for training")
        axA.plot(hist.history['val_acc'], label="accuracy for validation")
        axA.set_title('model accuracy')
        axA.set_xlabel('epoch')
        axA.set_ylabel('accuracy')
        axA.legend(loc='lower right')
        #plt.show()
        #Save the graph
        fig.savefig("./models/Normal_running_speech/" + detect_label + "_" + str(fold+1) + "loss_accuracy.png")
    #Get the score for evaluation data
    proba_y = cnn_model.predict(test_X)
    #Restart the session to relieve the GPU memory (to prevent Resource Exhausted Error)
    backend.clear_session()
    #backend.get_session() #Less than tensorflow ver.1.14
    del cnn_model
    gc.collect()
    #Sleep 1 minute for cooling down the GPU
    #time.sleep(60)
    #Return the learning history and binary score
    return proba_y
### Function for calculating AUC(Area Under ROC Curve) and its standard error ###
def get_AUC(test_y, proba_y, detect_label, fold):
    """Compute the ROC-AUC (area under the ROC curve) for one fold.

    Args:
        test_y: ground-truth 0/1 labels.
        proba_y: predicted probabilities from the CNN.
        detect_label, fold: unused here; kept for interface compatibility
            (they were only consumed by the disabled ROC-plotting code).

    Returns:
        The ROC-AUC score as a float.
    """
    return roc_auc_score(test_y, proba_y)
### Main ###
if __name__ == "__main__":
    # Pipeline: (1) build or load Mel-spectrogram datasets, (2) standardize,
    # (3) run cv-fold cross validation of the binary CNN detector,
    # (4) report AUC / detection-rate with 95% confidence intervals.
    #Set up
    audiolen = 3 #Cropping time for audio (second) [Default]3
    frame_length = 0.03 #STFT window width (second) [Default]0.03
    frame_shift = 0.02 #STFT window shift (second) [Default]0.02
    Mel_scale = 700 #Mel-frequency is proportional to "log(f/Mel_scale + 1)" [Default]700
    Mel_cutf = [0, None] #The cutoff frequency (Hz, Hz) of Mel-filter [Default] [0, None(=Nyquist)]
    Mel_channel = 40 #The number of frequency channel for Mel-spectrogram [Default]40
    Mel_norm = False #Normalize the area underneath each Mel-filter into 1 [Default]False
    VAD_drop=False #Drop non-speech frames by voice activity detection
    detect_label = "NZ" #Degradation what to detect (DT, NZ, RV)
    learn_rate = 1e-2 #Lerning rate for CNN training [Default]1e-2
    lr_decay = 0.1 #Lerning rate is according to "learn_rate*10**(-lr_decay*n_epoch)" [Default]0.1
    batch_size = 64 #Size of each batch for CNN training [Default]64
    epoch = 20 #The number of repeat for CNN training [Default]20
    cv = 10 #The number of folds for cross varidation [Default]10
    Melmode = 0 #0: calculate mel from the beginning, 1: read local files [Default]0
    CNNmode = 0 #0: train from the beginning, 1: read pre-learned model [Default]0
    num_train = 1600 #The number of each training data [Default]1600 (<4350)
    num_test = 200 #The number of each evaluation data [Default]200 (<205)
    #In case of calculating the Mel-Spectrogram from audio
    if Melmode == 0:
        #Define the class names for ***training data***
        classes = ['CLEAN', 'DT', 'NZ', 'RV', 'NR']
        if detect_label == "CLEAN":
            binary_labels = [1, 0, 0, 0, 0]
        elif detect_label == "DT":
            binary_labels = [0, 1, 0, 0, 0]
        elif detect_label == "NZ":
            binary_labels = [0, 0, 1, 0, 1]
        elif detect_label == "RV":
            binary_labels = [0, 0, 0, 1, 1]
        #Call my function for calculating Mel-spectrogram
        for i, cl in enumerate(classes):
            fpath = "./audio_data/Normal_running_speech/training/" + cl
            x, y = get_melspec(fpath, binary_labels[i], audiolen, frame_length, frame_shift, Mel_scale, Mel_cutf, Mel_channel, Mel_norm, VAD_drop)
            #Pick up randomly
            indices = np.arange(int(x.shape[0]))
            indices = np.random.choice(indices, size=num_train, replace=False)
            x = x[indices]
            y = y[indices]
            if i == 0:
                train_x, train_y = x, y
            else:
                train_x = np.concatenate((train_x, x), axis=0)
                train_y = np.concatenate((train_y, y), axis=0)
        #Save the training data
        fpath = "./numpy_files/Normal_running_speech/training"
        np.save(fpath + '/X_' + detect_label + 'train', train_x)
        np.save(fpath + '/Y_' + detect_label + 'train', train_y)
        #Define the class names for ***evaluation data***
        classes = ['CLEAN', 'DT', 'NZ', 'RV', 'OTHERS', 'NR']
        if detect_label == "CLEAN":
            binary_labels = [1, 0, 0, 0, 0, 0]
        elif detect_label == "DT":
            binary_labels = [0, 1, 0, 0, 0, 0]
        elif detect_label == "NZ":
            binary_labels = [0, 0, 1, 0, 0, 1]
        elif detect_label == "RV":
            binary_labels = [0, 0, 0, 1, 0, 1]
        #Repeat every classes
        test_xs = []
        for i, cl in enumerate(classes):
            #Calculating Mel-spectrogram of true-class data
            fpath = "./audio_data/Normal_running_speech/evaluation/" + cl
            x, y = get_melspec(fpath, binary_labels[i], audiolen, frame_length, frame_shift, Mel_scale, Mel_cutf, Mel_channel, Mel_norm, VAD_drop)
            #Pick up
            x = x[:num_test]
            y = y[:num_test]
            #Save the test data
            fpath = "./numpy_files/Normal_running_speech/evaluation"
            np.save(fpath + '/X_' + cl + 'test', x)
            test_xs.append(x) #test data is used for cross validation
    #In case of reading the Mel-spectrogram from local file
    else:
        #Read the training data and evaluation data
        fpath = "./numpy_files/Normal_running_speech/training"
        train_x = np.load(fpath + '/X_' + detect_label + 'train' + '.npy')
        train_y = np.load(fpath + '/Y_' + detect_label + 'train' + '.npy')
        #Define the class names for ***evaluation data***
        classes = ['CLEAN', 'DT', 'NZ', 'RV', 'OTHERS', 'NR']
        #Repeat every classes
        test_xs = []
        for i, cl in enumerate(classes):
            #Read the data from local file
            fpath = "./numpy_files/Normal_running_speech/evaluation"
            x = np.load(fpath + '/X_' + cl + 'test.npy')
            test_xs.append(x) #test data is used for cross validation
    #Standardlize the input data (statistics taken from the training set only)
    ave = np.average(train_x, axis=None)
    std = np.std(train_x, axis=None)
    train_x = (train_x - ave)/std
    for i in range(len(test_xs)):
        test_xs[i] = (test_xs[i] - ave)/std
    #Prepare for process-log
    message = "Training for " + detect_label + "-detector\n\n"
    log_path = "./log/" + detect_label + "_" + datetime.now().strftime("%Y%m%d_%H%M%S") + ".txt"
    with open(log_path, "w") as fp:
        fp.write(message)
    #Define parameters for cross validation
    true_cl = classes.index(detect_label)
    total_files = test_xs[0].shape[0]
    test_files = math.floor(total_files / cv)
    enu = list(range(total_files))
    #Initialize the vector for AUC score and Detection-Rate
    AUC_vector = np.zeros(cv)
    DR_vector = np.zeros(cv)
    #Repeat every fold
    for fold in range(cv):
        #Get randomly test sampling without replacement
        test_i = random.sample(enu, k=test_files)
        train_i = list(set(enu) - set(test_i)) #The remain is for training
        #Get the test data and class label for true-class
        true_x = test_xs[true_cl]
        test_x = true_x[test_i]
        #FIX: dtype=np.int crashes on NumPy >= 1.24 (the alias was removed);
        #the builtin int is the documented replacement.
        test_y = np.ones(test_files, dtype=int) #True-class=1
        #Remain test data is used as training data
        plus_x = true_x[train_i]
        plus_y = np.ones(plus_x.shape[0], dtype=int)
        train_X = np.concatenate((train_x, plus_x), axis=0)
        train_Y = np.concatenate((train_y, plus_y), axis=0)
        #Construct test data (True-Class + Others)
        for cl in range(6):
            #Extend the test data except for true-class
            if cl != true_cl:
                #For outlier
                if cl == 4:
                    x = test_xs[cl]
                    outlier_i = list(range(50, 50+test_files)) #Indices are fixed
                    x = x[outlier_i]
                #For other than outlier
                else:
                    x = test_xs[cl]
                    x = x[test_i]
                test_x = np.concatenate((test_x, x), axis=0)
        #Construct class label (True-Class=1, NR=0 or 1, Others=0)
        for cl in range(6):
            #Extend the class label except for true-class
            if cl != true_cl:
                #For NR
                if cl == 5:
                    if true_cl == 2 or true_cl == 3: #In the case of NZ or RV
                        y = np.ones(test_files, dtype=int)
                    else: #In the case of CLEAR or DT
                        y = np.zeros(test_files, dtype=int)
                #For other than NR
                else:
                    y = np.zeros(test_files, dtype=int)
                test_y = np.concatenate([test_y, y], axis=0)
        #Get the start time
        start = time.time()
        #Call my function for executing CNN learning (train_X = train_x + plus_x dataset)
        proba_y = CNN_learning(train_X, train_Y, test_x, test_y, detect_label, learn_rate, batch_size, epoch, log_path, fold, CNNmode)
        #Call my function for calculating the AUC
        A = get_AUC(test_y, proba_y, detect_label, fold)
        AUC_vector[fold] = A
        #Output the binary accuracy (Detection Rate)
        pred_y = np.where(proba_y < 0.5, 0, 1) #Binary threshold = 0.5
        B = accuracy_score(test_y, pred_y)
        DR_vector[fold] = B
        print(classification_report(test_y, pred_y))
        #Construct the process log
        finish = time.time() - start
        report = "Fold{}: AUC_{}={:.5f}, Detection_rate={:.5f}, Process_time={:.1f}sec\n".format(fold+1, classes[true_cl], A, B, finish)
        message = message + report
        print(report)
    #Average the result of cv-folds
    AUC = np.average(AUC_vector)
    DR = np.average(DR_vector)
    SE_AUC = np.std(AUC_vector) / np.sqrt(cv-1) #Population variance in cross varidation
    SE_DR = np.std(DR_vector) / np.sqrt(cv-1)
    #Output the result
    report = "AUC_{}={:.5f}, CI(95%)=±{:.5f}, Detection_rate={:.5f}, CI(95%)=±{:.5f}".format(detect_label, AUC, 1.96*SE_AUC, DR, 1.96*SE_DR)
    message = message + report
    print(report)
    with open(log_path, "a") as fp:
        fp.write(message)
"yuki-saishu1013@outlook.jp"
] | yuki-saishu1013@outlook.jp |
ba3735ce85ff98c1207cea5f7fb64581dc6899ad | a86877bb3c786dc7b3f0ae7042002bddc34b55e2 | /validator_balance.py | c1b2c04729e27fa8fd1725c9149f18b51bc476b4 | [] | no_license | Toruitas/validator_balance | 6ead03d848001a5bfce99cbe37e46f61ba7b2e72 | f9a7fa9d3c4b96f39cbfeb87026d1d17f918379b | refs/heads/main | 2023-04-03T00:03:38.125098 | 2021-04-05T06:55:12 | 2021-04-05T06:55:12 | 320,023,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,000 | py | import os
import requests
import time
import math
import pathlib
from datetime import datetime, date, timezone
from signal import signal, SIGINT
from sys import exit
import pandas as pd
from coinbase.wallet.client import Client
def handler(signal_received, frame):
    """SIGINT handler: announce the interrupt, then terminate with status 0.

    Registered via ``signal(SIGINT, handler)``; both arguments are
    supplied by the signal machinery and are not used here.
    """
    print('SIGINT or CTRL-C detected. Exiting gracefully')
    exit(0)
if __name__ == '__main__':
    # Poll beaconcha.in for validator balances forever, pricing each new
    # epoch in USD/GBP via Coinbase and appending rows to lifetime,
    # per-day and per-year CSV ledgers.
    signal(SIGINT, handler)
    print('Running. Press CTRL-C to exit.')
    # ADD YOUR OWN VALIDATORS HERE (Max 10):
    validators = [
        # '0xa68266429de6906469b825fbe01d70b5d155963dd0d0cd640b907f1da136de843638c0fb8ec6ba62660308ae2ecbf782',
        # '0x9891e4522462230f6cdce5fc78dba7p8a99d6e82cc476feda0f91b6e8bd88f430038f086f90b2bea2f2fd9a2fa940897c',
    ]
    if len(validators) < 1:
        print('No validators added, please add validators before starting the program')
        exit(0)
    # Coinbase credentials come from the environment; missing vars yield None
    # and the client will fail on first use.
    coinbase_client = Client(os.environ.get("COINBASE_API_KEY"), os.environ.get("COINBASE_SECRET"))
    SECONDS_PER_SLOT = 12
    SLOTS_PER_EPOCH = 32
    SYNC_EVERY_N_EPOCHS = 3
    GWEI_PER_ETH = 1000000000
    BEACONCHAIN_BASE_URL = "https://beaconcha.in/api/v1"
    # Retry back-off state for the two remote APIs (grows by 15s per failure).
    beaconchain_timeout = 15
    beaconchain_timed_out = False
    coinbase_timeout = 15
    pathlib.Path('./csvs/lifetime/').mkdir(parents=True, exist_ok=True)
    pathlib.Path('./csvs/annual/').mkdir(parents=True, exist_ok=True)
    pathlib.Path('./csvs/daily/').mkdir(parents=True, exist_ok=True)
    # Initialize csv files w/ correct headers
    # (also migrates pre-GBP CSVs by adding the two GBP columns)
    for v in validators:
        try:
            df = pd.read_csv(f'csvs/lifetime/{v}.csv', index_col=0)
            if "balance_gbp" not in df:
                df["balance_gbp"] = 0
            if "delta_gbp" not in df:
                df["delta_gbp"] = 0
            df.to_csv(f'csvs/lifetime/{v}.csv')
        except FileNotFoundError as e:
            df = pd.DataFrame(columns = ["timestamp", "datetime_utc","epoch","effective_balance_eth","balance_eth","delta_eth","balance_usd","delta_usd","balance_gbp","delta_gbp"])
            df.to_csv(f'csvs/lifetime/{v}.csv')
    # Loop through validators, check for most recent epochs.
    while True:
        # open or create today's csv. Using UTC.
        now_utc = datetime.now(timezone.utc)
        today = now_utc.date()
        try:
            df_today = pd.read_csv(f'csvs/daily/{today}.csv', index_col=0)
            if "balance_gbp" not in df_today:
                df_today["balance_gbp"] = 0
            if "delta_gbp" not in df_today:
                df_today["delta_gbp"] = 0
            df_today.to_csv(f'csvs/daily/{today}.csv')
        except FileNotFoundError as e:
            df_today = pd.DataFrame(columns = ["timestamp", "datetime_utc","validator","epoch","effective_balance_eth","balance_eth","delta_eth","balance_usd","delta_usd","balance_gbp","delta_gbp"])
            df_today.to_csv(f'csvs/daily/{today}.csv')
        try:
            df_this_year = pd.read_csv(f'csvs/annual/{today.year}.csv', index_col=0)
            if "balance_gbp" not in df_this_year:
                df_this_year["balance_gbp"] = 0
            if "delta_gbp" not in df_this_year:
                df_this_year["delta_gbp"] = 0
            df_this_year.to_csv(f'csvs/annual/{today.year}.csv')
        except FileNotFoundError as e:
            df_this_year = pd.DataFrame(columns = ["timestamp", "datetime_utc","validator","epoch","effective_balance_eth","balance_eth","delta_eth","balance_usd","delta_usd","balance_gbp","delta_gbp"])
            df_this_year.to_csv(f'csvs/annual/{today.year}.csv')
        try:
            # get ETH_USD
            eth_usd_price = float(coinbase_client.get_spot_price(currency_pair = 'ETH-USD').amount) # only check this once for the whole loop through validators
            eth_gbp_price = float(coinbase_client.get_spot_price(currency_pair = 'ETH-GBP').amount) # only check this once for the whole loop through validators
            coinbase_timeout = 15
        except requests.ConnectionError as e:
            print(f"Unable to connect to Coinbase API, retrying in for {coinbase_timeout} seconds.")
            time.sleep(coinbase_timeout)
            coinbase_timeout += 15
            continue
        for v in validators:
            print(f"Updating balance sheet for validator: {v}")
            datapoints = [] # list of rows to add to DF.
            df = pd.read_csv(f'csvs/lifetime/{v}.csv', index_col=0)
            if len(df) > 0:
                last_recorded_epoch = df['epoch'].iloc[-1]
            else:
                last_recorded_epoch = 0
            try:
                history = requests.get(f"{BEACONCHAIN_BASE_URL}/validator/{v}/balancehistory")
                beaconchain_timeout = 15
                beaconchain_timed_out = False
            except requests.ConnectionError as e:
                print(f"Unable to connect to Beaconchain API, retrying in {beaconchain_timeout} seconds.")
                time.sleep(beaconchain_timeout)
                beaconchain_timeout += 15
                beaconchain_timed_out = True
                break
            # NOTE(review): debug print of the raw Response object — consider removing.
            print(history)
            data = history.json().get('data')
            if not data:
                print("No data found, is the validator public key correctly entered?")
                continue
            # API returns newest epoch first; collect only epochs newer than
            # the last one already recorded in the lifetime CSV.
            for epoch in data:
                if epoch['epoch'] > last_recorded_epoch:
                    balance_eth = (epoch["balance"]/GWEI_PER_ETH)
                    balance_usd = balance_eth*eth_usd_price
                    balance_gbp = balance_eth*eth_gbp_price
                    # leave deltas to 0 for now, we'll re-calculate shortly
                    row_to_add = {
                        "timestamp": int(time.time()),
                        "datetime_utc": str(now_utc),
                        "epoch": epoch["epoch"],
                        "effective_balance_eth": epoch["effectivebalance"]/GWEI_PER_ETH,
                        "balance_eth": balance_eth,
                        "delta_eth": 0,
                        "balance_usd": balance_usd,
                        "delta_usd": 0,
                        "balance_gbp":balance_gbp,
                        "delta_gbp":0
                    }
                    datapoints.append(row_to_add)
                else:
                    # break and go to next validator
                    break
            # if we have datapoints, we want to reverse the row, so the oldest are first and newest last. The API returns newest first.
            # The CSV has more recent entries appended to the bottom.
            if len(datapoints) > 0:
                datapoints = datapoints[::-1]
                # get the most recently saved balance info
                # calculate deltas
                for idx, dp in enumerate(datapoints):
                    if idx == 0:
                        if len(df) > 0:
                            last_eth_balance = df['balance_eth'].iloc[-1]
                            last_usd_balance = df['balance_usd'].iloc[-1]
                            last_gbp_balance = df['balance_gbp'].iloc[-1]
                            delta_eth = dp["balance_eth"] - last_eth_balance
                            delta_usd = delta_eth * eth_usd_price # don't want to do the delta between last usd balance and current, as there may have been price flux. Price flux goes into capital gains/losses
                            delta_gbp = delta_eth * eth_gbp_price
                            dp["delta_eth"] = delta_eth
                            dp["delta_usd"] = delta_usd
                            dp["delta_gbp"] = delta_gbp
                    else:
                        delta_eth = dp["balance_eth"] - datapoints[idx-1]["balance_eth"]
                        delta_usd = delta_eth * eth_usd_price
                        delta_gbp = delta_eth * eth_gbp_price
                        dp["delta_eth"] = delta_eth
                        dp["delta_usd"] = delta_usd
                        dp["delta_gbp"] = delta_gbp
                # save to the continuous/lifetime csv
                # NOTE(review): DataFrame.append was removed in pandas 2.0
                # (use pd.concat) — confirm the pinned pandas version.
                pd_datapoints = pd.DataFrame(datapoints)
                df = df.append(pd_datapoints, ignore_index=True)
                df.to_csv(f'csvs/lifetime/{v}.csv')
                # save to today's dataframe
                pd_datapoints['validator'] = v
                df_today = df_today.append(pd_datapoints, ignore_index=True)
                df_today.to_csv(f'csvs/daily/{today}.csv')
                df_this_year = df_this_year.append(pd_datapoints, ignore_index=True)
                df_this_year.to_csv(f'csvs/annual/{today.year}.csv')
                print("Validator records updated to epoch: ", df['epoch'].iloc[-1])
            else:
                # NOTE(review): iloc[-1] raises IndexError when the lifetime CSV
                # is still empty — confirm intended behavior for brand-new validators.
                print("No new values found in epoch ", df['epoch'].iloc[-1])
        # Sleep roughly SYNC_EVERY_N_EPOCHS epochs unless we are already
        # backing off from a Beaconchain failure.
        if not beaconchain_timed_out:
            time.sleep(SECONDS_PER_SLOT*SLOTS_PER_EPOCH*SYNC_EVERY_N_EPOCHS)
| [
"Toruitas@gmail.com"
] | Toruitas@gmail.com |
2678d77fed1226a6cd4934656ab9ca6859e5298c | 4c7f9f8048af21d19a7fead24649e34e6351a585 | /Trees/0437E-Trees_PathSum3.py | 691f966ecc3d8cc9b98c4f02abb1ae1ce3cd3b8a | [] | no_license | prashuym/IKPrograms | f374e0cf36d48a2af0ad0bed62d18ed40a3a0e3b | 9f041c71a0e16a4e573cd631a133fa828882cac2 | refs/heads/master | 2023-05-04T03:34:30.711315 | 2021-05-16T20:01:43 | 2021-05-16T20:01:43 | 367,973,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | """
437. Path Sum III
https://leetcode.com/problems/path-sum-iii/
You are given a binary tree in which each node contains an integer value.
Find the number of paths that sum to a given value.
The path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes).
The tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000.
Example:
root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8
10
/ \
5 -3
/ \ \
3 2 11
/ \ \
3 -2 1
Return 3. The paths that sum to 8 are:
1. 5 -> 3
2. 5 -> 2 -> 1
3. -3 -> 11
"""
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution:
    def pathSum(self, root: "TreeNode", sum: int) -> int:
        """Count downward paths whose node values add up to ``sum``.

        Walks the tree depth-first while keeping the values of the
        current root-to-node path in a deque (current node at the left).
        At each node, the running prefix sums over that deque enumerate
        every downward path that ends at the node, so each match is
        counted exactly once.

        The annotation is a string literal so this snippet can be
        imported standalone: the TreeNode definition is supplied
        externally (by the LeetCode judge) and is commented out above,
        which made the original bare annotation raise NameError.

        Args:
            root: root of the binary tree (or None).
            sum: target path sum (name kept for LeetCode compatibility).

        Returns:
            The number of downward paths summing to ``sum``.
        """
        if root is None:
            return 0
        count = [0]  # boxed so the nested function can mutate it

        def dfs(node, path):
            path.appendleft(node.val)
            running = 0
            for value in path:  # prefix sums = paths ending at `node`
                running += value
                if running == sum:
                    count[0] += 1
            if node.left:
                dfs(node.left, path)
            if node.right:
                dfs(node.right, path)
            path.popleft()  # backtrack

        dfs(root, deque())
        return count[0]
| [
"prashanth.ym@gmail.com"
] | prashanth.ym@gmail.com |
825f86ddf91c727bd5b1653cd7c3ca38f4092a48 | 4ca98884a7f50fba5a52faecaa078887fd0f7027 | /main.py | 31272f4af8ebdd1c1ce4b990f9b8fb5dbb0ecdd7 | [] | no_license | Shoulders93/robots_vs_dinos_proj | 7443c2d7aaf6eedea786ce33a7cbd6d0078916cc | dba4f9662502b2e4dd14557bdea2d5f3984267d2 | refs/heads/main | 2023-07-02T10:22:08.462903 | 2021-08-11T21:08:23 | 2021-08-11T21:08:23 | 394,742,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | # MAIN: Controller/Initator of Application
# Author: Kory Attleson
# Create Date: August 10, 2021
# Imports:
from battlefield import Battlefield
# ** Instantiation of Objects
battle_one = Battlefield()
index = 0
while (index <= 2 ):
battle_one.battle_round(index)
index += 1 | [
"kory.attleson@gmail.com"
] | kory.attleson@gmail.com |
0a67f22bf611349ea64dd4a19978048e26fc6ecd | bc295af499fcd312bdc4f2b1b5a9795297e356ee | /Tools/ProjectTemplate/gen_project.py | 1fabcdd3c5a55f716ddef80187ffab51dd6a2de7 | [
"MIT"
] | permissive | r-koubou/KSPCompiler | 744756060b5dd0b71c09109647d2e1301b2f3d62 | bc1956c98d84e0e36830b1aa0f910fcf86b2e044 | refs/heads/develop | 2022-10-09T03:57:46.240107 | 2022-09-30T21:00:04 | 2022-09-30T21:00:04 | 327,079,773 | 0 | 1 | MIT | 2022-09-30T21:00:05 | 2021-01-05T18:05:16 | C# | UTF-8 | Python | false | false | 2,205 | py | import os
import sys
import re
AUTHOR = 'R-Koubou'
PROJECT_NAME_PREFIX = 'KSPCompiler.'
TARGET_LANGVERSION = '10'
TARGET_FRAMEWORK = 'netstandard2.1'
REPO_URL = 'https://github.com/r-koubou/KSPCompiler'
PROJECT_TYPE_MODULE = 'module'
PROJECT_TYPE_CLI = 'cliapp'
THIS_DIR = os.path.dirname( sys.argv[ 0 ] )
SUFFIX = '.csproj'
def replace(template, options, is_test_project):
    """Fill in the $$...$$ placeholders of a csproj template.

    `options` must contain 'project_name' and 'project_type'.  CLI apps get
    an extra 'Apps.' segment in the assembly name.  `is_test_project` is
    accepted for signature compatibility but is not used here.
    """
    name = options['project_name']
    if options['project_type'] == PROJECT_TYPE_CLI:
        full_name = PROJECT_NAME_PREFIX + 'Apps.' + name
    else:
        full_name = PROJECT_NAME_PREFIX + name
    substitutions = {
        '$$PROJECT_NAME$$': full_name,
        '$$LANGVER$$': TARGET_LANGVERSION,
        '$$FRAMEWORK$$': TARGET_FRAMEWORK,
        '$$AUTHOR$$': AUTHOR,
        '$$REPO_URL$$': REPO_URL,
    }
    text = template
    for placeholder, value in substitutions.items():
        text = text.replace(placeholder, value)
    return text
def generate(project_type, project_name, is_test_project):
    """Render one csproj from its template into out/<type>/<name>/.

    Silently does nothing when the matching template file is missing.
    Test projects use the ".Tests" template variant and get a ".Tests"
    suffix appended to the project name.
    """
    if is_test_project:
        template_file = os.path.join(THIS_DIR, "Template.{type}.Tests.csproj".format(type=project_type))
        project_name += ".Tests"
    else:
        template_file = os.path.join(THIS_DIR, "Template.{type}.csproj".format(type=project_type))

    if not os.path.exists(template_file):
        return

    with open(template_file) as handle:
        rendered = replace(
            handle.read(),
            {
                'project_type': project_type,
                'project_name': project_name
            },
            is_test_project
        )

    dest_dir = os.path.join('out', project_type, project_name)
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)

    with open(os.path.join(dest_dir, project_name + SUFFIX), 'w') as handle:
        handle.write(rendered)

    print("Done : {name}".format(name=project_name))
if __name__ == '__main__':
    # Usage: gen_project.py <project_type> <project_name>
    # Generates the main project first; the ".Tests" companion follows below.
    generate( sys.argv[ 1 ], sys.argv[ 2 ], False )
generate( sys.argv[ 1 ], sys.argv[ 2 ], True ) | [
"dev.r.koubou@gmail.com"
] | dev.r.koubou@gmail.com |
7ec60c9aaf44e817a790fadc0527baa4d6712d68 | 377dc973a58d30154cf485de141223d7ca5424dd | /havok_classes/hclBoneSpaceMeshMeshDeformPOperator.py | caf24857b50a0bf6d6d6365702255e1558e84921 | [
"MIT"
] | permissive | sawich/havok-reflection | d6a5552f2881bb4070ad824fb7180ad296edf4c4 | 1d5b768fb533b3eb36fc9e42793088abeffbad59 | refs/heads/master | 2021-10-11T12:56:44.506674 | 2019-01-25T22:37:31 | 2019-01-25T22:37:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | from .hclBoneSpaceMeshMeshDeformOperator import hclBoneSpaceMeshMeshDeformOperator
from typing import List
from .common import get_array
from .hclBoneSpaceDeformerLocalBlockP import hclBoneSpaceDeformerLocalBlockP
from .hclBoneSpaceDeformerLocalBlockUnpackedP import hclBoneSpaceDeformerLocalBlockUnpackedP
class hclBoneSpaceMeshMeshDeformPOperator(hclBoneSpaceMeshMeshDeformOperator):
    """Reflection class for Havok's hclBoneSpaceMeshMeshDeformPOperator.

    Deserializes the operator's position ("P") deformer blocks directly
    from a Havok binary stream via the shared `get_array` helper.
    """
    # Packed per-block position deformer data.
    localPs: List[hclBoneSpaceDeformerLocalBlockP]
    # Unpacked per-block position deformer data.
    localUnpackedPs: List[hclBoneSpaceDeformerLocalBlockUnpackedP]
    def __init__(self, infile):
        # NOTE(review): no super().__init__ call here, so base-class fields are
        # presumably read elsewhere -- confirm against the sibling operators.
        self.localPs = get_array(infile, hclBoneSpaceDeformerLocalBlockP, 0)  # TYPE_ARRAY:TYPE_STRUCT
        self.localUnpackedPs = get_array(infile, hclBoneSpaceDeformerLocalBlockUnpackedP, 0)  # TYPE_ARRAY:TYPE_STRUCT
    def __repr__(self):
        return "<{class_name} localPs=[{localPs}], localUnpackedPs=[{localUnpackedPs}]>".format(**{
            "class_name": self.__class__.__name__,
            "localPs": self.localPs,
            "localUnpackedPs": self.localUnpackedPs,
        })
| [
"kevin@turtlerockweb.com"
] | kevin@turtlerockweb.com |
b493f45018a023f4c471707e7d525822ac064772 | 16d98f5cc4c82e94b1dbb7fb8a7cc79b97d8490e | /try.py | 8e86562a1022deea1a5c8a401ea843f463e59e83 | [] | no_license | VitalyLub/WhatsApp | b67f670d123bf9683023a31c2868edf696831c60 | 1de7874aece1f062f47c171c07f4cb36b66c2126 | refs/heads/master | 2020-03-31T05:17:55.428803 | 2019-07-15T13:40:18 | 2019-07-15T13:40:18 | 151,940,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import urllib.request
chromedriver = "C:\\Users\\DELL\\Desktop\\chromedriver_new.exe"
driver = webdriver.Chrome(chromedriver)
print(1)
sleep(2)
# Open the news front page first so the session has a normal browsing context.
driver.get("https://www.ynet.co.il/")
print(2)
sleep(2)
# Open the target image in a second browser tab.
driver.execute_script('''window.open("https://images1.ynet.co.il/PicServer5/2018/09/26/8789574/878957146962264640360no.jpg","_blank");''')
print(3)
sleep(2)
# Switch WebDriver focus to the newly opened tab.
driver.switch_to.window(driver.window_handles[1])
print(3)
sleep(2)
# Download the image shown in the current tab to the working directory.
# The numbered prints throughout are simple progress markers.
urllib.request.urlretrieve(driver.current_url, "local-filename.jpg")
print(4)
sleep(10)
# Close the image tab; control returns to the remaining window.
driver.execute_script('''close();''')
print(5)
sleep(2)
input() | [
"vitlub@gmail.com"
] | vitlub@gmail.com |
6068a2355da2e16dda6e8c8ff1e8707c9f2103dc | 5d8f77c5bc7e48287b0f1a85ec6c907cc559b51d | /examples/test_remap.py | d4cae9fc7af73e1510339fa38c4bd3d33f7af8da | [
"BSD-3-Clause"
] | permissive | Keck-FOBOS/enyo | 7be008a58a930fd468b4a42eadb9fe1b6fe87238 | 82dd4324083d456c78bcbafdd081bee53f0c7ba9 | refs/heads/master | 2021-06-06T15:35:50.362217 | 2020-05-07T18:39:49 | 2020-05-07T18:39:49 | 142,364,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py |
import numpy
from matplotlib import pyplot
from enyo.etc import spectrographs
# Blue-arm optical model of the TMT-WFOS spectrograph (project-local class).
tmtb = spectrographs.TMTWFOSBlueOpticalModel()
# Synthetic detector image: two horizontal and two vertical unit-value lines
# form a grid whose distortion is easy to inspect after projection.
test_img = numpy.zeros((100,50), dtype=float)
wave0 = 3110.
pixelscale = 0.05153458543289052
dispscale = 15 #0.1995
test_img[20,:] = 1
test_img[60,:] = 1
test_img[:,10] = 1
test_img[:,30] = 1
#pyplot.imshow(test_img, origin='lower', interpolation='nearest', aspect='auto')
#pyplot.show()
# Project the synthetic image through the optical model at an off-axis field point.
spec, spec0, spat0 \
        = tmtb.project_2d_spectrum(test_img, pixelscale, wave0, dispscale, field_coo=numpy.array([-3,0.5]))
print(spec0, spat0, spec.shape)
| [
"kbwestfall@gmail.com"
] | kbwestfall@gmail.com |
0053a3e99d0f634ff943c57913d718871bd4c2db | 62b17976af83cb1d50ea68c628b2bc9cadc03e01 | /Python Program/Python progamming 56.py | 36693ca35c5f006f3c2d2bedcb20cd73ad821f5e | [] | no_license | CDinuwan/Python | 16a7efa0c6c697ff3ae21ca28c9ba8e602e626f9 | 5777b7e4c9e32cd3fe3fd6e145644b2e7bab4b8a | refs/heads/master | 2020-09-27T17:07:07.045210 | 2020-01-02T21:05:55 | 2020-01-02T21:05:55 | 226,565,691 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | from threading import *
from time import sleep
class Hello(Thread):
    """Thread that prints "Hello" five times, one second apart."""

    def run(self):
        for _ in range(5):
            print("Hello")
            sleep(1)
class Hi(Thread):
    """Thread that prints "Hi" five times, one second apart."""

    def run(self):
        for _ in range(5):
            print("Hi")
            sleep(1)
t1=Hello()
# NOTE(review): this sleep runs before either thread is started, so it only
# delays construction of t2; it does not stagger the printed output.
sleep(0.2)
t2=Hi()
t1.start()
t2.start()
# Wait for both printer threads to finish before continuing.
t1.join()
t2.join()
print("Bye") | [
"noreply@github.com"
] | noreply@github.com |
066aca54dc4e77f1df2ebfed38e74746bed83ef5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /MNePwAcuoKG9Cza8G_9.py | ca7802d54b45b17f9c0920804d670942c9f44253 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | """
Create a function that builds a staircase given the height and the type of
building block.
### Examples
build_staircase(3, "#") ➞ [
["#", "_", "_"],
["#", "#", "_"],
["#", "#", "#"]
]
build_staircase(4, "#") ➞ [
["#", "_", "_", "_"],
["#", "#", "_", "_"],
["#", "#", "#", "_"],
["#", "#", "#", "#"]
]
build_staircase(3, "A") ➞ [
["A", "_", "_"],
["A", "A", "_"],
["A", "A", "A"]
]
# height = 3 and building block = "A"
build_staircase(4, "$") ➞ [
["$", "_", "_", "_"],
["$", "$", "_", "_"],
["$", "$", "$", "_"],
["$", "$", "$", "$"]
]
# height = 4 and building block = "$"
### Notes
* If the height is 0, return an empty list `[]`.
* See **Comments** or **Resources** for help.
"""
def build_staircase(height, block):
    """Return a staircase of `height` rows built from `block` characters.

    Row i (1-based) is i copies of `block` followed by underscores to pad
    the row to `height`, returned as a list of single characters.
    A height of 0 yields an empty list.
    """
    return [
        list(step * block + "_" * (height - step))
        for step in range(1, height + 1)
    ]
return lst2
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
3a67dfbb83beaadc84afff4128c56fbf545219a6 | 3970706a16be81a63b2476222c1b061da9f11b70 | /estimator/trainer/model.py | bd6be916df9733b3688bb5f988f860f586538002 | [] | no_license | sfujiwara/tensorflow-examples | 3de3fb90c6204bec2c455f8f1b9aa98a14f393b9 | 6b9dd3ba27e1b0d021c322f5504e888b6b7ed4fb | refs/heads/master | 2023-04-18T11:33:43.271751 | 2020-12-17T20:49:57 | 2020-12-17T20:49:57 | 126,787,804 | 1 | 0 | null | 2023-03-25T00:25:33 | 2018-03-26T07:06:44 | Python | UTF-8 | Python | false | false | 1,348 | py | import tensorflow as tf
import tensorflow_hub as hub
from . import vgg
def model_fn(features, labels, mode, params):
    """tf.estimator model_fn: VGG16 feature extractor + dense softmax head.

    `params` must provide 'n_classes' (int) and 'optimizer' (a tf optimizer
    instance).  `labels` are sparse integer class ids.
    """
    # Extract inputs
    x = features
    # Build ResNet
    # module = hub.Module(
    #     'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/1',
    #     trainable=True,
    #     tags={'train'}
    # )
    # x = module(x)
    # Build VGG16
    x = vgg.build_vgg16_graph(img_tensor=x, trainable=True, include_top=False)
    x = tf.layers.dense(x, 256, activation=tf.nn.relu)
    logits = tf.layers.dense(x, params['n_classes'], activation=None)
    # Build loss
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Build training operation (only in TRAIN mode; otherwise no-op)
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_global_step()
        train_op = params['optimizer'].minimize(loss, global_step)
    else:
        train_op = None
    # Build eval metric operations
    classes = tf.argmax(logits, axis=1)
    probabilities = tf.nn.softmax(logits)  # NOTE(review): computed but unused
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(labels=labels, predictions=classes)
    }
    # Build EstimatorSpec
    estimator_spec = tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops,
    )
    return estimator_spec
| [
"shuhei.fujiwara@gmail.com"
] | shuhei.fujiwara@gmail.com |
061b1affaf6cf7ed68f4aac28a116e5504741a63 | 73dc835b32320a9b77de94233d61b6dd63626a0d | /AnnScratch/Optimization_derivatives.py | 2a7c0dfd24f2ce57672f3daa30ce071d9ede680a | [] | no_license | Yamassindir/DLScratch | 2b5105b8945e276451667ad580604c5feb9c2b07 | 1ca9de3cc973da6cb94f2a53e187396f6fdb8ae6 | refs/heads/main | 2023-09-04T12:42:30.005495 | 2021-10-24T11:41:25 | 2021-10-24T11:41:25 | 420,666,459 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,415 | py | import numpy as np
import nnfs
from nnfs.datasets import spiral_data
import matplotlib.pyplot as plt
nnfs.init()
#Mean Absolute Error
#One-hot coding classes: 5 label: 3 One-hot Vector : [0,0,0,1,0] => dim 5 and 1 in position 3
#target class e.g : 0 - dog 1 - cat 2 - human then: class_target[dog,cat,cat] => [0,1,1]
#for the confidence on correct labels, the target class is the axis of the confidence (softmax_output)
# log(0)=infinity so to manage this problem : clip to maintain y_pred between 1e-7 and 1 - 1-e-7
#np.random.seed(0)
class Layer_Dense:
    """Fully connected layer: output = inputs . weights + biases."""

    def __init__(self, n_inputs, n_neurons):
        # Small random weights; one zero bias row broadcast over the batch.
        self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)
        self.biases = np.zeros((1, n_neurons))

    def forward(self, inputs):
        """Store the affine transform of `inputs` in self.output."""
        self.output = inputs @ self.weights + self.biases
class Activation_ReLU:
    """Rectified linear unit: clamps every negative activation to zero."""

    def forward(self, inputs):
        self.output = np.where(inputs > 0, inputs, 0)
#print(np.sum(X, axis=1, keepdims=True))
class Activation_Softmax:
    """Row-wise softmax producing a probability distribution per sample."""

    def forward(self, inputs):
        # Shift by the row max first so exp() cannot overflow.
        shifted = inputs - inputs.max(axis=1, keepdims=True)
        exponentials = np.exp(shifted)
        self.output = exponentials / exponentials.sum(axis=1, keepdims=True)
class Loss:
    """Base loss: averages per-sample losses computed by self.forward()."""

    def calculate(self, output, y):
        """Return the mean per-sample loss over the whole batch."""
        per_sample = self.forward(output, y)
        return np.mean(per_sample)
class Loss_CategoricalCrossentropy(Loss):
    """Categorical cross-entropy for sparse or one-hot encoded targets."""

    def forward(self, y_pred, y_true):
        """Return -log(confidence of the true class) for every sample.

        `y_true` may be a vector of class indices (1-D) or a one-hot
        matrix (2-D).  Predictions are clipped away from 0 and 1 so the
        logarithm stays finite.
        """
        clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)
        if y_true.ndim == 1:
            # Sparse labels: pick each sample's predicted confidence directly.
            confidences = clipped[range(len(y_pred)), y_true]
        elif y_true.ndim == 2:
            # One-hot labels: the row-wise product keeps only the true class.
            confidences = (clipped * y_true).sum(axis=1)
        return -np.log(confidences)
#nnfs to call data
X, y = spiral_data(samples=100, classes=3)
# Visualize the raw spiral dataset before running the network.
plt.scatter(X[:,0], X[:,1], c=y, s=40, cmap='brg')
plt.show()
# Two dense layers: 2 inputs -> 3 ReLU units -> 3 softmax outputs.
dense1 = Layer_Dense(2,3)
activation1 = Activation_ReLU()
dense2 = Layer_Dense(3,3)
activation2 = Activation_Softmax()
# Forward pass through the whole stack (no training step yet).
dense1.forward(X)
activation1.forward(dense1.output)
dense2.forward(activation1.output)
activation2.forward(dense2.output)
print(activation2.output[:5])
# Mean categorical cross-entropy of the untrained network's predictions.
loss_function = Loss_CategoricalCrossentropy()
loss = loss_function.calculate(activation2.output, y)
print('loss', loss)
#check the accuracy | [
"elayadiyassir@gmail.com"
] | elayadiyassir@gmail.com |
f5e0aeb169439c5f1239907db60154effacdcfcf | 0b83ee211ed07f8bc71c30526e9a753509455c0f | /PSI_3/zad5.py | da29ddcbe6c3d8511b7f0474f49dcddd5fd1034e | [] | no_license | bopopescu/PSI | 725ba600507efa0237bed5c254d6663a2ca51379 | 842e4562c62aee2adbd460f30c72e8e8062d0d2b | refs/heads/master | 2022-11-20T08:30:22.950258 | 2020-01-27T20:26:18 | 2020-01-27T20:26:18 | 281,215,924 | 0 | 0 | null | 2020-07-20T20:11:58 | 2020-07-20T20:11:57 | null | UTF-8 | Python | false | false | 402 | py | # Stwórz klasę Calculator, która będzie posiadać funkcje add, difference, multiply, divide.
class Calculator:
    """Two-operand calculator whose operands are fixed at construction."""

    def __init__(self, a, b):
        self.a, self.b = a, b

    def add(self):
        """Sum of the two operands."""
        return self.a + self.b

    def difference(self):
        """First operand minus the second."""
        return self.a - self.b

    def multiply(self):
        """Product of the two operands."""
        return self.a * self.b

    def divide(self):
        """True division of the first operand by the second."""
        return self.a / self.b
| [
"mateusz.suchodolski@op.pl"
] | mateusz.suchodolski@op.pl |
06420430580006854788cf4a3ce0ea532bcc0bc0 | 09f349ef909cfebeceb53b7fe19c42d1921a0336 | /dayoweek.py | 9d8c71c9c445205e03d5bcb177ba4b7b05c41709 | [] | no_license | Ktoh20/CS550 | 400812a5d179d0ff6d76318c47c5be54efdbf206 | 394a26ce9dd553de98725646109e15c131d71514 | refs/heads/master | 2020-03-28T06:16:57.388267 | 2019-01-17T15:05:23 | 2019-01-17T15:05:23 | 147,824,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | m = int(input("month:"))
d = int(input("day:"))
y = int(input("year:"))
yo = y - (14 - m) / 12
x = yo + (yo / 4) - (yo / 100) + (yo / 400)
mo = m + 12 * ((14-m) / 2) - 2
do = ((d + x + (31 * mo) / 12) % 7)//1
if do == 0:
print("Sunday")
if do == 1:
print("Monday")
if do == 2:
print("Tuesday")
if do == 3:
print("Wednesday")
if do == 4:
print("Thursday")
if do == 5:
print("Friday")
if do == 6:
print("Saturday")
| [
"kyn@Kyns-MacBook-Pro.local"
] | kyn@Kyns-MacBook-Pro.local |
d4cc56e909f5dd97a9a4bdffe759695a086e3c64 | ce5c8734d0a1d9514b2060fad1415ff2de08f39d | /swapping.py | 1e0b279aa423b056bd6de24329bd188e31612aae | [] | no_license | adarshranjan97/basic-c-programs- | 2cb45118933651bb84173570751d7654ab2f1950 | 086228c14a608663a8552265c842794e524c95c2 | refs/heads/master | 2020-12-11T09:39:39.559913 | 2020-02-09T13:44:49 | 2020-02-09T13:44:49 | 233,811,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | x = input('Enter value of x: ')
y = input('Enter value of y: ')
# Classic three-variable swap via a temporary; values stay strings.
temp = x
x = y
y = temp
print('The value of x after swapping: {}'.format(x))
print('The value of y after swapping: {}'.format(y))
| [
"adarshranjan97@gmail.com"
] | adarshranjan97@gmail.com |
0650ddd5b53ae894b14710e57667b03be6394595 | 45b2841f0897ed8f4b6a9212d114f4b3fcbd9ccb | /node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi | 3eb7e2a2aa46223038b55076f7aab60fe2818aa8 | [
"MIT"
] | permissive | Marina294/React-redux-lab | 60a73c23d6adfa32dae682831c91bae15e54479f | ae47235ecc55687a09c18cdd168211dfe5f109fe | refs/heads/main | 2023-03-21T00:43:42.234350 | 2021-03-16T03:59:58 | 2021-03-16T03:59:58 | 347,878,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,705 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/marina/Library/Caches/node-gyp/14.15.4",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/marina/.npm-init.js",
"userconfig": "/Users/marina/.npmrc",
"cidr": "",
"node_version": "14.15.4",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/marina/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.10 node/v14.15.4 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/3f/jhnsrw_s3qnf_6_g9m8hvlsw0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"69162947+Marina294@users.noreply.github.com"
] | 69162947+Marina294@users.noreply.github.com |
4a21cfe915d4331d57e62f089173967a54428c0d | 4999cd60fc1f798ffbd52520949db001f2e2303f | /13_jinja2/task_13_1.py | 3b587286020a872754b92f6813d31b7cd0f56a6c | [] | no_license | AntonGorynya/PyNeng | d22b3052c5b719b09899d7fd8d37a82ac25208a6 | 3b48f962bdfccd5093351003b220595f80bfb3ae | refs/heads/master | 2021-01-19T11:11:55.235126 | 2017-08-09T16:07:08 | 2017-08-09T16:07:08 | 87,942,049 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | #! /usr/bin/python3
# -*- coding: utf-8 -*-
from jinja2 import Environment, FileSystemLoader
import importlib
import yaml
import sys
#importlib.reload(sys)
#sys.setdefaultencoding('utf-8')
TEMPLATE_DIR, template = sys.argv[1].split('/')
VARS_FILE = sys.argv[2]
env = Environment(loader = FileSystemLoader(TEMPLATE_DIR), trim_blocks=True)
template = env.get_template(template)
vars_dict = yaml.load( open( VARS_FILE ) )
print (template.render( vars_dict )) | [
"antongorynya1@gmail.com"
] | antongorynya1@gmail.com |
cce8ac41c03f69087d7d505aa9aed578f35c342c | c825435a97fc241cfdd9293ee997affbf4119cb3 | /visualizer/0007_auto_20160419_0857.py | 4af623a32ce7e528563b4e8b4a91c85cbc7efec6 | [] | no_license | mousahakim/agviewer | 4d1897a639b1dba8fa760f99fe9090901b6b3097 | a9daf0cde4d9e85c9315b46e725fa3d721e01e69 | refs/heads/master | 2021-06-04T00:28:24.290084 | 2018-11-07T07:07:44 | 2018-11-07T07:07:44 | 106,172,707 | 1 | 0 | null | 2017-10-08T12:51:49 | 2017-10-08T11:57:00 | JavaScript | UTF-8 | Python | false | false | 646 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `visualizer` app.

    Widens `AppUser.fc_salt` to 200 characters and makes `AppUser.user`
    a one-to-one primary key onto the configured auth user model.
    """

    dependencies = [
        ('visualizer', '0006_auto_20160419_0855'),
    ]

    operations = [
        migrations.AlterField(
            model_name='appuser',
            name='fc_salt',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='appuser',
            name='user',
            field=models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"ubuntu@ip-172-31-45-143.us-west-2.compute.internal"
] | ubuntu@ip-172-31-45-143.us-west-2.compute.internal |
f4f528b083b136208c8546a7389e46a52d973736 | 503d0beaa32bd9857cf4a3462b7b7a681b09037c | /library/bigip_iapp_template.py | ca029990e4e76e3065fb8401ce8f3326584e82c8 | [
"Apache-2.0",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown",
"GPL-3.0-only"
] | permissive | jmcalalang/Ansible_Meetups | 8e22f747a3f0d3dd3d988a8eb09543324d8f0959 | 61aef7bace967f8917fd1967ba1387e58b4c980e | refs/heads/master | 2021-01-19T16:08:53.691831 | 2018-08-03T16:34:52 | 2018-08-03T16:34:52 | 100,988,119 | 5 | 14 | Apache-2.0 | 2018-02-20T04:50:27 | 2017-08-21T20:04:12 | Python | UTF-8 | Python | false | false | 16,094 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_iapp_template
short_description: Manages TCL iApp templates on a BIG-IP.
description:
- Manages TCL iApp templates on a BIG-IP. This module will allow you to
deploy iApp templates to the BIG-IP and manage their lifecycle. The
conventional way to use this module is to import new iApps as needed
or by extracting the contents of the iApp archive that is provided at
downloads.f5.com and then importing all the iApps with this module.
This module can also update existing iApps provided that the source
of the iApp changed while the name stayed the same. Note however that
this module will not reconfigure any services that may have been
created using the C(bigip_iapp_service) module. iApps are normally
not updated in production. Instead, new versions are deployed and then
existing services are changed to consume that new template. As such,
the ability to update templates in-place requires the C(force) option
to be used.
version_added: "2.3"
options:
force:
description:
- Specifies whether or not to force the uploading of an iApp. When
C(yes), will force update the iApp even if there are iApp services
using it. This will not update the running service though. Use
C(bigip_iapp_service) to do that. When C(no), will update the iApp
only if there are no iApp services using the template.
required: False
default: None
choices:
- yes
- no
name:
description:
- The name of the iApp template that you want to delete. This option
is only available when specifying a C(state) of C(absent) and is
provided as a way to delete templates that you may no longer have
the source of.
required: False
default: None
content:
description:
- Sets the contents of an iApp template directly to the specified
value. This is for simple values, but can be used with lookup
plugins for anything complex or with formatting. C(content) must
be provided when creating new templates.
required: False
default: None
state:
description:
- Whether the iRule should exist or not.
required: False
default: present
choices:
- present
- absent
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Add the iApp contained in template iapp.tmpl
bigip_iapp_template:
content: "{{ lookup('template', 'iapp.tmpl') }}"
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
delegate_to: localhost
- name: Update a template in place
bigip_iapp_template:
content: "{{ lookup('template', 'iapp-new.tmpl') }}"
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
delegate_to: localhost
- name: Update a template in place that has existing services created from it.
bigip_iapp_template:
content: "{{ lookup('template', 'iapp-new.tmpl') }}"
force: yes
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
delegate_to: localhost
'''
RETURN = '''
'''
import re
import uuid
from ansible.module_utils.basic import BOOLEANS
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iteritems,
defaultdict,
iControlUnexpectedHTTPError
)
from f5.utils.iapp_parser import (
IappParser,
NonextantTemplateNameException
)
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Parameters(AnsibleF5Parameters):
    """Parameter container/translator for the bigip_iapp_template module.

    Values live in ``self._values`` (missing keys read as ``None``); the
    ``api_map`` provided by the base class / module developer translates
    Ansible option names to BIG-IP REST attribute names.
    """
    api_attributes = []
    returnables = []

    def __init__(self, params=None):
        # defaultdict so unknown keys read as None instead of raising KeyError.
        self._values = defaultdict(lambda: None)
        if params:
            self.update(params=params)

    def update(self, params=None):
        """Merge ``params`` into the stored values, honoring ``api_map``."""
        if params:
            for k, v in iteritems(params):
                if self.api_map is not None and k in self.api_map:
                    map_key = self.api_map[k]
                else:
                    map_key = k
                # Handle weird API parameters like `dns.proxy.__iter__` by
                # using a map provided by the module developer
                class_attr = getattr(type(self), map_key, None)
                if isinstance(class_attr, property):
                    # There is a mapped value for the api_map key
                    if class_attr.fset is None:
                        # If the mapped value does not have an associated setter
                        self._values[map_key] = v
                    else:
                        # The mapped value has a setter
                        setattr(self, map_key, v)
                else:
                    # If the mapped value is not a @property
                    self._values[map_key] = v

    @property
    def name(self):
        """Template name: the explicit option, or parsed from ``content``."""
        if self._values['name']:
            return self._values['name']
        if self._values['content']:
            try:
                name = self._get_template_name()
                return name
            except NonextantTemplateNameException:
                # Bug fix: this previously *returned* the F5ModuleError
                # instance instead of raising it, so callers received an
                # exception object as the template "name".
                raise F5ModuleError(
                    "No template name was found in the template"
                )
        return None

    @property
    def content(self):
        """Template source, normalized for REST comparisons and creation."""
        if self._values['content'] is None:
            return None
        result = self._squash_template_name_prefix()
        if self._values['name']:
            result = self._replace_template_name(result)
        return result

    @property
    def checksum(self):
        # Populated by BIG-IP once a checksum-generation request has run.
        return self._values['tmplChecksum']

    def to_return(self):
        """Collect the returnable values for the module result.

        Deliberately best-effort: any failure yields a partial (possibly
        empty) result rather than aborting the module run.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result

    def api_params(self):
        """Collect the attribute dict to send to the BIG-IP REST API."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result

    def _squash_template_name_prefix(self):
        """Removes the template name prefix

        The IappParser in the SDK treats the partition prefix as part of
        the iApp's name. This method removes that partition from the name
        in the iApp so that comparisons can be done properly and entries
        can be created properly when using REST.

        :return string
        """
        pattern = r'sys\s+application\s+template\s+/Common/'
        replace = 'sys application template '
        return re.sub(pattern, replace, self._values['content'])

    def _replace_template_name(self, template):
        """Replaces template name at runtime

        To allow us to do the switch-a-roo with temporary templates and
        checksum comparisons, we need to take the template provided to us
        and change its name to a temporary value so that BIG-IP will create
        a clone for us.

        :return string
        """
        pattern = r'sys\s+application\s+template\s+[^ ]+'
        replace = 'sys application template {0}'.format(self._values['name'])
        return re.sub(pattern, replace, template)

    def _get_template_name(self):
        """Parse the template name out of ``content`` via the F5 SDK parser."""
        parser = IappParser(self.content)
        tmpl = parser.parse_template()
        return tmpl['name']
class ModuleManager(object):
    """Drives the desired-state logic for BIG-IP iApp templates.

    Compares the template provided by the user (``self.want``) against the
    one on the device and creates, updates, or removes it accordingly.
    All device access goes through the f5-sdk iControl REST client held in
    ``self.client``.
    """
    def __init__(self, client):
        # client: AnsibleF5Client wrapping both the Ansible module and the REST API
        self.client = client
        # want: parameters the user supplied; changes: what we will report back
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()
    def exec_module(self):
        """Dispatch on the requested state and return the Ansible result dict."""
        result = dict()
        changed = False
        state = self.want.state
        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            # Re-raise REST-level failures as module errors so main() can report them.
            raise F5ModuleError(str(e))
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result
    def present(self):
        """Ensure the template exists, updating it in place when it already does."""
        if self.exists():
            return self.update()
        else:
            return self.create()
    def update(self):
        """Replace the on-device template when its content differs from ours.

        Returns True when a change was (or, in check mode, would be) made.
        """
        self.have = self.read_current_from_device()
        if not self.templates_differ():
            return False
        # Refuse to touch a template that services depend on unless forced.
        if not self.want.force and self.template_in_use():
            return False
        if self.client.check_mode:
            return True
        self._remove_iapp_checksum()
        # The same process used for creating (load) can be used for updating
        self.create_on_device()
        self._generate_template_checksum_on_device()
        return True
    def template_in_use(self):
        """Return True if any application service is built from this template."""
        collection = self.client.api.tm.sys.application.services.get_collection()
        fullname = '/{0}/{1}'.format(self.want.partition, self.want.name)
        for resource in collection:
            if resource.template == fullname:
                return True
        return False
    def read_current_from_device(self):
        """Fetch the on-device template (with a freshly generated checksum)."""
        self._generate_template_checksum_on_device()
        resource = self.client.api.tm.sys.application.templates.template.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        return Parameters(result)
    def absent(self):
        """Ensure the template does not exist; return True if it was removed."""
        changed = False
        if self.exists():
            changed = self.remove()
        return changed
    def exists(self):
        """Return True if the named template exists on the device."""
        result = self.client.api.tm.sys.application.templates.template.exists(
            name=self.want.name,
            partition=self.want.partition
        )
        return result
    def _remove_iapp_checksum(self):
        """Removes the iApp tmplChecksum
        This is required for updating in place or else the load command will
        fail with a "AppTemplate ... content does not match the checksum"
        error.
        :return:
        """
        resource = self.client.api.tm.sys.application.templates.template.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.modify(tmplChecksum=None)
    def templates_differ(self):
        """Return True when the provided template differs from the device's copy."""
        # BIG-IP can generate checksums of iApps, but the iApp needs to be
        # on the box to do this. Additionally, the checksum is MD5, but it
        # is not an MD5 of the entire content of the template. Instead, it
        # is a hash of some portion of the template that is unknown to me.
        #
        # The code below is responsible for uploading the provided template
        # under a unique name and creating a checksum for it so that that
        # checksum can be compared to the one of the existing template.
        #
        # Using this method we can compare the checksums of the existing
        # iApp and the iApp that the user is providing to the module.
        backup = self.want.name
        # Override whatever name may have been provided so that we can
        # temporarily create a new template to test checksums with
        self.want.update({
            'name': 'ansible-{0}'.format(str(uuid.uuid4()))
        })
        # Create and remove temporary template
        temp = self._get_temporary_template()
        # Set the template name back to what it was originally so that
        # any future operations only happen on the real template.
        self.want.update({
            'name': backup
        })
        if temp.checksum != self.have.checksum:
            return True
        return False
    def _get_temporary_template(self):
        """Upload a throwaway copy, read it back (checksum included), delete it."""
        self.create_on_device()
        temp = self.read_current_from_device()
        self.remove_from_device()
        return temp
    def _generate_template_checksum_on_device(self):
        """Run `tmsh generate ... checksum` so the device computes tmplChecksum."""
        generate = 'tmsh generate sys application template {0} checksum'.format(
            self.want.name
        )
        self.client.api.tm.util.bash.exec_cmd(
            'run',
            utilCmdArgs='-c "{0}"'.format(generate)
        )
    def create(self):
        """Create the template on the device; raise if it fails to appear."""
        if self.client.check_mode:
            return True
        self.create_on_device()
        if self.exists():
            return True
        else:
            raise F5ModuleError("Failed to create the iApp template")
    def create_on_device(self):
        """Upload the template content and load it via tmsh.

        The REST file-transfer endpoint places uploads under
        /var/config/rest/downloads, which is why the load command points there.
        """
        remote_path = "/var/config/rest/downloads/{0}".format(self.want.name)
        load_command = 'tmsh load sys application template {0}'.format(remote_path)
        template = StringIO(self.want.content)
        upload = self.client.api.shared.file_transfer.uploads
        upload.upload_stringio(template, self.want.name)
        output = self.client.api.tm.util.bash.exec_cmd(
            'run',
            utilCmdArgs='-c "{0}"'.format(load_command)
        )
        # tmsh reports parse failures in its command output rather than a
        # non-zero status, so scan the result text for syntax errors.
        if hasattr(output, 'commandResult'):
            result = output.commandResult
            if 'Syntax Error' in result:
                raise F5ModuleError(output.commandResult)
    def remove(self):
        """Delete the template; raise if it still exists afterwards."""
        if self.client.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the iApp template")
        return True
    def remove_from_device(self):
        """Issue the REST delete for the named template."""
        resource = self.client.api.tm.sys.application.templates.template.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.delete()
class ArgumentSpec(object):
    """Argument specification consumed by AnsibleF5Client in ``main``.

    Declares the module's parameters, check-mode support, and the F5
    product name.
    """
    def __init__(self):
        # Every code path is simulated safely by ModuleManager in check mode.
        self.supports_check_mode = True
        self.argument_spec = dict(
            name=dict(
                required=False,
                default=None
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            ),
            force=dict(
                # type='bool' already coerces the usual truthy/falsey strings;
                # the deprecated choices=BOOLEANS restriction was redundant.
                required=False,
                default=None,
                type='bool'
            ),
            content=dict(
                required=False,
                default=None
            )
        )
        self.f5_product_name = 'bigip'
        # None of this module's parameters are mutually exclusive. The old
        # sync_device_to_group/sync_group_to_device pair was a copy-paste
        # leftover naming parameters this module never defines.
        self.mutually_exclusive = []
def main():
    """Module entry point: build the F5 client, run the manager, report results."""
    # Fail fast if the top-of-module f5-sdk import did not succeed.
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")
    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name,
        mutually_exclusive=spec.mutually_exclusive
    )
    try:
        mm = ModuleManager(client)
        results = mm.exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as e:
        # Surface module-level failures through Ansible's JSON interface.
        client.module.fail_json(msg=str(e))
if __name__ == '__main__':
    main()
| [
"jon.calalang@gmail.com"
] | jon.calalang@gmail.com |
3511790acca16e8cc73a8ff397d295a53cd4b859 | b961f30a5da2df0e9d95f30437bd907a7f61dd1c | /graph.py | 7c24c8c9ab3957b8d87942ee2da9d3e5a4bdd1ee | [] | no_license | MariBit/IADS | c6beb1dcf0988fb6ca22eb52e4456becf0ad9008 | 20dc451fa49cc2f967f81e0ab49a601d1c7fd61c | refs/heads/master | 2022-09-30T13:57:14.236448 | 2020-06-07T13:19:38 | 2020-06-07T13:19:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,489 | py | import math
import sys
# helper function: calculate euclidean distance between points p and q
def euclid(p, q):
    """Return the Euclidean distance between 2D points p and q."""
    dx = p[0] - q[0]
    dy = p[1] - q[1]
    return math.sqrt(dx * dx + dy * dy)
class Graph:
    """TSP instance with several tour-construction and tour-improvement methods.

    Key state:
      self.n    -- number of nodes
      self.dist -- n x n distance/weight matrix
      self.perm -- current tour, a permutation of 0..n-1
    """
    # parse two kinds of graphs based on the value of n
    # n= -1 case: read points in the Euclidean plane -> Euclidean Graph
    # n>0 case: a general graph in a different format -> non-Euclidean Graph
    # self.perm, self.dists, self.n are the key variables to be set up
    def __init__(self, n, filename):
        """Build the distance matrix from *filename* and start with the identity tour.

        n == -1 -> file holds "x y" coordinate pairs, one node per line;
        n > 0   -> file holds "i j weight" edges for an n-node graph.
        """
        # general case -> assuming general TSP input
        file = open(filename, "r")
        self.n = n
        # euclidean TSP -> count the number of lines in the file and assign it to self.n
        if (n == -1):
            num_lines = 0
            for line in file:
                num_lines = num_lines + 1
            self.n = num_lines
        # initialise self.dist table
        self.dist = [[0 for col in range(self.n)] for row in range(self.n)]
        if n == -1:
            # parse lines from f
            coordinates_table = []
            file = open(filename, "r")
            for line in file:
                coordinates_table.append(line.split())
                # cast distances to integers
                for j in range(2):
                    coordinates_table[-1][j] = int(coordinates_table[-1][j])
            # fill in the self.dist table with distances
            for i in range(len(coordinates_table)):
                for j in range(len(coordinates_table)):
                    self.dist[i][j] = euclid(coordinates_table[i], coordinates_table[j])
        else:
            # parse lines from f
            coordinates_distance_table = []
            file = open(filename, "r")
            for line in file:
                coordinates_distance_table.append(line.split())
            # fill in the self.dist table with weights (symmetric graph)
            for line in range(len(coordinates_distance_table)):
                self.dist[int(coordinates_distance_table[line][0])][int(coordinates_distance_table[line][1])] = int(
                    coordinates_distance_table[line][2])
                self.dist[int(coordinates_distance_table[line][1])][int(coordinates_distance_table[line][0])] = int(
                    coordinates_distance_table[line][2])
        # initialise self.perm with the identity permutation
        self.perm = [i for i in range(self.n)]
        # 2D array for reference DP algorithm for part D
        # set a limit
        if self.n <= 15:
            self.pathLength = [[(-1) for j in range(1 << self.n)] for i in range(self.n)]
    # calculate the cost of the tour (represented by self.perm)
    def tourValue(self):
        """Return the total length of the closed tour stored in self.perm."""
        total_cost = 0
        for i in range(len(self.perm)):
            j = i + 1
            # calculate the cost between 2 adjacent points in self.perm (i and its neighbor j)
            total_cost += self.dist[self.perm[i]][self.perm[j % len(self.perm)]]
        return total_cost
    # attempt the swap of cities i and i+1 in self.perm
    # commit to the swap if it improves the cost of the tour.
    # return True/False depending on success
    def trySwap(self, i):
        """Swap self.perm[i] with its successor; keep the swap only if it helps."""
        cost_before_swap = self.tourValue()
        # swap cities i and i+1 in self.perm
        temp = self.perm[i]
        self.perm[i] = self.perm[(i + 1) % self.n]
        self.perm[(i + 1) % self.n] = temp
        cost_after_swap = self.tourValue()
        # if swap does not improve cost, swap cities again (undo the effect) else keep the swap
        if cost_after_swap > cost_before_swap:
            temp = self.perm[i]
            self.perm[i] = self.perm[(i + 1) % self.n]
            self.perm[(i + 1) % self.n] = temp
            return False
        else:
            return True
    # consider the effect of reversing the segment between
    # self.perm[i] and self.perm[j], and commit to the reversal
    # if it improves the tour value
    # return True/False depending on success.
    def tryReverse(self, i, j):
        """Reverse self.perm[i..j] if doing so shortens the tour (2-opt move)."""
        # calculate start and end of the sequence that will be reversed
        start = self.perm[(i - 1) % self.n]
        end = self.perm[(j + 1) % self.n]
        # compute cost before and after reversal
        # only the two boundary edges change, so comparing them suffices
        dist_before = self.dist[start][self.perm[i]] + self.dist[self.perm[j]][end]
        dist_after = self.dist[start][self.perm[j]] + self.dist[self.perm[i]][end]
        # if cost is improved then reverse else keep the same
        if dist_after < dist_before:
            self.perm[i:j + 1] = self.perm[i:j + 1][::-1]
            return True
        else:
            return False
    # given function to perform trySwap on the self.perm
    def swapHeuristic(self):
        """Repeat adjacent-swap improvements until no swap helps."""
        better = True
        while better:
            better = False
            for i in range(self.n):
                if self.trySwap(i):
                    better = True
    # given function to perform tryReverse on the self.perm
    def TwoOptHeuristic(self):
        """Repeat 2-opt segment reversals until no reversal helps."""
        better = True
        while better:
            better = False
            for j in range(self.n - 1):
                for i in range(j):
                    if self.tryReverse(i, j):
                        better = True
    # the Greedy heuristic which builds a tour starting
    # from node 0, taking the closest (unused) node as 'next' each time
    def Greedy(self):
        """Build self.perm with the nearest-neighbour heuristic from node 0."""
        # start from node 0
        self.perm[0] = 0
        # keep track of the unused nodes
        unused_nodes = []
        for node in range(1, self.n):
            unused_nodes.append(node)
        # find the nearest neighbor(j) for each node(i) and assign it to the best_node
        for i in range(self.n - 1):
            best_node = 0
            best_distance = 1000000000000000000
            for j in unused_nodes:
                if (self.dist[self.perm[i]][j] < best_distance):
                    best_node = j
                    best_distance = self.dist[self.perm[i]][j]
            # after nearest node is added, remove it from unused nodes and continue
            unused_nodes.remove(best_node)
            self.perm[i + 1] = best_node
    # try to reverse self.perm[x:y], if the cost improves, reverse
    def tryReverse3Opt(self, x, y, z):
        """Apply the best of the candidate 3-opt reconnections for cut points x, y, z.

        Returns the (non-positive) change in tour cost, or 0 if no candidate helps.
        """
        #take points to form segments
        p1 = self.perm[x - 1]
        p2 = self.perm[x]
        p3 = self.perm[y - 1]
        p4 = self.perm[y]
        p5 = self.perm[z - 1]
        p6 = self.perm[z % len(self.perm)]
        # calculate distances for different arrangements
        try0 = self.dist[p1][p2] + self.dist[p3][p4] + self.dist[p5][p6]
        try1 = self.dist[p1][p3] + self.dist[p2][p4] + self.dist[p5][p6]
        try2 = self.dist[p1][p2] + self.dist[p3][p5] + self.dist[p4][p6]
        try3 = self.dist[p1][p4] + self.dist[p5][p2] + self.dist[p3][p6]
        try4 = self.dist[p6][p2] + self.dist[p3][p4] + self.dist[p5][p1]
        # find the lowest-cost tour for given sub-tours
        if try0 > try1:
            self.perm[x:y] = reversed(self.perm[x:y])
            return -try0 + try1
        elif try0 > try2:
            self.perm[y:z] = reversed(self.perm[y:z])
            return -try0 + try2
        elif try0 > try4:
            self.perm[x:z] = reversed(self.perm[x:z])
            return -try0 + try4
        elif try0 > try3:
            temp = self.perm[y:z] + self.perm[x:y]
            self.perm[x:z] = temp
            return -try0 + try3
        return 0
    # create all possible edge combinations for segments
    # a helper function used in 3Opt algorithm
    def generateSegments(self, n):
        """Yield every valid (x, y, z) triple of cut points for 3-opt."""
        return ((x, y, z)
                for x in range(self.n)
                for y in range(x + 2, self.n)
                for z in range(y + 2, self.n + (x > 0)))
    # run 3Opt algorithm on self.perm for all possible segment combinations
    def threeOptHeuristic(self):
        """Repeat 3-opt passes until a full pass yields no net improvement."""
        while True:
            difference = 0
            for (seg1, seg2, seg3) in self.generateSegments(self.n):
                difference = difference + self.tryReverse3Opt(seg1, seg2, seg3)
            if difference >= 0:
                break
    # a helper function that calculates the cost of a tour given by a sequence (for variable lengths from 1 to n)
    # used in the implementation of myDynamicAlgorithm in part C
    def tourValueMyAlgorithm(self, sequence):
        """Return the closed-tour cost of an arbitrary (possibly partial) sequence."""
        total_cost = 0
        for i in range(len(sequence)):
            j = i + 1
            # calculate the cost between 2 adjacent points in sequence
            total_cost += self.dist[sequence[i]][sequence[j % len(sequence)]]
        return total_cost
    # starting at node start, the algorithm builds up the solution optimising for the lowest cost at every addition of a new node
    # the length of the sequence increases by 1 in every iteration until len(sequence)=self.n
    def dynamicAlgorithm(self, start):
        """Cheapest-insertion construction of self.perm beginning at *start*."""
        sequence = []  # store the lowest-cost permutation
        sequence.append(start)
        # keep adding nodes until full length
        for node in range(0, self.n):
            best_sequence = []
            best_sequence_cost = 100000000000000000000000000
            if not (node in sequence):
                # try to insert node n at all positions available
                for place in range(node + 1):
                    sequence.insert(place, node)
                    # insert node n at the place that will result in the lowest cost
                    if self.tourValueMyAlgorithm(sequence) < best_sequence_cost:
                        best_sequence = sequence[:]
                        best_sequence_cost = self.tourValueMyAlgorithm(sequence)
                    sequence.pop(place)
                sequence = best_sequence
        #best sequence found for a given starting node is assigned to self.perm
        self.perm = best_sequence
    # try dynamicAlgorithm with all nodes as starting nodes and optimise for the lowest cost
    def myDynamicAlgorithm(self):
        """Run dynamicAlgorithm from every start node; keep the cheapest tour."""
        best_cost = 10000000000000000
        best_perm = []  # store the lowest cost permutation
        # try all nodes as starting nodes - n
        for node in range(self.n):
            self.dynamicAlgorithm(node)
            tryMe = self.perm
            # select the starting node that will result in the lowest cost permutation
            if self.tourValueMyAlgorithm(tryMe) < best_cost:
                best_cost = self.tourValueMyAlgorithm(tryMe)
                best_perm = tryMe
        self.perm = best_perm
    # NOTE: This algorithm is only used for finding the optimal solution for small inputs in part D
    # inspiration from: https://codingblocks.com/resources/travelling-salesman/
    # recursive DP algorithm (exponential time)
    # starting node 0
    # Parameters:
    # visited - range 0-(2^n-1), binary number denoting if a node is visited
    # pos - the node/city which is visited last
    # start - the start node
    def referenceAlgorithm(self, visited=1, position=0, start=0):
        """Held-Karp style bitmask DP returning the optimal tour cost from *start*."""
        # only allow small inputs
        if self.n > 15:
            return False
        allVisitedNodes = (1 << self.n) - 1
        # base case
        if (visited == allVisitedNodes):
            return self.dist[position][start]
        if (self.pathLength[position][visited] != -1):
            # already calculated cost
            return self.pathLength[position][visited]
        cost = sys.maxsize
        for node in range(self.n):
            # check if a certain node is visited
            if (visited & (1 << node) == 0):
                new_cost = self.dist[node][position] + self.referenceAlgorithm((visited | (1 << node)), node, start)
                cost = min(cost, new_cost)
        self.pathLength[position][visited] = cost
        return cost
    # find optimal solution
    # by finding the best starting node
    # returns the minimal cost
    def referenceAlgorithmStart(self):
        """Return the minimal optimal-tour cost over all choices of start node."""
        # only works for small inputs
        if self.n > 15:
            return False
        minimum_cost = sys.maxsize
        for i in range(self.n):
            currrent_cost = self.referenceAlgorithm((1 << i), i, i)
            if minimum_cost > currrent_cost:
                minimum_cost = currrent_cost
        return minimum_cost
if __name__ == "__main__":
    # Ad-hoc driver: compare the optimal DP cost against the heuristic tour
    # for the Euclidean instance stored in the file "cities10t".
    g = Graph(-1, "cities10t")
    print(g.referenceAlgorithmStart())
    g.myDynamicAlgorithm()
    print(g.tourValue())
    #print(g.tourValue())
    #g.TwoOptHeuristic()
    #print(g.tourValue())
    #g.Greedy()
    #print(g.tourValue())
| [
"noreply@github.com"
] | noreply@github.com |
27a9b38fa69c18095d013a8153b8a12d533a2341 | 18b3ad3b0e1f7f10969738251e1201d01dfbc6bf | /Public/2.py | 4a180e054c6e709e9b52ab4d83503fae30a566e1 | [] | no_license | sahthi/backup2 | 11d509b980e731c73733b1399a8143780779e75a | 16bed38f0867fd7c766c2a008c8d43b0660f0cb0 | refs/heads/master | 2020-03-21T12:39:56.890129 | 2018-07-09T08:12:46 | 2018-07-09T08:12:46 | 138,565,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | def Ab(a,b):
try:
c=((a+b)/(a-b))
except ZeroDivisionError:
print "a/b result in 0"
else:
print c
Ab(2,3)
Ab(3,3)
| [
"siddamsetty.sahithi@votarytech.com"
] | siddamsetty.sahithi@votarytech.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.