index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
986,300 | 9cd75d5f61d86ff86d69a30adac273d5dbedbae2 | import networkx as nx
from pylab import *
from statistics import mean
from itertools import groupby
from tqdm import tqdm
from os.path import join
# The graph the opinion model runs on.  The original file left this as a
# bare placeholder ("G = #networkX graph"), which is a SyntaxError; supply
# a concrete default so the script is importable and runnable.
# TODO: replace with the desired input graph.
G = nx.karate_club_graph()
pos = nx.spring_layout(G)
def initialize():
    """Give every node a random binary opinion and reset the step records."""
    global G, nextG, pos, stepaverageopinion
    for vertex in G.nodes():
        coin = random()
        G.nodes[vertex]['state'] = int(coin < 0.5)
    nextG = G.copy()
    stepaverageopinion = {}
def observe():
    """Redraw the network, colouring each node by its current opinion."""
    global G, nextG
    cla()
    colours = [G.nodes[n]['state'] for n in G.nodes()]
    nx.draw(nextG, cmap=cm.binary, vmin=0, vmax=1,
            node_color=colours, edgecolors='black', pos=pos)
def observeOpinion(currentstep):
    """Record the average opinion for *currentstep* and plot the history."""
    global G, nextG, statesdict
    cla()
    statesdict = dict(G.nodes(data='state'))
    stepaverageopinion[currentstep] = mean(statesdict.values())
    history = sorted(stepaverageopinion.items())
    steps, averages = zip(*history)
    plt.plot(steps, averages)
    for step, value in history:
        plt.annotate(str(round(value, 4)), xy=(step, value),
                     xytext=(10, 10), textcoords='offset points')
    plt.title('Average opinion per step')
    plt.xlabel('Step')
    plt.ylabel('Average opinion')
def stopping(currentstep):
    """Return True once the average opinion has reached a steady state.

    A steady state is declared when the last four recorded averages are all
    equal, or when they alternate with period two (value i equals i+2 and
    value i+1 equals i+3).

    Parameters
    ----------
    currentstep : int
        Step index just recorded in the module-global ``stepaverageopinion``.

    Returns
    -------
    bool
        True when a steady state is detected, False otherwise.  (The
        original implicitly returned None when fewer than four averages
        existed; an explicit False is returned instead, which is
        backward-compatible since None is also falsy.)
    """
    def all_equal(values):
        # groupby collapses runs of equal values: exactly one group means
        # every value is the same.
        grouped = groupby(values)
        return next(grouped, True) and not next(grouped, False)

    def two_times_same(values):
        return values[0] == values[2] and values[1] == values[3]

    if len(stepaverageopinion) <= 3:
        return False
    # Window of the last four recorded averages, oldest first.  The helper
    # parameters were renamed from `list`, which shadowed the builtin.
    window = [stepaverageopinion[currentstep - offset] for offset in (3, 2, 1, 0)]
    return bool(all_equal(window) or two_times_same(window))
def update():
    """Advance every node one step by neighbourhood majority vote.

    A node adopts the majority opinion of itself plus its neighbours; an
    exact tie keeps its current state.  The graphs are double-buffered and
    swapped at the end of the pass.
    """
    global G, nextG
    for vertex in G.nodes():
        total = G.nodes[vertex]['state']
        total += sum(G.nodes[nb]['state'] for nb in G.neighbors(vertex))
        share = total / (G.degree(vertex) + 1.0)
        if share > 0.5:
            nextG.nodes[vertex]['state'] = 1
        elif share < 0.5:
            nextG.nodes[vertex]['state'] = 0
        else:
            nextG.nodes[vertex]['state'] = G.nodes[vertex]['state']
    G, nextG = nextG, G
import pycxsimulator
# Run the GUI-driven simulations and collect per-run results keyed by run
# number (1-based).
stepaverageopinionsims = {}
statessims = {}
print('Running simulations:')
for i in tqdm(range(1)):  # range(1): a single run; raise for more repeats
    pycxsimulator.GUI().start(func=[initialize, observe, observeOpinion, stopping, update])
    stepaverageopinionsims[i + 1] = stepaverageopinion
    statessims[i + 1] = statesdict
# NOTE: the original left the three assignments below as bare placeholders
# ("mypath = #path were files should be saved"), which are SyntaxErrors;
# concrete defaults are supplied so the script runs end to end.
mypath = '.'  # TODO: directory where result files should be saved
averageopinionfile = 'average_opinion'  # TODO: output file name (no extension)
with open(join(mypath, (averageopinionfile + ".txt")), 'w') as file:
    file.write(str(stepaverageopinionsims))
statesfile = 'states'  # TODO: output file name (no extension)
with open(join(mypath, (statesfile + ".txt")), 'w') as file:
    file.write(str(statessims))
|
986,301 | ef4f10319a39ea6d51686799ef7645415b846906 | # -*- coding: utf-8 -*-
from mock import patch, MagicMock
from django.test import TestCase
from sender.factories import SmsAPIGateFactory
from sender.sms_handlers import SmsHandler
class SmsHandlerTestCase(TestCase):
    """Tests for SmsHandler: request building, logging and error capture."""

    def setUp(self):
        self.gate = SmsAPIGateFactory()
        self.data = {
            'test': 1
        }

    @patch('requests.post')
    def test_make_request(self, mock_post):
        """_make_request must POST the payload as JSON to the gate URL."""
        handler = SmsHandler(self.gate)
        data = {
            'test': 1
        }
        handler._make_request(data)
        mock_post.assert_called_with(self.gate.url, json=data)

    @patch.object(SmsHandler, '_make_request')
    @patch.object(SmsHandler, '_get_logger')
    def test_send_good_response(self, mock_logger, mock_request):
        """A successful response is logged and leaves no error recorded."""
        response_ok = {
            'status': 'ok'
        }
        mock_request.return_value = response_ok
        logger = MagicMock()
        mock_logger.return_value = logger
        handler = SmsHandler(self.gate)
        handler.send(self.data)
        logger.log_sending.assert_called_with(self.data)
        logger.log_response.assert_called_with(response_ok)
        self.assertIsNone(handler._error)

    @patch.object(SmsHandler, '_make_request')
    @patch.object(SmsHandler, '_get_logger')
    def test_send_error_response(self, mock_logger, mock_request):
        """An error response is logged and its message stored on the handler."""
        response_error = {
            'status': 'error',
            'error_msg': 'test_error'
        }
        mock_request.return_value = response_error
        logger = MagicMock()
        mock_logger.return_value = logger
        handler = SmsHandler(self.gate)
        handler.send(self.data)
        logger.log_response.assert_called_with(response_error)
        logger.log_error.assert_called_with(response_error['error_msg'])
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(handler._error, response_error['error_msg'])

    @patch.object(SmsHandler, '_make_request', side_effect=Exception('test_error'))
    @patch.object(SmsHandler, '_get_logger')
    def test_send_raises(self, mock_logger, mock_request):
        """An exception from the request is logged; no response is logged."""
        logger = MagicMock()
        mock_logger.return_value = logger
        handler = SmsHandler(self.gate)
        handler.send(self.data)
        logger.log_error.assert_called_with('test_error')
        self.assertFalse(logger.log_response.called)
        self.assertEqual(handler._error, 'test_error')
|
986,302 | 2b5c57a76dca1a46adeea6b41230a5edb96d39e1 | """
约翰·沃森(John Watson)多年来一直在为福尔摩斯(Sherlock Holmes)工作,他从未理解如何能够如此轻易地猜出谁是杀手。
然而,在某个特定的夜晚,Sherlock醉得太厉害了,他告诉约翰这个秘密是什么。
“基本亲爱的沃森”,福尔摩斯说。“它绝不是最可疑的,但却是第二个最可疑的”。在他得到这个秘密之后,约翰决定自己解决一个案件,
只是为了测试Sherlock说的是否正确。
给出一个包含N个整数的列表,表示每个人的怀疑程度,帮助John Watson决定谁是杀手
Input
将有几个测试用例。每个测试用例有一个整数开始 N (2 ≤ N ≤ 1000),表示嫌疑人的数目。
下面会有N个不同的整数,其中第 i 个整数,代表第 i 个人的嫌疑,嫌疑值 V,1≤ V ≤10000。
当N = 0时,输入结束。
Output
根据上述方法,对于每个测试用例,打印一行,包含一个整数,代表第几个是凶手。
Sample Input 1
3
3 5 2
5
1 15 3 5 2
0
Sample Output 1
1
4
"""
def findMan(nums):
    """Return the 1-based position of the second-highest suspicion value.

    The problem statement guarantees the N values are distinct, so the
    second-largest value is well defined.  Unlike the original, this does
    not mutate the caller's list (the original removed the maximum from it)
    and avoids the O(n^2) search-and-remove loop.

    Parameters
    ----------
    nums : list of int
        Distinct suspicion values, one per suspect.

    Returns
    -------
    int
        1-based index of the suspect with the second-highest value.
    """
    second = sorted(nums)[-2]
    return nums.index(second) + 1
# Read test cases until a lone 0, then print every answer in order.
result = []
while True:
    count = int(input())
    if count == 0:
        break
    values = [int(token) for token in input().split()]
    result.append(findMan(values))
for answer in result:
    print(answer)
|
986,303 | d508554581bed52a056604afcce9dd9a0fe2d131 | # credit: Olha Babich, Data Scientist at WiserBrand
# changelog by Valentyna Fihurska
#
# added option to use requests library for
# function _get_text_list i.e. user can choose
# whether to use webdriver or requests
from difflib import SequenceMatcher
from bs4 import BeautifulSoup
from selenium import webdriver
import numpy as np
import re
from nltk import tokenize
import random
import requests
# Maps each policy-page category to the anchor-text keywords used to
# recognise links pointing to that kind of page.
key_words_dict = {'privacy_policy': ['privacy policy', 'privacy'],
                  'return_policy': ['return'],
                  'warranty': ['warranty'],
                  'faq': ['faq', 'frequently asked questions'],
                  'shipping': ['shipping', 'delivery'],
                  'terms and conditions': ['terms']
                  }
def _clean_base_url(url):
try:
url_pattern = re.compile(r"http(?:s)?://(?:[-._a-zA-Z0-9])+")
cleaned_url = re.findall(pattern=url_pattern, string=url)[0]
return cleaned_url
except Exception as e:
print('Error in _clean_base_url({})'.format(url), e)
def _prepare_url(url):
try:
to_replace = ['https://', 'http://', 'www.']
for el in to_replace:
if el in url:
url = url.replace(el, '')
return url
except Exception as e:
print('Error in _prepare_url({})'.format(url), e)
def _is_same_domain(checked_url, received_url):
    """True when one normalised URL contains the other (same-domain check)."""
    left = _prepare_url(checked_url)
    right = _prepare_url(received_url)
    return left in right or right in left
def _clean_target_url(url):
if len(url) > 0:
url_pattern = re.compile(r"http[s]?://(?:[#._a-zA-Z0-9\/\-])+")
res = re.findall(pattern=url_pattern, string=url)
if len(res) > 0:
cleaned_url = re.findall(pattern=url_pattern, string=url)[0]
if cleaned_url[-1] == '/':
cleaned_url = cleaned_url[:-1]
return cleaned_url
else:
return None
else:
return None
def _create_link_from_href(href_attr, base_url, skip_words):
    """Build an absolute same-domain link from an anchor's href attribute.

    Returns None for empty hrefs, hrefs containing any of *skip_words*,
    off-domain absolute links, and bare fragment links ending in '#'.
    """
    try:
        # Reject the href as soon as it contains any skip word.
        stop_counter = 0
        for word in skip_words:
            if word in href_attr:
                stop_counter += 1
        if stop_counter > 0:
            return None
        if len(href_attr) < 1:
            return None
        prepared_base_url = _prepare_url(base_url)
        # Absolute link to a foreign domain -> not interesting.
        if 'http' in href_attr and not _is_same_domain(prepared_base_url, href_attr):
            return None
        if prepared_base_url in href_attr:
            # Same-domain absolute link: use it as-is.
            if href_attr[0] != '/':
                target_link = href_attr
        elif len(href_attr) > 2:
            # '//host/path' is protocol-relative; '/path' is site-relative.
            if href_attr[:2] == '//':
                target_link = 'https:' + href_attr
            elif href_attr[0] == '/':
                target_link = base_url + href_attr
        else:
            # Short relative path such as 'x'.
            target_link = base_url + '/' + href_attr
        # NOTE(review): some branches above leave target_link unassigned
        # (e.g. a same-domain href starting with '/'); the resulting
        # UnboundLocalError is swallowed by the bare except below and the
        # function returns None -- confirm this fallback is intended.
        if target_link[-1] == '#':
            return None
        return target_link
    except Exception as e:
        return None
def _get_links_list(web_driver, url_to_scrape):
    """Load the page's base URL and return its anchor tags, newest first.

    Returns a (tags, cleaned_base_url) pair, or (None, None) on any failure.
    """
    try:
        web_driver.implicitly_wait(30)
        base = _clean_base_url(url_to_scrape)
        web_driver.get(base)
        page = BeautifulSoup(web_driver.page_source, 'lxml')
        anchors = page.findAll('a')
        if len(anchors) > 0:
            anchors.reverse()
        return anchors, base
    except Exception as e:
        return None, None
def _select_best_link(keyword, link_mas, key_words_dict):
np_link_mas = np.array(link_mas)
unique_links, unique_link_indexes = np.unique(np_link_mas[:, 1], return_index=True)
if len(unique_links) == 1:
return unique_links[0]
else:
unique_link_mas = np_link_mas[unique_link_indexes, :]
sorted_mas = sorted(unique_link_mas[:, ], key=lambda x: len(x[0].split()))
return sorted_mas[0][1]
def _analyze_parameter_part(link, key, key_dict):
link_mas = link.split('?')
base_part, option_part = link_mas[0], link_mas[1]
if '&' in option_part:
option_part_mas = option_part.split('&')
base_part_counter = 0
option_part_counter = 0
for word in key_dict[key]:
if word in base_part.lower():
base_part_counter += 1
if word in option_part.lower():
option_part_counter += 1
if '.jsp' in base_part:
return link
if base_part_counter > 0:
return base_part
if 'utm' in option_part:
return base_part
if option_part_counter > 0 or (base_part_counter == 0 and option_part_counter == 0):
return link
def get_text(link, options, company_name, company_website):
    """Download one policy page and return its rewritten text.

    (Docstring translated from Russian.)

    Parameters
    ----------
    link : str
        URL of the policy page.
    options : selenium chrome options
        Options forwarded to the Chrome driver.
    company_name : str
        Company name used when rewriting pronouns.
    company_website : str
        Company website; reduced to the bare domain before use.

    Returns
    -------
    str or None
        The joined sentences, or None when nothing usable was extracted.
    """
    if link is not None and type(link) == str and len(link) > 0:
        driver = webdriver.Chrome(executable_path='./chromedriver', options=options)
        try:
            # BUG FIX: _get_text_list's signature is
            # (url_to_scrape, method='requests', web_driver=None).  The
            # original called _get_text_list(driver, link), passing the
            # webdriver as the URL and the URL as the method, so the fetch
            # always failed and this function always returned None.
            res, url = _get_text_list(link, method='webdriver', web_driver=driver)
            if res is None:
                return None
            site = company_website.replace('https://', '').replace('http://', '')
            if site.find('/') > -1:
                site = site[:site.find('/')]
            b = text_generator(res, company_name, site)
            if b is not None:
                return ' '.join(b)
            return None
        finally:
            # try/finally replaces the three duplicated driver.quit() calls.
            driver.quit()
# Patterns for sentences to discard: phone numbers, dates/times, and noisy
# markup symbols.  They feed patterns_to_skip in text_generator.
phone_numbers_pattern = re.compile('([\+\(\)\-\. 0-9]{7,23})')
date_pattern = re.compile(
    '(?:[A-Za-z]{3,9} [0-9]{1,2}, [0-9]{4})|(?:[0-9]{1,2}[.\/-]?[0-9]{1,2}[.\/-]?[0-9]{2,4})|(?:[0-9]{1,2}[:\-]{1}[0-9]{1,2})')
symbol_pattern = re.compile('[#%$><}{]{1}')
# NOTE(review): '[\/A_Z0-9a-z]' looks like a typo for '[\/A-Z0-9a-z]' (as
# written it matches the literal characters 'A', '_', 'Z').  The pattern is
# currently unused -- it is commented out of patterns_to_skip -- so the
# suspected typo is flagged here rather than changed.
url_pattern = re.compile(r'[A-Za-z0-9]+([\-\.]{1}[A-Za-z0-9]+)*\.[\/A_Z0-9a-z]+')
def text_generator(text_mas, company_name, company_website):
    """Turn scraped tags into a cleaned, pronoun-rewritten list of sentences.

    Parameters
    ----------
    text_mas : list
        BeautifulSoup tags returned by _get_text_list.
    company_name : str
        Name substituted for first-person pronouns.
    company_website : str
        Bare domain substituted for first-person pronouns.

    Returns
    -------
    list of str
        The shortest sentence prefix whose joined length reaches 800
        characters, or '' when the page yields too little usable text.
    """
    patterns_to_skip = [phone_numbers_pattern,
                        date_pattern,
                        symbol_pattern]  # , url_pattern]
    temp = []
    # Pass 1: pull raw paragraphs (at least 4 words) out of the tags.
    for el in text_mas:
        if el.name == 'p':
            # <p> tags: collect text from non-anchor children, one level deep.
            text_tmp = []
            for tag in el.contents:
                if tag.name is not None and tag.name != 'a':
                    temp_content = tag.contents
                    for t in temp_content:
                        if t.name is not None and t.name != 'a' and t.contents is not None:
                            temp_content2 = t.contents
                            if len(temp_content2) == 0:
                                continue
                            elif temp_content2[0].name is None:
                                text_tmp.append(str(temp_content2[0]))
                            else:
                                text_tmp.append(str(t))
                else:
                    text_tmp.append(str(tag))
            text = '\n'.join(text_tmp)
        else:
            text = el.text.strip()
        if text is not None and len(text) > 0:
            # '..' marks truncated teaser text -- skip it entirely.
            if '..' in text:
                continue
            if '\n' in text:
                text_splitted = text.split('\n')
                for par in text_splitted:
                    if len(par.split()) >= 4:
                        temp.append(par)
            elif len(text.split()) >= 4:
                temp.append(text)
    temp2 = []
    # Pass 2: split the paragraphs into sentences and filter out noise.
    for el in temp:
        sentences = tokenize.sent_tokenize(el)
        for sen in sentences[:]:
            sen = sen.strip()
            if sen in temp2:
                continue
            if len(sen.split()) < 3:
                continue
            # Keep only sentences that start with a capital and end in '.'.
            if re.match('[A-Z]', sen[0]) is None or sen[-1] != '.':
                continue
            # Drop navigation / shop-UI boilerplate.
            if 'cart' in sen.lower() or 'registration' in sen.lower() or 'invalid' in sen.lower() or 'get' in sen.lower() or 'please select' in sen.lower() or 'enter' in sen.lower() or 'cookie' in sen.lower() or 'shopping bag' in sen.lower() or 'search result' in sen.lower() or 'click' in sen.lower() or 'see here' in sen.lower() or 'please see' in sen.lower():
                continue
            counter = 0
            for pattern in patterns_to_skip:
                if len(re.findall(pattern=pattern, string=sen.lower())) > 0:
                    counter += 1
            if counter > 0:
                continue
            # Normalise typographic quotes/dashes and non-breaking spaces.
            replace_characters = {'“': '"', '’': "'", '”': '"', '–': '-', '®': '', '\xa0': ' '}
            for key in replace_characters:
                sen = sen.replace(key, replace_characters[key])
            # NOTE(review): replace(' ', ' ') is a no-op as written -- it
            # was presumably meant to collapse double spaces; confirm.
            sen = sen.replace('\n', ' ').replace('\t', ' ').replace(' ', ' ').strip()
            temp2.append(sen)
    if len(temp2) < 5 or len(' '.join(temp2)) < 800:
        return ''
    # Grow the prefix until the joined text reaches 800 characters.
    mas_length = len(temp2)
    curr_length = 5
    text_length = len(' '.join(temp2[:curr_length]))
    while text_length < 800 and curr_length <= mas_length:
        curr_length += 1
        text_length = len(' '.join(temp2[:curr_length]))
    # Priority-keyed pronoun patterns and their company-voice replacements.
    word_pattern = '(?:[\W\"\(]{{1}}|^)({})(?:[\W\"\)\(\.\,;]{{1,3}}?|$)'
    pronoun_dict = {
        16: {re.compile(word_pattern.format("I")): ['The Company', company_name, company_website, 'Website']},
        15: {re.compile(word_pattern.format("my")): ["Company's", company_name + "'s'"]},
        14: {re.compile(word_pattern.format("mine")): ["Company's", company_name + "'s'"]},
        12: {re.compile(word_pattern.format('us')): ['The Company', company_name, company_website, 'Website']},
        13: {re.compile(word_pattern.format('we')): ['The Company', company_name, company_website, 'Website']},
        10: {re.compile(word_pattern.format('you')): ['The Client', 'The Customer', 'The User',
                                                      'The Client of {}'.format(company_name), 'The User of Website',
                                                      'The User of company services']},
        9: {re.compile(word_pattern.format('your')): ["The Client's", "The Customer's", "The User's"]},
        11: {re.compile(word_pattern.format('our')): ["Company's", company_name + "'s'"]},
        0: {re.compile(word_pattern.format("we'd")): ['The company would', '{} would'.format(company_name),
                                                      '{} would'.format(company_website), 'website would']},
        1: {re.compile(word_pattern.format("we'll")): ['The company will', '{} will'.format(company_name),
                                                       '{} will'.format(company_website), 'website will']},
        2: {re.compile(word_pattern.format("we're")): ['The company is', '{} is'.format(company_name),
                                                       '{} is'.format(company_website), 'website is']},
        3: {re.compile(word_pattern.format("we've")): ['The company has', '{} has'.format(company_name),
                                                       '{} has'.format(company_website), 'website has']},
        4: {re.compile(word_pattern.format("you'd")): ['The client would', 'The customer would', 'User would',
                                                       'the client of the {} would'.format(company_name),
                                                       'the user of website would',
                                                       'the user of company services would']},
        5: {re.compile(word_pattern.format("ours")): ['The Company', company_name, company_website, 'Website']},
        7: {re.compile(word_pattern.format("yours")): ['The Client', 'The Customer', 'The User',
                                                       'The Client of {}'.format(company_name), 'The User of Website',
                                                       'The User of company services']},
        6: {re.compile(word_pattern.format("yourself")): ['The Client', 'The Customer', 'The User',
                                                          'The Client of {}'.format(company_name),
                                                          'The User of Website', 'The User of company services']},
        8: {re.compile(word_pattern.format("you're")): ['The client is', 'The customer is', 'User is',
                                                        'The client of the {} is'.format(company_name),
                                                        'The user of website is', 'The user of company services is']}}
    if text_length >= 800:
        # NOTE(review): pronoun_replacer only consults keys 0-13, so the
        # 'I'/'my'/'mine' entries (14-16) above are currently never applied.
        for i in range(curr_length):
            temp2[i] = pronoun_replacer(temp2[i], pronoun_dict)
        return temp2[:curr_length]
    else:
        return ''
def pronoun_replacer(sentence, to_replace_dict):
is_upper = sentence.isupper()
sentence_mas = sentence.split()
if is_upper:
for i in range(len(sentence_mas)):
for num in range(14):
for el in to_replace_dict[num].keys():
if re.search(el, sentence_mas[i].lower()) is not None:
word_mas = to_replace_dict[num][el]
index = random.choice(range(len(word_mas)))
new_word = word_mas[index]
while new_word in sentence_mas:
word_mas = word_mas[:index] + word_mas[index + 1:]
if len(word_mas) < 1:
break
index = random.choice(range(len(word_mas)))
new_word = word_mas[index]
sentence_mas[i] = re.sub(el, new_word, sentence_mas[i].lower()).upper()
else:
for i in range(len(sentence_mas)):
for num in range(14):
for el in to_replace_dict[num].keys():
if re.search(el, sentence_mas[i].lower()) is not None:
word_mas = to_replace_dict[num][el]
index = random.choice(range(len(word_mas)))
new_word = word_mas[index]
while new_word in sentence_mas:
word_mas = word_mas[:index] + word_mas[index + 1:]
if len(word_mas) < 1:
break
index = random.choice(range(len(word_mas)))
new_word = word_mas[index]
sentence_mas[i] = re.sub(el, new_word, sentence_mas[i].lower())
#
return ' '.join(sentence_mas)
def _get_text_list(url_to_scrape, method='requests', web_driver=None):
    """Fetch a page and return the tags that most likely hold its text.

    (Docstring translated from Russian.)

    Parameters
    ----------
    url_to_scrape : str
        Page to download.
    method : str
        'webdriver' uses the supplied selenium driver; any other value
        falls back to the requests library.
    web_driver : selenium webdriver, optional
        Required only when method == 'webdriver'.

    Returns
    -------
    tuple
        (candidate tags, url) on success, (None, None) on any failure.
    """
    try:
        if method == 'webdriver':
            web_driver.get(url_to_scrape)
            soup = BeautifulSoup(web_driver.page_source, 'lxml')
        else:
            # NOTE(review): no timeout is set on this request -- it can
            # block indefinitely on a stalled server; confirm acceptable.
            r = requests.get(url_to_scrape)
            soup = BeautifulSoup(r.text, 'lxml')
        link_list = soup.findAll(['h1', 'h2', 'h3', 'h4', 'h5', 'p', 'span', 'li', 'br'])
        res = []
        # A tag listed here is rejected when it contains any of the nested
        # tags in its value (such containers are usually layout, not text).
        tag_exceptions = {
            'div': ['h1', 'h2', 'h3', 'h4', 'h5', 'p', 'span', 'li', 'div', 'img', 'br', 'iframe', 'input', 'i'],
            'li': ['a', 'img', 'i'],
            'span': ['a', 'img', 'i', 'br'],
            'p': ['button']}
        for el in link_list:
            if el.name in tag_exceptions:
                if len(el.findAll(tag_exceptions[el.name])) > 0:
                    continue
            # NOTE(review): BeautifulSoup tag equality compares content, so
            # this also skips tags whose parent merely *equals* a collected
            # tag rather than being the same object -- confirm intended.
            if el.parent in res:
                continue
            # Keep small tags (1-4 direct children): likely plain text.
            if 0 < len(el.contents) < 5:
                res.append(el)
        # Fallback: when nothing was collected, try bare <div> tags.
        if len(res) == 0:
            link_list = soup.findAll(['div'])
            for el in link_list:
                if el.name in tag_exceptions:
                    if len(el.findAll(tag_exceptions[el.name])) > 0:
                        continue
                if 0 < len(el.contents) < 5:
                    res.append(el)
        return res, url_to_scrape
    except Exception as e:
        return None, None
def _store_policy_text(result, policy_textes, key, text):
    """Store *text* under result[key] unless it near-duplicates a stored text.

    A text counts as a duplicate when its best SequenceMatcher ratio against
    the already collected texts is >= 0.1 (threshold kept from the original
    implementation).  None is stored as ''.  *policy_textes* is mutated in
    place so later calls can compare against every accepted text.
    """
    if text is None:
        result[key] = ''
        return
    if len(policy_textes) > 0:
        similar = [SequenceMatcher(None, known, text).ratio() for known in policy_textes]
        if max(similar) < 0.1:
            result[key] = text
            policy_textes.append(text)
        else:
            result[key] = ''
    else:
        result[key] = text
        policy_textes.append(text)


def link_scraper(web_driver, options, url_to_scrape, company_name, key_words_dict, skip_words):
    """Collect the links and texts of all policy pages reachable from a site.

    (Docstring translated from Russian.)

    Parameters
    ----------
    web_driver : selenium webdriver
        Driver used to load the landing page.
    options : selenium chrome options
        Forwarded to get_text for the per-policy downloads.
    company_name : str
        Used when rewriting pronouns in the downloaded texts.
    url_to_scrape : str
        Page whose anchors are scanned.
    key_words_dict : dict
        {category: [keywords]} used to classify anchor texts.
    skip_words : list
        Href substrings that disqualify a link.

    Returns
    -------
    dict or None
        {'<category>_url': url, '<category>': text, ..., 'base_url': url},
        or None when the page could not be read or nothing was found.
    """
    link_dict = dict()
    res, base_url = _get_links_list(web_driver, url_to_scrape)
    link_dict['base_url'] = base_url
    if res is None:
        return None
    real_url = _clean_base_url(web_driver.current_url)
    # A redirect to a different domain means we are not on the site anymore.
    if not _is_same_domain(base_url, real_url):
        return None
    for el in res:
        # Only elements carrying an href are links worth classifying.
        if 'href' not in el.attrs:
            continue
        target_text = el.text.lower()
        # Long anchor texts are content, not navigation.
        if len(target_text) > 50:
            continue
        target_link = _create_link_from_href(el.attrs['href'], real_url, skip_words)
        if target_link is None:
            continue
        for key in key_words_dict:
            if key == 'help' and 'faq' in target_link.lower():
                continue
            for key_word in key_words_dict[key]:
                if key_word in target_text:
                    if '?' in target_link:
                        target_link = _analyze_parameter_part(target_link, key, key_words_dict)
                    if link_dict.get(key) is None:
                        link_dict[key] = [[target_text, target_link]]
                    elif [target_text, target_link] not in link_dict[key]:
                        link_dict[key].append([target_text, target_link])
    policy_textes = []
    result = dict()
    for key in link_dict:
        if key == 'base_url':
            continue
        # The original duplicated the entire fetch-and-dedup block in both
        # branches below; only the URL selection differs.
        if len(link_dict[key]) > 1:
            result[key + '_url'] = _select_best_link(key, link_dict[key], key_words_dict)
        else:
            result[key + '_url'] = link_dict[key][0][1]
        text = get_text(result[key + '_url'], options, company_name, link_dict['base_url'])
        _store_policy_text(result, policy_textes, key, text)
    if len(result) > 0:
        result['base_url'] = link_dict['base_url']
        return result
    return None
|
986,304 | f6a6dcc9cb50a5bac406ac629732a66d4f92fceb | # -*- coding: utf-8 -*-
# =============================================================================
# module : pulses/base_sequences.py
# author : Matthieu Dartiailh
# license : MIT license
# =============================================================================
from atom.api import (Int, Instance, Str, Dict, Bool, List,
ContainerList, set_default)
from itertools import chain
from inspect import cleandoc
from copy import deepcopy
from hqc_meas.utils.atom_util import member_from_str
from .contexts.base_context import BaseContext
from .entry_eval import eval_entry
from .item import Item
from .pulse import Pulse
class BaseSequence(Item):
    """ Base class for all sequences.

    This class defines the basic of a sequence but with only a very limited
    child support : only construction is supported, indexing is not handled
    nor is child insertion, deletion or displacement.

    NOTE(review): this module targets Python 2 (``iteritems`` is used
    below); it will not run unmodified on Python 3.
    """
    # --- Public API ----------------------------------------------------------

    #: Name of the sequence (help make a sequence more readable)
    name = Str().tag(pref=True)

    #: List of items this sequence consists of.
    items = ContainerList(Instance(Item))

    #: Dict of variables whose scope is limited to the sequence. Each key/value
    #: pair represents the name and definition of the variable.
    local_vars = Dict(Str()).tag(pref=True)

    def cleanup_cache(self):
        """ Clear all internal caches after successfully compiling the sequence
        """
        self._evaluated_vars = {}
        self._compiled = []
        # Recursively clear nested sequences too, so a later recompilation
        # starts from a clean state at every level.
        for i in self.items:
            if isinstance(i, BaseSequence):
                i.cleanup_cache()

    def compile_sequence(self, root_vars, sequence_locals, missings, errors):
        """ Evaluate the sequence vars and compile the list of pulses.

        Parameters
        ----------
        root_vars : dict
            Dictionary of global variables for the all items. This will
            tipically contains the i_start/stop/duration and the root vars.
            This dict must be updated with global new values but for
            evaluation sequence_locals must be used.

        sequence_locals : dict
            Dictionary of variables whose scope is limited to this sequence
            parent. This dict must be updated with global new values and
            must be used to perform evaluation (It always contains all the
            names defined in root_vars).

        missings : set
            Set of unfound local variables.

        errors : dict
            Dict of the errors which happened when performing the evaluation.

        Returns
        -------
        flag : bool
            Boolean indicating whether or not the evaluation succeeded.

        pulses : list
            List of pulses in which all the string entries have been evaluated.

        """
        # Abstract: concrete sequences (e.g. Sequence below) implement this.
        raise NotImplementedError()

    def walk(self, members, callables):
        """ Explore the items hierarchy.

        Missing values will be filled with None.

        Parameters
        ----------
        members : list(str)
            Names of the members whose value should be retrieved.

        callables : dict(callable)
            Dict {name: callables} to call on every item in the hierarchy. Each
            callable should take as single argument the task.

        Returns
        -------
        answer : list
            List summarizing the result of the exploration.

        """
        answer = [self._answer(members, callables)]
        for item in self.items:
            if isinstance(item, Pulse):
                # Pulses are leaves: collect their answer directly.
                answer.append(item._answer(members, callables))
            else:
                # Sub-sequences recurse, producing a nested list.
                answer.append(item.walk(members, callables))
        return answer

    @classmethod
    def build_from_config(cls, config, dependencies):
        """ Create a new instance using the provided infos for initialisation.

        Parameters
        ----------
        config : dict(str)
            Dictionary holding the new values to give to the members in string
            format, or dictionnary like for instance with prefs.

        dependencies : dict
            Dictionary holding the necessary classes needed when rebuilding.

        Returns
        -------
        sequence : Sequence
            Newly created and initiliazed sequence.

        Notes
        -----
        This method is fairly powerful and can handle a lot of cases so
        don't override it without checking that it works.

        """
        sequence = cls()
        for name, member in sequence.members().iteritems():
            # First we set the preference members
            meta = member.metadata
            if meta and 'pref' in meta:
                if name not in config:
                    continue
                # member_from_str handle containers
                value = config[name]
                validated = member_from_str(member, value)
                setattr(sequence, name, validated)
        # Then rebuild children from the numbered 'item_N' sub-configs until
        # the numbering has a gap.
        i = 0
        pref = 'item_{}'
        validated = []
        while True:
            item_name = pref.format(i)
            if item_name not in config:
                break
            item_config = config[item_name]
            item_class_name = item_config.pop('item_class')
            item_class = dependencies['pulses'][item_class_name]
            item = item_class.build_from_config(item_config,
                                                dependencies)
            validated.append(item)
            i += 1
        setattr(sequence, 'items', validated)
        return sequence

    # --- Private API ---------------------------------------------------------

    #: Dict of all already evaluated vars.
    _evaluated_vars = Dict()

    #: List of already compiled items.
    _compiled = List()

    def _compile_items(self, root_vars, sequence_locals, missings, errors):
        """ Compile the sequence in a flat list of pulses.

        Parameters
        ----------
        root_vars : dict
            Dictionary of global variables for the all items. This will
            tipically contains the i_start/stop/duration and the root vars.

        sequence_locals : dict
            Dictionary of variables whose scope is limited to this sequence.

        missings : set
            Set of unfound local variables.

        errors : dict
            Dict of the errors which happened when performing the evaluation.

        Returns
        -------
        flag : bool
            Boolean indicating whether or not the evaluation succeeded.

        pulses : list
            List of pulses in which all the string entries have been evaluated.

        """
        # Inplace modification of compile will update self._compiled.
        if not self._compiled:
            self._compiled = [None for i in self.items if i.enabled]
        compiled = self._compiled

        # Compilation of items in multiple passes.
        while True:
            miss = set()

            index = -1
            for item in self.items:
                # Skip disabled items
                if not item.enabled:
                    continue

                # Increment index so that we set the right object in compiled.
                index += 1

                # Skip evaluation if object has already been compiled.
                if compiled[index] is not None:
                    continue

                # If we get a pulse simply evaluate the entries, to add their
                # values to the locals and keep track of the missings to now
                # when to abort compilation.
                if isinstance(item, Pulse):
                    success = item.eval_entries(root_vars, sequence_locals,
                                                miss, errors)
                    if success:
                        compiled[index] = [item]

                # Here we got a sequence so we must try to compile it.
                else:
                    success, items = item.compile_sequence(root_vars,
                                                           sequence_locals,
                                                           miss, errors)
                    if success:
                        compiled[index] = items

            known_locals = set(sequence_locals.keys())
            # If none of the variables found missing during last pass is now
            # known stop compilation as we now reached a dead end. Same if an
            # error occured.
            if errors or miss and (not known_locals & miss):
                # Update the missings given by caller so that it knows it this
                # failure is linked to circle references.
                missings.update(miss)
                return False, []

            # If no var was found missing during last pass (and as no error
            # occured) it means the compilation succeeded.
            elif not miss:
                pulses = list(chain.from_iterable(compiled))
                # Clean the compiled items once the pulse is transfered
                self.cleanup_cache()
                return True, pulses

    def _answer(self, members, callables):
        """ Collect answers for the walk method.

        """
        answers = {m: getattr(self, m, None) for m in members}
        answers.update({k: c(self) for k, c in callables.iteritems()})
        return answers
class Sequence(BaseSequence):
""" A sequence is an ensemble of pulses.
"""
# --- Public API ----------------------------------------------------------
#: Bool indicating whether or not the sequence has a hard defined
#: start/stop/duration. In case it does not the associated values won't
#: be computed.
time_constrained = Bool().tag(pref=True)
    def compile_sequence(self, root_vars, sequence_locals, missings, errors):
        """ Evaluate the sequence vars and compile the list of pulses.

        Parameters
        ----------
        root_vars : dict
            Dictionary of global variables for the all items. This will
            tipically contains the i_start/stop/duration and the root vars.
            This dict must be updated with global new values but for
            evaluation sequence_locals must be used.

        sequence_locals : dict
            Dictionary of variables whose scope is limited to this sequence
            parent. This dict must be updated with global new values and
            must be used to perform evaluation (It always contains all the
            names defined in root_vars).

        missings : set
            Set of unfound local variables.

        errors : dict
            Dict of the errors which happened when performing the evaluation.

        Returns
        -------
        flag : bool
            Boolean indicating whether or not the evaluation succeeded.

        pulses : list
            List of pulses in which all the string entries have been evaluated.

        """
        prefix = '{}_'.format(self.index)

        # Definition evaluation.
        if self.time_constrained:
            self.eval_entries(root_vars, sequence_locals, missings, errors)

        # Local vars computation (cached across passes in _evaluated_vars).
        for name, formula in self.local_vars.iteritems():
            if name not in self._evaluated_vars:
                try:
                    val = eval_entry(formula, sequence_locals, missings)
                    self._evaluated_vars[name] = val
                except Exception as e:
                    errors[prefix + name] = repr(e)

        # Children evaluate against a copy so local vars do not leak to the
        # parent's namespace.
        local_namespace = sequence_locals.copy()
        local_namespace.update(self._evaluated_vars)

        res, pulses = self._compile_items(root_vars, local_namespace,
                                          missings, errors)

        if res:
            if self.time_constrained:
                # Check if start, stop and duration of sequence are compatible.
                start_err = [pulse for pulse in pulses
                             if pulse.start < self.start]
                stop_err = [pulse for pulse in pulses
                            if pulse.stop > self.stop]

                if start_err:
                    mess = cleandoc('''The start time of the following items {}
                        is smaller than the start time of the sequence {}''')
                    mess = mess.replace('\n', ' ')
                    ind = [p.index for p in start_err]
                    errors[self.name + '-start'] = mess.format(ind, self.index)
                if stop_err:
                    mess = cleandoc('''The stop time of the following items {}
                        is larger than the stop time of the sequence {}''')
                    mess = mess.replace('\n', ' ')
                    ind = [p.index for p in stop_err]
                    errors[self.name + '-stop'] = mess.format(ind, self.index)

            # Any error recorded above (or during item compilation) voids
            # the result.
            if errors:
                return False, []

            return True, pulses

        else:
            return False, []
def get_bindable_vars(self):
""" Access the list of bindable vars for the sequence.
"""
return self.local_vars.keys() + self.parent.get_bindable_vars()
def preferences_from_members(self):
""" Get the members values as string to store them in .ini files.
Reimplemented here to save items.
"""
pref = super(Sequence, self).preferences_from_members()
for i, item in enumerate(self.items):
pref['item_{}'.format(i)] = item.preferences_from_members()
return pref
def update_members_from_preferences(self, **parameters):
""" Use the string values given in the parameters to update the members
This function will call itself on any tagged HasPrefAtom member.
Reimplemented here to update items.
"""
super(Sequence, self).update_members_from_preferences(**parameters)
for i, item in enumerate(self.items):
para = parameters['item_{}'.format(i)]
item.update_members_from_preferences(**para)
# --- Private API ---------------------------------------------------------
#: Last index used by the sequence.
_last_index = Int()
    def _observe_root(self, change):
        """ Observer passing the root to all children.

        This allow to build a sequence without a root and parent it later.
        """
        if change['value']:
            # Root just set: propagate it to every existing child.
            for item in self.items:
                self._item_added(item)
            # Connect only now to avoid cleaning up in an unwanted way the
            # root linkable vars attr.
            self.observe('items', self._items_updated)
        else:
            # Root cleared: detach children while the observer is off.
            self.unobserve('items', self._items_updated)
            for item in self.items:
                self._item_removed(item)
            # NOTE(review): the observer is re-attached here even though the
            # root was just cleared -- presumably so later list edits are
            # still tracked; confirm against the intended lifecycle.
            self.observe('items', self._items_updated)
def _observe_time_constrained(self, change):
"""
"""
if change['value']:
self.linkable_vars = ['start', 'stop', 'duration']
else:
self.linkable_vars = []
def _items_updated(self, change):
    """ Observer for the items list.

    Dispatches on the Atom notification payload: whole-list 'update'
    changes, in-place 'container' operations, and single-item
    replacement. Always ends by renumbering the children.
    """
    if self.root:
        # The whole list changed.
        if change['type'] == 'update':
            added = set(change['value']) - set(change['oldvalue'])
            removed = set(change['oldvalue']) - set(change['value'])
            for item in removed:
                self._item_removed(item)
            for item in added:
                self._item_added(item)

        # An operation has been performed on the list.
        elif change['type'] == 'container':
            op = change['operation']

            # itemren have been added
            if op in ('__iadd__', 'append', 'extend', 'insert'):
                if 'item' in change:
                    self._item_added(change['item'])
                if 'items' in change:
                    for item in change['items']:
                        self._item_added(item)

            # itemren have been removed.
            elif op in ('__delitem__', 'remove', 'pop'):
                if 'item' in change:
                    self._item_removed(change['item'])
                if 'items' in change:
                    for item in change['items']:
                        self._item_removed(item)

            # One item was replaced.
            # Fixed: ('__setitem__') is a plain string, so the original
            # `op in ('__setitem__')` was a substring test (e.g. op ==
            # 'item' would match). A one-element tuple restores the
            # intended membership test.
            elif op in ('__setitem__',):
                old = change['olditem']
                if isinstance(old, list):
                    for item in old:
                        self._item_removed(item)
                else:
                    self._item_removed(old)

                new = change['newitem']
                if isinstance(new, list):
                    for item in new:
                        self._item_added(item)
                else:
                    self._item_added(new)

        self._recompute_indexes()
def _item_added(self, item):
    """ Fill in the attributes of a newly added item.
    """
    item.root = self.root
    item.parent = self
    # Let the root aggregate the item's linkable vars as they change.
    item.observe('linkable_vars', self.root._update_linkable_vars)
    # Child sequences additionally report their last used index upward.
    if isinstance(item, Sequence):
        item.observe('_last_index', self._item_last_index_updated)
def _item_removed(self, item):
    """ Clear the attributes of a removed item.
    """
    item.unobserve('linkable_vars', self.root._update_linkable_vars)
    # Detach silently so observers do not react to the teardown.
    with item.suppress_notifications():
        del item.root
        del item.parent
        item.index = 0
    if isinstance(item, Sequence):
        item.unobserve('_last_index', self._item_last_index_updated)
def _recompute_indexes(self, first_index=0, free_index=None):
    """ Recompute the item indexes and update the vars of the root_seq.

    Parameters
    ----------
    first_index : int, optional
        Index in items of the first item whose index needs to be updated.

    free_index : int, optional
        Value of the first free index.
    """
    if free_index is None:
        free_index = self.index + 1

    # Cleanup the linkable_vars for all the pulses which will be reindexed.
    linked_vars = self.root.linkable_vars
    for var in linked_vars[:]:
        # Vars are prefixed with the owning item's index, e.g. '3_start'.
        # NOTE(review): var[0] only inspects the first character, so
        # indexes >= 10 are mis-parsed here — confirm intended range.
        if var[0].isdigit() and int(var[0]) >= free_index:
            linked_vars.remove(var)

    for item in self.items[first_index:]:

        item.index = free_index
        prefix = '{}_'.format(free_index)
        linkable_vars = [prefix + var for var in item.linkable_vars]
        linked_vars.extend(linkable_vars)

        if isinstance(item, Sequence):
            # Suspend the last-index observer while the child renumbers
            # itself, to avoid re-entrant recomputation.
            item.unobserve('_last_index', self._item_last_index_updated)
            item._recompute_indexes()
            item.observe('_last_index', self._item_last_index_updated)
            free_index = item._last_index + 1

        # We have a non indexed item (pulse or template).
        else:
            free_index += 1

    self._last_index = free_index - 1
def _item_last_index_updated(self, change):
    """ Renumber the items following a child sequence whenever that
    child's last index changes.
    """
    child_pos = self.items.index(change['object']) + 1
    next_free = change['value'] + 1
    self._recompute_indexes(child_pos, next_free)
class RootSequence(Sequence):
    """ Base of any pulse sequences.

    This Item perform the first step of compilation by evaluating all the
    entries and then unravelling the pulse sequence (elimination of condition
    and loop flattening).

    Notes
    -----
    The linkable_vars of the RootSequence stores all the known linkable vars
    for the sequence.

    The start, stop, duration and def_1, def_2 members are not used by the
    RootSequence. The time_constrained member only affects the use of the
    sequence duration.

    """
    # --- Public API ----------------------------------------------------------

    #: Dictionary of external variables whose values should be given before
    #: the start of the compilation stage.
    external_vars = Dict(Str()).tag(pref=True)

    #: Duration of the sequence when it is fixed. The unit of this time is
    # fixed by the context.
    sequence_duration = Str().tag(pref=True)

    #: Reference to the executioner context of the sequence.
    context = Instance(BaseContext)

    # The root is always item 0 and is simply named 'Root'.
    index = set_default(0)
    name = set_default('Root')

    def __init__(self, **kwargs):
        """ A RootSequence acts as its own root.
        """
        super(RootSequence, self).__init__(**kwargs)
        self.root = self

    def compile_sequence(self, use_context=True):
        """ Compile a sequence to useful format.

        Parameters
        ---------------
        use_context : bool, optional
            Should the context compile the pulse sequence.

        Returns
        -----------
        result : bool
            Flag indicating whether or not the compilation succeeded.

        args : iterable
            Objects depending on the result and use_context flag.
            In case of failure: tuple
                - a set of the entries whose values where never found and a
                dict of the errors which occured during compilation.
            In case of success:
                - a flat list of Pulse if use_context is False
                - a context dependent result otherwise.

        """
        missings = set()
        errors = {}
        root_vars = self.external_vars.copy()

        # Local vars computation.
        # NOTE: dict.iteritems() is Python 2 only.
        for name, formula in self.local_vars.iteritems():
            if name not in self._evaluated_vars:
                try:
                    val = eval_entry(formula, root_vars, missings)
                    self._evaluated_vars[name] = val
                except Exception as e:
                    errors['root_' + name] = repr(e)

        root_vars.update(self._evaluated_vars)

        if self.time_constrained:
            try:
                duration = eval_entry(self.sequence_duration, root_vars,
                                      missings)
                root_vars['sequence_end'] = duration
            except Exception as e:
                # NOTE(review): if this evaluation fails, 'duration' stays
                # unbound yet may be referenced below when item compilation
                # succeeds — confirm whether that path is reachable.
                errors['root_seq_duration'] = repr(e)

        res, pulses = self._compile_items(root_vars, root_vars,
                                          missings, errors)

        if not res:
            return False, (missings, errors)

        if self.time_constrained:
            # Reject any pulse that overruns the fixed sequence duration.
            err = [p for p in pulses if p.stop > duration]

            if err:
                mess = cleandoc('''The stop time of the following pulses {}
                    is larger than the duration of the sequence.''')
                ind = [p.index for p in err]
                errors['Root-stop'] = mess.format(ind)
                return False, (missings, errors)

        if not use_context:
            return True, pulses

        else:
            kwargs = {}
            if self.time_constrained:
                kwargs['sequence_duration'] = duration
            return self.context.compile_sequence(pulses, **kwargs)

    def compile_loop(self, use_context=True):
        """ Compile a sequence to useful format.

        Same evaluation steps as compile_sequence, but returns 3-tuples and
        delegates to context.compile_loop.

        Parameters
        ---------------
        use_context : bool, optional
            Should the context compile the pulse sequence.

        Returns
        -----------
        result : bool
            Flag indicating whether or not the compilation succeeded.

        args : iterable
            Objects depending on the result and use_context flag.
            In case of failure: tuple
                - a set of the entries whose values where never found and a
                dict of the errors which occured during compilation.
            In case of success:
                - a flat list of Pulse if use_context is False
                - a context dependent result otherwise.

        """
        missings = set()
        errors = {}
        root_vars = self.external_vars.copy()

        # Local vars computation.
        # NOTE: dict.iteritems() is Python 2 only.
        for name, formula in self.local_vars.iteritems():
            if name not in self._evaluated_vars:
                try:
                    val = eval_entry(formula, root_vars, missings)
                    self._evaluated_vars[name] = val
                except Exception as e:
                    errors['root_' + name] = repr(e)

        root_vars.update(self._evaluated_vars)

        if self.time_constrained:
            try:
                duration = eval_entry(self.sequence_duration, root_vars,
                                      missings)
                root_vars['sequence_end'] = duration
            except Exception as e:
                # NOTE(review): same possibly-unbound 'duration' caveat as
                # in compile_sequence.
                errors['root_seq_duration'] = repr(e)

        res, pulses = self._compile_items(root_vars, root_vars,
                                          missings, errors)

        if not res:
            return False, (missings, errors), None

        if self.time_constrained:
            # Reject any pulse that overruns the fixed sequence duration.
            err = [p for p in pulses if p.stop > duration]

            if err:
                mess = cleandoc('''The stop time of the following pulses {}
                    is larger than the duration of the sequence.''')
                ind = [p.index for p in err]
                errors['Root-stop'] = mess.format(ind)
                return False, (missings, errors), None

        if not use_context:
            return True, pulses, None

        else:
            kwargs = {}
            if self.time_constrained:
                kwargs['sequence_duration'] = duration
            return self.context.compile_loop(pulses, **kwargs)

    def get_bindable_vars(self):
        """ Access the list of bindable vars for the sequence.
        """
        # NOTE: dict.keys() list concatenation is Python 2 only; on
        # Python 3 keys() returns a view that does not support '+'.
        return (self.linkable_vars + self.local_vars.keys() +
                self.external_vars.keys())

    def walk(self, members, callables):
        """ Explore the items hierarchy.

        Missing values will be filled with None. Overrided here to add context
        entries.

        Parameters
        ----------
        members : list(str)
            Names of the members whose value should be retrieved.

        callables : dict(callable)
            Dict {name: callables} to call on every item in the hierarchy. Each
            callable should take as single argument the task.

        Returns
        -------
        answer : list
            List summarizing the result of the exploration.

        """
        answer = [self._answer(members, callables),
                  self.context._answer(members, callables)]
        for item in self.items:
            if isinstance(item, Pulse):
                answer.append(item._answer(members, callables))
            else:
                answer.append(item.walk(members, callables))

        return answer

    def preferences_from_members(self):
        """ Get the members values as string to store them in .ini files.

        Reimplemented here to save context.
        """
        pref = super(RootSequence, self).preferences_from_members()

        if self.context:
            pref['context'] = self.context.preferences_from_members()

        return pref

    def update_members_from_preferences(self, **parameters):
        """ Use the string values given in the parameters to update the members

        This function will call itself on any tagged HasPrefAtom member.
        Reimplemented here to update context.
        """
        super(RootSequence, self).update_members_from_preferences(**parameters)

        para = parameters['context']
        self.context.update_members_from_preferences(**para)

    @classmethod
    def build_from_config(cls, config, dependencies):
        """ Create a new instance using the provided infos for initialisation.

        Overridden here to allow context creation.

        Parameters
        ----------
        config : dict(str)
            Dictionary holding the new values to give to the members in string
            format, or dictionnary like for instance with prefs.

        dependencies : dict
            Dictionary holding the necessary classes needed when rebuilding.

        Returns
        -------
        sequence : Sequence
            Newly created and initiliazed sequence.

        """
        config = deepcopy(config)
        if 'context' in config:
            # Instantiate the context class named in the config and restore
            # its preferences before building the sequence itself.
            context_config = config['context']
            c_class_name = context_config.pop('context_class')
            context_class = dependencies['pulses']['contexts'][c_class_name]
            context = context_class()
            context.update_members_from_preferences(**context_config)

        seq = super(RootSequence, cls).build_from_config(config,
                                                        dependencies)
        if 'context' in config:
            seq.context = context
        return seq

    # --- Private API ---------------------------------------------------------

    def _answer(self, members, callables):
        """ Collect member values / callable results, including the
        'context.*' members resolved on the context object.
        """
        answers = super(RootSequence, self)._answer(members, callables)
        con_members = [m for m in members
                       if m.startswith('context.')]
        # m[8:] strips the 'context.' prefix before the attribute lookup.
        answers.update({m: getattr(self.context, m[8:], None)
                        for m in con_members})

        return answers

    def _observe_time_constrained(self, change):
        """ Keep the linkable_vars list in sync with fix_sequence_duration.
        """
        if change['value']:
            link_vars = self.linkable_vars[:]
            link_vars.insert(0, 'sequence_end')
            self.linkable_vars = link_vars
        elif 'sequence_end' in self.linkable_vars:
            link_vars = self.linkable_vars[:]
            link_vars.remove('sequence_end')
            self.linkable_vars = link_vars

    def _update_linkable_vars(self, change):
        """ Mirror an item's linkable vars on the root, prefixed by the
        item's index.
        """
        # Don't want this to happen on member init.
        if change['type'] == 'update':
            link_vars = self.linkable_vars
            item = change['object']
            prefix = '{}_{{}}'.format(item.index)
            added = set(change['value']) - set(change.get('oldvalue', []))
            removed = set(change.get('oldvalue', [])) - set(change['value'])
            link_vars.extend([prefix.format(var)
                              for var in added])
            for var in removed:
                r = prefix.format(var)
                if r in link_vars:
                    link_vars.remove(r)
|
986,305 | d515f953200f41fc303cf76b266e6d26eff8e7b4 | from django.conf.urls import include, url
from django.views.generic import TemplateView
from misago.conf import settings
from misago.core.views import forum_index
app_name = 'misago'

# Register Misago Apps
urlpatterns = [
    url(r'^', include('misago.legal.urls')),
    url(r'^', include('misago.users.urls')),
    url(r'^', include('misago.categories.urls')),
    url(r'^', include('misago.threads.urls')),
    url(r'^', include('misago.search.urls')),

    # default robots.txt
    # Fixed: the '.' is escaped so the pattern matches the literal file
    # name only (an unescaped '.' matches any character, e.g. 'robotsXtxt').
    url(
        r'^robots\.txt$',
        TemplateView.as_view(content_type='text/plain', template_name='misago/robots.txt')
    ),

    # "misago:index" link symbolises "root" of Misago links space
    # any request with path that falls below this one is assumed to be directed
    # at Misago and will be handled by misago.views.exceptionhandler if it
    # results in Http404 or PermissionDenied exception
    url(r'^$', forum_index, name='index'),
]

# Register API
apipatterns = [
    url(r'^', include('misago.categories.urls.api')),
    url(r'^', include('misago.legal.urls.api')),
    url(r'^', include('misago.markup.urls')),
    url(r'^', include('misago.threads.urls.api')),
    url(r'^', include('misago.users.urls.api')),
    url(r'^', include('misago.search.urls.api')),
]

urlpatterns += [
    url(r'^api/', include((apipatterns, 'api'), namespace='api')),
]

# Register Misago ACP
if settings.MISAGO_ADMIN_PATH:
    # Admin patterns recognised by Misago
    adminpatterns = [
        url(r'^', include('misago.admin.urls')),
    ]

    # NOTE(review): MISAGO_ADMIN_PATH is interpolated into a regex verbatim;
    # it is assumed to contain no regex metacharacters — confirm.
    admin_prefix = r'^%s/' % settings.MISAGO_ADMIN_PATH

    urlpatterns += [
        url(admin_prefix, include((adminpatterns, 'admin'), namespace='admin')),
    ]
|
986,306 | 6674527269b68e274c5bc4a15125ecd198309983 | ## w^3.py
# Next, we review our notation for ω^3.
# NOTE(review): this is quine-style self-reproducing code. 'output' and
# 'escape' are not defined in this file and must be supplied by the host
# environment; both while-loops below never terminate by design.
TEMPLATE='''
TEMPLATE="""
X=\'''___\'''
while True:
    output(X)
    X='output(\\\"""' + escape(X) + '\\\""")'
"""
X=\'''***\'''
while True:
    output(X)
    X = TEMPLATE.replace('___', escape(X))
'''
X=""
while True:
    output(X)
    X = TEMPLATE.replace('***', escape(X))
986,307 | beefaf63099eb538b8fc8755a83e9456812d8c16 |
def bar_fn():
    """Print a short trace message showing that bar_fn() was executed."""
    # print() as a function works on both Python 2 and Python 3; the
    # original bare `print "..."` statement is a SyntaxError on Python 3.
    print(" executing bar_fn()")
|
986,308 | 72ebcf0565a9470b62eeeb72f18223164fa95ee8 | #!/usr/bin/env python3
import os.path as path
from datetime import datetime
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.callbacks import *
from tensorflow.keras.datasets import cifar100
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import RMSprop
# Define training parameters
name = 'vgg16'
learning_rate = 1e-5
epochs = 100

# Load and preprocess training data
# Labels are converted to one-hot vectors over the 100 fine classes.
(x_train, y_train), _ = cifar100.load_data(label_mode='fine')
y_train = tf.one_hot(tf.squeeze(y_train), 100)

# Load pretrained backbone
# NOTE(review): pixels are fed unnormalized; VGG16 imagenet weights are
# usually paired with vgg16.preprocess_input — confirm this is intended.
backbone = VGG16(input_shape=x_train.shape[1:],
                 include_top=False,
                 weights='imagenet')

# Add final size-specific layers (classic VGG16 classifier head).
inputs = backbone.input
x = inputs
x = backbone(x)
x = Flatten()(x)
x = Dense(4096)(x)
x = ReLU()(x)
x = Dense(4096)(x)
x = ReLU()(x)
x = Dense(100)(x)
x = Softmax()(x)
model = Model(inputs=inputs, outputs=x)

# Compile the model for training
model.compile(optimizer=RMSprop(learning_rate=learning_rate),
              loss='categorical_crossentropy',
              metrics=['accuracy', 'top_k_categorical_accuracy'])

# Set up callbacks: checkpoint the best val_accuracy epoch and log to
# a timestamped TensorBoard run directory.
tb_name = '{}_{}'.format(name,
                         datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
callbacks = [
    ModelCheckpoint(path.join('models', name + '.h5'),
                    monitor='val_accuracy',
                    verbose=1,
                    save_best_only=True),
    TensorBoard(log_dir=path.join('tensorboard', tb_name)),
]

# Train the model (best epoch automatically saved by the callback)
model.fit(x=x_train,
          y=y_train,
          epochs=epochs,
          callbacks=callbacks,
          validation_split=0.2)
986,309 | 4680a2c237f74d8c1f3b0fde8431e7c883882280 | #!/usr/bin/python3
import sys
def decibinary(x):
    """Presumably maps a 1-based rank x to a value in a 'decibinary'
    ordering (HackerRank-style problem) — TODO confirm the intended
    specification.

    NOTE(review): this implementation looks unfinished. For x > 2 the
    function falls through without a return statement and yields None,
    and the adjusted 'total'/'value' computed below are never used.
    """
    if x == 1 or x == 2:
        # Ranks 1 and 2 map to 0 and 1 respectively.
        return x - 1
    total = 2
    value = 4
    # Accumulate block sizes 4, 8, 12, ... until 'total' reaches x.
    while total < x:
        total += value
        value += 4
    value = int((value-4) / 2)
    if x >= total-(value/2):
        total -= value*2
        value += 1
    # NOTE(review): missing return — the computation above is discarded.

# Script driver: read q queries, one rank per line, and print each answer.
q = int(input().strip())
for a0 in range(q):
    x = int(input().strip())
    # your code goes here
    print(decibinary(x))
|
986,310 | 3e7762768c90f3dc1f5c8202ebd39ff1b8edb32d | """
Project: python_class
Author:柠檬班-Tricy
Time:2021/8/24 21:23
Company:湖南零檬信息技术有限公司
Site: http://www.lemonban.com
Forum: http://testingpai.com
"""
import time
def login(driver, name, passwd):
    """Log into the ERP demo site with the given credentials.

    Parameters
    ----------
    driver :
        Selenium-style webdriver used to interact with the page.
    name : str
        Text typed into the username field.
    passwd : str
        Text typed into the password field.
    """
    driver.get("http://erp.lemfix.com/login.html")
    # send_keys() returns None, so its result is not bound to a variable
    # (the original kept an unused 'username' local).
    driver.find_element_by_id("username").send_keys(name)
    driver.find_element_by_id("password").send_keys(passwd)
    driver.find_element_by_id("btnSubmit").click()
def search(driver, name, passwd, key):
    """Log in, open the retail-outbound page and search for 'key'.

    Returns
    -------
    str
        Text of the 'number' cell of the first row in the result grid.
    """
    login(driver, name, passwd)
    driver.find_element_by_xpath('//span[text()="零售出库"]').click()
    time.sleep(2)
    id = driver.find_element_by_xpath('//div[text()="零售出库"]/..').get_attribute("id")  # read the element's attribute value
    frameid = id + "-frame"
    driver.switch_to.frame(driver.find_element_by_xpath("//iframe[@id='{}']".format(frameid)))  # locate the iframe by element and switch into it
    driver.find_element_by_id("searchNumber").send_keys(key)
    driver.find_element_by_id("searchBtn").click()
    time.sleep(1)  # implicit wait + forced wait
    num = driver.find_element_by_xpath('//tr[@id="datagrid-row-r1-2-0"]//td[@field="number"]/div').text  # read the cell text
    return num
986,311 | e80c7ed5793b013276a4541c2af9ee5493c091cd | from abc import ABCMeta, abstractmethod
def main():
    """Demo: fill a 4-slot shelf and print every book via the iterator."""
    book_shelf = BookShelf(4)
    book_shelf.append_book(Book(name='A'))
    book_shelf.append_book(Book(name='B'))
    book_shelf.append_book(Book(name='C'))
    book_shelf.append_book(Book(name='D'))
    it = book_shelf.iterator()
    # Classic Iterator-pattern loop: ask for the next element while one exists.
    while it.has_next():
        book = it.next()
        print(book.get_name())
class Aggregate(metaclass=ABCMeta):
    'Abstract base representing an aggregate (an iterable collection).'

    @abstractmethod
    def iterator(self):
        # Concrete aggregates must return an Iterator over their elements.
        pass
class Iterator(metaclass=ABCMeta):
    '''
    Represents a loop variable that enumerates the elements of an
    aggregate one at a time.
    '''
    # Abstract class

    @abstractmethod
    def has_next(self):
        'Return whether another element exists.'
        pass

    @abstractmethod
    def next(self):
        'Return the next element.'
        pass
class Book:
    """A book that exposes nothing but its own name."""

    def __init__(self, name):
        self.__title = name

    def get_name(self):
        """Return the name this book was created with."""
        return self.__title
class BookShelf(Aggregate):
    '''
    A bookshelf treated as an aggregate, so it implements the Aggregate
    interface.

    The shelf owns a private books field; the size of the underlying
    array is fixed when the BookShelf instance is created.
    '''

    def __init__(self, maxsize):
        # Pre-fill the fixed-size slots with None.
        # The books field is kept private to prevent outside mutation.
        self.__books = [None]*maxsize
        self.__last = 0

    def get_book_at(self, index):
        'Return the book stored at position index.'
        return self.__books[index]

    def append_book(self, book):
        'Store a book in the next free slot.'
        # NOTE(review): no capacity check — appending past maxsize raises
        # IndexError.
        self.__books[self.__last] = book
        self.__last += 1

    def get_length(self):
        'Return how many books are currently stored.'
        # (Original author's aside, kept verbatim: wonders whether __len__
        # would have been preferable.)
        '''
        lenではだめなのか?
        https://medium.com/since-i-want-to-start-blog-that-looks-like-men-do/python%E3%81%AB%E3%82%88%E3%82%8B%E3%83%87%E3%82%B6%E3%82%A4%E3%83%B3%E3%83%91%E3%82%BF%E3%83%BC%E3%83%B3-iteratory-9a9d94ded3a9
        lenで書いているものあった
        '''
        return self.__last

    def iterator(self):
        # Create the BookShelfIterator paired with this BookShelf;
        # used when enumerating the books on the shelf.
        return BookShelfIterator(self)
class BookShelfIterator(Iterator):
    '''
    ConcreteIterator role.

    Inherits the abstract Iterator class and performs the scan of a
    BookShelf. Keeping the scan separate from BookShelf itself makes
    reuse easier.
    '''

    def __init__(self, book_shelf):
        self.__book_shelf = book_shelf
        self.__index = 0

    def has_next(self):
        'Return True if another book exists, False otherwise.'
        # Naturally, when the index equals the length there is no next book.
        if self.__index < self.__book_shelf.get_length():
            return True
        else:
            return False

    def next(self):
        'Return the next element and advance the cursor.'
        book = self.__book_shelf.get_book_at(self.__index)
        self.__index += 1
        return book
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
|
986,312 | d0af23df74f1071449ff6ebb63bd8fc490791273 | from Tkinter import *
import tkMessageBox
import trainer as tr
import pandas
import main
# Build the main window: a top frame holding the URL entry and a bottom
# frame holding the submit button. (Python 2 code — Tkinter/tkMessageBox.)
root = Tk()
frame = Frame(root)
frame.pack()
bottomframe = Frame(root)
bottomframe.pack(side=BOTTOM)
L1 = Label(frame, text="Enter the URL: ")
L1.pack(side=LEFT)
E1 = Entry(frame, bd=5, width=150)
E1.pack(side=RIGHT)

def submitCallBack():
    """Classify the URL typed in E1 and show the verdict in a dialog.

    Reads the entry widget, extracts features via main.process_test_url,
    then classifies with trainer.gui_caller: 0 = benign, 1 = malicious,
    anything else = malware.
    """
    url = E1.get()
    main.process_test_url(url, 'test_features.csv')
    return_ans = tr.gui_caller('url_features.csv', 'test_features.csv')
    # The classifier's verdict is the second whitespace-separated token.
    a = str(return_ans).split()
    if int(a[1]) == 0:
        tkMessageBox.showinfo("URL Checker Result", "The URL " + url + " is Benign")
    elif int(a[1]) == 1:
        tkMessageBox.showinfo("URL Checker Result", "The URL " + url + " is Malicious")
    else:
        tkMessageBox.showinfo("URL Checker Result", "The URL " + url + " is Malware")

B1 = Button(bottomframe, text="Submit", command=submitCallBack)
B1.pack()
root.mainloop()
|
986,313 | 1bad7f8f038c98bb64cf8bc8519f3ba5aefe4608 | import google_vision
import analyze
import classification
import sys
import numpy as np
if __name__ == '__main__':
    # Classify an image given on the command line: fetch labels from the
    # Google Vision API, convert them to features, then predict.
    model = classification.Classifier()
    filename = sys.argv[1]
    json_labels = google_vision.getJson(filename)
    selected_labels = analyze.toDataFrame(json_labels)
    # print (selected_labels)
    labels = []
    # Flatten the label values into an array for the classifier.
    for key, value in selected_labels.items():
        labels.append(value)
    labels = np.array(labels)
    # print(labels)
    print (model.predict(labels))
    print (model.getPlot(labels))
    # classification.naive_bayes(selected_labels)
|
986,314 | 32126558e8b284e79391df4625628245935c9719 | from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from webshop.forms import MyUserCreationForm, MyAuthenticationForm, MyUserChangeForm, MyPasswordResetForm, MyPasswordChangeForm, AddressForm
from webshop.models import Product, Type, Address, Order, Rating, Comment, Statistic, SaleItem, OrderItem, ShippingMethod
from django.contrib.auth.models import User
from django.core.context_processors import csrf
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from django.contrib import messages
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.utils import simplejson as json
from django.contrib.auth.decorators import login_required
from datetime import datetime
from django.contrib.auth.tokens import default_token_generator
import md5
# NOTE(review): hard-coded secret committed to source — should live in
# settings/environment instead.
secret_key = '37e383c1182a6bab1524c9a7c0fc4557'

"""
    Nintendo Game & Watch Shop
    Root of the project.

    Author(s): Markku Laine
"""
def root( request ):
    """Redirect GET requests for the project root to the home view."""
    # Handle GET requests
    if request.method == "GET":
        return redirect( "webshop.views.home" )
    # Handle other requests
    else:
        raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > Home
Author(s): Kalle Saila
"""
def home( request ):
# Handle GET requests
if request.method == "GET":
# Retrieve all types
types = Type.objects.all().order_by( "name" )
variables = { "types" : types }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/index.html", variables, context )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > Home > Search Results
Author(s): Kalle Saila
"""
def search( request ):
# Handle GET requests
if request.method == "GET":
querystring = request.GET["q"]
products = Product.objects.filter(title__icontains=querystring).order_by( "title" )
variables = { "products" : products }
context = RequestContext( request )
return render_to_response( "webshop/search.html", variables, context )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > Home > Category
Author(s): Kalle Saila
"""
def category( request, type_id ):
# Handle GET requests
if request.method == "GET":
# Retrieve type
type = get_object_or_404( Type, id=type_id )
variables = { "type" : type }
context = RequestContext( request )
return render_to_response( "webshop/category.html", variables, context )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > Register
Author(s): Markku Laine
"""
def register( request ):
# Redirect user to Home if (s)he is currently logged in
if request.user.is_authenticated():
return redirect( "webshop.views.home" )
# Create a new address. At first, associate a dummy user with the address
address = Address()
dummyUser = User()
address.user = dummyUser
# Handle POST requests
if request.method == "POST":
myUserCreationForm = MyUserCreationForm( data=request.POST )
addressForm = AddressForm( data=request.POST, instance=address )
# Check the validity of the forms
if myUserCreationForm.is_valid() and addressForm.is_valid():
user = myUserCreationForm.save()
# Automatically login the user after successful registration
user.backend = "django.contrib.auth.backends.ModelBackend"
auth_login( request, user )
# Retrieve the address
userAddress = addressForm.instance
# Replace the dummy user with the currently logged-in user
userAddress.user = user
# Save user's address
userAddress.save()
# After a successful registration, redirect the user to the page (s)he came from based on 1) next URL parameter and 2) default to home
next = request.POST.get( "next", reverse( "webshop.views.home" ) )
return redirect( next )
else:
#print "Forms are not valid!"
# After a failed registration, redirect the user to the page (s)he came from based on 1) next URL parameter and 2) default to home
next = request.POST.get( "next", reverse( "webshop.views.home" ) )
# Handle GET requests
elif request.method == "GET":
myUserCreationForm = MyUserCreationForm( initial={} )
addressForm = AddressForm( initial={} )
# After registration, redirect the user to the page (s)he came from based on 1) next URL parameter, 2) HTTP REFERER, and 3) default to home
next = request.GET.get( "next", request.META.get( "HTTP_REFERER", reverse( "webshop.views.home" ) ) )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "form" : myUserCreationForm, "next" : next, "addressForm" : addressForm }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/register.html", variables, context )
"""
Nintendo Game & Watch Shop > Login
Author(s): Markku Laine
"""
def login( request ):
# Redirect user to Home if (s)he is currently logged in
if request.user.is_authenticated():
return redirect( "webshop.views.home" )
# Handle POST requests
if request.method == "POST":
myAuthenticationForm = MyAuthenticationForm( data=request.POST )
# Check the validity of the form
if myAuthenticationForm.is_valid():
## Retrieve username and password parameters
username = myAuthenticationForm.cleaned_data.get( "username", "" )
password = myAuthenticationForm.cleaned_data.get( "password", "" )
# Authenticate and login the user
user = authenticate( username=username, password=password )
auth_login( request, user )
# After a successful login, redirect the user to the page (s)he came from based on 1) next URL parameter and 2) default to home
next = request.POST.get( "next", reverse( "webshop.views.home" ) )
return redirect( next )
else:
#print "Form is not valid!"
# After a failed login, redirect the user to the page (s)he came from based on 1) next URL parameter and 2) default to home
next = request.POST.get( "next", reverse( "webshop.views.home" ) )
# Handle GET requests
elif request.method == "GET":
myAuthenticationForm = MyAuthenticationForm( initial={} )
# After login, redirect the user to the page (s)he came from based on 1) next URL parameter, 2) HTTP REFERER, and 3) default to home
next = request.GET.get( "next", request.META.get( "HTTP_REFERER", reverse( "webshop.views.home" ) ) )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
# If the user comes from the password reset page, then redirect to the home page
if next.find( "/webshop/passwordreset/" ) != -1:
next = reverse( "webshop.views.home" )
variables = { "form" : myAuthenticationForm, "next" : next }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/login.html", variables, context )
"""
Nintendo Game & Watch Shop > Password Reset
Author(s): Markku Laine
"""
def password_reset( request ):
# Handle GET requests
if request.method == "GET":
return redirect( "webshop.views.request_new_password" )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > Password Reset > Request New Password
Author(s): Markku Laine
"""
def request_new_password( request ):
# Redirect user to Home if (s)he is currently logged in
if request.user.is_authenticated():
return redirect( "webshop.views.home" )
# Handle POST requests
if request.method == "POST":
myPasswordResetForm = MyPasswordResetForm( data=request.POST )
# Check the validity of the form
if myPasswordResetForm.is_valid():
# Email instructions to the user how to reset the password
myPasswordResetForm.save( domain_override=None, use_https=False, token_generator=default_token_generator )
return redirect( "webshop.views.new_password_requested" )
#else:
#print "Form is not valid!"
# Handle GET requests
elif request.method == "GET":
myPasswordResetForm = MyPasswordResetForm( initial={} )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "form" : myPasswordResetForm }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/passwordreset/request_new_password.html", variables, context )
"""
Nintendo Game & Watch Shop > Password Reset > New Password Requested
Author(s): Markku Laine
"""
def new_password_requested( request ):
# Redirect user to Home if (s)he is currently logged in
if request.user.is_authenticated():
return redirect( "webshop.views.home" )
# Handle GET requests
if request.method == "GET":
variables = {}
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/passwordreset/new_password_requested.html", variables, context )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > Password Reset > New Password Set
Author(s): Markku Laine
"""
def new_password_set( request ):
# Redirect user to Home if (s)he is currently logged in
if request.user.is_authenticated():
return redirect( "webshop.views.home" )
# Handle GET requests
if request.method == "GET":
variables = {}
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/passwordreset/new_password_set.html", variables, context )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > Logout
Author(s): Markku Laine
"""
def logout( request ):
# Handle GET requests
if request.method == "GET":
# Logout the currently logged-in user
auth_logout( request )
# After a successful logout, redirect the user to the page (s)he came from based on 1) HTTP REFERER and 2) default to home
next = request.META.get( "HTTP_REFERER", reverse( "webshop.views.home" ) )
return redirect( next )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > My Account
Author(s): Markku Laine
"""
@login_required
def myaccount( request ):
# Handle GET requests
if request.method == "GET":
return redirect( "webshop.views.account_details" )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > My Account > Account Details
Author(s): Markku Laine
"""
@login_required
def account_details( request ):
# Handle POST requests
if request.method == "POST":
myUserChangeForm = MyUserChangeForm( data=request.POST, instance=request.user )
# Check the validity of the form
if myUserChangeForm.is_valid():
# Save changed account details
myUserChangeForm.save()
messages.success( request, "Account details have been successfully updated." ) # Levels: info, success, warning, and error
return redirect( "webshop.views.account_details" )
#else:
#print "Form is not valid!"
# Handle GET requests
elif request.method == "GET":
myUserChangeForm = MyUserChangeForm( instance=request.user )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "form" : myUserChangeForm }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/myaccount/account_details.html", variables, context )
"""
Nintendo Game & Watch Shop > My Account > Change Password
Author(s): Markku Laine
"""
@login_required
def change_password( request ):
# Handle POST requests
if request.method == "POST":
myPasswordChangeForm = MyPasswordChangeForm( data=request.POST, user=request.user )
# Check the validity of the form
if myPasswordChangeForm.is_valid():
# Save changed password
myPasswordChangeForm.save()
messages.success( request, "Password has been successfully changed." ) # Levels: info, success, warning, and error
return redirect( "webshop.views.change_password" )
#else:
#print "Form is not valid!"
# Handle GET requests
elif request.method == "GET":
myPasswordChangeForm = MyPasswordChangeForm( request.user )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "form" : myPasswordChangeForm }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/myaccount/change_password.html", variables, context )
"""
Nintendo Game & Watch Shop > My Account > Address Book
Author(s): Markku Laine
"""
@login_required
def address_book( request ):
# Handle GET requests
if request.method == "GET":
addresses = Address.objects.filter( user=request.user )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "addresses" : addresses, "first_name" : request.user.first_name, "last_name" : request.user.last_name }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/myaccount/address_book.html", variables, context )
"""
Nintendo Game & Watch Shop > My Account > Address Book > New Address
Author(s): Markku Laine
"""
@login_required
def address_book_new( request ):
# Create a new address. Associate the currently logged-in user with the address
address = Address()
address.user = request.user
# Handle POST requests
if request.method == "POST":
addressForm = AddressForm( data=request.POST, instance=address )
# Check the validity of the form
if addressForm.is_valid():
# Save new address
addressForm.save()
messages.success( request, "New address has been successfully saved." ) # Levels: info, success, warning, and error
return redirect( "webshop.views.address_book" )
#else:
#print "Form is not valid!"
# Handle GET requests
elif request.method == "GET":
addressForm = AddressForm( initial={} )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "form" : addressForm, "first_name" : request.user.first_name, "last_name" : request.user.last_name }
context = RequestContext( request )
#context.update( csrf( request ) )
return render_to_response( "webshop/myaccount/address_book_new.html", variables, context )
"""
Nintendo Game & Watch Shop > My Account > Address Book > Edit Address
Author(s): Markku Laine
"""
@login_required
def address_book_edit( request, address_id=None ):
# Retrieve the address. Users are allowed to edit their own addresses only!
address = get_object_or_404( Address, pk=address_id, user=request.user )
# Handle POST requests
if request.method == "POST":
addressForm = AddressForm( data=request.POST, instance=address )
# Check the validity of the form
if addressForm.is_valid():
# Save changed address
addressForm.save()
messages.success( request, "Address has been successfully updated." ) # Levels: info, success, warning, and error
return redirect( "webshop.views.address_book" )
#else:
#print "Form is not valid!"
# Handle GET requests
elif request.method == "GET":
addressForm = AddressForm( instance=address )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "form" : addressForm, "first_name" : request.user.first_name, "last_name" : request.user.last_name }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/myaccount/address_book_edit.html", variables, context )
"""
Nintendo Game & Watch Shop > My Account > Address Book > Delete Address
Author(s): Markku Laine
"""
@login_required
def address_book_delete( request, address_id=None ):
# Retrieve user's addresses
addresses = Address.objects.filter( user=request.user )
# Users are not allowed to delete their only address
if addresses.count() <= 1:
return redirect( "webshop.views.address_book" )
# Handle POST requests
if request.method == "POST":
# Retrieve the address. Users are allowed to edit their own addresses only!
address = get_object_or_404( Address, pk=address_id, user=request.user )
# Delete address
address.delete()
messages.success( request, "Address has been successfully deleted." ) # Levels: info, success, warning, and error
return redirect( "webshop.views.address_book" )
# Handle GET requests
elif request.method == "GET":
# Retrieve the address. Users are allowed to edit their own addresses only!
address = get_object_or_404( Address, pk=address_id, user=request.user )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "address" : address, "first_name" : request.user.first_name, "last_name" : request.user.last_name }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/myaccount/address_book_delete.html", variables, context )
"""
Nintendo Game & Watch Shop > My Account > Completed Orders
Author(s): Markku Laine
"""
@login_required
def completed_orders( request ):
# Handle GET requests
if request.method == "GET":
# Retrieve completed (paid) orders of the currently logged-in user
orders = Order.objects.filter( user=request.user, paid=True )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "orders" : orders }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/myaccount/completed_orders.html", variables, context )
"""
Author(s): Markku Laine
"""
def about( request ):
# Handle GET requests
if request.method == "GET":
variables = {}
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/about.html", variables, context )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > Credits
Author(s): Markku Laine
"""
def credits( request ):
# Handle GET requests
if request.method == "GET":
variables = {}
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/credits.html", variables, context )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > Admin
Author(s): Markku Laine
"""
@login_required
def admin( request ):
# Redirect user to Home if (s)he is has not access rights
if not request.user.is_staff:
return redirect( "webshop.views.home" )
# Handle GET requests
if request.method == "GET":
return redirect( "webshop.views.admin_paid_orders" )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Nintendo Game & Watch Shop > Admin > Paid Orders
Author(s): Markku Laine
"""
@login_required
def admin_paid_orders( request, order_id=None ):
# Redirect user to Home if (s)he is has not access rights
if not request.user.is_staff:
return redirect( "webshop.views.home" )
# Handle POST requests
if request.method == "POST":
# Retrieve order id
order_id = request.POST.get( "order_id", 0 )
# Retrieve the order
order = get_object_or_404( Order, pk=order_id )
order.delivered = True
order.save()
# Retrieve paid orders
orders = Order.objects.filter( paid=True, delivered=False )
# Handle GET requests
elif request.method == "GET":
# Retrieve paid orders
orders = Order.objects.filter( paid=True, delivered=False )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "orders" : orders }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/admin/paid_orders.html", variables, context )
"""
Nintendo Game & Watch Shop > Admin > Delivered Orders
Author(s): Markku Laine
"""
@login_required
def admin_delivered_orders( request ):
# Redirect user to Home if (s)he is has not access rights
if not request.user.is_staff:
return redirect( "webshop.views.home" )
# Handle GET requests
if request.method == "GET":
# Retrieve paid & delivered orders
orders = Order.objects.filter( paid=True, delivered=True )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "orders" : orders }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/admin/delivered_orders.html", variables, context )
"""
Nintendo Game & Watch Shop > Admin > Statistics
Author(s): Kalle Saila
"""
@login_required
def admin_statistics( request ):
# Redirect user to Home if (s)he is has not access rights
if not request.user.is_staff:
return redirect( "webshop.views.home" )
# Handle GET requests
if request.method == "GET":
# Retrieve paid & delivered orders
orders = []
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { "orders" : orders }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/admin/statistics.html", variables, context )
"""
Author(s): Juha Loukkola
"""
@login_required
def payment_pay( request ):
#if request.method == 'POST':
user = get_object_or_404(User, id=request.user.id)
sm = request.session.get('shippingMethod')
shippingMethod = get_object_or_404(ShippingMethod, name=sm)
# create an order from items in the cart
order = Order(date=datetime.now(),
user=user,
delivered=False,
paid=False,
shippingMethod=shippingMethod,
address=user.address_set.all()[0])
order.save()
# Add orderItems to the order and save them to DB
orderItems = request.session.get('orderItems')
for item in orderItems:
item.order = order
item.save()
# construct parameters for interfacing the net bank
pid = order.id
sid = 'Disk-kun'
amount = order.getTotal()
success_url = request.build_absolute_uri("/webshop/payment/success")
cancel_url = request.build_absolute_uri("/webshop/payment/cancel")
error_url = request.build_absolute_uri("/webshop/payment/error")
# calsulate a cehcksum
checksumstr = "pid=%s&sid=%s&amount=%s&token=%s"%(pid, sid, amount, secret_key)
m = md5.new(checksumstr)
checksum = m.hexdigest()
variables = { "pid":pid, "sid":sid, "amount":amount, "success_url":success_url, "cancel_url":cancel_url, "error_url":error_url, "checksum":checksum}
context = RequestContext( request )
return render_to_response("webshop/payment/confirm.html", variables, context)
"""
Author(s): Juha Loukkola
"""
def payment_confirm( request ):
variables = {}
context = RequestContext( request )
return render_to_response( "webshop/payment/confirm.html", variables, context )
"""
Author(s): Juha Loukkola
"""
@login_required
def payment_success( request ):
# Handle GET requests
if request.method == "GET":
pid = request.GET.get('pid')
ref = request.GET.get('ref')
checksum = request.GET.get('checksum')
# validate checksum
checksumstr = "pid=%s&ref=%s&token=%s"%(pid, ref, secret_key)
m = md5.new(checksumstr)
if checksum == m.hexdigest():
order = get_object_or_404(Order, id=pid)
order.paid = True
order.save()
if 'orderItems' in request.session:
del request.session['orderItems']
if 'shippingMethod' in request.session:
del request.session['shippingMethod']
if 'numberOfCartItems' in request.session:
request.session[ "numberOfCartItems" ] = 0
variables = {}
context = RequestContext( request )
return render_to_response( "webshop/payment/success.html", variables, context )
else:
context = RequestContext( request )
return render_to_response( "webshop/payment/error.html", context )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Author(s): Juha Loukkola
"""
def payment_cancel( request ):
variables = {}
context = RequestContext( request )
return render_to_response( "webshop/payment/cancel.html", variables, context )
"""
Author(s): Juha Loukkola
"""
def payment_error( request ):
variables = {}
context = RequestContext( request )
return render_to_response( "webshop/payment/error.html", variables, context )
"""
Author(s): Juha Loukkola
"""
#@login_required
def cart( request ):
# Handle POST requests
if request.method == "POST":
print "Hello"
# Handle GET requests
elif request.method == "GET":
orderItems = request.session.get('orderItems', [])
#if 'shippingMethod' in request.session:
sm = request.session.get('shippingMethod')
if sm:
shippingMethod = get_object_or_404(ShippingMethod, name=sm)
else:
request.session['shippingMethod'] = 'Standard'
shippingMethod = get_object_or_404(ShippingMethod, name='Standard')
# Calculate subtotal
subtotal = 0
for orderItem in orderItems:
subtotal += ( orderItem.quantity * orderItem.saleItem.price )
subtotalInEuros = u"%.2f \u20AC" % subtotal
shippingMethodInEuros = u"%.2f \u20AC" % shippingMethod.price
total = subtotal + shippingMethod.price
totalInEuros = u"%.2f \u20AC" % total
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
variables = { 'orderItems': orderItems, 'shippingMethod' : shippingMethod, 'shippingMethodInEuros' : shippingMethodInEuros, "subtotalInEuros" : subtotalInEuros, "totalInEuros" : totalInEuros }
context = RequestContext( request )
context.update( csrf( request ) )
return render_to_response( "webshop/cart_markku.html", variables, context )
"""
Author(s): Markku Laine
"""
def empty_cart( request ):
# Handle POST requests
if request.method == "POST":
# Empty cart
request.session[ 'orderItems' ] = []
request.session[ "numberOfCartItems" ] = 0
return redirect( "webshop.views.cart" )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Author(s): Juha Loukkola
"""
def add_to_cart( request ):
if request.method == 'POST':
# Get the cart from the session
orderItems = request.session.get( 'orderItems', [] )
# Get SaleItem
#if 'saleitem_id' in request.POST:
si_id = request.POST.get('saleitem_id')
if si_id:
si = SaleItem.objects.get(id=si_id)
else:
return HttpResponseBadRequest()
# Check if there is allredy similar item in the cart ...
similarItem = None
for item in orderItems:
if item.saleItem.id == si.id:
similarItem = item
# ... if similar item exist increase its quantity ...
if similarItem:
similarItem.quantity += 1
# ... else create new item
else:
oi = OrderItem()
oi.quantity = 1
oi.saleItem = si
orderItems.append(oi)
# Save the cart into the session
request.session['orderItems'] = orderItems
# Calculate the number of cart items
numberOfCartItems = 0
for orderItem in orderItems:
numberOfCartItems += orderItem.quantity
request.session[ "numberOfCartItems" ] = numberOfCartItems
# Handle Ajax request
if request.is_ajax():
# Return the number of cart items for updating the cart items number in the navigation
my_json = json.dumps( { "numberOfCartItems" : numberOfCartItems } )
return HttpResponse( my_json, mimetype="application/json" )
# Handle normal request
else:
next = request.POST.get( "next", reverse( "webshop.views.cart" ) )
return redirect( next )
# Handle other requests
else:
raise Http404( "%s method is not supported." % request.method )
"""
Author(s): Juha Loukkola
"""
def update_cart( request ):
#if request.is_ajax():
if request.method == 'POST':
# Get the cart
# orderItems = request.session.get('orderItems', [])
# for item in orderItems:
# item_quantity = int(request.POST.get(item.saleItem.id))
# item.quantity = item_quantity
# update the shipping method
if 'shipping_method' in request.POST:
request.session['shippingMethod'] = request.POST.get('shipping_method')
return redirect( "webshop.views.cart" )
else:
return HttpResponseBadRequest()
else:
return HttpResponseBadRequest()
"""
def add_to_cart( request ):
#if request.is_ajax():
if request.method == 'POST':
cart = request.session.get( 'cart' )
# If this is the first time the cart is refered during this session
if cart == None:
if request.user.is_authenticated():
user = get_object_or_404(User, id=request.user.id)
# If the user has unfinished cart from previous session then get it ...
cart = user.order_set.filter(paid=False)
# ... else create new one
if cart == None:
cart = Order()
cart.user = user
cart.date = datetime.now()
cart.shipingmethod = None
cart.paid = False
cart.delivered = False
cart.save()
# If the user is not authenticated then just create a cart
else:
cart = Order()
cart.user = get_object_or_404(User, username='anonymous')
cart.date = datetime.now()
cart.shipingmethod = ShipingMethod()
cart.paid = False
cart.delivered = False
cart.save()
request.session['cart'] = cart
# Get SaleItem
si_id = request.POST.get('saleitem_id')
si = SaleItem.objects.get(id=si_id)
# Create a new OrderItem and add it into the cart
oi = OrderItem()
oi.saleItem=si
oi.order=cart
oi.quantity=1
oi.save()
variables = {}
context = RequestContext( request )
return render_to_response( "webshop/cart.html", variables, context )
else:
return HttpResponseBadRequest()
"""
"""
Author(s): Kalle Saila
"""
### AJAX ###
def rating( request, product_id ):
if request.is_ajax():
if request.method == 'POST':
try:
product = Product.objects.get(id=product_id)
try:
rate = Rating.objects.get(product=product, user=request.user)
newrate = request.POST['rate']
rate.rate = newrate
rate.save()
average = product.average()
count = product.rating_set.count()
my_json = json.dumps({'average':average, 'count':count})
return HttpResponse(my_json, mimetype="application/json")
except Rating.DoesNotExist:
rate = Rating()
rate.rate = request.POST['rate']
rate.user = request.user
rate.product = product
rate.save()
average = product.average()
count = product.rating_set.count()
my_json = json.dumps({'average':average, 'count':count})
return HttpResponse(my_json, mimetype="application/json")
except Product.DoesNotExist:
my_json = json.dumps({'error':'product not found'})
return HttpResponse(my_json, mimetype="application/json")
else:
try:
product = Product.objects.get(id=product_id)
average = product.average()
count = product.rating_set.count()
my_json = json.dumps({'average':average, 'count':count})
return HttpResponse(my_json, mimetype="application/json")
except Product.DoesNotExist:
my_json = json.dumps({'error':'product not found'})
return HttpResponse(my_json, mimetype="application/json")
else:
my_json = json.dumps({})
return HttpResponse(my_json, mimetype="application/json")
def comment( request, product_id ):
    """Ajax endpoint for product comments.

    Ajax POST: store a new comment (optionally a reply when "commentsOn"
    is not the string 'None') and return its metadata as JSON; an unknown
    product yields a JSON error payload. Every other request gets an
    empty JSON object.

    (Removed a dead `except Product.DoesNotExist` in the GET branch that
    wrapped only a constant json.dumps({}) and could never trigger.)
    """
    if request.is_ajax() and request.method == 'POST':
        try:
            product = Product.objects.get(id=product_id)
            comment = Comment()
            comment.user = request.user
            # A value other than the literal string 'None' marks a reply.
            if request.POST['commentsOn'] != 'None':
                comment.commentsOn = Comment.objects.get(id=request.POST['commentsOn'])
            comment.contents = request.POST['comment']
            comment.product = product
            comment.published = datetime.today()
            comment.save()
            my_json = json.dumps({'id':comment.id, 'commentsOn':request.POST['commentsOn'], 'published':comment.published.__str__(), 'user': comment.user.username, 'admin': comment.user.is_staff})
            return HttpResponse(my_json, mimetype="application/json")
        except Product.DoesNotExist:
            my_json = json.dumps({'error':'product not found'})
            return HttpResponse(my_json, mimetype="application/json")
    # Ajax GET and non-Ajax requests: nothing to do
    return HttpResponse(json.dumps({}), mimetype="application/json")
def ajaxproducts( request ):
    """Return every product as a JSON list of {id, type, label} records.

    The Ajax and non-Ajax branches were byte-for-byte identical, so the
    `is_ajax()` split has been removed; the payload is the same either way.
    """
    my_list = [ { "id" : p.id, "type" : p.type.id, "label" : p.title }
                for p in Product.objects.all() ]
    return HttpResponse( json.dumps( my_list ), mimetype="application/json" )
def ajaxstatisticscommentcount( request ):
    """Return [{name, y}] comment counts for products with at least one
    comment, as JSON (chart-friendly shape)."""
    my_list = []
    for p in Product.objects.all():
        # Hoisted: the count used to be queried twice per product.
        n_comments = p.comment_set.count()
        if n_comments > 0:
            my_list.append({ "name" : p.title, "y" : n_comments })
    return HttpResponse(json.dumps(my_list), mimetype="application/json")
def productview( request ):
    """Return [{name, y}] page-view counts per product statistic, as JSON."""
    data = [ { "name" : s.product.title, "y" : s.numberOfViews }
             for s in Statistic.objects.all() ]
    return HttpResponse(json.dumps(data), mimetype="application/json")
def addproductview( request, product_id ):
    """Increment (or create) the page-view counter of a product.

    Always responds with json.dumps("{}") -- a JSON *string* literal,
    preserved for backward compatibility with existing clients. An
    unknown product id is silently ignored, as before.

    (The except branch used to duplicate the identical fall-through
    response; consolidated to a single return.)
    """
    try:
        product = Product.objects.get(id=product_id)
    except Product.DoesNotExist:
        pass
    else:
        try:
            stat = Statistic.objects.get(product=product)
            stat.numberOfViews = stat.numberOfViews + 1
        except Statistic.DoesNotExist:
            # First view ever: create the counter row
            stat = Statistic()
            stat.numberOfViews = 1
            stat.product = product
        stat.save()
    return HttpResponse(json.dumps("{}"), mimetype="application/json")
def commentdelete( request, comment_id ):
    """Delete a comment (staff only); comments that have replies cannot
    be removed. All outcomes are reported as JSON.

    (Removed an unreachable duplicate of the success response that
    followed the try/except -- every path already returned before it.)
    """
    if not request.user.is_staff:
        my_json = json.dumps({'error':'not authorized'})
        return HttpResponse(my_json, mimetype="application/json")
    try:
        comment = Comment.objects.get(id=comment_id)
    except Comment.DoesNotExist:
        my_json = json.dumps({'error':'comment not found'})
        return HttpResponse(my_json, mimetype="application/json")
    # Replies reference this comment; deleting it would orphan them.
    if Comment.objects.filter(commentsOn=comment).count() > 0:
        my_json = json.dumps({'error':'comment not removable'})
        return HttpResponse(my_json, mimetype="application/json")
    comment.delete()
    # json.dumps("{}") yields the JSON string "\"{}\"" -- kept as-is for
    # backward compatibility with existing clients.
    return HttpResponse(json.dumps("{}"), mimetype="application/json")
986,315 | 67d93e68ee89e0680a23faa936f39034ff83bd00 | import base64
from datetime import datetime
import io
import itertools
import os
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import sys
from textwrap import dedent
import time
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import dash
from dash.dependencies import Input, Output, State
import dash_html_components as html
import dash_core_components as dcc
import dash_table_experiments as dt
from .IntegrationTests import IntegrationTests
from .utils import assert_clean_console
# Small fixture table shared by every DataTable test below.
DF_SIMPLE = pd.DataFrame({
    'x': ['A', 'B', 'C', 'D', 'E', 'F'],
    'y': [4, 3, 1, 2, 3, 6],
    'z': ['a', 'b', 'c', 'a', 'b', 'c']
})
# DataTable consumes rows as a list of {column: value} record dicts.
ROWS = DF_SIMPLE.to_dict('records')
class Tests(IntegrationTests):
    """Browser-based integration tests for dash_table_experiments.DataTable,
    with Percy visual-regression snapshots."""

    def setUp(self):
        pass

    def wait_for_element_by_css_selector(self, selector):
        """Poll the DOM for up to 20 seconds for `selector`.

        Returns the first matching element, or re-raises the last lookup
        error on timeout.  Fix: the original did `raise e` after the loop,
        which in Python 3 raises an UnboundLocalError because the
        `except ... as e` binding is deleted when the handler exits.
        """
        last_error = None
        start_time = time.time()
        while time.time() < start_time + 20:
            try:
                return self.driver.find_element_by_css_selector(selector)
            except Exception as e:
                last_error = e
            time.sleep(0.25)
        raise last_error

    def wait_for_text_to_equal(self, selector, assertion_text):
        """Poll for up to 20 seconds until the element's text equals
        `assertion_text`; re-raises the last assertion error on timeout.
        (Same `raise e` scoping fix as above.)"""
        last_error = None
        start_time = time.time()
        while time.time() < start_time + 20:
            el = self.wait_for_element_by_css_selector(selector)
            try:
                return self.assertEqual(el.text, assertion_text)
            except Exception as e:
                last_error = e
            time.sleep(0.25)
        raise last_error

    def snapshot(self, name):
        """Take a Percy snapshot, but only when Percy credentials are
        configured in the environment (e.g. on CI)."""
        if 'PERCY_PROJECT' in os.environ and 'PERCY_TOKEN' in os.environ:
            python_version = sys.version.split(' ')[0]
            print('Percy Snapshot {}'.format(python_version))
            self.percy_runner.snapshot(name=name)

    def test_render_table(self):
        """Render a gallery of DataTables exercising each display option,
        then snapshot it and assert the browser console is clean."""
        app = dash.Dash()
        app.layout = html.Div([
            html.Div(id='waitfor'),
            html.Label('Default'),
            dt.DataTable(
                rows=ROWS
            ),
            html.Label('Editable'),
            dt.DataTable(
                rows=ROWS,
                editable=True
            ),
            html.Label('Filterable'),
            dt.DataTable(
                rows=ROWS,
                filterable=True
            ),
            html.Label('Sortable'),
            dt.DataTable(
                rows=ROWS,
                sortable=True
            ),
            html.Label('Resizable'),
            # NOTE(review): this entry passes filterable=True, not a
            # resizable option -- looks like a copy/paste slip; confirm the
            # intended DataTable property before changing it.
            dt.DataTable(
                rows=ROWS,
                filterable=True
            ),
            html.Label('Column Widths'),
            dt.DataTable(
                rows=ROWS,
                column_widths=[30, 100, 80]
            ),
            html.Label('Columns'),
            dt.DataTable(
                rows=ROWS,
                columns=['y', 'x', 'z']
            ),
            html.Label('Row Selectable'),
            dt.DataTable(
                rows=ROWS,
                row_selectable=True
            ),
            html.Label('Selected Indices'),
            dt.DataTable(
                rows=ROWS,
                selected_row_indices=[2]
            ),
            html.Label('Header Row Height'),
            dt.DataTable(
                rows=ROWS,
                header_row_height=100
            ),
            html.Label('Min Height'),
            dt.DataTable(
                rows=ROWS,
                min_height=600
            ),
            html.Label('Min Width'),
            dt.DataTable(
                rows=ROWS,
                min_width=400
            ),
            html.Label('Row Height'),
            dt.DataTable(
                rows=ROWS,
                row_height=200
            )
        ])
        self.startServer(app)
        self.wait_for_element_by_css_selector('#waitfor')
        self.snapshot('gallery')
        assert_clean_console(self)

    def test_update_rows_from_callback(self):
        """Rows supplied by a callback replace the initial empty row when
        the button is clicked."""
        app = dash.Dash()
        app.layout = html.Div([
            html.Button(
                children='load',
                id='button',
                n_clicks=0
            ),
            dt.DataTable(
                id='dt',
                rows=[{}]
            )
        ])

        @app.callback(Output('dt', 'rows'),
                      [Input('button', 'n_clicks')])
        def update_rows(n_clicks):
            if n_clicks > 0:
                return ROWS
            else:
                return [{}]

        self.startServer(app)
        self.snapshot('test_update_rows_from_callback-1')
        self.wait_for_element_by_css_selector('#button').click()
        time.sleep(5)
        self.snapshot('test_update_rows_from_callback-2')

    def test_height(self):
        """Snapshot the full cartesian product of row count, min_height,
        filterable, row_height and max_rows_in_viewport options."""
        def gen_rows(length):
            return [
                {'a': 'AA', 'b': i} for i in range(length)
            ]
        options = {
            'row': [
                ['single row', gen_rows(1)],
                ['five rows', gen_rows(5)],
                ['ten rows', gen_rows(10)],
                ['thirty rows', gen_rows(30)]
            ],
            'min_height': [
                ['none', None],
                ['800', 800],
                ['200', 200]
            ],
            'filterable': [
                ['true', True],
                ['false', False]
            ],
            'row_height': [
                ['none', None],
                ['20', 20],
                ['50', 50]
            ],
            'max_rows_in_viewport': [
                ['none', None],
                ['2', 2],
                ['15', 15]
            ]
        }
        layout = []
        for opt in itertools.product(options['row'],
                                     options['min_height'],
                                     options['filterable'],
                                     options['row_height'],
                                     options['max_rows_in_viewport']):
            # None-valued options are omitted so the component defaults apply
            kwargs = {'rows': opt[0][1], 'filterable': opt[2][1]}
            if opt[1][1] is not None:
                kwargs['min_height'] = opt[1][1]
            if opt[3][1] is not None:
                kwargs['row_height'] = opt[3][1]
            if opt[4][1] is not None:
                kwargs['max_rows_in_viewport'] = opt[4][1]
            layout.extend([
                html.H3(
                    '''
                    {},
                    min_height={},
                    filterable={},
                    row_height={},
                    max_rows={}
                    '''.format(
                        *[o[0] for o in opt]
                    )),
                dt.DataTable(**kwargs)
            ])
        app = dash.Dash()
        app.layout = html.Div(layout + [html.Div(id='waitfor')])
        self.startServer(app)
        self.wait_for_element_by_css_selector('#waitfor')
        self.snapshot('heights')
|
986,316 | 5070f438200364275581621a7161a1fbb2785ab6 | import numpy as np
from tiramisu import *
import keras
import sys
import argparse
import os
import glob
import datetime
from PIL import Image
from scipy.misc import imresize
from preprocessing import load_image
from params import *
# Command-line interface: positional image names plus model/IO options.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_arg = parser.add_argument
# Image file names relative to --input_path; empty means "all files there".
add_arg('images', nargs='*', default=[])
add_arg('--model', default=MODEL_PATH+FROM_MODEL + ".h5", type=str)
add_arg('--input_path', default=INPUT_PATH, type=str)
add_arg('--output_path', default=OUTPUT_PATH, type=str)
# NOTE(review): type=tuple applied to a CLI string splits it into single
# characters -- only the default value works as intended. Confirm.
add_arg('--image_size', default=(2800, 1760), type=tuple, help='resolution to load images')
args = parser.parse_args()
def img_resize(img):
    """Grow `img` so both sides are exact multiples of the 224-px panel size.

    When the image already tiles evenly it is returned untouched; otherwise
    it is resized up to the next multiple and re-normalised with the
    dataset mean/std (mu, std come from params).
    """
    h, w, _ = img.shape
    new_h = h if h % 224 == 0 else (h // 224 + 1) * 224
    new_w = w if w % 224 == 0 else (w // 224 + 1) * 224
    if (new_h, new_w) == (h, w):
        return img
    return (imresize(img, (new_h, new_w)) / 255. - mu) / std
def split_panels(img, panel_size=224):
    """Cut an image into square tiles, scanning row by row, left to right.

    img: array of shape (h, w, c) where h and w are exact multiples of
        `panel_size` (see img_resize).
    panel_size: side length of each square tile (default 224; generalized
        from the previously hard-coded constant -- default behavior is
        unchanged).
    Returns an array of shape (n_tiles, panel_size, panel_size, c).
    """
    h, w, _ = img.shape
    num_vert_panels = h // panel_size
    num_hor_panels = w // panel_size
    panels = []
    for i in range(num_vert_panels):
        for j in range(num_hor_panels):
            panels.append(img[i * panel_size:(i + 1) * panel_size,
                              j * panel_size:(j + 1) * panel_size])
    return np.stack(panels)
def combine_panels(img, panels, panel_size=224):
    """Inverse of split_panels: stitch tiles back into one image.

    img: only consulted for its (h, w) shape; h and w must be exact
        multiples of `panel_size`.
    panels: sequence of tiles in row-major order, as produced by
        split_panels.
    panel_size: side length of each square tile (default 224; generalized
        from the previously hard-coded constant -- default behavior is
        unchanged).
    Returns the reassembled array of shape (h, w, ...).
    """
    h, w, _ = img.shape
    num_vert_panels = h // panel_size
    num_hor_panels = w // panel_size
    total = []
    p = 0
    for i in range(num_vert_panels):
        row = []
        for j in range(num_hor_panels):
            row.append(panels[p])
            p += 1
        total.append(np.concatenate(row, axis=1))
    return np.concatenate(total, axis=0)
def prediction_mask(img, target):
    """Overlay `target` on the de-normalised image as a white transparency
    mask and return the composite as an RGBA PIL image.

    img: normalised image array (de-normalised here via mu/std from params).
    target: per-pixel map in [0, 1]; higher values become more transparent.
    """
    base = Image.fromarray(((img * std + mu) * 255).astype('uint8'))
    alpha = np.expand_dims((225 * (1 - target)).astype('uint8'), axis=-1)
    overlay = Image.fromarray(np.concatenate(4 * [alpha], axis=-1))
    composed = Image.new("RGBA", base.size)
    composed = Image.alpha_composite(composed, base.convert('RGBA'))
    return Image.alpha_composite(composed, overlay)
def waldo_predict(img):
    """Run the tiramisu model over `img` one 224x224 panel at a time.

    Returns (resized_image, prediction_map); the prediction map is the
    stitched foreground-probability output matching the resized image.
    """
    resized = img_resize(img)
    tiles = split_panels(resized)
    raw_preds = model.predict(tiles, batch_size=6)
    pred_tiles = np.stack([reshape_pred(p) for p in raw_preds])
    return resized, combine_panels(resized, pred_tiles)
def reshape_pred(pred):
    """Reshape a flat per-pixel 2-class output to (224, 224, 2) and keep
    only the class-1 (target) probability map."""
    return np.reshape(pred, (224, 224, 2))[..., 1]
if __name__ == "__main__":
"""
This script makes predictions on a list of inputs with a pre-trained model,
and saves them as transparency masks over the original image.
# Example:
$ python predict.py image1.jpg image2.jpg
"""
images = args.images
image_size = args.image_size
if len(images) == 0:
images = glob.glob(os.path.join(args.input_path, "*"))
for i, image in enumerate(images):
images[i] = image[len(os.path.join(args.input_path)):]
input_shape = (224, 224, 3)
img_input = Input(shape=input_shape)
x = create_tiramisu(2, img_input, nb_layers_per_block=[4, 5, 7, 10, 12, 15], p=0.2, wd=1e-4)
model = Model(img_input, x)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.RMSprop(1e-3),
metrics=["accuracy"],
sample_weight_mode='temporal')
model.load_weights(args.model)
for i, image in enumerate(images):
time_start = datetime.datetime.now()
full_image_path = os.path.join(args.input_path, image)
input_file_name = os.path.basename(image)
input_file_name_without_extension = input_file_name[:input_file_name.index('.')]
full_image = load_image(full_image_path, image_size)
full_image_resized, full_prediction = waldo_predict(full_image)
mask = prediction_mask(full_image_resized, full_prediction)
mask.save(os.path.join(args.output_path, OUTPUT_PREFIX + input_file_name_without_extension + ".png"))
print("Saved " + OUTPUT_PREFIX + input_file_name_without_extension + ".png")
time_end = datetime.datetime.now()
print("Found target in", time_end - time_start) |
986,317 | 1618e7038777b22e00613f92fa0effda903ee141 | import pytest
import numpy as np
import torch
import torch.nn as nn
from alpaca.utils import model_builder
import alpaca.nn as ann
@pytest.fixture(scope="module")
def simple_model():
net = nn.Sequential(nn.Linear(10, 10), ann.Dropout())
return net
@pytest.fixture(scope="module")
def nn_simple_model():
def _nn_simple_model(dropout_rate):
net = nn.Sequential(nn.Linear(10, 10), nn.Dropout(p=dropout_rate))
return net
return _nn_simple_model
@pytest.fixture(scope="module")
def ann_dropout():
net = nn.Sequential(ann.Dropout())
return net
@pytest.fixture(scope="module")
def nn_dropout():
def _nn_net(dropout_rate):
net = nn.Sequential(nn.Dropout(p=dropout_rate))
return net
return _nn_net
@torch.no_grad()
def test_check_model_inference(simple_model):
    """The simple model maps a (1, 10) input to a (1, 10) output."""
    batch = torch.randn((1, 10))
    assert simple_model(batch).shape == (1, 10)
@torch.no_grad()
def test_check_build_zero_p(simple_model, seed):
    """With dropout_rate=1.0 every activation is dropped, so the output
    must be all zeros.

    (The name says "zero_p" but the rate under test is 1.0, i.e. zero
    keep-probability.)
    """
    simple_model = model_builder.build_model(simple_model, dropout_rate=1.0)
    x = torch.randn((1, 10))
    out = simple_model(x)
    # Fix: rtol against an expected value of exactly 0.0 is vacuous (the
    # relative tolerance scales with the expected value); use atol instead.
    np.testing.assert_allclose(out.sum(), 0.0, atol=1e-6)
@torch.no_grad()
def test_check_output_with_nn(ann_dropout, nn_dropout, seed, dropout_rate_extreme):
    """alpaca Dropout matches torch.nn.Dropout at extreme rates under a
    fixed seed.

    The construction and call order mirrors the original exactly, since
    both dropout layers draw from the shared RNG stream.
    """
    rate = dropout_rate_extreme
    x = torch.randn((1, 10))
    reference = nn_dropout(rate)
    candidate = model_builder.build_model(ann_dropout, dropout_rate=rate)
    out_alpaca = candidate(x)
    out_torch = reference(x)
    np.testing.assert_allclose(out_alpaca, out_torch, rtol=1e-6)
|
986,318 | 614687bdd3b9ba7a73161515558dc125d99e9f56 | # arm_curve.py (c) 2011 Phil Cote (cotejrp1)
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
'name': 'Armature Curve',
'author': 'Phil Cote, cotejrp1, (http://www.blenderaddons.com)',
'version': (0,1),
"blender": (2, 6, 0),
"api": 41098,
'location': '',
'description': 'Generates a curve character based on an existing armature',
'warning': '', # used for warning icon and text in addons panel
'category': 'Add Curve'}
import bpy
from pdb import set_trace
def add_spline(bone_chain, crv):
    """Append one spline to *crv* that runs through the heads of the bones in
    *bone_chain* and ends at the tail of the chain's last bone."""
    crv_type = bpy.context.scene.curve_type
    is_poly = crv_type == 'POLY'

    coords = []

    def _push(vec):
        # POLY points are 4D (x, y, z, w); BEZIER points are plain 3D.
        coords.extend((vec.x, vec.y, vec.z))
        if is_poly:
            coords.append(0)

    for bone in bone_chain:
        _push(bone.head_local)
    _push(bone_chain[-1].tail_local)

    # Build the spline itself; new splines already contain one point.
    spline = crv.splines.new(type=crv_type)
    if crv_type == 'BEZIER':
        points = spline.bezier_points
    else:
        points = spline.points
    points.add(len(bone_chain))
    points.foreach_set("co", coords)

    # Bezier points expose handles; make them automatic for a smooth curve.
    for point in points:
        if hasattr(point, "handle_left_type"):
            point.handle_left_type = "AUTO"
            point.handle_right_type = "AUTO"
def get_bone_chain(arm):
    """Yield lists of bones forming unbranched chains of armature data *arm*.

    A chain is cut (and yielded) at every bone that does not have exactly one
    child — i.e. at branch points and at chain tips.
    """
    chain = []
    for bone in arm.bones:
        chain.append(bone)
        if len(bone.children) != 1:
            yield chain
            chain = []
def make_arm_curve(arm_ob):
    """Create a new curve object holding one spline per bone chain of *arm_ob*."""
    curve_data = bpy.data.curves.new("crv", type="CURVE")
    for chain in get_bone_chain(arm_ob.data):
        add_spline(chain, curve_data)
    return bpy.data.objects.new("crv_ob", curve_data)
class CurveArmatureOp(bpy.types.Operator):
    '''Make a curve that binds itself to the selected armature.'''
    bl_idname = "curve.armature_curve"
    bl_label = "Curve Armature"

    @classmethod
    def poll(cls, context):
        # Only available while an armature object is active.
        ob = context.active_object
        return ob is not None and ob.type == 'ARMATURE'

    def execute(self, context):
        scene = bpy.context.scene
        armature = bpy.context.active_object
        curve_ob = make_arm_curve(armature)
        scene.objects.link(curve_ob)
        # Parent with armature deformation so the curve follows the bones.
        curve_ob.parent = armature
        curve_ob.parent_type = "ARMATURE"
        return {'FINISHED'}
class CurveArmaturePanel(bpy.types.Panel):
    """Tool-shelf panel exposing the operator and the curve-type choice."""
    bl_label = "Curve Armature"
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_context = "objectmode"

    def draw(self, context):
        layout = self.layout
        # Each column() call creates a fresh column, matching the original layout.
        layout.column().operator("curve.armature_curve")
        layout.column().prop(context.scene, "curve_type")
def register():
    """Attach the curve-type scene property and register operator + panel."""
    choices = [('BEZIER', 'BEZIER', 'BEZIER'), ('POLY', 'POLY', 'POLY')]
    bpy.types.Scene.curve_type = bpy.props.EnumProperty("curve type",
                                                        items=choices)
    bpy.utils.register_class(CurveArmatureOp)
    bpy.utils.register_class(CurveArmaturePanel)
def unregister():
    """Unregister the operator and panel.

    Note: the ``curve_type`` scene property added in register() is not
    removed here — it remains attached to bpy.types.Scene.
    """
    bpy.utils.unregister_class(CurveArmatureOp)
    bpy.utils.unregister_class(CurveArmaturePanel)

if __name__ == "__main__":
    register()
986,319 | 409a41a2405bc2f45c7460f39761f7b1c428ab3f | # -*- coding: utf-8 -*-
from __future__ import print_function
from config import get_config
import tensorflow as tf
import numpy as np
__AUTHOR__ = "kozistr"
__VERSION__ = "0.1"
cfg, _ = get_config() # configuration
# set random seed
np.random.seed(cfg.seed)
tf.set_random_seed(cfg.seed)
_init = tf.contrib.layers.variance_scaling_initializer(factor=3., mode='FAN_AVG', uniform=True)
_reg = tf.contrib.layers.l2_regularizer(cfg.l2_reg)
def embedding_table(inputs, vocab_size, embed_size, zero_pad=False,
                    trainable=True, scope="embedding", reuse=None):
    """ Look up embeddings for *inputs* in a (possibly freshly created) table.
    :param inputs: A 'Tensor' with type 'int8'/'int16'/'int32'/'int64'
                    containing the ids to be looked up in the table.
    :param vocab_size: An int. Vocabulary size (number of table rows).
    :param embed_size: An int. Size of each embedding vector.
    :param zero_pad: A boolean. If True, all the values of the first row
                    (id 0) are constant zeros (useful for padding tokens).
    :param trainable: A boolean. Whether to freeze the embedding matrix or not.
    :param scope: A str, Optional scope for 'variable_scope'.
    :param reuse: A boolean. Whether to reuse the weights of a previous layer
                    by the same name.
    :return: The looked-up embeddings for *inputs*.
    """
    with tf.variable_scope(scope, reuse=reuse):
        embed_table = tf.get_variable('embedding_table',
                                      shape=[vocab_size, embed_size],
                                      initializer=_init,
                                      trainable=trainable,
                                      dtype=tf.float32)
        if zero_pad:
            # Overwrite row 0 with zeros so id 0 always embeds to the zero vector.
            embed_table = tf.concat((tf.zeros(shape=[1, embed_size]), embed_table[1:, :]),
                                    axis=0)
    return tf.nn.embedding_lookup(embed_table, inputs)
def conv1d(inputs,
           n_filters=None, kernel=1, stride=1, dilated_rate=1,
           padding="SAME", use_bias=False, activation_fn=None,
           scope="conv1d", reuse=None):
    """ Convolution 1D Operation
    :param inputs: A '3D Tensor' with shape of [batch, time, depth]
    :param n_filters: An int, Conv1D filter size. Defaults to the input depth.
    :param kernel: An int. Conv1D kernel size.
    :param stride: An int. Conv1D stride size.
    :param dilated_rate: An int. Conv1D dilation size.
    :param padding: Either 'SAME' or 'VALID' or 'CAUSAL'.
    :param use_bias: A boolean.
    :param activation_fn: An Object. TF activation function.
    :param scope: A str, Optional scope for 'variable_scope'.
    :param reuse: A boolean. Whether to reuse the weights of a previous layer
                    by the same name.
    :return: conv1d result
    """
    with tf.variable_scope(scope, reuse=reuse):
        if n_filters is None:
            n_filters = inputs.get_shape().as_list()[-1]

        # Causal padding: pad only on the left so each output step never sees
        # future time steps. Bug fix: the original compared against the
        # misspelled "CASUAL", making the documented 'CAUSAL' option unreachable.
        if padding.upper() == "CAUSAL":
            pad_len = (kernel - 1) * dilated_rate  # left padding size
            inputs = tf.pad(inputs, [[0, 0], [pad_len, 0], [0, 0]])
            padding = "VALID"

        outputs = tf.layers.conv1d(inputs=inputs,
                                   filters=n_filters,
                                   kernel_size=kernel,
                                   strides=stride,
                                   dilation_rate=dilated_rate,
                                   padding=padding,
                                   activation=activation_fn,
                                   use_bias=use_bias,
                                   kernel_initializer=_init,
                                   kernel_regularizer=_reg,
                                   )
    return outputs
def biGRU(inputs, num_units=None, bidirection=False, scope='biGRU', reuse=None):
    """ (bi-)GRU layer
    :param inputs: A 3D Tensor with shape of [batch, T, C]
    :param num_units: An int. The number of hidden units. Defaults to the
                    input depth C.
    :param bidirection: A boolean. If True, bidirectional results
                    are concatenated.
    :param scope: A str, Optional scope for 'variable_scope'.
    :param reuse: A boolean. Whether to reuse the weights of a previous layer
                    by the same name.
    :return: If bidirection is True, a 3D Tensor with shape of
             [batch, T, 2 * num_units], otherwise [batch, T, num_units]
    """
    if num_units is None:
        # Bug fix: the original wrote `as_list[-1]` (indexing the bound
        # method) instead of calling `as_list()[-1]`, raising TypeError.
        num_units = inputs.get_shape().as_list()[-1]

    with tf.variable_scope(scope, reuse=reuse):
        cell_fw = tf.contrib.rnn.GRUCell(num_units)
        if bidirection:
            cell_bw = tf.contrib.rnn.GRUCell(num_units)
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs,
                                                         dtype=tf.float32)
            # Concatenate forward/backward outputs along the channel axis.
            outputs = tf.concat(outputs, axis=2)
        else:
            outputs, _ = tf.nn.dynamic_rnn(cell_fw, inputs,
                                           dtype=tf.float32)
        return outputs
def batch_norm(inputs, is_training=True, activation_fn=None, scope="batch_norm", reuse=None):
    """ Batch Normalization, referenced https://github.com/Kyubyong/tacotron/blob/master/modules.py#L43

    Rank 2/3 inputs are temporarily expanded to rank 4 so the fused (faster)
    batch-norm kernel can be used, then squeezed back afterwards.

    :param inputs: A Tensor with 2 or more dimensions, where the first dim has 'batch_size'.
    :param is_training: A boolean.
    :param activation_fn: Activation function applied after normalization.
    :param scope: A str, Optional scope for 'variable_scope'.
    :param reuse: A boolean. Whether to reuse the weights of a previous layer
                    by the same name.
    :return: The normalized (and optionally activated) tensor, same rank as *inputs*.
    """
    inputs_shape = inputs.get_shape()
    inputs_rank = inputs_shape.ndims

    if inputs_rank in [2, 3, 4]:
        # Expand to rank 4: rank 2 -> [B,1,C,1]-style, rank 3 -> [B,1,T,C].
        if not inputs_rank == 4:
            inputs = tf.expand_dims(inputs, axis=1)
            if inputs_rank == 2:
                inputs = tf.expand_dims(inputs, axis=2)
        outputs = tf.contrib.layers.batch_norm(inputs=inputs,
                                               center=True,
                                               scale=True,
                                               updates_collections=None,
                                               is_training=is_training,
                                               scope=scope,
                                               fused=True,
                                               reuse=reuse)
        # Undo the expansion so callers get back the original rank.
        if inputs_rank == 2:
            outputs = tf.squeeze(outputs, axis=[1, 2])
        elif inputs_rank == 3:
            outputs = tf.squeeze(outputs, axis=1)
    else:
        # Fused kernel only supports rank <= 4; fall back for higher ranks.
        outputs = tf.contrib.layers.batch_norm(inputs=inputs,
                                               center=True,
                                               scale=True,
                                               updates_collections=None,
                                               is_training=is_training,
                                               scope=scope,
                                               reuse=reuse,
                                               fused=False)
    if activation_fn is not None:
        outputs = activation_fn(outputs)

    return outputs
|
986,320 | a8c0146e8db20b993b5978c07790035bbf3cfbc2 |
# coding: utf-8

# In[4]:

import tensorflow as tf
# Create a 1x2 constant matrix.
m1 = tf.constant([[4,4]])
# Create a 2x1 constant matrix.
m2 = tf.constant([[2],[3]])
# Create a matrix-multiplication op, feeding m1 and m2 into it.
product = tf.matmul(m1,m2)
# Print the result.
# NOTE(review): under TF1 graph mode this prints the symbolic Tensor, not the
# computed value — a Session.run(product) would be needed for the latter.
print(product)
|
986,321 | b82fbe6f7a4dfba010fd1ebe6d31ff8a161410ad | """
procedure triInsertion(entier[] tab)
entier i, k;
entier tmp;
pour (i de 2 à N en incrémentant de 1) faire
tmp <- tab[i];
k <- i;
tant que (k > 1 et tab[k - 1] > tmp) faire
tab[k] <- tab[k - 1];
k <- k - 1;
fin tant que
tab[k] <- tmp;
fin pour
fin procedure
"""
# Programme Python pour l'implémentation du tri par insertion
def tri_insertion(tab):
    """Sort *tab* in place in ascending order using insertion sort.

    :param tab: list of comparable items; mutated in place.
    :return: None, like list.sort().
    """
    # Grow a sorted prefix tab[:i]; insert tab[i] into it by shifting the
    # larger elements one slot to the right.
    for i in range(1, len(tab)):
        tmp = tab[i]
        k = i
        while k > 0 and tab[k - 1] > tmp:
            # Shift directly — the original copied through a needless temp var.
            tab[k] = tab[k - 1]
            k -= 1
        tab[k] = tmp
# Main program to exercise the code above.
tab = [4,2,5,1]
print(len(tab))
tri_insertion(tab)
# Output strings are intentionally left in French.
print ("Le tableau trié par insertion est:")
for i in range(len(tab)):
    print ("% d" % tab[i])
|
986,322 | 0eb5e97da16806b0e412a50480e1c47af3792c6c | import os
import pathlib
from pandas import Timestamp
from wrfhydropy.core.job import Job
from wrfhydropy.core.namelist import Namelist
def test_job_init():
    """Job() must parse date strings and derive default hydro/hrldas times."""
    job = Job(
        job_id='test_job_1',
        model_start_time='1984-10-14',
        model_end_time='2017-01-04',
        restart=False,
        exe_cmd='bogus exe cmd',
        entry_cmd='bogus entry cmd',
        exit_cmd='bogus exit cmd'
    )
    assert job.model_start_time == Timestamp('1984-10-14 00:00:00')
    assert job.model_end_time == Timestamp('2017-01-04 00:00:00')
    # Cold start (restart=False): no restart files are referenced.
    assert job.hydro_times == {
        'hydro_nlist': {
            'restart_file': None,
            'rst_dt': 60,
            'out_dt': 60
        },
        'nudging_nlist': {
            'nudginglastobsfile': None
        },
    }
    # khour 282480 — presumably the whole hours between start and end; TODO confirm.
    assert job.hrldas_times == {
        'noahlsm_offline': {
            'khour': 282480,
            'restart_frequency_hours': 1,
            'output_timestep': 3600,
            'start_year': 1984,
            'start_month': 10,
            'start_day': 14,
            'start_hour': 0,
            'start_min': 0,
            'restart_filename_requested': None
        }
    }
def test_job_hydro_namelist():
    """_add_hydro_namelist must merge the job times into a user namelist."""
    job = Job(
        job_id='test_job_1',
        model_start_time='1984-10-14',
        model_end_time='2017-01-04',
        restart=False,
        exe_cmd='bogus exe cmd',
        entry_cmd='bogus entry cmd',
        exit_cmd='bogus exit cmd'
    )
    hydro_namelist = Namelist({
        'hydro_nlist': {
            "restart_file": None,
            "channel_option": 2
        },
        "nudging_nlist": {
            "nudginglastobsfile": None
        }
    })
    job._add_hydro_namelist(hydro_namelist)
    # User keys (channel_option) are kept; job-derived keys (rst_dt/out_dt) are added.
    assert job.hydro_namelist == {
        'hydro_nlist': {
            'restart_file': None,
            'channel_option': 2,
            'rst_dt': 60,
            'out_dt': 60
        },
        'nudging_nlist': {
            'nudginglastobsfile': None
        }
    }
def test_job_hrldas_namelist():
    """_add_hrldas_namelist must override times and drop the kday entry."""
    job = Job(
        job_id='test_job_1',
        model_start_time='1984-10-14',
        model_end_time='2017-01-04',
        restart=False,
        exe_cmd='bogus exe cmd',
        entry_cmd='bogus entry cmd',
        exit_cmd='bogus exit cmd'
    )
    hrldas_namelist = Namelist({
        'noahlsm_offline': {
            'btr_option': 1,
            'kday': 1,
            'khour': None,
            'start_year': 1900,
            'start_month': 1,
            'start_day': 1,
            'start_hour': 1,
            'start_min': 1,
            'restart_filename_requested': None
        }
    })
    job._add_hrldas_namelist(hrldas_namelist)
    # The job's start time and khour replace the placeholder values; 'kday'
    # is absent from the merged result.
    assert job.hrldas_namelist == {
        'noahlsm_offline': {
            'btr_option': 1,
            'khour': 282480,
            'restart_frequency_hours': 1,
            'output_timestep': 3600,
            'start_year': 1984,
            'start_month': 10,
            'start_day': 14,
            'start_hour': 0,
            'start_min': 0,
            'restart_filename_requested': None
        }
    }
def test_job_restart_file_times():
    """With restart=True the namelists must reference dated restart files."""
    job = Job(
        job_id='test_job_1',
        model_start_time='1984-10-14',
        model_end_time='2017-01-04',
        restart=True,
        exe_cmd='bogus exe cmd',
        entry_cmd='bogus entry cmd',
        exit_cmd='bogus exit cmd'
    )
    hydro_namelist = Namelist({
        'hydro_nlist': {
            "restart_file": None,
        },
        "nudging_nlist": {
            "nudginglastobsfile": None
        }
    })
    job._add_hydro_namelist(hydro_namelist)
    hrldas_namelist = Namelist({
        'noahlsm_offline': {
            'btr_option': 1,
            'kday': 1,
            'khour': None,
            'start_year': 1900,
            'start_month': 1,
            'start_day': 1,
            'start_hour': 1,
            'start_min': 1,
            'restart_filename_requested': None
        }
    })
    job._add_hrldas_namelist(hrldas_namelist)
    # Restart file names are derived from the model start time.
    assert job.hydro_namelist == {
        'hydro_nlist': {
            'restart_file': 'HYDRO_RST.1984-10-14_00:00_DOMAIN1',
            'rst_dt': 60,
            'out_dt': 60
        },
        'nudging_nlist': {
            'nudginglastobsfile': 'nudgingLastObs.1984-10-14_00:00:00.nc'
        }
    }
    assert job.hrldas_namelist == {
        'noahlsm_offline': {
            'btr_option': 1,
            'khour': 282480,
            'restart_frequency_hours': 1,
            'output_timestep': 3600,
            'start_year': 1984,
            'start_month': 10,
            'start_day': 14,
            'start_hour': 0,
            'start_min': 0,
            'restart_filename_requested': 'RESTART.1984101400_DOMAIN1'
        }
    }
def test_job_run_coldstart(tmpdir):
    """End-to-end: write namelists and the run script, run the job, check outputs.

    The exe/entry/exit commands are echo stubs, so the 'model run' itself is
    trivially successful.
    """
    os.chdir(tmpdir)
    job = Job(
        job_id='test_job_1',
        model_start_time='1984-10-14',
        model_end_time='2017-01-04',
        restart=False,
        exe_cmd='echo "bogus exe cmd"',
        entry_cmd='echo "bogus entry cmd"',
        exit_cmd='echo "bogus exit cmd"'
    )
    hydro_namelist = Namelist({
        'hydro_nlist': {
            "restart_file": None,
        },
        "nudging_nlist": {
            "nudginglastobsfile": None
        }
    })
    job._add_hydro_namelist(hydro_namelist)
    hrldas_namelist = Namelist({
        'noahlsm_offline': {
            'btr_option': 1,
            'kday': 1,
            'khour': None,
            'start_year': 1900,
            'start_month': 1,
            'start_day': 1,
            'start_hour': 1,
            'start_min': 1,
            'restart_filename_requested': None
        }
    })
    job._add_hrldas_namelist(hrldas_namelist)
    job._make_job_dir()
    job._write_namelists()
    job._write_run_script()
    try:
        job._run()
    except Exception:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; only real run errors are tolerated here.
        pass
    # NOTE(review): exit_status == 1 while the process returncode is 0 looks
    # inconsistent — confirm the Job.exit_status semantics.
    assert job.exit_status == 1
    assert job._proc_log.returncode == 0
    # Everything the run produced must be one of the expected artifacts.
    actual_files = list(job.job_dir.glob('*'))
    expected_files = [
        pathlib.Path('job_test_job_1/WrfHydroJob_prerun.pkl'),
        pathlib.Path('job_test_job_1/WrfHydroJob_postrun.pkl'),
        pathlib.Path('job_test_job_1/hydro.namelist'),
        pathlib.Path('job_test_job_1/namelist.hrldas')
    ]
    for file in actual_files:
        assert file in expected_files
|
986,323 | 24ad2d32bbfde26e8c817e00162bb4864cdc062e | import math
# Python 2 script (raw_input/xrange/print statement).
# For each query x, prints the number of decimal digits of x! — this appears
# to implement Kamenetsky's formula:
#   digits(x!) = floor(log10(sqrt(2*pi*x)) + x*log10(x/e)) + 1   (TODO confirm)
T = int(raw_input())
for i in xrange(T):
    x = int(raw_input())
    a = math.log10(math.sqrt(2 * math.pi * x))
    b = math.log10(x / math.e) * x
    print int(a + b + 1)
|
986,324 | 952f2387c4d07acb74357de4f588f0b9e0c81ffa | import requests
import numpy as np
import conf
import os
import time
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import binance
def build_ticker(all_symbols, tickers_raw):
    """Build a symbol -> USDT price map from raw Binance ticker rows.

    :param all_symbols: iterable of asset symbols to price (e.g. ['BTC']).
    :param tickers_raw: list of {'symbol': pair_name, 'price': str} dicts.
    :return: dict mapping each resolvable symbol to its USDT price; the
        stablecoins USDT/USD are pinned to 1. Symbols that cannot be priced
        at all are silently omitted (as before).
    """
    backup_coins = ['BTC', 'ETH', 'BNB']
    tickers = {'USDT': 1, 'USD': 1}
    tickers_raw = {t['symbol']: float(t['price']) for t in tickers_raw}

    # First pass: price each symbol directly against a stablecoin pair.
    failed_coins = []
    for symbol in set(backup_coins + all_symbols):
        for stable in ('USDT', 'BUSD', 'USDC', 'DAI'):
            pair = symbol + stable
            if pair in tickers_raw:
                tickers[symbol] = tickers_raw[pair]
                break
        else:
            failed_coins.append(symbol)

    # Second pass: price leftovers through a major coin
    # (symbol/BTC price * BTC/USDT price, etc.).
    for symbol in failed_coins:
        for b_coin in backup_coins:
            pair = symbol + b_coin
            # Guard: the backup coin itself may have failed to price, in which
            # case the original raised KeyError on tickers[b_coin].
            if pair in tickers_raw and b_coin in tickers:
                tickers[symbol] = tickers_raw[pair] * tickers[b_coin]
                # Bug fix: the original never broke out of this loop (its
                # `success` flag was dead), so later backup pairs clobbered
                # the first match; mirror the first pass and stop here.
                break
    return tickers
def get_report():
    """Snapshot the Binance account: balances, tickers and total USDT value.

    :return: dict with 'total_usdt' (float), 'balances' (symbol -> quantity)
        and 'tickers' (symbol -> USDT price).
    """
    api = binance.Client(conf.BINANCE_API_KEY, conf.BINANCE_API_SECRET)
    account = api.get_account()
    # Keep only assets with a non-zero (free + locked) balance.
    account_symbols = []
    balances = {}
    for balance in account['balances']:
        symbol = balance['asset']
        qty = float(balance["free"]) + float(balance["locked"])
        if qty != 0:
            account_symbols.append(symbol)
            balances[symbol] = qty
    all_symbols = list(set(conf.COINS + account_symbols + [conf.CURRENCY]))
    tickers_raw = api.get_symbol_ticker()
    tickers = build_ticker(all_symbols, tickers_raw)
    # Portfolio value expressed in USDT.
    total_usdt = 0
    for symbol in account_symbols:
        total_usdt += balances[symbol]*tickers[symbol]
    report = {}
    report['total_usdt'] = total_usdt
    report['balances'] = balances
    report['tickers'] = tickers
    return report
def format_report(report):
    """Render *report* as a Markdown balance summary in conf.CURRENCY."""
    tickers = report['tickers']
    fx = 1/tickers[conf.CURRENCY]  # USDT -> display-currency conversion
    lines = ["#### Current market:\n"]
    for symbol, qty in report['balances'].items():
        ticker = tickers[symbol]
        value = round(qty*ticker*fx,2)
        if value >= 0.1:  # hide dust balances
            lines.append(f"- **{symbol}** *({ticker} {conf.CURRENCY_SYMBOL})* : {value} {conf.CURRENCY_SYMBOL}\n")
    total = round(report['total_usdt']*fx,2)
    lines.append(f"\n**Total** : {total} {conf.CURRENCY_SYMBOL}\n")
    return "".join(lines)
def get_previous_reports():
    """Load the pickled report history, or [] if none has been saved yet."""
    if not os.path.exists('db/crypto.npy'):
        return []
    return np.load('db/crypto.npy', allow_pickle=True).tolist()
def save_report(report, old_reports):
    """Timestamp *report*, append it to *old_reports* and persist the list.

    Note: mutates both arguments and returns the (same) updated list.
    """
    report["time"] = int(time.time())
    old_reports.append(report)
    np.save('db/crypto.npy', old_reports, allow_pickle=True)
    return old_reports
def plot_symbol(reports, symbol):
    """Plot the portfolio value expressed in *symbol* units over time.

    :return: path of the saved PNG figure.
    """
    plt.clf()
    X,Y = [],[]
    for report in reports:
        ticker = report['tickers'][symbol]
        # total_usdt / price = how many units of *symbol* the portfolio equals.
        Y.append(report['total_usdt']/ticker)
        X.append(dt.datetime.fromtimestamp(report['time']))
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d/%m %H:%M'))
    plt.setp(plt.xticks()[1], rotation = 15)
    plt.plot(X,Y)
    plt.ylabel(symbol)
    plt.grid()
    figname = f"db/quantity_{symbol}.png"
    plt.savefig(figname)
    return figname
|
986,325 | bb7c45f01d7bd8ed1fbf3f1e3886a7493bdd214e | # coding: utf-8
import logging
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.admin.babel import gettext
from flask.ext import login
from flask import flash
from wtforms import TextAreaField
from utils import form_to_dict
from models import SystemMessage
class SystemMessageView(ModelView):
    """Flask-Admin view for system (broadcast) messages."""
    # List-view configuration.
    page_size = 30
    can_delete = True
    can_edit = True
    can_create = True
    column_searchable_list = ('content',)
    column_default_sort = ('time', True)
    column_exclude_list = ('view',)
    column_display_pk = True
    column_labels = dict(
        id=u'ID',
        content=u'消息内容',
        time=u'时间'
    )
    form_overrides = dict(
        content=TextAreaField
    )

    def __init__(self, db, **kwargs):
        super(SystemMessageView, self).__init__(SystemMessage, db, **kwargs)

    def is_accessible(self):
        # Only admin users may open this view.
        return login.current_user.is_admin()

    def scaffold_form(self):
        # Hide the auto-managed fields from the create/edit form.
        form_class = super(SystemMessageView, self).scaffold_form()
        delattr(form_class, 'time')
        delattr(form_class, 'view')
        return form_class

    def create_model(self, form):
        """Override of Flask-Admin's model-creation hook."""
        try:
            model = self.model(**form_to_dict(form))
            self.session.add(model)  # stage the new message for commit
            self.session.commit()
        except Exception, ex:
            flash(gettext('Failed to create model. %(error)s', error=str(ex)), 'error')
            logging.exception('Failed to create model')
            self.session.rollback()
            return False
        else:
            self.after_model_change(form, model, True)
            return True

    def update_model(self, form, model):
        """Override of Flask-Admin's model-update hook."""
        try:
            model.update(**form_to_dict(form))
            self.session.commit()
        except Exception, ex:
            flash(gettext('Failed to update model. %(error)s', error=str(ex)), 'error')
            logging.exception('Failed to update model')
            self.session.rollback()
            return False
        else:
            self.after_model_change(form, model, False)
            return True
986,326 | fa752d9456a8eba441e2649d9db5924cbcfa2cfb | import Supplier
class Order(Supplier):
    """An order placed with a supplier.

    NOTE(review): the file does `import Supplier` (a module) yet inherits from
    it directly — presumably this should be the Supplier *class*
    (e.g. `from Supplier import Supplier`); confirm against that module.
    """

    def __init__(self, full_name, email, category, membership, note, quantity, description, price, order):
        super().__init__(full_name, email, category, membership, note, quantity, description, price)
        # Bug fix: the original used `==` (a no-op comparison), so the
        # attribute was never assigned and get_order() raised AttributeError.
        self.__order = order

    def get_order(self):
        """Return the order payload."""
        return self.__order

    def set_order(self, order):
        """Replace the order payload."""
        self.__order = order
|
986,327 | 1da31410d725e5615570b388a2d1f58cc901ab89 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Serves content for current directory
#
# Author: Thomas Frössman ( thomasf@jossystem.se / http://thomas.jossystem.se )
# Updates to this file will probably be available at https://github.com/thomasf/dotfiles/
#
import sys
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import socket
port_range_start = 8500
port_range_end = 8600
HandlerClass = SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"
#Host = '127.0.0.1'
Host = '0.0.0.0'
if sys.argv[1:]:
port = int(sys.argv[1])
port_range_start=port
port_range_end=port
connected=False
port=port_range_start
httpd = None
HandlerClass.protocol_version = Protocol
while (not connected and port <= port_range_end):
try:
server_address = (Host, port)
httpd = ServerClass(server_address, HandlerClass)
connected=True
except socket.error,e:
if port is port_range_end:
print "port " + str(port) + " is already bound, trying +1"
else:
print "port " + str(port) + " is already bound, exitting"
port=port+1
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
|
986,328 | a3c5802f7977299b3fe4afa0a3dc380081caa4ed | from django.contrib import admin
from .models import Reporter
from .models import Statement
from .models import Tip
from .models import Photo
# Expose the news-app models in the Django admin.
for model in (Reporter, Statement, Tip, Photo):
    admin.site.register(model)
|
986,329 | 9a14ee0beb4383df99adca4118784c09a6f47e04 | ## Script created by Jenny Holder, Innovate! Inc. November 2016
##
## Had to install pypyodbc, requests and copied here:
## C:\Python27\ArcGISx6410.4\Lib\site-packages
import arcpy, sets, pypyodbc, requests, ftfy

# Python 2 ETL script: geocodes Salesforce individual addresses via the ArcGIS
# World geocoder and writes them (with intersected diocese/region/district/
# state attributes) into an SDE feature class.
# SECURITY(review): credentials are hard-coded and every SQL statement below
# is built by string concatenation — vulnerable to SQL injection; these should
# be parameterized queries with secrets pulled from configuration.

## Create connection to SQL Server database and open a cursor
#connection = pypyodbc.connect('Driver={SQL Server Native Client 11.0};' 'Server=10.15.230.244\dev;' 'Database=Salesforce_Data;' 'uid=jenny.holder;pwd=crs4fun')
connection = pypyodbc.connect('Driver={SQL Server Native Client 11.0};' 'Server=10.15.30.186;' 'Database=Salesforce_Data;' 'uid=sf_intregrationadmin;pwd=JetterbitCRS')
pyCursor = connection.cursor()
print "Made connection."

## Point to the sde connection
#arcpy.env.workspace = "C:\Users\jenny.holder\AppData\Roaming\Esri\Desktop10.4\ArcCatalog\Salesforce_Data (dev).sde"
#arcpy.env.workspace = "C:\Users\jenny.holder\AppData\Roaming\Esri\Desktop10.4\ArcCatalog\Connection to 10.15.30.186.sde"
arcpy.env.workspace = "D:\Salesforce_Data\Salesforce_Data.sde"

## Create set of current addresses to match against
lookupSet = set()
lookupTable = 'Salesforce_Data.dbo.IndividualsFC'
lookupFields = ["Address"]
with arcpy.da.SearchCursor(lookupTable, lookupFields) as lCursor:
    for row in lCursor:
        address = row[0].encode("utf8")
        lookupSet.add(address)
print "Created Individuals Address Lookup Set."

# Same, for Salesforce record IDs already present in the feature class.
lookupSetID = set()
lookupTableID = 'Salesforce_Data.dbo.IndividualsFC'
lookupFieldsID = ["ID"]
with arcpy.da.SearchCursor(lookupTableID, lookupFieldsID) as lCursor:
    for row in lCursor:
        sfID = row[0].encode("utf8")
        lookupSetID.add(sfID)
print "Created Individuals ID Lookup Set."

fd = 'Salesforce_Data.dbo.vIndividualsAddress'
fieldNames = [f.name for f in arcpy.ListFields(fd)]
print fieldNames
with arcpy.da.SearchCursor(fd, fieldNames) as sCursor:
    for row in sCursor:
        ## For each record in the view:
        # Some rows have apostrophes to take care of
        # Create clean version of Address field to insert back into database
        if row[0] is None:
            cleanAddress = '-'
        else:
            cleanAddress = ftfy.fix_text(row[0])
            cleanAddress = cleanAddress.replace("'", "''").rstrip()
        # Create clean version of Name field to insert back into database
        if row[2] is None:
            cleanName = ''
            print cleanName
        else:
            cleanName = ftfy.fix_text(row[2])
            #print cleanName + "--ftfy"
            cleanName = cleanName.replace("'", "''").rstrip()
            #print cleanName + " -- after"
        # Skip records whose address AND Salesforce ID are already loaded.
        if cleanAddress in lookupSet and row[1] in lookupSetID:
            pass
            print "Passed " + cleanAddress
        else:
            #try:
            # First field in each view is the concatenated address
            # Use address to geocode
            # If the address does not already exist, check to also see if the Salesforce record was simply updated
            # If the SF record exists, delete the old one from FC so new data can be written to the SF ID
            findExisting = "If exists (Select * from INDIVIDUALSFC where id = '" + row[1] + "') Delete from INDIVIDUALSFC where ID = '" + row[1] + "'"
            pyCursor.execute(findExisting)
            connection.commit()
            # Geocode via the public ArcGIS World geocoder (Web-Mercator output).
            response = requests.get('http://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/find',
                                    {'text': cleanAddress,
                                     'f': 'json',
                                     'outSR': '3857'})
            response = response.json()
            print response
            if len(response['locations']) == 0:
                ## Write to output about it
                ## Insert errors into "error" type
                ## This means there is no match for the address
                score = 0
                insertFields = "'" + cleanName + "', '" + str(row[1]) + "', '" + cleanAddress + "', '" + str(fd) + "', '" + str(score) + "'"
                outFields = 'Name, Id, Address, TableName, Score'
                sqlString = "Use Salesforce_Data Insert into GeocodeErrors(" + outFields + ") values (" + insertFields + ")"
                print "Put " + cleanName + " in errors."
                pyCursor.execute(sqlString)
                connection.commit()
            else:
                # A location is returned and assigned x/y, but
                # Score is no longer being returned by the geocoding services
                # Score now entered into the database is arbitrary, for the time being
                #score = response['locations'][0]['feature']['attributes']['Score']
                x = response['locations'][0]['feature']['geometry']['x']
                y = response['locations'][0]['feature']['geometry']['y']
                score = ''
                #attributes = response['locations'][0]['feature']['attributes']
                #print attributes
                #addressType = response['locations'][0]['feature']['attributes']['Addr_Type']
##                if score <= 79.99:
##                    insertFields = "'" + cleanName + "', '" + str(row[1]) + "', '" + cleanAddress + "', '" + str(fd) + "', '" + str(score) + "'"
##                    outFields = 'Name, Id, Address, TableName, Score'
##                    sqlString = "Use Salesforce_Data Insert into GeocodeErrors(" + outFields + ") values (" + insertFields + ")"
##                    pyCursor.execute(sqlString)
##                    connection.commit()
##                else:
                ## Create shape value from x,y point
                location = "geometry::STPointFromText('Point (" + str(x) + " " + str(y) + ")', 3857)"
                #print location
                ## Find diocese the point is in
                # NOTE(review): the bare excepts below treat "no intersecting
                # feature" and genuine SQL errors identically.
                intersectString = "Select Name_other from USDIOCESESAGOL where Shape.STContains(" + location + ") = 1"
                #print intersectString
                pyCursor.execute(intersectString)
                try:
                    for d in pyCursor.fetchone():
                        diocese = d
                        #print diocese
                except:
                    diocese = ''
                #print diocese
                ## Find region the point is in
                intersectString = "Select RegionName from USREGIONSAGOL where Shape.STContains(" + location + ") = 1"
                #print intersectString
                pyCursor.execute(intersectString)
                try:
                    for r in pyCursor.fetchone():
                        region = r
                        #print region
                except:
                    region = ''
                #print region
                ## Find congressional district the point is in
                intersectString = "Select UniqueID from USCONGRESSIONALDISTRICTSAGOL where Shape.STContains(" + location + ") = 1"
                #print intersectString
                pyCursor.execute(intersectString)
                try:
                    for c in pyCursor.fetchone():
                        congDist = str(c)
                        #print congDist
                except:
                    congDist = ''
                #print congDist
                ## Find US state the point is in
                intersectString = "Select STATE_NAME from USSTATESAGOL where Shape.STContains(" + location + ") = 1"
                #print intersectString
                pyCursor.execute(intersectString)
                try:
                    for s in pyCursor.fetchone():
                        usState = str(s)
                except:
                    usState = ''
                ## Find table to put output of individuals
                try:
                    print "Start adding to Individuals."
                    if row[3] is None:
                        titlec = ''
                    else:
                        titlec = ftfy.fix_text(row[3])
                        titlec = titlec.replace("'", "''").rstrip()
                    ## Determine if Individual is Grassroots Supporter
                    gpString = "Select ID from Subscriptions where Subscriber__c = '" + str(row[1]) + "'"
                    #print gpString
                    pyCursor.execute(gpString)
                    try:
                        for c in pyCursor.fetchone():
                            #print c
                            ccgp = 'Yes'
                            #print ccgp
                    except:
                        ccgp = 'No'
                    #print ccgp
                    ## Determine if Individual is Grassroots Action Taker
                    atString = "Select ID from vActionTakers where Response_From__c = '" + str(row[1]) + "'"
                    #print atString
                    pyCursor.execute(atString)
                    try:
                        for a in pyCursor.fetchone():
                            #print a
                            grat = 'Yes'
                            #print grat
                    except:
                        grat = 'No'
                    #print grat
                    ## Determine if Individual is Parish Ambassador
                    pacString = "Select ID from Relationships where From_Individual__c = '" + str(row[1]) + "' and Second_Relationship_type__c like '%Parish Ambassador%'"
                    #print pacString
                    pyCursor.execute(pacString)
                    try:
                        for p in pyCursor.fetchone():
                            #print p
                            pac = 'Yes'
                            #print pac
                    except:
                        pac = 'No'
                    #print pac
                    ## Find next Object ID to continue incrementing
                    pyCursor.execute("DECLARE @myval int EXEC dbo.next_rowid 'dbo', 'IndividualsFC', @myval OUTPUT SELECT @myval")
                    for thisrow in pyCursor.fetchall():
                        nextID = thisrow[0]
                        #print nextID
                    insertFields = str(nextID) + ", '" + cleanAddress + "', '" + str(row[1]) + "', '" + cleanName + "', '" + titlec + "', '" + str(row[4]) + "', '" + str(row[5]) + "', " + location + ", '" + diocese + "', '" + region + "', '" + congDist + "', '" + str(row[6]) + "', '" + str(row[7]) + "', '" + ccgp + "', '" + grat + "', '" + pac + "', '" + usState + "'"
                    outFields = 'OBJECTID, Address, Id, Name, Title__c, AccountID, Relationship_Types__c, Shape, Diocese, Region, CongressionalDistrict, kw__Chamber__c, kw__StateOfCoverage__c, GrassrootsSupporter, ActionTaker, ParishAmbassador, USState'
                    sqlString = "Use Salesforce_Data Insert into INDIVIDUALSFC(" + outFields + ") values (" + insertFields + ")"
                    #print sqlString
                    print "Inserting " + cleanName + " into Individuals table."
                    pyCursor.execute(sqlString)
                    connection.commit()
                except:
                    #print "Found error in fields other than name and address."
                    # Any failure during the insert falls back to the error table.
                    print "Adding to GeocodeErrors table now..."
                    score = 0
                    insertFields = "'" + cleanName + "', '" + str(row[1]) + "', '" + cleanAddress + "', '" + str(fd) + "', '" + str(score) + "'"
                    outFields = 'Name, Id, Address, TableName, Score'
                    sqlString = "Use Salesforce_Data Insert into GeocodeErrors(" + outFields + ") values (" + insertFields + ")"
                    #print sqlString
                    pyCursor.execute(sqlString)
                    connection.commit()

## Close/delete the cursor and the connection
pyCursor.close()
del pyCursor
connection.close()
print "Individuals geocoding complete!"
|
986,330 | 5c02d5a74715827c62bf0091c0464c24c984fa5d | import tensorflow as tf
import keras
# Load a previously trained Keras model and run a single prediction.
new_model = tf.keras.models.load_model('keras_model_test.model')
# predict expects a batch; this is one sample with six features.
predictions = new_model.predict([[153,256,99,0,86,65]])
print(predictions)
|
986,331 | 08c79da664edb6aff20a70ad0f8d84d09e92944b | '''
Created on 25.09.2013
@author: Sven, Christian, Collin
Just a small test utility to initialize a game and make some moves
'''
from TEAMengine import NoTipping
import pdb
import sys
# Python 2 driver: plays NoTipping engine-vs-engine until the board tips.
A = NoTipping()
A.display()
print A.to_move
print A.phase
Player_before = -1
#pdb.set_trace()
# NOTE(review): both branches of this loop are identical — presumably one of
# them was meant to take human input (see the commented raw_input block).
while True:
    if A.to_move == 1:
        move = A.magic_alphabeta_search()
        # If the same player moved twice in a row, show the move and pause.
        if(Player_before == A.to_move):
            print str(A.to_move)
            print str(move[0])+ " " + str(move[1])
            raw_input()
#        x = raw_input()
#        y = x.split(" ")
#        move = (int(y[0]), int(y[1]))
        A = A.make_move(move)
        Player_before = A.to_move
        A.display()
        if A.board.tipped():
            break
    else:
        move = A.magic_alphabeta_search()
        if(Player_before == A.to_move):
            print str(A.to_move)
            print str(move[0])+ " " + str(move[1])
            raw_input()
#        x = raw_input()
#        y = x.split(" ")
#        move = (int(y[0]), int(y[1]))
        A = A.make_move(move)
        Player_before = A.to_move
        A.display()
        if A.board.tipped():
            break
# NOTE(review): both branches print the same message; the if/else is redundant.
if(A.to_move==1):
    print "Player "+str(A.to_move)+" wins"
else:
    print "Player "+str(A.to_move)+" wins"
|
986,332 | 076f2488504981643e90107ef8ba94963f2d43ec | from collections import deque
def find_set(x):
    """Return the representative of x's set, compressing the path as we go."""
    root = parent[x]
    if root != x:
        root = find_set(root)
        parent[x] = root
    return root
def make_set(x):
    """Register x as a fresh singleton set with rank 0."""
    parent[x], rank[x] = x, 0
def union(x, y):
    """Merge the sets containing x and y, using union by rank."""
    root1 = find_set(x)
    root2 = find_set(y)
    if root1 == root2:
        return  # already in the same component
    # Attach the shallower tree under the deeper one.
    if rank[root1] > rank[root2]:
        parent[root2] = root1
    else:
        parent[root1] = root2
        if rank[root1] == rank[root2]:
            rank[root2] += 1
# Per test case: read n points, build the complete graph with squared
# Euclidean distances scaled by e, then run Kruskal's MST algorithm.
for tc in range(1, int(input())+1):
    n = int(input())
    xs = list(map(int, input().split()))
    ys = list(map(int, input().split()))
    e = float(input())
    parent = {}
    rank = {}
    graph = deque()
    for i in range(n):
        make_set(i)
    for i in range(n-1):
        for j in range(i+1, n):
            graph.append((i, j, (((xs[i]-xs[j])**2)+((ys[i]-ys[j])**2))*e))
    # BUG FIX: sorted() returns a plain list, which has no popleft();
    # wrap the sorted edges back into a deque so popleft() below works.
    graph = deque(sorted(graph, key=lambda edge: edge[2]))
    mst = set()
    ans = 0
    # Kruskal: repeatedly take the cheapest edge joining two components.
    while len(mst) < n-1:
        (n1, n2, cost) = graph.popleft()
        if find_set(n1) != find_set(n2):
            union(n1, n2)
            mst.add((n1, n2))
            ans += cost
    print(f'#{tc} {round(ans)}')
|
986,333 | 91b1a168591d56266bf7d8688e4fdf2a6dcbcc2c | import main
crypto = main.get_platform_command_code('telegram', 'crypto')
def run(bot, chat_id, user, keyConfig, message, totalResults=1):
return crypto.run(bot, chat_id, user, keyConfig, message, totalResults)
|
986,334 | 7596504b94e790377b3c6af8de579a3346fd2f32 | #import modules
import redis
r = redis.Redis(host="192.168.2.8", port=6379, db=0, password="password")
r.set("openTrades", "4")
r.set("simulatedSum", "0.12")
r.set("sumResult", "0.01")
hello = "yeye" + r.get('openTrades').decode('utf-8')
print(hello)
|
986,335 | 54e6842cdb40522c8a002e71e850c714e5a441bd | import utmpaccess
import utmp
from UTMPCONST import *
import time
from netaddr import *
import collectd
PLUGIN_NAME = 'session'
collectd.debug('session : Loading Python Plugin' +PLUGIN_NAME)
Domains= None
def config_func(config):
    """Collectd config callback: collect the configured 'Domains' values.

    Populates the module-level ``domains`` list that read_func() iterates.
    NOTE(review): the module-level ``Domains = None`` assignment above looks
    like an unused leftover (capitalisation differs from this lowercase
    global) -- confirm and remove.
    """
    global domains
    domains = []
    for node in config.children:
        key = node.key
        if key == 'Domains':
            domains.extend(node.values)
    collectd.debug('Domains is: %s' % ', '.join(domains) )
def read_func():
    """Collectd read callback: count active user sessions per domain.

    Walks the utmp records, counts USER_PROCESS entries whose remote host
    ends with one of the configured domains, and dispatches one total gauge
    plus one gauge per domain.
    """
    total = 0
    domain_counter = dict.fromkeys(domains, 0)
    records = utmp.UtmpRecord()
    for rec in records:
        if rec.ut_type != USER_PROCESS:
            continue
        # (Removed a no-op tuple expression that built and immediately
        # discarded (user, line, pid, host, time) on every record.)
        host = rec.ut_host
        for d in domains:
            collectd.debug("HERE: %s %s" % (host, d))
            # endswith(d) already implies 'd in host'; one check suffices.
            if host.endswith(d):
                collectd.debug('Matches')
                domain_counter[d] += 1
                total += 1
    records.endutent()
    datapoint = collectd.Values(plugin='sessions',)
    datapoint.type = 'count'
    datapoint.type_instance = 'total_sessions'
    datapoint.values = [total]
    collectd.debug('Dispatching a value of %s for total sessions' % total)
    datapoint.dispatch()
    for d in domains:
        datapoint = collectd.Values(plugin='sessions',)
        datapoint.type = 'count'
        datapoint.type_instance = d
        datapoint.values = [domain_counter[d]]
        collectd.debug('Dispatching a value of %s for domain sessions %s' % (domain_counter[d], d))
        datapoint.dispatch()
collectd.register_config(config_func)
collectd.register_read(read_func)
|
986,336 | 6d88326ea1afbea5f8d34a4c6b552d57b09df198 | from django.shortcuts import render
from . import util
from markdown2 import Markdown
from django import forms
from django.urls import reverse
from django.http import HttpResponseRedirect
markdowner = Markdown()
# display a list of all wiki entries
def index(request):
    """Render the index page listing every encyclopedia entry."""
    context = {"entries": util.list_entries()}
    return render(request, "encyclopedia/index.html", context)
# display a wiki entry
def display_entry(request, entry):
    """Render one wiki entry, converting its Markdown source to HTML."""
    entry_markdown = util.get_entry(entry)
    if entry_markdown is None:
        # Unknown title: show an inline error message instead of a 404.
        context = {"entry_html": "<h1>That entry does not exist.</h1>"}
        return render(request, "encyclopedia/entry.html", context)
    context = {"entry_html": markdowner.convert(entry_markdown), "title": entry}
    return render(request, "encyclopedia/entry.html", context)
|
986,337 | dfc1369f98a1abe0d22e28942a51013cf2588ca1 | from django.apps import AppConfig
class RockclimbersConfig(AppConfig):
name = 'rockClimbers'
|
986,338 | fe9fd5508d80ea734191ac21fe0d3477d428d20d | from math import sqrt
def puzzles_reader(input_file):
    """Parse one puzzle per line ('size max_d max_l board') into dicts."""
    puzzles = []
    with open(input_file, 'r') as handle:
        # Puzzle ids are 1-based line numbers.
        for line_number, line in enumerate(handle, start=1):
            fields = str(line).split(' ', 4)
            puzzles.append({
                'size': int(fields[0]),
                'max_d': int(fields[1]),
                'max_l': int(fields[2]),
                'board': fields[3].strip(),
                'is_solved': False,
                'id': line_number,
            })
    return puzzles
def write_to_file(prefix, visited, path):
    """Write the solution steps and the visited search steps to files.

    Creates <prefix>_solution.txt ('no solution' when path is empty) and
    <prefix>_search.txt.  Context managers guarantee both files are closed
    even if a write fails (the original leaked handles on error paths).
    """
    with open(prefix + "_solution.txt", 'w') as solution_file:
        if len(path) == 0:
            solution_file.write('no solution')
        else:
            for step in path:
                solution_file.write(step + '\n')
    with open(prefix + "_search.txt", 'w') as search_file:
        for step in visited:
            search_file.write(step + '\n')
def is_win(board_state):
    """A board is solved when every dot is 0 (all lights off)."""
    return sum(int(dot) for dot in board_state) == 0
def flip_single(index, board):
    """Return board with the bit at ``index`` toggled; out of range is a no-op."""
    digits = [int(ch) for ch in board]
    if not 0 <= index < len(digits):
        return board
    digits[index] = 1 - digits[index]
    return ''.join(str(d) for d in digits)
def flip_dot(index, puzzle, board):
    """Toggle the dot at ``index`` plus its left/right/up/down neighbours."""
    size = puzzle['size']
    result = flip_single(index, board)
    col = index % size
    if col != 0:            # has a left neighbour in the same row
        result = flip_single(index - 1, result)
    if col != size - 1:     # has a right neighbour in the same row
        result = flip_single(index + 1, result)
    # Vertical neighbours; flip_single ignores out-of-range indices.
    result = flip_single(index + int(size), result)
    result = flip_single(index - int(size), result)
    return result
def get_firstzero_index(board):
    """Index of the first '0', or len(board)+1 when the board has none."""
    position = str(board).find('0')
    return len(str(board)) + 1 if position < 0 else position
def get_num_of_zero_weighted(board, size):
    # Appears to be an unimplemented placeholder heuristic -- always 0.
    # TODO confirm whether a weighted zero count was ever intended here.
    return 0
def get_num_of_zero(board):
    """Count the '0' characters in the board string."""
    return sum(1 for ch in board if ch == '0')
def get_connected_islands_count(board, puzzle_size):
    """Count 4-connected islands of '1' cells on a square board.

    Bug fix: the original passed the cell *value* (always 1) to sink() as an
    index, and sink() compared int cells against the string '1', so nothing
    was ever sunk and this function effectively just counted 1-cells.  This
    version is self-contained and flood-fills each island exactly once.
    """
    cells = [int(dot) for dot in board]
    islands = 0
    for start in range(len(cells)):
        if cells[start] != 1:
            continue
        islands += 1
        # Iterative flood fill over the island containing ``start``.
        cells[start] = 0
        stack = [start]
        while stack:
            idx = stack.pop()
            col = idx % puzzle_size
            neighbours = []
            if idx - puzzle_size >= 0:
                neighbours.append(idx - puzzle_size)  # up
            if idx + puzzle_size < len(cells):
                neighbours.append(idx + puzzle_size)  # down
            if col > 0:
                neighbours.append(idx - 1)            # left
            if col < puzzle_size - 1:
                neighbours.append(idx + 1)            # right
            for nb in neighbours:
                if cells[nb] == 1:
                    cells[nb] = 0
                    stack.append(nb)
    return islands
def sink(board_arr, index, puzzle_size):
    """Recursively zero out the island of 1-cells containing ``index``.

    Bug fixes: the original compared int cells against the string '1'
    (never true, so nothing was ever sunk) and always requested direction 0
    instead of all four neighbour directions.
    """
    if board_arr[index] == 1:
        board_arr[index] = 0
        for direction in range(4):
            neighbour = get_neighbour_index(direction, index, puzzle_size)
            if neighbour > -1:
                sink(board_arr, neighbour, puzzle_size)
def format_index(index, board_size):
    """Render a flat index as '<RowLetter><Col>'; None becomes '00'."""
    if index is None:
        return "00"
    row_letter = chr(int(index / board_size) + 65)   # 0 -> 'A', 1 -> 'B', ...
    column = int(index) % int(board_size)
    return row_letter + str(column)
# 0,1,2,3 => top, right, bottom, left
def get_neighbour_index(direction, index, puzzle_size):
    """Return the flat index of the neighbour in ``direction``, or -1.

    Directions: 0 = top, 1 = right, 2 = bottom, 3 = left.
    Bug fixes: direction 3 (left) moved right (index + 1) instead of left,
    and the final ``val > 0`` check wrongly rejected a valid neighbour at
    index 0.
    """
    column_position = index % puzzle_size
    val = -1
    if direction == 0:      # top
        if index - puzzle_size >= 0:
            val = index - puzzle_size
    elif direction == 1:    # right
        if column_position < puzzle_size - 1:
            val = index + 1
    elif direction == 2:    # bottom
        val = index + puzzle_size
        if val > puzzle_size * puzzle_size - 1:
            return -1
    elif direction == 3:    # left
        if column_position > 0:
            val = index - 1
    if val >= 0:
        return val
    return -1
def compute_h_simple(board, puzzle_size):
    # Heuristic: reward cleared dots (each zero counts -2) and penalise
    # each remaining island of 1s (+1); lower values are better.
    return -get_num_of_zero(board)*2 + get_connected_islands_count(board, puzzle_size)
def write_to_file_node(prefix, size, node, search_path):
    """Write the solution path and the search trace to <prefix>_*.txt.

    The solution is rebuilt by walking ``previous_node`` links back from
    ``node``; ``node is None`` means the puzzle was not solved.  Context
    managers guarantee both files are closed even on a write error.
    """
    print("path length: ", len(search_path))
    # Walk back from the final node to recover the moves in order.
    moves = []
    current_node = node
    while current_node is not None:
        moves.insert(0, current_node)
        current_node = current_node.previous_node
    with open(prefix + "_solution.txt", 'w') as solution_file:
        if len(moves) == 0:
            solution_file.write('no solution')
            print('>> No Solution\n')
        else:
            print('>> Solved\n')
            for s in moves:
                solution_file.write(format_index(s.previous_index, size) + " " + s.board_state + '\n')
    with open(prefix + "_search.txt", 'w') as search_file:
        for step in search_path:
            search_file.write(str(step.f) + " " + str(step.g) + " " + str(step.h) + " " + step.board_state + '\n')
class Node:
    """A search node: one board state plus best-first/A* bookkeeping."""
    # Class-level defaults; every instance overwrites these in __init__.
    board_state = None              # board encoded as a '0'/'1' string
    previous_node = None            # parent Node on the path from the start
    previous_index = 0              # dot index flipped to reach this state
    previous_index_formated = None  # previous_index rendered as e.g. 'B2'
    h = 0                           # heuristic estimate
    g = 0                           # path cost from the start (A* only)
    f = 0                           # priority: h alone, or g + h for A*
    def __init__(self, h, board_state, previous_node, previous_index, is_astar=False):
        self.board_state = board_state
        self.previous_node = previous_node
        self.previous_index = previous_index
        # The board is square, so its side length is sqrt(len(board_state)).
        self.previous_index_formated = format_index(previous_index, sqrt(len(board_state)))
        self.h = h
        if is_astar and previous_node is not None:
            self.g = previous_node.g + 1
            self.f = h + self.g
        else:
            self.f = h
    def __lt__(self, other):
        # Heap ordering: lower f wins; ties are broken by the position of
        # the first remaining '0' on the board.
        if self.f == other.f:
            return get_firstzero_index(self.board_state) < get_firstzero_index(other.board_state)
        return self.f < other.f
|
986,339 | 626091793a089011025bd77f7e58db1e6825ef4e | # Generated by Django 2.2 on 2019-05-10 04:02
from django.db import migrations, models
import exchange.util
class Migration(migrations.Migration):
dependencies = [
('exchange_app', '0019_trader_credit_limit'),
]
operations = [
migrations.AlterField(
model_name='order',
name='expiration',
field=models.DateTimeField(default=exchange.util.long_time_in_future),
),
]
|
986,340 | 9c439805f818b2120a73622f985c9fa9cebaa817 | import sys
import csv
from collections import OrderedDict
def main():
# Produce error message if incorrect number of inputs passed in
if len(sys.argv) != 3:
print("Error. Incorrect Input")
sys.exit(1)
# Read the database and the individual sample files as lists
dbReader = list(csv.reader(open(sys.argv[1])))
sampleReader = list(csv.reader(open(sys.argv[2])))
# Create list to keep track of number of sequence occurences
strCount = []
# Iterate through the database fieldNames
for sequence in dbReader[0][1:]:
# Append output from frequencyCount function to strCount dictionary
strCount.append(str(frequencyCount(str(sampleReader[0]), sequence)))
# If sequence count matches database, then print name of individual
for row in dbReader[1:]:
if row[1:] == strCount:
print(row[0])
sys.exit(0)
# Else, no match
print("No match")
def frequencyCount(mainStr, subStr):
    """Length of the longest run of consecutive subStr repeats in mainStr.

    Candidate start positions are found with str.find from pos + 1, so
    overlapping occurrences are considered, matching the original scan.
    """
    pos = mainStr.find(subStr)
    if pos == -1:
        return 0
    run_length = 1
    longest_run = 0
    while True:
        longest_run = max(longest_run, run_length)
        next_pos = mainStr.find(subStr, pos + 1)
        if next_pos == -1:
            return longest_run
        # Adjacent occurrence extends the run; anything else restarts it.
        run_length = run_length + 1 if next_pos == pos + len(subStr) else 1
        pos = next_pos
main()
|
986,341 | 5327e9838c35c8b2362c952ee25d03f99c4d92e0 | # curl -O -L ftp://ftp.cyrusimap.org/cyrus-sasl/cyrus-sasl-2.1.26.tar.gz
# tar xzf cyrus-sasl-2.1.26.tar.gz
# cd cyrus-sasl-2.1.26
# sudo ./configure && sudo make install
# sudo pip install sasl
# sudo pip install pyhive
# sudo pip install pycrypto
import sys
from pyhive import hive
from Crypto.Cipher import AES
import base64
# NOTE(security review): AES key/IV and DB coordinates are hard-coded in
# source; move them to configuration / secret storage.
encrypter = AES.new('<16 char key>', AES.MODE_CBC, '<16 char IV>')
conn = hive.Connection(host = "<host>", port = 10000, username = "<user>")
cursor = conn.cursor()
cursor.execute("Select * from <table>")
inserter = conn.cursor()
# Encrypt each row's second column and write it back base64-encoded.
for result in cursor.fetchall():
    print('id is ' + str(result[0]))
    # NOTE(review): the fixed 'xxxxx' suffix assumes len(value)+5 is a
    # multiple of the AES block size (16) -- confirm, otherwise encrypt()
    # raises on misaligned input.
    encrypted_ssn = encrypter.encrypt(result[1]+'xxxxx') #padding
    print(encrypted_ssn)
    print('^ encrypted ssn')
    encoded = base64.b64encode(encrypted_ssn)
    print(encoded)
    print('^ encoded encrypted ssn')
    # NOTE(security review): SQL built by string concatenation -- use a
    # parameterized query to avoid injection and quoting bugs.
    ins_statement = "Insert into table <table> values (" + str( result[0]) + " ,'" + encoded + "')"
    inserter.execute(ins_statement)
|
986,342 | bb449fb3ac177bf29e63f9afff3704abfc6f77ef | #!/usr/bin/env python3
"""
Lesson 3: String Format Lab
Course: UW PY210
Author: Jason Jenkins
"""
def format_string_1(file_num, float_num1, int_num, float_num2):
    """Format four values with str.format: a zero-padded file number, a
    fixed-point float, and two scientific-notation renderings.
    """
    template = "file_{:0>3d} :{:9.2f}, {:.2e}, {:.3g}"
    result = template.format(file_num, float_num1, int_num, float_num2)
    print(result)
    return result
def format_string_2(file_num, float_num1, int_num, float_num2):
    """Same output as Task 1, assembled from f-string fragments."""
    pieces = (
        f"file_{file_num:0>3d} ",
        f":{float_num1:9.2f}, ",
        f"{int_num:.2e}, ",
        f"{float_num2:.3g}",
    )
    result = "".join(pieces)
    print(result)
    return result
def format_string_3(*tmp_tuple):
    """Render 'the N numbers are: a, b, ...' for the given values."""
    count = len(tmp_tuple)
    numbers = ", ".join(str(value) for value in tmp_tuple)
    result = f"the {count} numbers are: {numbers}"
    print(result)
    return result
def format_string_4(*tmp_tuple):
    """Reorder a (m, d, year, a, b)-style 5-tuple as 'aa bb yyyy mm dd'."""
    # (source index, zero-padded field width) in output order
    order_and_width = ((3, 2), (4, 2), (2, 4), (0, 2), (1, 2))
    result = " ".join(
        "{:0>{}d}".format(tmp_tuple[i], width) for i, width in order_and_width)
    print(result)
    return result
def format_string_5(tmp_list=()):
    """Print fruit weights from an ['oranges', 1.3, 'lemons', 1.1]-style list.

    Names are upper-cased and singularised (trailing 's' dropped) and the
    weights are scaled up by 20%.

    Fix: the default argument was a mutable list (shared across calls); an
    empty tuple is immutable and indexing it still raises IndexError exactly
    like the old empty-list default did.

    :param tmp_list: sequence [name1, weight1, name2, weight2]
    """
    result = f"The weight of an {tmp_list[0][:-1].upper()} is {tmp_list[1] * 1.2} and "
    result += f"the weight of a {tmp_list[2][:-1].upper()} is {tmp_list[3] * 1.2}"
    print(result)
    return result
def format_string_6(*tmp_tuple):
    """Part 1: print a demo table of name/age/cost rows.
    Part 2: print and return the arguments right-aligned in 5-char columns.
    """
    # Part 1 -- fixed demo rows, left-aligned columns of width 20/10/10.
    rows = [
        ["Basicbourbon", "5", "25"],
        ["Nicebourbon", "20", "213"],
        ["Wowbourbon", "123", "2353"],
        ["oldbourbon", "1323", "23135"],
    ]
    lines = [f"{'Name':20} {'Age':10} {'Cost':10}"]
    for name, age, cost in rows:
        lines.append(f"{name:20} {age:10} {cost:10}")
    print("\n".join(lines) + "\n")
    # Part 2 -- each value right-aligned in a 5-character column.
    result = "".join(f"{value:5}" for value in tmp_tuple)
    print(result)
    return result
if __name__ == "__main__":
# testing
result = format_string_1(2, 123.4567, 10000, 12345.67)
assert result == 'file_002 : 123.46, 1.00e+04, 1.23e+04'
result = format_string_1(53, 1233.464327, 1012432000, 153345.6417)
assert result == 'file_053 : 1233.46, 1.01e+09, 1.53e+05'
result = format_string_2(2, 123.4567, 10000, 12345.67)
assert result == 'file_002 : 123.46, 1.00e+04, 1.23e+04'
result = format_string_2(53, 1233.464327, 1012432000, 153345.6417)
assert result == 'file_053 : 1233.46, 1.01e+09, 1.53e+05'
assert format_string_3(2, 3, 5) == 'the 3 numbers are: 2, 3, 5'
assert format_string_3(2, 3, 5, 7, 9) == 'the 5 numbers are: 2, 3, 5, 7, 9'
assert format_string_4(4, 30, 2017, 2, 27) == '02 27 2017 04 30'
result = format_string_5(['oranges', 1.3, 'lemons', 1.1])
expected = "The weight of an ORANGE is 1.56 and the weight of a LEMON is 1.32"
assert result == expected
format_string_6(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
assert format_string_6(1, 11, 111, 1111) == ' 1 11 111 1111'
print()
print("All Tests Pass")
|
986,343 | 448ef5f26146ac8b99622a9636440d77ddb5990d | # Problem 298
# Easy
# Asked by Google
#
# A girl is walking along an apple orchard with a bag in each hand. She likes to pick
# apples from each tree as she goes along, but is meticulous about not putting
# different kinds of apples in the same bag.
#
# Given an input describing the types of apples she will pass on her path, in order,
# determine the length of the longest portion of her path that consists of just two
# types of apple trees.
#
# For example, given the input [2, 1, 2, 3, 3, 1, 3, 5], the longest portion will
# involve types 1 and 3, with a length of four.
# |
986,344 | c56d5af94251ac0e0b9608034a63cc2a9c2a8a42 | class ConfigModel():
pass |
986,345 | 0c23806ba512002eb6c7d9a2eddfb2c88270dcbe | from pydantic import BaseModel
from typing import List
import inspect
from uuid import uuid4
import json
from json import JSONEncoder
import requests
from enum import Enum
class MyEnum(str, Enum):
a = "bb"
b = None
class M(BaseModel):
x: List[str]
print(type(MyEnum))
x = 1
assert list(x) == [x]
l = list(MyEnum)
l2 = [MyEnum]
m = M(x=l)
print(MyEnum.b == "None")
x = MyEnum.a
# m = M(x=x)
# print(m)
# print(type(x))
# x = str(x)
# # x = x.replace("b", "a")
# print(type(x))
# print(x)
# print(len(x))
# print(MyEnum.a.__str__())
class A:
def __str__(self):
return "a"
class B:
def __str__(self):
return "b"
class C(A, B):
...
a = C()
print(a)
# x = json.dumps(MyEnum.a)
# print(x)
# requests.post("http://www.example.com", json=MyEnum.a)
|
986,346 | 0ef76c4d514b63a69c8ca3ff5020278148e3a634 | import os
import pandas as pd
import numpy as np
from DataGenerator import DataGenerator
class SanDiskGenerator(DataGenerator):
    """Batch generator over the SanDisk pos/neg cycle CSV data.

    Loads positive and negative examples, reshapes each row into a
    (n_cycles, n_features) sequence, and serves shuffled batches together
    with per-sequence lengths.
    """

    def __init__(self, path='../SanDisk/', do_standardize=True, take_last_k_cycles=-1, train=True, n_features=11):
        self.path = path
        prefix = 'tr_' if train else 'test_'
        df_pos = pd.read_csv(os.path.join(self.path, prefix + 'pos.csv'))
        df_neg = pd.read_csv(os.path.join(self.path, prefix + 'neg.csv'))
        # Drop identifier columns; fill NaNs with 0.
        drop_cols = ['Unnamed: 0', 'PC', 'DUT', 'Bank', 'BLK', 'WL', 'Str']
        df_pos = df_pos.drop(columns=drop_cols).fillna(0)
        df_neg = df_neg.drop(columns=drop_cols).fillna(0)
        n_cycles = int(df_neg.shape[1] / n_features)
        # First cycle where Prog_Status_cyc_i == 1 (failure) per negative row.
        df_neg_status_prog = np.array([df_neg['Prog_Status_cyc_{}'.format(i)].values
                                       for i in range(1, n_cycles)]).transpose()
        # Sequence lengths: computed for failures, randomized for positives.
        self.seqlen = np.concatenate((np.argmax(df_neg_status_prog, axis=1),
                                      np.random.randint(n_cycles, size=len(df_pos))))
        # Reshape each flat row into a (n_cycles, n_features) matrix.
        data_neg = [df_neg.iloc[i].values.reshape(n_cycles, n_features) for i in range(len(df_neg))]
        data_pos = [df_pos.iloc[i].values.reshape(n_cycles, n_features) for i in range(len(df_pos))]
        self.data = np.array(data_neg + data_pos)
        self.labels = np.array([[0, 1]]*len(data_neg) + [[1, 0]]*len(data_pos))
        # One shared permutation keeps data, labels and seqlen aligned.
        p = np.random.permutation(self.data.shape[0])
        self.data = self.data[p]
        self.labels = self.labels[p]
        self.seqlen = self.seqlen[p]
        self.batch_id = 0

    def next(self, batch_size=np.inf):
        """Return a batch of data. When dataset end is reached, start over."""
        if self.batch_id == len(self.data):
            self.batch_id = 0
            # Reshuffle each epoch with ONE shared permutation.  Bug fix:
            # the original shuffled only data and labels (via a saved RNG
            # state), leaving seqlen out of sync after the first epoch.
            p = np.random.permutation(len(self.data))
            self.data = self.data[p]
            self.labels = self.labels[p]
            self.seqlen = self.seqlen[p]
        end_idx = min(self.batch_id + batch_size, len(self.data))
        batch_data = self.data[self.batch_id:end_idx]
        batch_labels = self.labels[self.batch_id:end_idx]
        batch_seqlen = self.seqlen[self.batch_id:end_idx]
        self.batch_id = end_idx
        return batch_data, batch_labels, batch_seqlen
if __name__=='__main__':
sample = SanDiskGenerator()
# sample_test = PointCloudGenerator('../PointClouds/ModelNet40_cloud.h5', mode='test')
data, labels, seqlens = sample.next(1)
print(data)
# print(len(sample_test.data))
# for i in range(5):
# batch_data, batch_labels = sample.next(10)
# print(batch_data.shape)
# print(batch_labels.shape)
|
986,347 | 69cb6a052352dd434d71c7da3d25a03a1a81326f | from random import *
offline = randint(4, 28)
print("오프라인 스터디 모임 날짜는 매월 " + str(offline) + " 일로 선정되었습니다.\n")
|
986,348 | 53914881c6228b20088fe28f6b70c93864895981 | from rl.env.random_jump import RandomJumpEnv
from rl.standalone.tf.gaussian_policy_tf import GaussianPolicyTF
from rl.standalone.tf.value_estimator_tf import ValueEstimatorTF
from rl.featurizer.rbf_featurizer import RBFFeaturizer
#
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import itertools
from collections import namedtuple
EpisodeStats = namedtuple("Stats", [ "episode_rewards"])
Transition = namedtuple("Transition", ["state", "action", "reward", "next_state"])
def actor_critic(env, policy_estimator, value_estimator, num_episodes, discounted_factor=1.0):
stats = EpisodeStats(episode_rewards=np.zeros(num_episodes))
for i_episode in range(num_episodes):
state = env.reset()
# print(state)
episode = []
for t in itertools.count():
# take a step
action = policy_estimator.predict(state)
# print(action)
next_state, reward, done, _ = env.step(action)
# next_state = next_state[0][0]
#
episode.append(Transition(state=state,
action=action,
reward=reward,
next_state=next_state))
#
stats.episode_rewards[i_episode] += reward
# Calculate TD Target
# print(state, next_state)
value_next = value_estimator.predict(next_state)
td_target = reward + discounted_factor * value_next
td_error = td_target - value_estimator.predict(state)
# upate the value estimator
value_estimator.update(state, td_target)
# Update the policy estimator
policy_estimator.update(state, td_error, action)
# Print out which step we're on, useful for debugging.
print("\rStep {} @ Episode {}/{} ({})".format(
t, i_episode + 1, num_episodes, stats.episode_rewards[i_episode - 1]), end="")
#
if done or t>1000:
break
state = next_state
print("")
return stats
#
env = RandomJumpEnv()
print("Action space : ", env.action_space)
print("Action space low : ", env.action_space.low[0])
print("Action space high: ", env.action_space.high[0])
print("Observation space : ", env.observation_space)
print("Observation space low : ", env.observation_space.low[0])
print("Observation space high: ", env.observation_space.high[0])
#
num_trails = 5
num_episodes = 100
mean_rewards = np.zeros(shape=(num_trails, num_episodes))
for i in range(num_trails):
tf.reset_default_graph()
global_step = tf.Variable(0, name="global_step", trainable=False)
rbf_featurizer = RBFFeaturizer(env=env, dim_features=20)
policy_estimator = GaussianPolicyTF(env, rbf_featurizer, learning_rate=0.0001)
value_estimator = ValueEstimatorTF(env, rbf_featurizer, learning_rate=0.01)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
stats = actor_critic(env, policy_estimator, value_estimator, num_episodes, discounted_factor=0.95)
mean_rewards[i, :] = stats.episode_rewards
sess.close()
fig = plt.figure()
plt.hold('on')
r_mean = np.mean(mean_rewards,axis=0)
r_std = np.std(mean_rewards, axis=0)
plt.fill_between(range(num_episodes), r_mean - r_std, r_mean + r_std, alpha=0.3)
plt.plot(range(num_episodes), r_mean)
plt.legend(loc='lower right')
plt.show()
|
986,349 | d81b027a0d3efddb4c224bff88a3a63ef3eaceeb | #!/bin/python3
from motor.model.pwm_motor import PWMMotor
class PWMFourWheelDrive:
    """Drives four PWM motors (front/back x left/right) as one chassis."""

    def __init__(self, pin_1, pin_2, pin_en_1, pin_3, pin_4, pin_en_2, pin_5, pin_6, pin_en_3, pin_7, pin_8, pin_en_4):
        self.motor_f_r = PWMMotor(pin_1, pin_2, pin_en_1)
        self.motor_f_l = PWMMotor(pin_3, pin_4, pin_en_2)
        self.motor_b_l = PWMMotor(pin_5, pin_6, pin_en_3)
        self.motor_b_r = PWMMotor(pin_7, pin_8, pin_en_4)
        self.speed_x_val = 0
        self.speed_y_val = 0

    def _motors(self):
        """All motors in a fixed order: FR, FL, BL, BR."""
        return (self.motor_f_r, self.motor_f_l, self.motor_b_l, self.motor_b_r)

    def forward(self, speed=1):
        print('forward')
        for motor in self._motors():
            motor.forward(speed)

    def backward(self, speed=1):
        print('backward')
        for motor in self._motors():
            motor.backward(speed)

    def turn_left(self, speed=1):
        # Spin in place: right side forward, left side backward.
        print('turn left')
        self.motor_f_r.forward(speed)
        self.motor_f_l.backward(speed)
        self.motor_b_l.backward(speed)
        self.motor_b_r.forward(speed)

    def turn_right(self, speed=1):
        # Spin in place: right side backward, left side forward.
        print('turn right')
        self.motor_f_r.backward(speed)
        self.motor_f_l.forward(speed)
        self.motor_b_l.forward(speed)
        self.motor_b_r.backward(speed)

    def speed_x(self, speed=1):
        # Steering input is halved before mixing into wheel speeds.
        self.speed_x_val = speed * 0.5
        self.flash_speed()

    def speed_y(self, speed=1):
        self.speed_y_val = speed
        self.flash_speed()

    def unify_speed(self, speed):
        """Clamp a speed value to the [-1, 1] range."""
        return min(1, max(-1, speed))

    def flash_speed(self):
        # Differential mix: steering adds to the right side, subtracts left.
        right = self.unify_speed(self.speed_y_val + self.speed_x_val)
        left = self.unify_speed(self.speed_y_val - self.speed_x_val)
        self.motor_f_r.speed(right)
        self.motor_f_l.speed(left)
        self.motor_b_r.speed(right)
        self.motor_b_l.speed(left)

    def stop(self):
        for motor in self._motors():
            motor.stop()
|
986,350 | 42a1a880e86def1e0dc62abdfc5335c705fdf436 | # Generated by Django 2.2.12 on 2020-08-18 14:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hogist', '0005_auto_20200818_1350'),
]
operations = [
migrations.AddField(
model_name='useripdetails',
name='time',
field=models.DateTimeField(null=True),
),
]
|
986,351 | 4da9edc4bef7ec679c25a3fda1ae863b92832198 | # -*- coding: utf-8 -*-
"""Implementation of the core game loop."""
import random
import logging
import pygame
from airportgame.colors import RED, GREEN
from airportgame.textinput import TextInput
from airportgame.pgtext import PgText
from airportgame.player import Player
from airportgame.airfield import Airfield
from airportgame.flight import Flight
from airportgame.path import EllipticalPathEnsemble
from airportgame.menu import Menu
class Game():
"""
The core of the game.
"""
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 600
BORDER_MARGIN = 60
def __init__(self, skip_name_input=False):
"""
Constructor
"""
# Set up the font used by the game
self.pgtext = PgText("Consolas", 25)
self.clock = pygame.time.Clock()
self.player = None
self.textinput = TextInput(self.pgtext, color=RED)
self.airfield = None
self.max_fps = 60
self.time_since_last_flight_created = 0
self.incoming_flights = []
self.paths = []
self.selected_flight = None
self.selected_runway = None
self.skip_name_input = skip_name_input
self._draw_subpaths = True
self.logger = logging.getLogger(__name__)
# Screen surface that has the size of 800 x 600
self.screen = pygame.display.set_mode((self.WINDOW_WIDTH,
self.WINDOW_HEIGHT))
self.show_main_menu = True
self.menu = Menu(self.pgtext, self.WINDOW_WIDTH, self.WINDOW_HEIGHT)
# Start game loop
self.game_loop()
def game_loop(self):
"""
Main game loop
"""
running = True
while running:
# How much time has passed since the last call (milliseconds)
# Limit FPS to max_fps
elapsed_time = self.clock.tick(self.max_fps)
# Event handling
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
running = False
if (event.type == pygame.KEYDOWN and
event.key == pygame.K_ESCAPE):
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
if self.airfield is not None:
self.airfield.reset_airfield()
elif event.key == pygame.K_s:
self._draw_subpaths = not self._draw_subpaths
if self.player is not None:
# Only do this if game is properly initialized
if event.type == pygame.MOUSEBUTTONUP:
# Select flight
mouse_x, mouse_y = pygame.mouse.get_pos()
flight_under_mouse = self.find_closest_flight_in_range(mouse_x, mouse_y)
runway_under_mouse = self.find_closest_runway_in_range(mouse_x, mouse_y)
if self.selected_flight is None:
self.selected_flight = flight_under_mouse
else:
if runway_under_mouse is not None:
self.selected_runway = runway_under_mouse
self.logger.debug("Runway %d selected", self.selected_runway.number)
self.selected_flight.generate_landing_path(self.selected_runway)
else:
self.selected_runway = None
self.selected_flight = flight_under_mouse
self.logger.debug("Runway deselected")
# Update text input
if self.textinput.is_active:
self.textinput.update(elapsed_time, events)
if self.show_main_menu:
self.menu.update(elapsed_time, events)
self.update(elapsed_time)
self.draw(self.screen)
return
    def update(self, elapsed_time):
        """
        Update game logic.

        State machine: menu -> name entry (creates the Player) -> airfield
        construction -> normal play (spawn flights, assign paths, advance
        and remove landed flights).  Always returns True.
        """
        if self.show_main_menu:
            self.show_main_menu = self.menu.show_menu
        # A new player must be created:
        elif self.player is None:
            if not self.skip_name_input:
                # NOTE(review): game_loop reads `self.textinput.is_active`
                # as an attribute while this line *calls* it -- one of the
                # two usages is likely wrong; confirm TextInput's API.
                if not self.textinput.is_active():
                    self.textinput.activate()
                    self.textinput.set_pos(100, 150)
                if self.textinput.was_return_pressed():
                    if self.textinput.get_value():
                        self.player = Player(self.textinput.get_value())
                    else:
                        self.player = Player("I am too important to input a name.")
                    self.textinput.deactivate()
                    # TODO: Choose difficulty
            else:
                self.player = Player("Debug Mode On")
        elif self.player and self.airfield is None:
            self.airfield = Airfield(offset=self.center_airfield())
            self.create_circling_flight_paths()
        elif self.player and self.airfield:
            # Game is running normally
            self.create_flight(elapsed_time)
            for flight in self.incoming_flights:
                if flight.path is None:
                    # New flights get a random circling path until directed.
                    path_num = random.randint(0, len(self.paths) - 1)
                    flight.set_path(self.paths[path_num])
                flight.update(elapsed_time)
            self.remove_landed_flights()
        return True
def draw(self, screen):
"""Draw the game.
Arguments:
screen {Surface} -- Surface to draw on.
"""
screen.fill(GREEN)
self.pgtext.display_text("AirPortGame", screen)
if self.show_main_menu:
self.menu.draw(screen)
elif self.player is None:
self.pgtext.display_text("Please enter your name: ", screen, 100, 100, RED)
self.textinput.draw(screen)
elif self.player and self.airfield:
self.airfield.draw(screen)
for flight in self.incoming_flights:
flight.draw(screen, draw_subpath=self._draw_subpaths)
if self.selected_flight is not None:
self.selected_flight.draw_selection_box(screen)
if self.selected_runway is not None:
self.selected_runway.draw_selection_circle(screen)
if ((self.selected_flight is not None) and
(self.selected_runway is not None)):
self.selected_flight.draw_path(screen)
for path in self.paths:
path.draw(screen)
self.show_fps(screen)
pygame.display.flip()
def show_fps(self, screen):
"""
Displays the current FPS on screen
"""
fps = self.clock.get_fps()
self.pgtext.display_text("FPS: {0:.2f}".format(fps), screen, 600, 10)
def center_airfield(self):
"""Get the offset coordinates for the Airfield so that it is centered.
Returns:
tuple -- x and y offset coordinates.
"""
x = self.WINDOW_WIDTH / 2 - (Airfield.FIELD_WIDTH / 2)
y = self.WINDOW_HEIGHT / 2 - (Airfield.FIELD_HEIGHT / 2)
return (x, y)
def create_flight(self, elapsed_time):
"""Create a new flight.
Arguments:
elapsed_time {float} -- Time elapsed since last call.
"""
time_limit = 180 * 1000
self.time_since_last_flight_created += elapsed_time
creation_rate = (self.time_since_last_flight_created
/ time_limit)
# Limit creation of new planes when there are too many
if len(self.incoming_flights) > 9:
creation_rate = 0.0005
chance = random.random()
if chance < creation_rate:
self.time_since_last_flight_created = 0
# TODO: Create name for flights
name = ""
x = random.randint(0, self.WINDOW_WIDTH - 1)
y = random.randint(0, self.WINDOW_HEIGHT - 1)
new_flight = Flight(name, None, x=x, y=y)
self.incoming_flights.append(new_flight)
def find_closest_flight_in_range(self, x, y, max_range=10):
"""
Return the flight closest to (x, y) within max_range.
"""
closest_flight = None
closest_distance = max_range
point = pygame.math.Vector2(x, y)
for flight in self.incoming_flights:
distance = point.distance_to(flight.get_pos())
if distance < closest_distance:
closest_distance = distance
closest_flight = flight
return closest_flight
def find_closest_runway_in_range(self, x, y,
                                 max_range=Airfield.MINIMUM_DISTANCE):
    """
    Return the runway whose start point is closest to (x, y) within
    max_range, or None when none is close enough.
    """
    click_pos = pygame.math.Vector2(x, y)
    best_runway = None
    best_distance = max_range
    for candidate in self.airfield.get_runways():
        candidate_distance = click_pos.distance_to(
            candidate.get_start_pos())
        if candidate_distance < best_distance:
            best_distance = candidate_distance
            best_runway = candidate
    # DEBUG
    if best_runway is not None:
        self.logger.debug("Clicked at: %s, runway #%d at: %s", click_pos,
                          best_runway.get_number(),
                          (best_runway.get_start_pos()))
    return best_runway
def create_circling_flight_paths(self, n=3):
    """Creates ellipticals paths around the airfield.

    Builds n concentric elliptical holding paths in the free band
    between the window border and the airfield rectangle.

    Keyword Arguments:
        n {int} -- Number of paths. (default: {3})
    """
    # Free band to the left of the airfield: [left_x1, left_x2].
    left_x1 = Game.BORDER_MARGIN
    airfield_offset = self.airfield.get_offset()
    left_x2 = airfield_offset[0] - Game.BORDER_MARGIN
    assert left_x1 < left_x2
    # Free band to the right of the airfield: [right_x1, right_x2].
    right_x1 = (airfield_offset[0] + self.airfield.FIELD_WIDTH
                + Game.BORDER_MARGIN)
    right_x2 = Game.WINDOW_WIDTH - Game.BORDER_MARGIN
    assert right_x1 < right_x2
    # Free bands above and below the airfield.
    top_y1 = Game.BORDER_MARGIN
    top_y2 = airfield_offset[1] - Game.BORDER_MARGIN
    assert top_y1 < top_y2
    bottom_y1 = (airfield_offset[1] + self.airfield.FIELD_HEIGHT
                 + Game.BORDER_MARGIN)
    bottom_y2 = Game.WINDOW_HEIGHT - Game.BORDER_MARGIN
    assert bottom_y1 < bottom_y2
    # Step sizes so that n ellipse corners are spread evenly across
    # each band (n - 1 gaps between n paths).
    left_dx = (left_x2 - left_x1) / (n - 1)
    right_dx = (right_x2 - right_x1) / (n - 1)
    top_dy = (top_y2 - top_y1) / (n - 1)
    bottom_dy = (bottom_y2 - bottom_y1) / (n - 1)
    top_left = pygame.math.Vector2(left_x1, top_y1)
    bottom_right = pygame.math.Vector2(right_x2, bottom_y2)
    d_top_left = pygame.math.Vector2(left_dx, top_dy)
    d_bottom_right = pygame.math.Vector2(right_dx, bottom_dy)
    # Path 0 is the outermost ellipse; each following path shrinks
    # toward the airfield from both corners.
    for i in range(n):
        xy1 = top_left + i * d_top_left
        xy2 = bottom_right - i * d_bottom_right
        self.paths.append(EllipticalPathEnsemble(xy1, xy2, circular=True))
def remove_landed_flights(self):
    """Remove all landed flights from lists."""
    still_flying = []
    for flight in self.incoming_flights:
        if flight.get_status() == Flight.STATUS_LANDED:
            # Drop the selection if the selected flight just landed.
            if flight is self.selected_flight:
                self.selected_flight = None
        else:
            still_flying.append(flight)
    # In-place slice assignment keeps any external references to the
    # list valid, just like the original per-index deletion did.
    self.incoming_flights[:] = still_flying
|
986,352 | bd14f2eeff3e22afa18466e7939d396f1ceda251 | import sys
import csv
import matplotlib.pyplot as plt
from datetime import *
from ggplot import *
import matplotlib.dates as dt
# Read a 311-style complaints CSV (path given as argv[1]; column 1 holds the
# created-date, column 3 the agency) and plot daily complaint counts for the
# NYPD, TLC and DPR agencies on a shared date axis.
# NOTE(review): Python 2 script — `xrange` below, and the defunct `ggplot`
# package is imported at file top.
f = open(sys.argv[1])
reader = csv.reader(f,delimiter = ',')
NYPD = {}
TLC = {}
DPR = {}
count = 0
next(reader)  # skip the CSV header row
# NOTE(review): '%H:%M:%S %p' mixes 24-hour %H with an AM/PM field; for a
# true 12-hour timestamp this should be %I — with %H the %p part is ignored.
start_date = datetime.strptime('06/01/2013 00:00:00 AM', '%m/%d/%Y %H:%M:%S %p')
for i in reader:
    # Bucket each complaint by whole days elapsed since start_date.
    if i[3] == 'NYPD':
        date = datetime.strptime(i[1],'%m/%d/%Y %H:%M:%S %p')
        delta = date - start_date
        count = NYPD.setdefault(int(delta.days), 0)
        NYPD[int(delta.days)] = count + 1
    elif i[3] == 'TLC':
        date = datetime.strptime(i[1],'%m/%d/%Y %H:%M:%S %p')
        delta = date - start_date
        count = TLC.setdefault(int(delta.days), 0)
        TLC[int(delta.days)] = count + 1
    elif i[3] == 'DPR':
        date = datetime.strptime(i[1],'%m/%d/%Y %H:%M:%S %p')
        delta = date - start_date
        count = DPR.setdefault(int(delta.days), 0)
        DPR[int(delta.days)] = count + 1
X = DPR.keys()
Y = DPR.values()
X1 = []  # NOTE(review): unused.
L = []
fig, ax = plt.subplots()
#print len(X),len(Y)
# Convert DPR's day offsets into datetime labels for the x axis.
for i in xrange(0, len(X)):
    L.append(start_date + timedelta(days = i))
plt.xticks(X, L, fontsize = 6)
plt.plot(L, Y, 'b', label = 'DPR')
# NOTE(review): L was sized from DPR's keys but is reused for NYPD and TLC
# below — if the agencies cover different numbers of days, plot() will fail
# or misalign; verify against real data.
X = NYPD.keys()
Y = NYPD.values()
plt.xticks(X, L, fontsize = 6)
plt.plot(L, Y, 'r', label = 'NYPD')
X = TLC.keys()
Y = TLC.values()
plt.xticks(X, L, fontsize = 6)
plt.plot(L, Y, 'g', label = 'TLC')
plt.legend(loc = 2)
ax.xaxis.set_major_formatter(dt.DateFormatter("%b %d %Y"))
ax.xaxis.set_major_locator(dt.DayLocator((1,8,16,24)))
plt.xlabel('Date')
plt.ylabel('Number of Complaints')
plt.show()
|
986,353 | ffe0fecac8fe4c647e9aaf8d5a6804ca23f91bde | import os
import numpy as np
import configparser
import math
import struct
from photogrammetry_importer.file_handlers.image_file_handler import (
ImageFileHandler,
)
from photogrammetry_importer.utility.os_utility import get_subdirs
from photogrammetry_importer.file_handlers.utility import (
check_radial_distortion,
)
from photogrammetry_importer.blender_utility.logging_utility import log_report
from photogrammetry_importer.types.camera import Camera
from photogrammetry_importer.types.point import Point
class MVEFileHandler:
    """Class to read and write :code:`MVE` workspaces."""
    @staticmethod
    def _str_to_arr(some_str, target_type):
        # Split a whitespace-separated string and cast every token.
        return [target_type(x) for x in some_str.split()]
    @staticmethod
    def _readline_as_numbers(input_file, target_type):
        # Consume exactly one line and return its values as target_type.
        line_str = input_file.readline().rstrip()
        return MVEFileHandler._str_to_arr(line_str, target_type)
    @staticmethod
    def _parse_rotation_matrix(input_file):
        # A rotation matrix is stored as three consecutive lines, one row each.
        row_1 = MVEFileHandler._readline_as_numbers(
            input_file, target_type=float
        )
        row_2 = MVEFileHandler._readline_as_numbers(
            input_file, target_type=float
        )
        row_3 = MVEFileHandler._readline_as_numbers(
            input_file, target_type=float
        )
        return np.asarray([row_1, row_2, row_3], dtype=float)
    @staticmethod
    def parse_synth_out(synth_out_ifp):
        """Parse the :code:`synth_0.out` file in the :code:`MVE` workspace.

        Returns the list of 3D :class:`Point` instances. The camera records
        in this file are read only to advance the file position.
        """
        points3D = []
        with open(synth_out_ifp, "r") as input_file:
            meta_data_line = input_file.readline()
            num_cameras, num_points = MVEFileHandler._readline_as_numbers(
                input_file, target_type=int
            )
            # The camera information provided in the synth_0.out file is incomplete
            # Thus, we use the camera information provided in the view folders
            # Consume the lines corresponding to the (incomplete) camera information
            for cam_idx in range(num_cameras):
                intrinsic_line = MVEFileHandler._readline_as_numbers(
                    input_file, target_type=float
                )
                rotation_mat = MVEFileHandler._parse_rotation_matrix(
                    input_file
                )
                camera_translation = np.asarray(
                    MVEFileHandler._readline_as_numbers(
                        input_file, target_type=float
                    )
                )
            # Each point record is three lines: coordinate, color and the
            # (ignored) measurement/visibility list.
            for point_idx in range(num_points):
                coord = MVEFileHandler._readline_as_numbers(
                    input_file, target_type=float
                )
                color = MVEFileHandler._readline_as_numbers(
                    input_file, target_type=int
                )
                measurement_line = MVEFileHandler._readline_as_numbers(
                    input_file, target_type=int
                )
                point = Point(
                    coord=coord, color=color, id=point_idx, scalars=[]
                )
                points3D.append(point)
        return points3D
    @staticmethod
    def parse_meta(meta_ifp, width, height, camera_name, op):
        """Parse a :code:`meta.ini` file in the :code:`MVE` workspace.

        Builds and returns a :class:`Camera` for one view folder.
        NOTE(review): the camera_name parameter is currently unused here.
        """
        view_specific_dir = os.path.dirname(meta_ifp)
        relative_image_fp = os.path.join(view_specific_dir, "undistorted.png")
        image_dp = os.path.dirname(view_specific_dir)
        camera = Camera()
        # MVE stores already-undistorted images, so the distorted and
        # undistorted file paths coincide.
        camera.image_fp_type = Camera.IMAGE_FP_TYPE_RELATIVE
        camera.image_dp = image_dp
        camera._relative_fp = relative_image_fp
        camera._absolute_fp = os.path.join(image_dp, relative_image_fp)
        camera._undistorted_relative_fp = camera._relative_fp
        camera._undistorted_absolute_fp = camera._absolute_fp
        camera.width = width
        camera.height = height
        ini_config = configparser.RawConfigParser()
        ini_config.read(meta_ifp)
        # MVE normalizes the focal length by max(width, height).
        focal_length_normalized = float(
            ini_config.get(section="camera", option="focal_length")
        )
        pixel_aspect = float(
            ini_config.get(section="camera", option="pixel_aspect")
        )
        if pixel_aspect != 1.0:
            log_report(
                "WARNING",
                "Focal length differs in x and y direction,"
                + " setting it to the average value.",
                op,
            )
            focal_length_normalized = (
                focal_length_normalized
                + focal_length_normalized * pixel_aspect
            ) / 2
        max_extend = max(width, height)
        focal_length = focal_length_normalized * max_extend
        # Principal point is stored normalized in [0, 1] per axis.
        principal_point_str = ini_config.get(
            section="camera", option="principal_point"
        )
        principal_point_list = MVEFileHandler._str_to_arr(
            principal_point_str, target_type=float
        )
        cx_normalized = principal_point_list[0]
        cy_normalized = principal_point_list[1]
        cx = cx_normalized * width
        cy = cy_normalized * height
        calib_mat = Camera.compute_calibration_mat(focal_length, cx, cy)
        camera.set_calibration_mat(calib_mat)
        # Distortion is not applied here — only checked/warned about.
        radial_distortion_str = ini_config.get(
            section="camera", option="radial_distortion"
        )
        radial_distortion_vec = np.asarray(
            MVEFileHandler._str_to_arr(
                radial_distortion_str, target_type=float
            )
        )
        check_radial_distortion(radial_distortion_vec, relative_image_fp, op)
        rotation_str = ini_config.get(section="camera", option="rotation")
        rotation_mat = np.asarray(
            MVEFileHandler._str_to_arr(rotation_str, target_type=float)
        ).reshape((3, 3))
        translation_str = ini_config.get(
            section="camera", option="translation"
        )
        translation_vec = np.asarray(
            MVEFileHandler._str_to_arr(translation_str, target_type=float)
        )
        # Order matters: the translation is interpreted w.r.t. the rotation.
        camera.set_rotation_with_rotation_mat(rotation_mat)
        camera.set_camera_translation_vector_after_rotation(translation_vec)
        return camera
    @staticmethod
    def parse_views(
        views_idp,
        default_width,
        default_height,
        add_depth_maps_as_point_cloud,
        op=None,
    ):
        """Parse the :code:`views` directory in the :code:`MVE` workspace.

        Returns one :class:`Camera` per view folder; optionally attaches a
        lazy depth-map reader to each camera.
        """
        cameras = []
        subdirs = get_subdirs(views_idp)
        for subdir in subdirs:
            folder_name = os.path.basename(subdir)
            # folder_name = view_0000.mve
            camera_name = folder_name.split("_")[1].split(".")[0]
            undistorted_img_ifp = os.path.join(subdir, "undistorted.png")
            success, width, height = ImageFileHandler.read_image_size(
                undistorted_img_ifp,
                default_width=default_width,
                default_height=default_height,
                op=op,
            )
            assert success
            meta_ifp = os.path.join(subdir, "meta.ini")
            camera = MVEFileHandler.parse_meta(
                meta_ifp, width, height, camera_name, op
            )
            if add_depth_maps_as_point_cloud:
                # Use the finest depth-map pyramid level available (L0..L8).
                for level in range(9):
                    depth_ifp = os.path.join(
                        subdir, "depth-L" + str(level) + ".mvei"
                    )
                    if os.path.isfile(depth_ifp):
                        camera.set_depth_map_callback(
                            MVEFileHandler.read_depth_map,
                            depth_ifp,
                            Camera.DEPTH_MAP_WRT_UNIT_VECTORS,
                            shift_depth_map_to_pixel_center=True,
                        )
                        break
                if camera.get_depth_map_fp() is None:
                    log_report(
                        "WARNING", "No depth map found in " + subdir, op
                    )
            cameras.append(camera)
        return cameras
    @staticmethod
    def parse_mve_workspace(
        workspace_idp,
        default_width,
        default_height,
        add_depth_maps_as_point_cloud,
        suppress_distortion_warnings,
        op=None,
    ):
        """Parse a :code:`MVE` workspace.

        Returns (cameras, points3D).
        NOTE(review): suppress_distortion_warnings is accepted but not
        forwarded anywhere — verify intended behavior.
        """
        log_report("INFO", "Parse MVE workspace: ...", op)
        log_report("INFO", workspace_idp, op)
        views_idp = os.path.join(workspace_idp, "views")
        synth_ifp = os.path.join(workspace_idp, "synth_0.out")
        cameras = MVEFileHandler.parse_views(
            views_idp,
            default_width,
            default_height,
            add_depth_maps_as_point_cloud,
            op,
        )
        points3D = MVEFileHandler.parse_synth_out(synth_ifp)
        log_report("INFO", "Parse MVE workspace: Done", op)
        return cameras, points3D
    @staticmethod
    def _read_next_bytes(
        fid, num_bytes, format_char_sequence, endian_character="<"
    ):
        """Read and unpack the next bytes from a binary file.
        :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, etc.
        :param format_char_sequence: List of {c, e, f, d, h, H, i, I, ...}.
        :param endian_character: Any of {@, =, <, >, !}
        :return: Tuple of read and unpacked values.
        """
        data = fid.read(num_bytes)
        return struct.unpack(endian_character + format_char_sequence, data)
    @staticmethod
    def read_depth_map(depth_map_ifp):
        """Read a depth map from an :code:`.mvei` file as a (h, w) array."""
        # See:
        # https://github.com/simonfuhrmann/mve/wiki/MVE-File-Format#the-mvei-image-format
        # https://github.com/simonfuhrmann/mve/blob/master/libs/mve/image_io.cc
        with open(depth_map_ifp, "rb") as fid:
            # 11-byte magic signature, then little-endian int32 header fields.
            mvei_file_signature = MVEFileHandler._read_next_bytes(
                fid, 11, "ccccccccccc"
            )
            width = MVEFileHandler._read_next_bytes(fid, 4, "i")[0]
            height = MVEFileHandler._read_next_bytes(fid, 4, "i")[0]
            channels = MVEFileHandler._read_next_bytes(fid, 4, "i")[0]
            assert channels == 1
            raw_type = MVEFileHandler._read_next_bytes(fid, 4, "i")[0]
            assert raw_type == 9  # IMAGE_TYPE_FLOAT
            num_elements = width * height * channels
            data = np.asarray(
                MVEFileHandler._read_next_bytes(
                    fid, num_elements * 4, "f" * num_elements
                )
            )
        return data.reshape((height, width))
|
986,354 | 072f0bedb0068408160886456907e04e011fe17a | class Figura:
def __init__(self, podstawa, bok, wysokosc, promien):
self.podstawa = podstawa
self.bok = bok
self.wysokosc = wysokosc
self.promien = promien
class Kwadrat(Figura):
    """Square: reads its side from stdin, prints area and perimeter."""
    def __init__(self):
        super().__init__(int, 0, 0, 0)
        self.podstawa = int(input("Podaj podstawe Kwadratu: "))
    def Pole(self):
        pole = self.podstawa ** 2
        print("Pole kwadratu = ", pole)
    def Obw(self):
        obwod = 4 * self.podstawa
        print("Obwód kwadratu =", obwod)
# Interactive demo: prompts for the side, then prints area and perimeter.
k = Kwadrat()
k.Pole()
k.Obw()
class Trojkat_Prostokatny(Figura):
    """Right triangle: legs are read from stdin; the hypotenuse is asked
    for only when the perimeter is computed."""
    def __init__(self):
        super().__init__(int, 0, int, 0)
        self.podstawa = int(input("Podaj podstawe trójkąta: "))
        self.wysokosc = int(input("Podaj wysokość trójkąta:"))
    def Pole(self):
        pole = (self.podstawa * self.wysokosc) / 2
        print("Pole trójkąta = ", pole)
    def Obw(self):
        przeciwprostokatna = int(input("Podaj przekątną: "))
        obwod = self.podstawa + self.wysokosc + przeciwprostokatna
        print("Obwód trójkąta =", obwod)
# Interactive demo: prompts for the legs, prints area, then asks for the
# hypotenuse and prints the perimeter.
t = Trojkat_Prostokatny()
t.Pole()
t.Obw()
class Prostokat(Figura):
    """Rectangle read from stdin.

    Bug fix: the perimeter used to be printed as podstawa + bok; a
    rectangle's perimeter is 2 * (podstawa + bok).
    """
    def __init__(self):
        super().__init__(int, int, 0, 0)
        self.podstawa = int(input("Podaj podstawe prostokąta: "))
        self.bok = int(input("Podaj bok prostokąta:"))
    def Pole(self):
        print("Pole prostokąta = ", self.podstawa * self.bok)
    def Obw(self):
        # Perimeter = 2 * (a + b), not a + b.
        print("Obwód prostokąta =", 2 * (self.podstawa + self.bok))
# Interactive demo: prompts for both sides, then prints area and perimeter.
p = Prostokat()
p.Pole()
p.Obw()
class Romb(Figura):
    """Rhombus: area = base * height, perimeter = 4 * base."""
    def __init__(self):
        super().__init__(int, 0, int, 0)
        self.podstawa = int(input("Podaj podstawe Rombu: "))
        self.wysokosc = int(input("Podaj wysokość Rombu: "))
    def Pole(self):
        pole = self.podstawa * self.wysokosc
        print("Pole Rombu = ", pole)
    def Obw(self):
        obwod = 4 * self.podstawa
        print("Obwód Rombu =", obwod)
# Interactive demo: prompts for base and height, prints area and perimeter.
r = Romb()
r.Pole()
r.Obw()
class Kolo(Figura):
    """Circle: uses the original's pi approximation of 3.14."""
    def __init__(self):
        super().__init__(0, 0, 0, int)
        self.promien = int(input("Podaj promień koła: "))
    def Pole(self):
        # Same operand order as the original so float output is identical.
        pole = (3.14) * (self.promien) ** 2
        print("Pole koła = ", pole)
    def Obw(self):
        obwod = self.promien * 2 * (3.14)
        print("Obwód koła =", obwod)
# Interactive demo: prompts for the radius, prints area and circumference.
# Note this rebinds `k`, previously the Kwadrat instance.
k = Kolo()
k.Pole()
k.Obw()
986,355 | a6eb88b453e44886e5899adb84298771982114a4 | import sys
# Debug helper: show the interpreter's module search path and this file's path.
print(sys.path)
print(__file__)
|
986,356 | dc2ccc9aff3649b06d1ea7b046c7a41c98e15ad2 | from .contact import ContactForm
from .news import NewsCommentForm
from .product import *
from .subscriber import SubscriberForm
|
986,357 | bc146fe407a125523710255e483a790a78f31632 | # Generated by Django 3.2.5 on 2021-07-13 11:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the auth_tes app (auto-generated by Django 3.2.5).

    Creates the FoodItem and MenuCategory tables.
    NOTE(review): MenuCategory.food_items is a ForeignKey *on* the
    category pointing at one FoodItem — presumably the relation was
    meant to go the other way (FK on FoodItem) or be a ManyToManyField;
    verify the model before building on it.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='FoodItem',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('desc', models.CharField(max_length=30)),
                ('price', models.CharField(max_length=30)),
                ('imageName', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='MenuCategory',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('foodCategory', models.CharField(max_length=30)),
                ('svgName', models.CharField(max_length=30)),
                ('food_items', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth_tes.fooditem')),
            ],
        ),
    ]
|
986,358 | 79bf8f881c9e0495c8fa54a2cd7cd1cc3b59ec08 | from django.core.management.base import BaseCommand, CommandError
from django.core.management.base import NoArgsCommand
from django.db import models
from TopicalRSS.rss import models
from TopicalRSS.rss.models import Feed
import elementtree.ElementTree as ET
import urllib2
class Command(NoArgsCommand):
    """Management command: download every Feed's RSS, split it into
    <item> elements and persist each one as an Episode row.

    NOTE(review): legacy Python 2 / old-Django code — `xrange`, `urllib2`,
    `elementtree`, and `NoArgsCommand` (removed in Django 1.10).
    """
    help = "Updates the Database with parsed episodes"
    def handle_noargs(self, **options):
        self.stdout.write('Entered Main')
        allFeeds = Feed.objects.all()
        def get_episodes(feed):
            # Fetch and parse one feed, storing all of its episodes.
            #Open URL and get the stuff
            url = feed.url
            doc = urllib2.urlopen(url)
            rssXML = doc.read()
            doc.close()
            tree = ET.XML(rssXML)
            episodes = tree.findall('channel/item')
            episodesTitles = tree.findall('channel/item/title')
            for j in xrange(len(episodes)):
                n = episodesTitles[j]
                n = n.text
                # Items are listed newest-first, so number them descending.
                epN = ((len(episodes)) - j)
                epXML = episodes[j]
                epXML = ET.tostring(epXML)
                newEp = models.Episode(name = n, episodeNum = epN, xml = epXML)
                newEp.save()
        for feed in allFeeds:
            self.stdout.write('Entered Function')
            get_episodes(feed)
|
986,359 | 3887b5309900796dd9db097fbd2be821463faf56 | import wx
from Shape import Shape
from Tetrominoes import Tetrominoes
class Board(wx.Panel):
    """Tetris playing field: a 10x22 grid driven by a wx.Timer.

    Fixes over the previous version:
    * removeFullLines counted removed rows into a stray `newFullLines`
      variable while testing `numFullLines`, so cleared lines were never
      registered; the count is now stored in `numFullLines`.
    * The row-shift loop read `shapeAt(l, k + 1)` one row past the top of
      the board; it now stops at BoardHeight - 1.
    * newPiece used `/` (float division in Python 3) for the spawn column;
      grid coordinates must be ints, so it now uses `//`.
    * drawSquare: the malformed 7-digit color '#CC66666' is corrected to
      '#CC6666', `pen.setCap` (wrong case, AttributeError) is `SetCap`,
      and the `light`/`dark` highlight palettes were empty lists that
      crashed with IndexError on first use — they are now filled with
      lighter/darker variants of the base colors.
    """
    BoardWidth = 10     # columns
    BoardHeight = 22    # rows
    Speed = 300         # timer tick, ms
    ID_TIMER = 1
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        self.initBoard()
    def initBoard(self):
        """Create game state and wire up paint/key/timer handlers."""
        self.timer = wx.Timer(self, Board.ID_TIMER)
        self.isWaitingAfterLine = False
        self.curPiece = Shape()
        self.nextPiece = Shape()
        self.curX = 0
        self.curY = 0
        self.numLinesRemoved = 0
        self.board = []
        self.isStarted = False
        self.isPaused = False
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.Bind(wx.EVT_TIMER, self.OnTimer, id=Board.ID_TIMER)
        self.clearBoard()
    def shapeAt(self, x, y):
        # Stub: should return the Tetrominoes value stored at (x, y).
        pass
    def setShapeAt(self, x, y, shape):
        # Stub: should store `shape` at board cell (x, y).
        pass
    def squareWidth(self):
        # Stub: pixel width of one board cell.
        pass
    def squareHeight(self):
        # Stub: pixel height of one board cell.
        pass
    def start(self):
        """Begin a new game (no-op while paused)."""
        if self.isPaused:
            return
        self.isStarted = True
        self.isWaitingAfterLine = False
        self.numLinesRemoved = 0
        self.clearBoard()
        self.newPiece()
        self.timer.Start(Board.Speed)
    def pause(self):
        """Toggle pause (only partially implemented)."""
        if not self.isStarted:
            return
    def clearBoard(self):
        """Fill the board with empty cells."""
        for i in range(Board.BoardHeight * Board.BoardWidth):
            self.board.append(Tetrominoes.NoShape)
    def OnPaint(self, event):
        # Stub: repaint the board and the falling piece.
        pass
    def OnKeyDown(self, event):
        # Stub: translate key presses into piece moves.
        pass
    def OnTimer(self, event):
        """Advance the game by one tick, or spawn after a line clear."""
        if event.GetId() == Board.ID_TIMER:
            if self.isWaitingAfterLine:
                self.isWaitingAfterLine = False
                self.newPiece()
            else:
                self.oneLineDown()
        else:
            event.Skip()
    def dropDown(self):
        # Stub: hard-drop the current piece.
        pass
    def oneLineDown(self):
        """Move the piece down one row; land it when blocked."""
        if not self.tryMove(self.curPiece, self.curX, self.curY - 1):
            self.pieceDropped()
    def pieceDropped(self):
        """Freeze the current piece into the board and handle line clears."""
        for i in range(4):
            x = self.curX + self.curPiece.x(i)
            y = self.curY + self.curPiece.y(i)
            self.setShapeAt(x, y, self.curPiece.shape())
        self.removeFullLines()
        if not self.isWaitingAfterLine:
            self.newPiece()
    def removeFullLines(self):
        """Remove every full row, shifting rows above it down."""
        statusbar = self.GetParent().statusbar
        rowsToRemove = []
        for i in range(Board.BoardHeight):
            n = 0
            for j in range(Board.BoardWidth):
                if not self.shapeAt(j, i) == Tetrominoes.NoShape:
                    n = n + 1
            if n == 10:
                rowsToRemove.append(i)
        # Process from the top down so earlier shifts don't invalidate
        # the remaining row indices.
        rowsToRemove.reverse()
        for m in rowsToRemove:
            # Stop at BoardHeight - 1: the topmost row has no row above
            # it to copy from (the old bound read one row out of range).
            for k in range(m, Board.BoardHeight - 1):
                for l in range(Board.BoardWidth):
                    self.setShapeAt(l, k, self.shapeAt(l, k + 1))
        # BUG FIX: the count was previously assigned to `newFullLines`
        # while `numFullLines` (always 0) was tested below.
        numFullLines = len(rowsToRemove)
        if numFullLines > 0:
            self.numLinesRemoved = self.numLinesRemoved + numFullLines
            statusbar.SetStatusText(str(self.numLinesRemoved))
            self.isWaitingAfterLine = True
            self.curPiece.setShape(Tetrominoes.NoShape)
            self.Refresh()
    def newPiece(self):
        """Spawn the queued piece at the top; end the game if it can't fit."""
        self.curPiece = self.nextPiece
        statusbar = self.GetParent().statusbar
        self.nextPiece.setRandomShape()
        # Integer division: grid coordinates must be ints ('/' yields float).
        self.curX = Board.BoardWidth // 2 + 1
        self.curY = Board.BoardHeight - 1 + self.curPiece.minY()
        if not self.tryMove(self.curPiece, self.curX, self.curY):
            self.curPiece.setShape(Tetrominoes.NoShape)
            self.timer.Stop()
            self.isStarted = False
            statusbar.SetStatusText('Game Over')
    def tryMove(self, newPiece, newX, newY):
        """Attempt to place newPiece at (newX, newY).

        Returns True (and commits the move) when all four blocks stay on
        the board and land on empty cells; False otherwise.
        """
        for i in range(4):
            x = newX + newPiece.x(i)
            y = newY + newPiece.y(i)
            if x < 0 or x >= Board.BoardWidth or y < 0 or y >= Board.BoardHeight:
                return False
            if self.shapeAt(x, y) != Tetrominoes.NoShape:
                return False
        self.curPiece = newPiece
        self.curX = newX
        self.curY = newY
        self.Refresh()
        return True
    def drawSquare(self, dc, x, y, shape):
        """Draw one cell at pixel (x, y) with the palette for `shape`."""
        # '#CC6666' fixed (was the 7-digit '#CC66666').
        color = ['#000000', '#CC6666', '#66CC66', '#6666CC',
                 '#CCCC66', '#CC66CC', '#66CCCC', '#DAAA00']
        # Highlight/shadow palettes — previously empty lists, which made
        # light[shape] raise IndexError. Values are lighter/darker
        # variants of the base colors; tune to taste.
        light = ['#000000', '#F89FAB', '#79FC79', '#7979FC',
                 '#FCFC79', '#FC79FC', '#79FCFC', '#FCC600']
        dark = ['#000000', '#803C3B', '#3B803B', '#3B3B80',
                '#80803B', '#803B80', '#3B8080', '#806200']
        pen = wx.Pen(light[shape])
        pen.SetCap(wx.CAP_PROJECTING)  # was pen.setCap: AttributeError
        dc.SetPen(pen)
        # Light edges: left and top.
        dc.DrawLine(x, y + self.squareHeight() - 1, x, y)
        dc.DrawLine(x, y, x + self.squareWidth() - 1, y)
        darkpen = wx.Pen(dark[shape])
        darkpen.SetCap(wx.CAP_PROJECTING)
        dc.SetPen(darkpen)
        # Dark edge: bottom.
        dc.DrawLine(x + 1, y + self.squareHeight() - 1,
                    x + self.squareWidth() - 1, y + self.squareHeight() - 1)
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.SetBrush(wx.Brush(color[shape]))
        dc.DrawRectangle(x + 1, y + 1, self.squareWidth() - 2,
                         self.squareHeight() - 2)
|
986,360 | de5dcbc74a3b73db03156d81c03aaaf39965865e | def icd_chapter_section(condition_code):
try:
icd_section = int(condition_code.split('.')[0])
except:
return 0, 0
if icd_section >= 1 and icd_section <= 139:
chapter = 1
elif icd_section >= 140 and icd_section <= 239:
chapter = 2
elif icd_section >= 240 and icd_section <= 279:
chapter = 3
elif icd_section >= 280 and icd_section <= 289:
chapter = 4
elif icd_section >= 290 and icd_section <= 319:
chapter = 5
elif icd_section >= 320 and icd_section <= 389:
chapter = 6
elif icd_section >= 390 and icd_section <= 459:
chapter = 7
elif icd_section >= 460 and icd_section <= 519:
chapter = 8
elif icd_section >= 520 and icd_section <= 579:
chapter = 9
elif icd_section >= 580 and icd_section <= 629:
chapter = 10
elif icd_section >= 630 and icd_section <= 679:
chapter = 11
elif icd_section >= 680 and icd_section <= 709:
chapter = 12
elif icd_section >= 710 and icd_section <= 739:
chapter = 13
elif icd_section >= 740 and icd_section <= 759:
chapter = 14
elif icd_section >= 760 and icd_section <= 779:
chapter = 15
elif icd_section >= 780 and icd_section <= 799:
chapter = 16
elif icd_section >= 800 and icd_section <= 999:
chapter = 17
# 1. Infectious And Parasitic Diseases
if icd_section >= 1 and icd_section <= 9:
section = 1
elif icd_section >= 10 and icd_section <= 18:
section = 2
elif icd_section >= 20 and icd_section <= 27:
section = 3
elif icd_section >= 30 and icd_section <= 41:
section = 4
elif icd_section >= 42 and icd_section <= 44:
section = 5
elif icd_section >= 45 and icd_section <= 49:
section = 6
elif icd_section >= 50 and icd_section <= 59:
section = 7
elif icd_section >= 60 and icd_section <= 66:
section = 8
elif icd_section >= 70 and icd_section <= 79:
section = 9
elif icd_section >= 80 and icd_section <= 88:
section = 10
elif icd_section >= 90 and icd_section <= 99:
section = 11
elif icd_section >= 100 and icd_section <= 104:
section = 12
elif icd_section >= 110 and icd_section <= 118:
section = 13
elif icd_section >= 120 and icd_section <= 129:
section = 14
elif icd_section >= 130 and icd_section <= 136:
section = 15
elif icd_section >= 137 and icd_section <= 139:
section = 16
# 2. Neoplasms
elif icd_section >= 140 and icd_section <= 149:
section = 17
elif icd_section >= 150 and icd_section <= 159:
section = 18
elif icd_section >= 160 and icd_section <= 165:
section = 19
elif icd_section >= 170 and icd_section <= 176:
section = 20
elif icd_section >= 179 and icd_section <= 189:
section = 21
elif icd_section >= 190 and icd_section <= 199:
section = 22
elif icd_section >= 200 and icd_section <= 209:
section = 23
elif icd_section >= 210 and icd_section <= 229:
section = 24
elif icd_section >= 230 and icd_section <= 234:
section = 25
elif icd_section >= 235 and icd_section <= 238:
section = 26
elif icd_section >= 239 and icd_section <= 239:
section = 27
# 3. endocrine, nutritional and metabolic diseases, and immunity disorders
elif icd_section >= 240 and icd_section <= 246:
section = 28
elif icd_section >= 249 and icd_section <= 259:
section = 29
elif icd_section >= 260 and icd_section <= 269:
section = 30
elif icd_section >= 270 and icd_section <= 279:
section = 31
# 4. Diseases Of The Blood And Blood-Forming Organs
elif icd_section >= 280 and icd_section <= 289:
section = 32
# 5. Mental disorder
elif icd_section >= 290 and icd_section <= 294:
section = 33
elif icd_section >= 295 and icd_section <= 299:
section = 34
elif icd_section >= 300 and icd_section <= 316:
section = 35
elif icd_section >= 317 and icd_section <= 319:
section = 36
# 6. Diseases Of The Nervous System And Sense Organs
elif icd_section >= 320 and icd_section <= 327:
section = 37
elif icd_section >= 330 and icd_section <= 337:
section = 38
elif icd_section >= 338 and icd_section <= 338:
section = 39
elif icd_section >= 339 and icd_section <= 339:
section = 40
elif icd_section >= 340 and icd_section <= 349:
section = 41
elif icd_section >= 350 and icd_section <= 359:
section = 42
elif icd_section >= 360 and icd_section <= 379:
section = 43
elif icd_section >= 380 and icd_section <= 389:
section = 44
# 7. Diseases Of The Circulatory System
elif icd_section >= 390 and icd_section <= 392:
section = 45
elif icd_section >= 393 and icd_section <= 398:
section = 46
elif icd_section >= 401 and icd_section <= 405:
section = 47
elif icd_section >= 410 and icd_section <= 414:
section = 48
elif icd_section >= 415 and icd_section <= 417:
section = 49
elif icd_section >= 420 and icd_section <= 429:
section = 50
elif icd_section >= 430 and icd_section <= 438:
section = 51
elif icd_section >= 440 and icd_section <= 449:
section = 52
elif icd_section >= 451 and icd_section <= 459:
section = 53
# 8. Diseases Of The Respiratory System
elif icd_section >= 460 and icd_section <= 466:
section = 54
elif icd_section >= 470 and icd_section <= 478:
section = 55
elif icd_section >= 480 and icd_section <= 488:
section = 56
elif icd_section >= 490 and icd_section <= 496:
section = 57
elif icd_section >= 500 and icd_section <= 508:
section = 58
elif icd_section >= 510 and icd_section <= 519:
section = 59
# 9. Diseases Of The Digestive System
elif icd_section >= 520 and icd_section <= 529:
section = 60
elif icd_section >= 530 and icd_section <= 539:
section = 61
elif icd_section >= 540 and icd_section <= 543:
section = 62
elif icd_section >= 550 and icd_section <= 553:
section = 63
elif icd_section >= 555 and icd_section <= 558:
section = 64
elif icd_section >= 560 and icd_section <= 569:
section = 65
elif icd_section >= 570 and icd_section <= 579:
section = 66
# 10. Diseases Of The Genitourinary System
elif icd_section >= 580 and icd_section <= 589:
section = 67
elif icd_section >= 590 and icd_section <= 599:
section = 68
elif icd_section >= 600 and icd_section <= 608:
section = 69
elif icd_section >= 610 and icd_section <= 612:
section = 70
elif icd_section >= 614 and icd_section <= 616:
section = 71
elif icd_section >= 617 and icd_section <= 629:
section = 72
# 11. Complications Of Pregnancy, Childbirth, And The Puerperium
elif icd_section >= 630 and icd_section <= 639:
section = 73
elif icd_section >= 640 and icd_section <= 649:
section = 74
elif icd_section >= 650 and icd_section <= 659:
section = 75
elif icd_section >= 660 and icd_section <= 669:
section = 76
elif icd_section >= 670 and icd_section <= 677:
section = 77
elif icd_section >= 678 and icd_section <= 679:
section = 78
# 12. Diseases Of The Skin And Subcutaneous Tissue
elif icd_section >= 680 and icd_section <= 686:
section = 79
elif icd_section >= 690 and icd_section <= 698:
section = 80
elif icd_section >= 700 and icd_section <= 709:
section = 81
# 13. Diseases Of The Musculoskeletal System And Connective Tissue
elif icd_section >= 710 and icd_section <= 719:
section = 82
elif icd_section >= 720 and icd_section <= 724:
section = 83
elif icd_section >= 725 and icd_section <= 729:
section = 84
elif icd_section >= 730 and icd_section <= 739:
section = 85
# 14. Congenital Anomalies
elif icd_section >= 740 and icd_section <= 759:
section = 86
# 15. Certain Conditions Originating In The Perinatal Period
elif icd_section >= 760 and icd_section <= 763:
section = 87
elif icd_section >= 764 and icd_section <= 779:
section = 88
# 16. Symptoms, Signs, And Ill-Defined Conditions
elif icd_section >= 780 and icd_section <= 804:
section = 89
elif icd_section >= 790 and icd_section <= 809:
section = 90
elif icd_section >= 797 and icd_section <= 819:
section = 91
# 17. Injury And Poisoning
elif icd_section >= 800 and icd_section <= 804:
section = 92
elif icd_section >= 805 and icd_section <= 809:
section = 93
elif icd_section >= 810 and icd_section <= 819:
section = 94
elif icd_section >= 820 and icd_section <= 829:
section = 95
elif icd_section >= 830 and icd_section <= 839:
section = 96
elif icd_section >= 840 and icd_section <= 848:
section = 94
elif icd_section >= 850 and icd_section <= 854:
section = 98
elif icd_section >= 860 and icd_section <= 869:
section = 99
elif icd_section >= 870 and icd_section <= 879:
section = 100
elif icd_section >= 880 and icd_section <= 887:
section = 101
elif icd_section >= 890 and icd_section <= 897:
section = 102
elif icd_section >= 900 and icd_section <= 904:
section = 103
elif icd_section >= 905 and icd_section <= 909:
section = 104
elif icd_section >= 910 and icd_section <= 919:
section = 105
elif icd_section >= 920 and icd_section <= 924:
section = 106
elif icd_section >= 925 and icd_section <= 929:
section = 107
elif icd_section >= 930 and icd_section <= 939:
section = 108
elif icd_section >= 940 and icd_section <= 949:
section = 109
elif icd_section >= 950 and icd_section <= 957:
section = 110
elif icd_section >= 958 and icd_section <= 959:
section = 111
elif icd_section >= 960 and icd_section <= 979:
section = 112
elif icd_section >= 980 and icd_section <= 989:
section = 113
elif icd_section >= 990 and icd_section <= 995:
section = 114
elif icd_section >= 996 and icd_section <= 999:
section = 115
return chapter, section
# Whole-code (integer part) -> cause index, for every rule that keyed on
# int(icd_num) in the original chain. These groups are disjoint from the
# decimal-sensitive special cases handled before the lookup.
_CAUSE_BY_WHOLE_CODE = {
    11: 0, 12: 0,                        # Respiratory tuberculosis
    33: 1,                               # Pertussis
    38: 3,                               # Sepsis
    55: 4,                               # Measles
    45: 5,                               # Acute poliomyelitis
    71: 6,                               # Rabies
    70: 7,                               # Viral hepatitis
    150: 8,                              # Malignant neoplasm of esophagus
    151: 9,                              # ... of stomach
    152: 10,                             # ... of small intestine
    153: 11, 154: 11,                    # ... of colon, rectum and anus
    155: 12, 156: 12,                    # ... of liver / intrahepatic bile ducts
    157: 13,                             # ... of pancreas
    161: 14,                             # ... of larynx
    162: 15, 163: 15,                    # ... of trachea, bronchus and lung
    172: 16, 173: 16,                    # ... of skin
    174: 17,                             # ... of breast
    179: 18, 180: 18, 181: 18, 182: 18,  # ... of cervix/uterus
    183: 19,                             # ... of ovary
    185: 20,                             # ... of prostate
    189: 21,                             # ... of kidney and renal pelvis
    188: 22,                             # ... of bladder
    191: 23, 192: 23,                    # ... of CNS / meninges / brain
    200: 24, 201: 24, 202: 24, 203: 24,  # ... of lymphoid/hematopoietic tissue
    249: 25, 250: 25,                    # Diabetes mellitus
    47: 26, 320: 26, 321: 26, 322: 26,   # Meningitis
    332: 27,                             # Parkinson disease
    294: 28,                             # Alzheimer disease
    410: 34,                             # Acute myocardial infarction
    434: 35,                             # Cerebral infarction
    493: 37,                             # Asthma
    490: 38, 491: 38,                    # Bronchitis
    492: 39, 518: 39, 998: 39, 958: 39,  # Emphysema (grouping kept from original)
    584: 40,                             # Renal failure
    580: 41,                             # Acute glomerulonephritis / nephrotic syndrome
}
def cause_of_death_disease(condition_code):
    """Map an ICD-9 code string to a cause-of-death index, or -1.

    Decimal-sensitive rules (e.g. 493.92 "acute exacerbation of asthma")
    are checked before the whole-code table so they are not shadowed by
    their parent categories.
    """
    try:
        icd_num = float(condition_code)
    except (TypeError, ValueError):
        return -1
    if icd_num in (34, 35) or icd_num == 41.09:
        return 2   # Streptococcal sore throat, scarlatina, and erysipelas
    if 402.1 <= icd_num < 402.2:
        return 29  # Benign hypertensive heart disease
    if icd_num == 402.01:
        return 30  # Malignant hypertensive heart disease with heart failure
    if icd_num == 402.00:
        return 31  # Malignant hypertensive heart disease without heart failure
    if icd_num == 403.91:
        return 32  # Hypertensive renal disease with renal failure
    if icd_num == 403.93:
        return 33  # Hypertensive heart and renal disease with heart failure
    if icd_num == 493.92:
        return 36  # Acute exacerbation of asthma
    if icd_num == 494.1:
        return 42  # Acute exacerbation of bronchiectasis
    # BUG FIX: this check used to come AFTER the 490/491 bronchitis rule,
    # so 491.21 could never return 43 — it was swallowed as plain bronchitis.
    if icd_num == 491.21:
        return 43  # Acute exacerbation of chronic obstructive airways disease
    return _CAUSE_BY_WHOLE_CODE.get(int(icd_num), -1)
def other_icd9(condition_code):
    """Classify a supplementary ICD-9 code ("V" or "E" prefix) into a bucket index.

    condition_code: string such as "V58.11" or "E850".
    Returns a small integer bucket, or -1 when the code is unrecognised.

    NOTE(review): the E-code branch starts numbering at 25, colliding with
    buckets 25/26 returned by the V-code branch (Retained Foreign Body /
    Multiple Gestation) -- confirm whether both prefixes are meant to share
    one index space.
    """
    section_str = condition_code.split('.')[0]
    if not section_str:
        return -1
    prefix = section_str[0]
    if prefix == 'V':
        section = int(section_str[1:])
        # Specific codes are matched before the broad ten-wide section
        # ranges further down, so the order of these tests is significant.
        if section == 9:
            return 0  # personal history of cancer
        if section == 10:
            return 1  # personal history of cancer
        if section == 16:
            return 2  # family history of cancer
        if section == 46:
            return 3  # dependence on machines and devices
        numeric = float(condition_code[1:])  # e.g. "V58.11" -> 58.11
        if 58.1 <= numeric < 58.2:
            return 4  # encounter for antineoplastic chemotherapy and immunotherapy
        if numeric == 58.69:
            return 5  # high-risk medications
        if section == 59:
            return 6  # donors
        if numeric == 87.46:
            return 7  # history of immunosuppressive therapy
        if numeric == 87.43:
            return 8  # history of estrogen therapy
        if 62.1 <= numeric < 62.2 or section == 87:
            return 9  # personal exposures and history presenting hazards to health
        if numeric == 49.83:
            return 10  # on waiting list for organ transplant
        if 1 <= section <= 9:
            return 11  # potential health hazards related to communicable diseases
        if 10 <= section <= 19:
            return 12  # potential health hazards related to personal and family history
        if 20 <= section <= 29:
            return 13  # health services in circumstances related to reproduction and development
        if 30 <= section <= 39:
            return 14  # liveborn infants according to type of birth
        if 40 <= section <= 49:
            return 15  # persons with a condition influencing their health status
        if 50 <= section <= 59:
            return 16  # health services for specific procedures and aftercare
        if 60 <= section <= 69:
            return 17  # health services in other circumstances
        if 70 <= section <= 82:
            return 18  # no reported diagnosis during examination and investigation
        if 83 <= section <= 84:
            return 19  # genetics
        if section == 86 and numeric == 86:
            return 20  # estrogen receptor status +
        if section == 86 and numeric == 86.1:
            return 21  # estrogen receptor status -
        if section == 87:
            return 22  # other specified exposures (unreachable: bucket 9 matches V87 first)
        if section == 88:
            return 23  # acquired absence of other organs and tissue
        if section == 89:
            return 24  # other suspected conditions not found
        if section == 90:
            return 25  # retained foreign body
        if section == 91:
            return 26  # multiple gestation placenta status
    if prefix == 'E':
        section = int(section_str[1:])
        if 800 <= section < 850:
            return 25  # traffic accidents
        if 850 <= section < 870:
            return 26  # accidental poisoning
        if 870 <= section < 880:
            return 27  # misadventures to patients during surgical and medical care
        if 880 <= section < 929:
            return 28  # accidents
        if section == 929:
            return 29  # late effects of accidental injury
        if 930 <= section < 950:
            return 30  # adverse effects in therapeutic use
        if 950 <= section < 960:
            return 31  # suicide and self-inflicted injury
        if 960 <= section < 970:
            return 32  # homicide
    return -1
def proc_cpt4(proc_code):
    """Map a CPT-4 procedure code to a coarse section index (0-29).

    proc_code: numeric CPT code as int or string (leading zeros allowed,
    e.g. "00100" for anesthesia).  Returns 30 when the code is non-numeric
    or falls outside every known section.

    Bug fix: Evaluation and Management (99201-99499) is now tested before
    the enclosing Medicine range (90281-99756); previously the broader
    range shadowed it, so index 29 was unreachable.
    """
    try:
        num_proc = int(proc_code)
    except (TypeError, ValueError):
        return 30  # non-numeric codes (alphanumeric modifiers, HCPCS, ...)
    # (low, high, section) ranges, inclusive on both ends; checked in order,
    # so narrower carve-outs must precede ranges that contain them.
    sections = (
        (100, 1999, 0),       # Anesthesia
        (10004, 10021, 1),    # Fine Needle Aspiration Biopsy Procedures
        (10030, 19499, 2),    # Surgical Procedures on the Integumentary System
        (20100, 29999, 3),    # Surgical Procedures on the Musculoskeletal System
        (30000, 32999, 4),    # Surgical Procedures on the Respiratory System
        (33016, 37799, 5),    # Surgical Procedures on the Cardiovascular System
        (39000, 39599, 6),    # Surgical Procedures on the Mediastinum and Diaphragm
        (38100, 38999, 7),    # Surgical Procedures on the Hemic and Lymphatic Systems
        (40490, 49999, 8),    # Surgical Procedures on the Digestive System
        (50010, 53899, 9),    # Surgical Procedures on the Urinary System
        (54000, 55899, 10),   # Surgical Procedures on the Male Genital System
        (55920, 55920, 11),   # Reproductive System Procedures
        (55970, 55980, 12),   # Intersex Surgery
        (56405, 58999, 13),   # Surgical Procedures on the Female Genital System
        (59000, 59899, 14),   # Maternity Care and Delivery
        (60000, 60699, 15),   # Surgical Procedures on the Endocrine System
        (61000, 64999, 16),   # Surgical Procedures on the Nervous System
        (65091, 68899, 17),   # Eye and Ocular Adnexa
        (69000, 69979, 18),   # Auditory System
        (69990, 69990, 19),   # Operating Microscope Procedures
        (70010, 76499, 20),   # Diagnostic Radiology (Diagnostic Imaging)
        (76506, 76999, 21),   # Diagnostic Ultrasound
        (77001, 77022, 22),   # Radiologic Guidance
        (77046, 77067, 23),   # Breast, Mammography
        (77071, 77086, 24),   # Bone/Joint Studies
        (77261, 77799, 25),   # Radiation Oncology Treatment
        (78012, 79999, 26),   # Nuclear Medicine
        (80047, 89398, 27),   # Pathology and Laboratory
        (99201, 99499, 29),   # Evaluation and Management (carve-out of Medicine)
        (90281, 99756, 28),   # Medicine Services and Procedures
    )
    for low, high, section in sections:
        if low <= num_proc <= high:
            return section
    return 30
|
986,361 | 69405caea8231c1b1c210a3463c1173cbb8c3566 | import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import time
import os
import re
# Output file for all scraped text; closed at the bottom of the script.
f = open("../data/johnson.txt", "w")
# Run Chrome headless so the scrape works without a display.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
driver = webdriver.Chrome(ChromeDriverManager().install(), options = chrome_options)
def scraping(url):
    """Load *url* in the headless browser and return the rendered DOM as soup.

    Logs the visit to stdout and to the shared output file ``f``.  The
    browser-rendered HTML is used so JavaScript-built content is included.
    """
    banner = "Scraping from " + url
    print(banner)
    f.write(banner + "\n\n\n")
    driver.get(url)
    time.sleep(1)  # give client-side scripts a moment to render
    page_html = driver.execute_script("return document.documentElement.outerHTML")
    return BeautifulSoup(page_html, 'html.parser')
def findHref(data):
    """Collect the href of every <a> inside each element of *data*.

    Appends to the module-level ``links`` list shared with the main script.
    NOTE(review): this helper is never called below -- the main script
    inlines a variant that only takes the first <a> per element.
    """
    # Iterate elements directly instead of indexing via range(len(...)).
    for element in data:
        for link in element.find_all('a'):
            links.append(link.get('href'))
# scraping from https://www.johnson-county.com/dept_health.aspx?id=27320
url = "https://www.johnson-county.com/dept_health.aspx?id=27320"
soup = scraping(url)
data = soup.select("p")
links = []
# Collect the first <a> href out of each paragraph.
for i in range(len(data)):
    for link in data[i].select('a:first-child'):
        links.append(link.get('href'))
print(links)
# make directory for pdfs
# NOTE(review): os.mkdir raises FileExistsError when the folder already
# exists (e.g. on a re-run); os.makedirs(path, exist_ok=True) would not.
path = "../data/" + "johnson-PDF"
os.mkdir(path)
# start scraping for pdfs
def pdfs(link):
    """Download *link* if it serves a PDF, saving it under ../data/johnson-PDF/.

    Non-PDF links are just logged.  Failures are now logged and skipped so
    one bad link does not abort the crawl; previously a bare
    ``except: pass`` silently swallowed every error, including
    KeyboardInterrupt.
    """
    print("Scraping from " + link)
    f.write("Scraping from " + link + "\n\n\n")
    try:
        # NOTE(review): verify=False disables TLS certificate checking;
        # confirm this is intentional for these county sites.
        r = requests.get(link, verify = False, stream = True)
        # .get avoids a KeyError when the server omits Content-Type.
        if r.headers.get('Content-Type') == "application/pdf":
            f.write("A PDF HERE\n\n\n")
            print("pdf " + link)
            d = r.headers['Content-Disposition']
            # filename="..." inside the Content-Disposition header.
            pre_title = re.findall("filename=(.+)", d)[0]
            title = re.findall("\"(.*?)\"", pre_title)[0]
            with open("../data/johnson-PDF/" + title, "wb") as pdf:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:
                        pdf.write(chunk)
        else:
            print("----" + link)
    except Exception as exc:  # keep crawling, but say why this link failed
        print("failed " + link + ": " + str(exc))
# Download every collected link that turns out to be a PDF.
for link in links:
    print("processing...")
    print(link)
    pdfs(link)
# scraping from https://coronavirus-johnsoncounty.hub.arcgis.com/pages/frequently-asked-questions
url = "https://coronavirus-johnsoncounty.hub.arcgis.com/pages/frequently-asked-questions"
soup = scraping(url)
data = soup.find_all("div", class_= "markdown-card ember-view")
for i in range(len(data)):
    f.write(data[i].text)
    f.write("\n\n\n")
# scraping from https://coronavirus-johnsoncounty.hub.arcgis.com/"
url = "https://coronavirus-johnsoncounty.hub.arcgis.com/"
soup = scraping(url)
# NOTE(review): "ember63" is an auto-generated Ember view id and is likely
# to change between deployments -- confirm the selector still matches.
data = soup.find_all("div", id= "ember63")
for i in range(len(data)):
    f.write(data[i].text)
    f.write("\n\n\n")
f.close()
driver.quit()
print("finished")
|
986,362 | 704e77294eb932ee71af7ee6d8d15c7b3f37a40a | import requests
import random
from retrying import retry
from torent.fake_user_agent import headers
from lxml import etree
# Pick request headers at random; adapt to other needs as required.
# (Original comment translated from Chinese.)
def user_agent():
    """Build request headers with a randomly chosen User-Agent string."""
    return {
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "User-Agent": random.choice(headers),
    }
# Return the page decoded and parsed as an lxml etree object.
# (Original comment translated from Chinese.)
@retry(stop_max_attempt_number=4)
def get_etree_page(url):
    """GET *url* and return ``etree.HTML`` of the decoded body.

    Bug fix: failures used to be printed and the function then called
    itself unconditionally, so a persistently failing URL recursed with no
    base case and the @retry decorator never saw an error.  Now any
    failure raises and @retry performs up to 4 attempts.
    """
    print("parse", url)
    response = requests.get(url, headers=user_agent(), timeout=6)
    if response.status_code != 200:
        raise RuntimeError("bad status %s for %s" % (response.status_code, url))
    return etree.HTML(response.content.decode())
# Return the raw response body as bytes.
# (Original comment translated from Chinese.)
def get_binary_page(url):
    """Fetch *url* and return the raw body, or None on any failure."""
    request_headers = user_agent()
    try:
        print('Parsing', url)
        resp = requests.get(url, headers=request_headers, timeout=3)
    except Exception as err:
        print(err)
        return None
    if resp.status_code == 200:
        return resp.content
    return None
# Return the response as plain (requests-decoded) text.
# (Original comment translated from Chinese.)
def get_normal_page(url):
    """Fetch *url* and return the body as text, or None on any failure."""
    request_headers = user_agent()
    try:
        resp = requests.get(url, headers=request_headers, timeout=3)
        if resp.status_code == 200:
            return resp.text
    except Exception as err:
        print(err)
    return None
|
986,363 | 0f0fafcf1c269c681b7e880fbb3e681ce8779012 | '''
Created on Apr 6, 2016
@author: dj
'''
from flask import Flask, render_template, request
# Serve static files from the current directory at the site root.
app = Flask(__name__, static_folder='.', static_url_path='')
@app.route('/')
def home():
    """Root route: return and log a plain greeting."""
    greeting = "Hello, World!"
    print("message =", greeting)
    return greeting
@app.route('/index')
def index():
    '''/index: serve the static index.html from the current directory.'''
    return app.send_static_file("index.html")
@app.route('/echo/<thing>')
def echo(thing):
    """Echo the URL path segment back in a short sentence."""
    reply = "This is " + thing + "!"
    print("message =", reply)
    return reply
@app.route('/display/<thing>')
def display(thing):
    '''/display/<thing>: render the template with the path segment.'''
    print("thing =", thing)
    # "flask1.html" is inside sub-folder "./templates".
    return render_template("flask1.html", thing=thing)
@app.route('/retrieve')
def retrieve():
    '''GET /retrieve?thing=<>: render the template with the query arg.'''
    thing = request.args.get('thing')
    print("thing =", thing)
    # "flask1.html" is inside sub-folder "./templates".
    return render_template("flask1.html", thing=thing)
if __name__ == '__main__':
    # Bug fix: Flask's run() expects lowercase host/port keyword arguments;
    # the old HOST=/PORT= spellings were forwarded to Werkzeug's
    # run_simple() as unknown options and raised a TypeError.  Starting the
    # server is also moved under the import guard (which was previously an
    # empty `pass`) so importing this module is side-effect free.
    app.run(host='localhost', port=8002, debug=True)
|
986,364 | 6d61bd789514057a5aca8465143f64bfacfb7249 | from random import *
# Draw a random three-digit number and print each decimal digit.
c = randint(100, 999)
hundreds, tens, units = c // 100, c // 10 % 10, c % 10
print('Получено число', c)
print('сотни:', hundreds)
print('десятки:', tens)
print('единицы:', units)
|
986,365 | 692cf4e37647ebc38f67f0f1db54669677ef6e6f | import pytagcloud
# Build a word-cloud image from the "word count" pairs in count.txt.
# (with closes the file automatically -- original comment translated.)
with open('count.txt', 'r', encoding='utf-8') as f:
    input_data = f.readlines()
# Keep only words of two or more characters, paired with their int counts.
data = [(word, int(count))
        for word, count in (line.split() for line in input_data)
        if len(word) > 1]
print(data)
tag_list = pytagcloud.make_tags(data, maxsize=100)
pytagcloud.create_tag_image(tag_list,"dong1.png",
        size=(900, 600), fontname="Korean", rectangular=False)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread("dong1.png")
imgplot = plt.imshow(img)
plt.axis("off")
plt.show()
986,366 | 90dec008232379bf8cfdb21791fb6eb03512b675 | from . import faceid_source
|
986,367 | dea4bf0bc079d5bab1fdd9709eba2fe8a431be9d | #!python3
import os
import json
import requests
from pprint import pprint

# Load the character dump that sits next to this script.
base_folder = os.path.dirname(__file__)
filename = os.path.join(base_folder, 'mount-data.json')
with open(filename, 'r', encoding='utf-8') as fin:
    data = json.load(fin)

pprint(type(data))
print()
print(data['class'])

# Every collected mount name.
for item in data['mounts']['collected']:
    pprint(item['name'])

# All mounts that can fly, kept as full dicts for inspection below.
is_flying = [mount for mount in data['mounts']['collected'] if mount['isFlying']]
print(len(is_flying))
for i in is_flying:
    print(i)
for i in is_flying:
    print(i['name'])
|
986,368 | 10ac119a13af729ea52dcb203945c44093fca104 | # from core import schedule, makespan, Event, insert_sendrecvs
from .core_heft import schedule, makespan, Event, insert_sendrecvs
|
986,369 | bb5cf1ba51f77eb0d905c3cdf724efc72a746063 | from ..utils.objects.metrics import Metrics
import torch
import time
from ..utils import log as logger
class Train(object):
    """Runs a training loop of ``epochs`` epochs over a step object.

    ``step`` must provide train()/eval() and is passed to the loader-step
    callbacks, which run one epoch and return a stats object.
    """
    def __init__(self, step, epochs, verbose=True):
        # NOTE(review): ``verbose`` is stored but never read in this class.
        self.epochs = epochs
        self.step = step
        self.history = History()
        self.verbose = verbose
    def __call__(self, train_loader_step, val_loader_step=None, early_stopping=None):
        """Train, optionally validating each epoch and stopping early.

        val_loader_step: optional callable run without gradients each epoch;
        its stats object must expose ``loss()``.
        early_stopping: optional callable taking the validation loss and
        returning True when training should stop.
        """
        for epoch in range(self.epochs):
            self.step.train()
            train_stats = train_loader_step(self.step)
            # Epochs are recorded 1-based in the history.
            self.history(train_stats, epoch + 1)
            if val_loader_step is not None:
                # No gradients needed during validation.
                with torch.no_grad():
                    self.step.eval()
                    val_stats = val_loader_step(self.step)
                    self.history(val_stats, epoch + 1)
                    print(self.history)
                if early_stopping is not None:
                    valid_loss = val_stats.loss()
                    # early_stopping needs the validation loss to check if it has decreased,
                    # and if it has, it will make a checkpoint of the current model
                    if early_stopping(valid_loss):
                        self.history.log()
                        return
            else:
                print(self.history)
        self.history.log()
def predict(step, test_loader_step):
    """Evaluate on the test loader, print/log metrics, return accuracy."""
    print(f"Testing")
    with torch.no_grad():
        step.eval()
        results = test_loader_step(step)
        metrics = Metrics(results.outs(), results.labels())
    print(metrics)
    metrics.log()
    return metrics()["Accuracy"]
class History:
    """Accumulates per-epoch stats objects and formats/logs the latest epoch."""

    def __init__(self):
        self.history = {}          # epoch number -> list of stats objects
        self.epoch = 0             # most recently recorded epoch
        self.timer = time.time()   # start time, reported by __str__

    def __call__(self, stats, epoch):
        """Record *stats* under *epoch* and remember it as the current epoch."""
        self.epoch = epoch
        self.history.setdefault(epoch, []).append(stats)

    def __str__(self):
        header = f"\nEpoch {self.epoch};"
        body = ' - '.join(f"{entry}" for entry in self.current())
        elapsed = f"Time: {(time.time() - self.timer)}"
        return f"{header} - {body} - {elapsed}"

    def current(self):
        """Stats recorded for the most recent epoch (KeyError if none yet)."""
        return self.history[self.epoch]

    def log(self):
        """Send a one-line summary of the current epoch to the shared logger."""
        summary = ' - '.join(f'({entry})' for entry in self.current())
        logger.log_info("history", f"(Epoch: {self.epoch}) {summary}")
|
986,370 | dd48ecb7e0ee12d8ff875a16ea5457b296b201c7 | #!/usr/bin/env python
# coding: utf-8
# In[3]:
from flask import Flask,redirect,url_for,render_template,request
import os
import shutil
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pycontractions import Contractions
import re
import scipy.stats as st
from get_top_intent import top_intent
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
# Quieten TensorFlow, load the intent model, and set up the YouTube clients.
tf.get_logger().setLevel('ERROR')
saved_model_path = 'model/checkpoint_4'
model = tf.saved_model.load(saved_model_path)
contraction = Contractions(api_key="glove-twitter-100")
# Bug fix: the two placeholders below had no right-hand side at all, so the
# module did not even parse.  Fill in real values before running.
api_key = ""  # enter your api key here
client_secrets_file = ""  # enter client secret file name (.json file)
youtube = build('youtube', 'v3', developerKey=api_key)
scopes = ["https://www.googleapis.com/auth/youtube.force-ssl"]
# OAuth flow for the comment-reply endpoint (opens a browser on first run).
flow = InstalledAppFlow.from_client_secrets_file(
    client_secrets_file, scopes)
credentials = flow.run_local_server(port=8080, prompt="consent")
api_service_name = "youtube"
api_version = "v3"
youtube_comment = googleapiclient.discovery.build(
    api_service_name, api_version, credentials=credentials)
# Code for flask application
app=Flask(__name__)
# videoId -> prediction dataframe, so repeated requests skip re-inference.
cached_prediction={}
@app.route("/",methods=["POST","GET"])
def get_user_request():
    """GET: show the input form; POST: rank a video's comments by intent.

    Stores the ranked (comment, commentId) pairs in the module-level
    ``intent_result`` for the /top_intent.html page.
    """
    if request.method== "POST":
        url=request.form['url']
        # NOTE(review): assumes a "watch?v=..." URL; short youtu.be links
        # have no "v=" and would raise IndexError here -- confirm inputs.
        videoId=url.split("v=")[1]
        user_intent=request.form['intent'].lower()
        # K: how many top comments to return; key_phrase/filter_words
        # further narrow the selection (handled inside top_intent).
        K=request.form["K"]
        key_phrase=request.form["keyPhrase"]
        filter_words=request.form["filter"]
        # Map the human-readable form labels onto the model's class names.
        if(user_intent=="criticism/others"):
            user_intent='misc'
        elif(user_intent=="idea/suggestion/advice"):
            user_intent="suggestion"
        elif(user_intent=="request/asking"):
            user_intent="request"
        intent=top_intent(url,user_intent,model,contraction,key_phrase,filter_words=filter_words,k=K)
        global intent_result
        # Reuse cached predictions for a video we have already processed.
        if not videoId in cached_prediction:
            pred_df=intent.prediction()
            cached_prediction[videoId]=pred_df
            intent_result=intent.top_intent(pred_df)
        else:
            intent_result=intent.top_intent(cached_prediction[videoId])
        # Keep only the columns the template renders.
        intent_result=intent_result.loc[:,['comments','commentId']].values
        return redirect(url_for("intent_results"))
    else:
        return render_template("youtube_input.html")
@app.route("/top_intent.html",methods=["POST","GET"])
def intent_results():
    """GET: list the ranked comments; POST: pick one comment to reply to."""
    global commentId
    if request.method == "POST":
        # The posted form's single field name carries the chosen comment id.
        form_fields = dict(request.form)
        commentId = list(form_fields.keys())[0]
        print(commentId)
        return redirect(url_for("reply"))
    return render_template("top_intent.html",intent=intent_result)
@app.route("/reply",methods=["POST","GET"])
def reply():
    """Render the reply form; on POST, publish the reply to YouTube."""
    if request.method != "POST":
        return render_template("reply.html")
    reply_text = request.form["replyText"]
    body = {
        "snippet": {
            "parentId": f"{commentId}",
            "textOriginal": f"{reply_text}"
        }
    }
    # Insert the reply as a child of the previously selected comment.
    response = youtube_comment.comments().insert(part="snippet", body=body).execute()
    return redirect(url_for("intent_results"))
# Run the Flask app with the debugger/reloader disabled.
if __name__=="__main__":
    app.run(debug=False)
|
986,371 | 58b1bb735826a4192c440bdedaca5f62a7978751 | #!/usr/bin/python3
""" 4-append_write.py """
def append_write(filename="", text=""):
    """Append *text* to a UTF-8 text file; return the character count written.

    Args:
        filename (str, optional): path of the file to append to. Defaults to "".
        text (str, optional): string added at the end of the file. Defaults to "".
    Returns:
        int: number of characters added.
    """
    with open(filename, mode='a', encoding='utf-8') as handle:
        written = handle.write(text)
    return written
|
986,372 | 9f118f942d63bf862a0439dd57d25ab95c6b465c | from ART2 import ART2
import numpy as np
# Single learning trial of an ART2 network on one 2-D input vector.
input_data = np.array([0.8, 0.6])
# n input nodes, m=2 categories; rho/theta presumably vigilance and
# noise-suppression threshold -- confirm against the ART2 class docs.
nn = ART2(n=len(input_data), m=2, rho=0.9, theta=0.1)
nn.start_logging(to_console=True, to_file=False)
nn.learning_trial(idata=input_data)
nn.stop_logging()
|
986,373 | 12be5dafa9832e92ca1edcce3097ee4965d67e39 |
print("__main__.py") |
986,374 | 5706d11374baccfb691504352becd52d21e2f7b1 | import urllib.request
import requests
import PyPDF2
from bs4 import BeautifulSoup
import datetime
url = 'https://www.icsi.edu/JOBOPPORTUNITIES.aspx'
page = urllib.request.urlopen(url).read()
soup = BeautifulSoup(page, "lxml")
# Three job tables on the page, located by their DNN module ids.
job_opportunity_for_members = soup.select(
    '#dnn_ctr13100_HtmlModule_lblContent > table > tbody')[0]
job_opportunity_for_members_2_years_above = soup.select(
    '#dnn_ctr12264_HtmlModule_lblContent > ul > table > tbody')[0]
job_opportunity_from_public_advt = soup.select(
    '#dnn_ctr13079_HtmlModule_lblContent > table > tbody')[0]
# Only the first table is processed below; the other two are collected
# but currently unused.
jobs = [job_opportunity_for_members]
def download_file(url):
    """Stream *url* into /tmp, named after the last path segment; return the path."""
    local_filename = '/tmp/' + url.split('/')[-1]
    print('Downloading File...', local_filename)
    response = requests.get(url, stream=True)
    with open(local_filename, 'wb') as out:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:  # skip keep-alive chunks
                out.write(chunk)
    print('Downloaded')
    return local_filename
def post_to_jobs(raw, tds, pdf_url, d):
    """Create a forum topic for one job row and schedule it to auto-close.

    raw: pre-formatted Markdown body; tds: the row's <td> cells (company,
    location, upload date, expiry date); d: parsed expiry datetime.
    """
    # NOTE(review): the API key is committed in plain text here -- rotate it
    # and load it from an environment variable instead.
    url = 'https://forum.csclub.co/posts'
    payload = {
        "api_key": "ef759d736fc5495f16fd29eef742e822cd1203c81a9e871eef863b3dc10aad2f",
        "api_username": "vaibhavmule",
        "title": "{} is Looking for Company Secretary in {}".format(
            tds[0].text.strip('\n'), tds[1].text.strip('\n')),
        "raw": raw,
        "category": 5, # jobs category
    }
    r = requests.post(url, data=payload)
    # NOTE(review): topic id 1107 is hard-coded; presumably it should be the
    # id of the topic just created (from r's response body) -- confirm.
    auto_close_on_expiry_date(1107, d)
    print('response', r.text, r.status_code)
def auto_close_on_expiry_date(t, d):
    """Set a topic timer so forum topic *t* auto-closes at datetime *d*."""
    url = 'https://forum.csclub.co/t/{}/timer'.format(t)
    payload = {
        # NOTE(review): same plain-text API key as post_to_jobs -- rotate it.
        "api_key": "ef759d736fc5495f16fd29eef742e822cd1203c81a9e871eef863b3dc10aad2f",
        "api_username": "vaibhavmule",
        # Close time formatted with a fixed +05:30 (IST) offset.
        'time': d.strftime('%Y-%m-%d %H:%M+05:30'),
        'status_type': 'close',
    }
    r = requests.post(url, data=payload)
    print('response', r.text, r.status_code)
# Walk each job table row (skipping the header), parse its expiry date,
# and post any still-open listing to the forum.
for job in jobs:
    for tr in job.find_all('tr')[1:]:
        tds = tr.find_all('td')
        # Expiry date appears in either dd-mm-YYYY or dd/mm/YYYY form.
        try:
            d = datetime.datetime.strptime(
                ' '.join(tds[3].text.split()), '%d-%m-%Y')
        except ValueError:
            d = datetime.datetime.strptime(
                ' '.join(tds[3].text.split()), '%d/%m/%Y')
        if d > datetime.datetime.now():
            # Pull the PDF announcement (first or first two pages) and
            # split its flattened text on ':' to slice the labelled parts.
            pdf_url = tds[0].a['href']
            file_name = download_file(pdf_url)
            pdfFileObj = open(file_name, 'rb')
            pdfReader = PyPDF2.PdfFileReader(pdfFileObj, strict=False)
            pageObj = pdfReader.getPage(0)
            if pdfReader.numPages == 2:
                pageObj1 = pdfReader.getPage(1)
                text = pageObj.extractText() + pageObj1.extractText()
            else:
                text = pageObj.extractText()
            text = ' '.join(text.split()).split(':')
            try:
                raw = '{}\n\n**Job Description:**{}\n\n**Eligibility:**{}\n\n**Requirement:**{}\n\n**Salary Details:**{}\n\n**Job location:**{}\n\n**Apply at:**{}\n\n**Expiry Date of Job Posting:** {}\n\n**Source:** Uploaded on {} at [ICSI]({})"'.format(
                    text[1].replace(' Job Description', ' ').lstrip(),
                    text[2].replace(' Eligibility', ' '),
                    text[3].replace(' Requirement', ' '),
                    text[4].replace(' Salary Details', ' '),
                    text[5].replace(' Job location ', ' '),
                    text[6].replace(' Apply at', ' '),
                    text[7],
                    tds[3].text.strip('\n'),
                    tds[2].text.strip('\n'),
                    pdf_url)
                post_to_jobs(raw, tds, pdf_url, d)
            except IndexError:
                # PDFs without the expected labelled sections are skipped.
                print('error')
"""
https://forum.csclub.co/t/875/timer
time:2018-03-13 08:39+05:30
status_type:close
"""
|
986,375 | edb66a1cea2afbf08d30c68b070979236e1cb68a |
# Teaching skeleton of the iterator protocol (not runnable: the "…" lines
# are placeholders left by the author).
class Iterateur:
    def __init__(self):
        pass
    def __iter__(self):
        # An iterator returns itself from __iter__.
        pass
        return self
    def __next__(self):
        …
        return xx # the next value
        …
        raise StopIteration
for i in iterateur(x): |
986,376 | 30d3ab4d6f4c2df09caae68c8ea93e953fd71f69 | """Implementation of ``cookbot`` script."""
from optparse import OptionParser
from settings import ConfigParserReader
from context import Context
class Command(object):
    """Shell command wrapper: parses argv, loads the recipe, then runs it."""

    def __init__(self):
        """Initialise an empty command (populate via parse_shell_args)."""
        self.cmd = None        # The command to invoke.
        self.cmd_args = []     # List of arguments for the command.
        self.cfg = None
        self.environment = None
        self.machine = None
        self.component = None

    def __call__(self):
        """Execute the parsed command against a fresh Context."""
        return self.recipe.execute(Context(), self.cmd, self.cmd_args)

    def parse_shell_args(self, *args, **kwargs):
        """Populate recipe/cmd/cmd_args from :py:meth:`OptionParser.parse_args`."""
        configuration_file = 'etc/cookbot.cfg'
        parser = OptionParser()
        options, arguments = parser.parse_args(*args, **kwargs)
        # The configuration is loaded before validating the command line,
        # matching the original behaviour (a bad config fails even when no
        # command was given).
        with open(configuration_file) as configuration_fp:
            recipe = ConfigParserReader(configuration_fp).parse()
        if not arguments:
            parser.error('Missing command to run.')
        self.recipe = recipe
        self.cmd = arguments[0]
        self.cmd_args = arguments[1:] if len(arguments) > 1 else []
def main():
    """Parse shell arguments into a Command, then execute it."""
    cmd = Command()
    cmd.parse_shell_args()
    cmd()
|
986,377 | ffb222ce2d8f87481725c378acc2649dfdbcff5e | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from helpers import db
app = FastAPI()
# Wide-open CORS: the dashboard front-end is served from another origin.
origins = ['*']
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_methods=["*"],
    allow_headers=["*"]
)
@app.get("/")
async def root():
    # Liveness check.
    return { "message": "Hello world" }
@app.get("/trending")
async def trending():
    """Tech usage counts joined with category metadata, most frequent first."""
    # NOTE(review): "HAVING count(techs.tech)" keeps every non-empty group;
    # confirm whether a real threshold (e.g. > 1) was intended.
    statement = """
    SELECT
    techs.tech, count(*) count,
    tech_definitions.category,
    tech_definitions.tech_group,
    tech_definitions.variation_of
    FROM techs
    LEFT JOIN tech_definitions ON lower(techs.tech) = lower(tech_definitions.name)
    GROUP BY techs.tech
    HAVING count(techs.tech)
    ORDER BY count DESC;
    """
    results = db.cursor.execute(statement).fetchall()
    return { "data": results }
@app.get('/jobdensity')
async def jobdensity():
    """Job-posting counts grouped by municipality (kommune)."""
    statement = """
    SELECT locations.kommune, count(*) as count FROM locations GROUP BY locations.kommune;
    """
    rows = db.cursor.execute(statement).fetchall()
    return { "data": rows }
@app.get('/allTechs')
async def allTags():
    # NOTE(review): the route says "allTechs" but the function is named
    # "allTags" -- rename for consistency (FastAPI keys off the route).
    statement = """
    SELECT tech, count(*) as count FROM techs GROUP BY tech ORDER BY count DESC;
    """
    results = db.cursor.execute(statement).fetchall()
    return { "data": results }
@app.get('/locations')
async def getLocations():
    # NOTE(review): the SQL statement is empty, so this endpoint raises on
    # every request -- fill in the query or remove the route.
    statement = """
    """
    results = db.cursor.execute(statement).fetchall()
    return { "data": results }
986,378 | 296694ad5c10e9323575d0cbd8a79c7beca6a275 | import requests
import json
import time
import threading
from collections import deque
import os
class MatchFetcher:
    """Gathers recent ranked matches from every region, one thread per region.

    Results are merged into ``total_matches`` as {region: {match_id: details}}.
    (Python 2 module: the worker threads use print statements.)
    """
    # Query-string prefix for ranked match lists; the epoch-ms begin time
    # is appended by the worker threads.
    RANKED_PARAM = "?rankedQueues=TEAM_BUILDER_DRAFT_RANKED_5x5&beginTime="
    # These are all of the LoL regions. Deleting from this list and the corresponding
    # SUMMONERS_BY_REGION dictionary will reduce the number of servers being queried.
    # Current, the furthest regions are commented out as their response times are
    # considerably longer. For a full query, add them back in.
    REGIONS = {"BR": "br", "EUNE": "eune", "EUW": "euw", "JP": "jp", "KR": "kr",
        "LAN": "lan", "LAS": "las", "NA": "na", "OCE": "oce", "RU": "ru", "TR": "tr"}
    # Using these summoners to start our queries. They play a lot of ranked Solo Queue.
    # The summoners for each region are either pros or high-ranked players, as they
    # tend to be quite active.
    SUMMONERS_BY_REGION = {"BR": deque(["488302", "522434", "410503", "9480188"]), \
        "EUNE": deque(["22536759", "36822759", "35943882", "38723598"]), \
        "EUW": deque(["20717177", "22659867", "25532701", "29776827"]), \
        "JP": deque(["6160658", "6170777", "6310312", "6181771"]), \
        "KR": deque(["4460427", "25291795", "7895259", "5310176"]), \
        "LAN": deque(["135857", "139360", "53010", "104671"]), \
        "LAS": deque(["135412", "185763", "167310", "110751"]), \
        "NA": deque(["19134540", "72749304", "51405", "65009177"]), \
        "OCE": deque(["346050", "293100", "432291", "484696"]), \
        "RU": deque(["4780139", "5420417", "312366", "483422"]), \
        "TR": deque(["2411323", "2024938", "2592438", "1883115"])}
    def __init__(self):
        # {region: {match_id: match_details}} accumulated by fetch_matches().
        self.total_matches = {}
        self.api_key = "api_key=" + os.environ['api_key'] #Be sure to set the API_Key as an environment variable before creating a MatchFetcher!
    def fetch_matches(self, query_distance, match_number_per_region):
        """Fetch matches newer than now - query_distance (ms) from each region.

        query_distance: look-back window in milliseconds.
        match_number_per_region: per-thread target number of matches.
        Returns the merged {region: {match_id: details}} dict.
        """
        total_query_distance = int((time.time() * 1000) - query_distance)
        threads = []
        regions = MatchFetcher.REGIONS.keys()
        for region in regions:
            threads.append(RequestThread(region, region, self.SUMMONERS_BY_REGION[region], total_query_distance, match_number_per_region, self.api_key))
            self.total_matches[region] = {}
        for thread in threads:
            thread.start()
        for thread in threads:
            # join() blocks until that region's crawl finishes, then its
            # matches are merged into the per-region dict.
            thread.join()
            self.total_matches[thread.region].update(thread.total_matches)
        return self.total_matches
class RequestThread(threading.Thread):
def __init__(self, region, threadName, starting_summoners, query_distance, matches_requested, api_key):
threading.Thread.__init__(self)
self.api_key = api_key
self.region = region
self.name = threadName
self.request_tracker = deque([])
self.big_request_tracker = deque([])
self.query_distance = query_distance
self.starting_summoners = starting_summoners
self.total_matches = {}
self.lockout_counter = 0
self.summoners = {}
self.matches_requested = matches_requested
def run(self):
print ("Starting fetcher for " + "{0}" + " region \n").format(self.region)
self.fetcher()
print ("Exiting fetcher for " + "{0}" + " region \n").format(self.region)
def build_regional_url(self, type):
if type == "Matchlist":
extension = "/v2.2/matchlist/by-summoner/"
elif type == "Match":
extension = "/v2.2/match/"
elif type == "Summoner":
extension = "v1.4/summoner/"
return ("https://" + "{0}" + ".api.pvp.net/api/lol/" + "{0}" + "{1}").format(MatchFetcher.REGIONS[self.region], extension)
def track_request(self):
self.request_tracker.append(time.time())
self.big_request_tracker.append(time.time())
def halt_requests(self):
small_api_limit = {"requests": 9, "time": 10} # These numbers are based on your API key
big_api_limit = {"requests": 499, "time": 600} # These numbers are also based on your API key
self.check_helper(self.request_tracker, small_api_limit)
self.check_helper(self.big_request_tracker, big_api_limit)
def check_helper(self, tracker, api_limit):
while len(tracker) > 0 and (time.time() - tracker[0]) > api_limit["time"]:
tracker.popleft()
buffer_time = 0.25
if len(tracker) >= api_limit["requests"]:
time.sleep(api_limit["time"] + buffer_time - (time.time() - tracker[0]))
tracker.popleft()
def extract_match_data(self, match_id, summoner_queue):
regional_url = self.build_regional_url("Match")
self.track_request()
match_data = requests.get(("{0}" + "{1}" + "?" + "{2}").format(regional_url, match_id, self.api_key))
print match_data.status_code
if match_data.status_code == 200:
match_details = json.loads(match_data.text)
self.lockout_counter = 0
self.total_matches[match_id] = match_details
for summoner in match_details["participantIdentities"]:
summoner_queue.append(summoner["player"]["summonerId"])
if len(self.total_matches) == self.matches_requested:
return True
elif match_data.status_code == 429: #break if getting rate limited. Don't want to get blacklisted!
if self.check_for_lockout(match_data) == False:
return True
elif match_data.status_code == 403 or match_data.status_code == 404:
return True #Break if forbidden. This could be blacklisting or an api key error. Also break on bad input
elif match_data.status_code == 500 or match_data.status_code == 503:
self.check_for_lockout(match_data)
return self.extract_match_data(match_id, summoner_queue)
return False
def fetch_matches(self, summoner_id = "none"):
if summoner_id == "none":
summoner_id = self.starting_summoners[0]
regional_url = self.build_regional_url("Matchlist")
request_url = ("{0}" + "{1}" + "{2}" + "{3}" + "&" + "{4}").format(regional_url, summoner_id, MatchFetcher.RANKED_PARAM, self.query_distance, self.api_key)
self.track_request()
matches = requests.get(request_url)
print matches.status_code
if matches.status_code == 200:
self.lockout_counter = 0
self.summoners[summoner_id] = True
return json.loads(matches.text)["matches"]
elif matches.status_code == 429: #break if getting rate limited. Don't want to get blacklisted!
if self.check_for_lockout(matches) == False:
return False
elif matches.status_code == 403 or matches.status_code == 404:
if summoner_id == self.starting_summoners[0]:
return None
else:
return False
elif matches.status_code == 500 or matches.status_code == 503:
self.halt_requests()
self.check_for_lockout(matches)
return self.fetch_matches(summoner_id)
def check_for_lockout(self, response):
if response.status_code == 429:
print response.headers['X-Rate-Limit-Count']
if 'X-Rate-Limit-Type' in response.headers: # check to make sure it's the program exceeding the limit, not something internal
print "Rate limit exceeded"
return False
else: # If it IS internal, give it a sec. Jeez.
if self.lockout_counter < 4:
self.lockout_counter += 1
lockout_chart = {"1": 1, "2": 15, "3": 30, "4": 60}
time.sleep(lockout_chart["{0}".format(self.lockout_counter)])
return True
def fetcher(self):
    """Main crawl loop: drain the match queue, extracting data for each
    unseen match; when empty, pull new match lists for queued summoners.

    Stops when extract_match_data or fetch_matches signals a terminal
    condition (rate limit / forbidden).
    """
    finished = False
    match_queue = self.set_up_match_queue()
    summoner_queue = deque([])
    if len(match_queue) == 0:
        # Nothing to crawl at all; bail out early.
        return
    while finished == False:
        self.halt_requests()
        if len(match_queue) > 0:
            match_id = match_queue.popleft()
            if not match_id in self.total_matches:
                finished = self.extract_match_data(match_id, summoner_queue)
        else: #only start finding more matches if there are none available.
            if len(summoner_queue) > 0:
                summoner_id = summoner_queue.popleft()
                if not summoner_id in self.summoners:
                    matches = self.fetch_matches(summoner_id)
                    if matches == False: #returns out if rate limited
                        finished = True
                    else:
                        # NOTE(review): fetch_matches can also return None
                        # (403/404 on the seed summoner); iterating None
                        # here would raise TypeError -- confirm intended.
                        for match in matches:
                            if not match["matchId"] in self.total_matches and match["region"] == self.region:
                                match_queue.append(match["matchId"])
def set_up_match_queue(self):
match_queue = deque([])
initial_matches = None
counter = 0
while initial_matches is None:
self.halt_requests()
initial_matches = self.fetch_matches()
if initial_matches == False:
return match_queue
self.starting_summoners.append(self.starting_summoners.popleft)
counter += 1
if counter == 4:
print "Tried all summoners"
return match_queue
for match in initial_matches:
if match["region"] == self.region: #This makes sure that if a player transfers, we don't append matches on the wrong servers.
match_queue.append(match["matchId"])
return match_queue
# For testing a single thread
# a = RequestThread("NA", "NA", MatchFetcher.SUMMONERS_BY_REGION["NA"], 1464039831476, 10, API_KEY)
# a.start()
# a.join()
#
# For testing the fetcher class
# a = MatchFetcher()
# results = a.fetch_matches(1209600000, 10) # two weeks ago in milliseconds, finding 10 matches per region
# for region in results.keys():
# print ("Matches for {0}:").format(region)
# print results[region].keys()
|
986,379 | e476b3c6173e080832fd5b4e86d089915221055c | #!/usr/bin/env python3
# encoding: utf-8
import settings
print("You are about to make a Test List. Choose an option.")
def get_input():
    """Interactive main menu: enter runs the list builder, 's' opens
    settings, 'x' quits; anything else re-prompts.

    NOTE(review): re-prompting is done via recursion; a very persistent
    user could in principle exhaust the recursion limit -- confirm this
    is acceptable for an interactive tool.
    """
    inputt = input(f"\nPress {settings.OKGREEN}enter{settings.ENDC}"
                   f" {settings.OKGREEN}to make a List.{settings.ENDC}\n"
                   f"Type:\n{settings.OKBLUE}s{settings.ENDC} to "
                   f"{settings.OKBLUE}change settings{settings.ENDC}\n"
                   f"{settings.FAIL}x {settings.ENDC}"
                   f"to{settings.FAIL} quit the application{settings.ENDC}\n"
                   f"then press{settings.OKGREEN} enter{settings.ENDC}\n")
    if inputt == '':
        # Imported lazily to avoid a circular import at module load time.
        from main import run
        if run() is False:
            # run() returning False means a list was already made today.
            while True:
                ans = input(f"You have already produced a list today, "
                            f"do you want to{settings.OKBLUE} make another list?{settings.ENDC}[y/n]\n")
                if ans == "y":
                    run(anyway=True)
                    import sys
                    sys.exit()
                elif ans == "n":
                    get_input()
                else:
                    print(f"{settings.WARNING}GIVE AN ANSWER.{settings.ENDC}")
    elif inputt == 's':
        settings.change_settings()
        get_input()
    elif inputt == "x":
        import sys
        print("run again to apply changes")
        sys.exit()
    else:
        print(f"\n{settings.WARNING}CHOOSE SOMETHING{settings.ENDC}")
        get_input()
get_input()
|
986,380 | 40a9374b3649f4f141fe2500266797c050c9269a | import BuildSimHubAPI as bsh_api
import BuildSimHubAPI.postprocess as pp
import os
from sklearn.model_selection import cross_val_predict
from sklearn import linear_model
from plotly.offline import plot
import plotly.graph_objs as go
# Pull parametric EUI results from BuildSimHub, fit a cross-validated
# linear model, and plot predicted vs. measured values to an HTML file.
project_api_key = 'f98aadb3-254f-428d-a321-82a6e4b9424c'
model_api_key = '60952acf-bde2-44fa-9883-a0a78bf9eb56'

bsh = bsh_api.BuildSimHubAPIClient()
results = bsh.parametric_results(project_api_key, model_api_key)

# Collect results
result_dict = results.net_site_eui()
result_unit = results.last_parameter_unit
print(result_dict)

# Plot
param_plot = pp.ParametricPlot(result_dict, result_unit)
df = param_plot.pandas_df()

# Training code starts from here: 'Value' is the target, everything else
# is a feature.
y = df.loc[:, 'Value']
x = df.loc[:, df.columns != 'Value']
lr = linear_model.LinearRegression()
# default is 3 fold
predicted = cross_val_predict(lr, x, y)

# Scatter of predicted vs. measured points.
trace1 = go.Scatter(x=y, y=predicted, mode='markers',
                    marker=dict(size=8,
                                color='rgb(0, 0, 255)',
                                line=dict(
                                    width=2,
                                    color='rgb(0, 0, 0)'))
                    )
# Dashed y = x reference line.
trace2 = go.Scatter(x=[y.min(), y.max()], y=[y.min(), y.max()],
                    line=dict(color=('rgb(0, 0, 0)'),
                              width=5, dash='dash')
                    )
layout = go.Layout(showlegend=False,
                   yaxis=dict(
                       zeroline=False,
                       title='Predicted'),
                   xaxis=dict(
                       title='Measured', )
                   )
# layout['annotations'] = annotation
# BUG FIX: the original built a go.Figure here and immediately
# overwrote it with the dict below; the dead construction is removed.
# Also renamed `dir` so the builtin is no longer shadowed.
out_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
fig = dict(data=[trace1, trace2], layout=layout)
plot(fig, filename=os.path.join(out_dir, 'test.html'))
|
986,381 | b59cefba669ddf1688b1c7be7b226ee6573d1d49 | from django.apps import AppConfig
class UnitTestAppConfig(AppConfig):
    """Django app configuration for the unit-test app."""
    name = 'tests'
    verbose_name = "Unit test app"
|
class Device:
    """Tracks whether the requesting device's viewport supports APL.

    The supported-profile check is driven by a set membership test
    instead of the original six-branch elif chain.
    """

    # Viewport profiles (by their str() form) known to support APL.
    _APL_PROFILES = frozenset([
        'ViewportProfile.HUB_LANDSCAPE_LARGE',
        'ViewportProfile.HUB_LANDSCAPE_MEDIUM',
        'ViewportProfile.HUB_LANDSCAPE_SMALL',
        'ViewportProfile.HUB_ROUND_SMALL',
        'ViewportProfile.TV_LANDSCAPE_XLARGE',
        'ViewportProfile.MOBILE_LANDSCAPE_SMALL',
    ])

    apl_support: bool
    viewport_profile: bool

    def __init__(self, viewport_profile):
        if viewport_profile is None:
            self._set_default_session_variables()
        else:
            # Unknown profiles fall through to False, as before.
            self.apl_support = str(viewport_profile) in self._APL_PROFILES

    def _set_default_session_variables(self):
        self.viewport_profile = False
        # BUG FIX: the original left apl_support undefined on this path,
        # so any later read raised AttributeError. No profile => no APL.
        self.apl_support = False
|
986,383 | ff8f901fad6379764fb523d2dcc9eeb9c775a3ad | """"""
# Standard library modules.
import sys
import subprocess
# Third party modules.
# Local modules.
import psutil
# Globals and constants variables.
def create_startupinfo():
    """Return a subprocess.STARTUPINFO that hides the console window on
    Windows; on every other platform return None."""
    if sys.platform != "win32":
        return None
    info = subprocess.STARTUPINFO()
    info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    return info
def kill_process(pid):
    """Kill the process *pid* and all of its descendants.

    Children (recursive) are killed first, then the parent process.
    """
    psprocess = psutil.Process(pid)
    for subpsprocess in psprocess.children(recursive=True):
        subpsprocess.kill()
    psprocess.kill()
|
986,384 | a7161798861759300bfa29e3d432109f3e878407 | from matplotlib import pyplot as pp
from pymongo import MongoClient
import json
import pprint
import numpy as np
import datetime
client = MongoClient()
walmart = list(client["inventory"]["walmart"].find({}))
target = list(client["inventory"]["target"].find({}))
safeway = list(client["inventory"]["safeway"].find({}))
def gendates():
    """Reset the 'past' history of every store document and backfill five
    zero-score entries for the last five days.

    NOTE(review): year/month are hard-coded to 2020-10 and `day - 5` can
    go to zero or negative early in a month, which would make
    datetime.datetime raise ValueError -- confirm intended usage window.
    """
    s = datetime.datetime.now().day - 5
    # Wipe the existing history for all three stores.
    client["inventory"]["walmart"].update_many({}, {"$set":{"past":[]}})
    client["inventory"]["safeway"].update_many({}, {"$set":{"past":[]}})
    client["inventory"]["target"].update_many({}, {"$set":{"past":[]}})
    for i in range(5):
        client["inventory"]["walmart"].update_many({}, {"$addToSet":{"past":{datetime.datetime(2020, 10, s+i).strftime("%x"):0}}})
        client["inventory"]["safeway"].update_many({}, {"$addToSet":{"past":{datetime.datetime(2020, 10, s+i).strftime("%x"):0}}})
        client["inventory"]["target"].update_many({}, {"$addToSet":{"past":{datetime.datetime(2020, 10, s+i).strftime("%x"):0}}})
def addDate():
    """Append today's average score to each store address's 'past' list."""
    res = findAverage()
    for a in list(res.keys()):
        for b in list(res[a].keys()):
            # One history entry per address: {date string: average score}.
            client["inventory"][a].update({"address":b}, {"$addToSet":{"past":{datetime.datetime.now().strftime("%x"):res[a][b]}}})
def findAverage():
    """Compute the average item score per store address.

    Returns {store name: {address: average}} where the average sums the
    'score' field of every non-metadata key and divides by 17.
    NOTE(review): py2 file -- ttl/17 is integer division when scores are
    ints, and 17 looks like a hard-coded item count; confirm both.
    """
    t = {"walmart":walmart, "safeway":safeway, "target":target}
    #t = {"target":target}
    res ={}
    for a in list(t.keys()):
        b = t[a]
        temp = {}
        for i in b:
            ttl = 0
            for x in list(i.keys()):
                # Skip document metadata; everything else is an item.
                if x == "address" or x == "_id" or x == "past":
                    continue
                try:
                    ttl = ttl + i[x]["score"]
                except:
                    # Debug dump for malformed items, then keep going.
                    print i[x]
                    print x
                    print a
                    print i[x]["score"]
                    pass
            temp[i["address"]] = ttl/17
        res[a] = temp
    return res
# def makeTrendGraph():
# #type of goods : time vs score overall
# #type of goods : time vs score by stores
# #type of goods vs score overall
def makeStoreGraph():
    """Render a two-panel PNG per store address: historic average trend
    (left) and per-item availability scores (right).

    Output files go to ../../FlaskApp/static/<store>/<address>.png.
    NOTE(review): the del() calls mutate the in-memory documents so the
    remaining keys are all items -- the documents are not reusable after
    this function runs.
    """
    #time vs total score
    #item vs item score
    stores = ["walmart", "safeway", "target"]
    for i in stores:
        address = list(client["inventory"][i].find({}))
        for v in address:
            addresz = v["address"]
            print addresz.replace(" ", "_")
            print v["_id"]
            #print(v["address"])
            #print(v.values()[0]["score"])
            #print len(v["past"])
            #print ([a.keys()[0] for a in v["past"][(len(v["past"])-5):]])
            pp.rcParams["figure.figsize"] = (7,3)
            fig, axe = pp.subplots(1,2)
            #print axe
            # Left panel: last five history entries (date -> score dicts).
            xticks = [a.keys()[0] for a in v["past"][(len(v["past"])-5):]]
            axe[0].plot(range(5), [a.values()[0] for a in v["past"][(len(v["past"])-5):]])
            axe[0].set_xticks(range(5))
            axe[0].set_xticklabels(xticks, rotation = 90)
            axe[0].title.set_text("historic trend")
            axe[0].tick_params(axis='both', which='minor', labelsize=8)
            # Strip metadata so only item entries remain for the bar chart.
            del(v["past"])
            del(v["address"])
            del(v["_id"])
            #pprint.pprint([x["score"] for x in v.values()])
            #pprint.pprint([v.keys()])
            axe[1].bar(range(len(v.keys())), [x["score"] for x in v.values()])
            axe[1].set_xticks(range(len(v.keys())))
            xticks = v.keys()
            axe[1].set_xticklabels(xticks, rotation = 90)
            axe[1].title.set_text("availability of goods")
            # axe[1].tick_params(axis='both', which='minor', labelsize=8)
            # Reference line at score 10 on a twin axis.
            a1 = axe[1].twinx()
            a1.plot(range(len(v.keys())), [10 for z in range(len(v.keys()))], "r")
            pp.tight_layout()
            fig.savefig("../../FlaskApp/static/"+i+"/"+addresz.replace(" ", "_")+".png", bbox_inches="tight")
            pp.close()
def graph_load():
    """Entry point for regenerating all store graphs."""
    makeStoreGraph()
# pprint.pprint(average_finder())
#what data do people want to see?
#perhaps ... change in supply for different stores over time
#IDEAS:
#Average score of different things across different stores - bar /
#Average price of different things across different stores - bar
#
#Location Analysis?(?)
#supply over time trend - plot
#price over time trend - plot
#
#supply over time based on county- bar
#supply over time based on shop type - bar
#supply over time based on over general - bar
#
#to simplify process, everything will happen at day-day interval
#
#persistent storage of score across different days
#schema: date - {address:address, old scores : {date:time}} |
986,385 | 781e04e07f57e58d3a9cc34417571680001724ca | from adj_stf.config.model_args import T5Args
from adj_stf.t5.t5_model import T5Model
|
986,386 | 23c5c749d46cb22d2e06c4bf536cbb860c0840c5 | # -*- coding: UTF-8 -*-
'''
Created on 2017年1月4日
@author: superhy
'''
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from numpy import float64
import matplotlib.pyplot as plt
import numpy as np
from interface import fileProcess
from matplotlib.pyplot import subplot
def transTrainingHistStrIntoList(histStr):
    """Parse a training-history string like "[(l, a, vl, va), ...]" into a
    list of float64 tuples, printing each parsed iteration.

    FIX: the original used ``tuple`` as its loop variable, shadowing the
    builtin it then called -- renamed to ``chunk``.
    """
    histStr = histStr[histStr.find('[') + 1 : histStr.find(']')]
    histTupleStrs = []
    for chunk in histStr.split('),'):
        start = chunk.find('(') + 1
        end = chunk.find(')')
        # The last chunk has no closing paren (it was eaten by find(']')).
        histTupleStrs.append(chunk[start:] if end == -1 else chunk[start:end])
    hist1 = []
    print('load history...')
    for i in range(len(histTupleStrs)):
        histEleTuple = tuple(float64(ele) for ele in histTupleStrs[i].split(', '))
        hist1.append(histEleTuple)
        print('{0} iterate: {1}'.format(i + 1, histEleTuple))
    return hist1
def loadHistFileData(histPath):
    """Load one result file: line 1 is the training history string,
    line 2 is "[acc, val_acc]"; return (history list, [acc, val_acc])."""
    with open(histPath, 'r') as reader:
        lines = reader.readlines()
    history = transTrainingHistStrIntoList(lines[0])
    resStr = lines[1]
    resParts = resStr[resStr.find('[') + 1 : resStr.find(']')].split(', ')
    return history, [float64(resParts[0]), float64(resParts[1])]
'''
plot function
'''
def plotLine(hist1, hist2, res1, res2):
    """Plot val_acc curves (as %) of two 150-iteration runs, with dotted
    horizontal lines at each run's final accuracy, then show the figure."""
    # e[3] is the val_acc entry of each history tuple.
    p_va1 = list(e[3] * 100 for e in hist1)
    p_va2 = list(e[3] * 100 for e in hist2)
    x = list(i + 1 for i in range(150))
    print(p_va1)
    print(p_va2)
    # print(p_a2)
    # plt.title('Attention vs Basic BiDirtGRU val_acc')
    plt.xlabel('iteration')
    plt.ylabel('acc(%)')
    plt.plot(x, p_va1, 'r', label='bidirtgru_val_acc')
    plt.plot(x, p_va2, 'b', label='attbidirtgru_val_acc')
    # res[1] is the final test accuracy reference line.
    plt.axhline(y=res1[1] * 100, color='r', linestyle=':', linewidth=1)
    plt.axhline(y=res2[1] * 100, color='b', linestyle=':', linewidth=1)
    plt.show()
def plotMutilLines(histList, resList):
    """Plot val_acc curves (as %) for up to five runs on one figure, plus
    a horizontal line at each run's final accuracy, then show it.

    NOTE(review): the local ``type`` list shadows the builtin within this
    function (harmless here, but worth renaming in a code change).
    """
    p_va = []
    for hist in histList:
        # e[3] is the val_acc entry of each history tuple.
        p_va.append(list(e[3] * 100 for e in hist))
    x = list(i + 1 for i in range(150))
    fontsize = 14
    plt.xlabel('iteration', fontsize=fontsize)
    plt.ylabel('accuracy(%)', fontsize=fontsize)
    plt.xlim(-0.5, 150.5)
    color = ['r', 'b', 'm', 'g', 'c']
    type = ['-', '-', '--', '-', '--']
    # color = ['r', 'b', 'g', 'g']
    # type = ['-', '-', '--', '--']
    label = ['basic', 'bidecay', 'bicopy', 'unidecay', 'unicopy']
    for i in range(len(histList)):
        plt.plot(x, p_va[i], color=color[i], linestyle=type[i], linewidth=1.5, label=label[i]) # accuracy curve
        plt.axhline(y=resList[i][1] * 100, color=color[i], linestyle=type[i], linewidth=1) # horizontal final-accuracy line
    # show legend
    plt.legend(loc='best', numpoints=1)
    leg = plt.gca().get_legend()
    ltext = leg.get_texts()
    plt.setp(ltext, fontsize=fontsize)
    # show coordinate line
    plt.grid(True)
    plt.show()
def showExpDecay(n_0=1.0, ld=1.0):
    """Plot the exponential decay family N(t) = N0 * e^(-lambda*t) for five
    decay rates (1/25, 1/5, 1, 5, 25 times *ld*) over t in [0, 5]."""
    xminorLocator = MultipleLocator(0.1) # minor x-axis ticks at multiples of 0.1
    yminorLocator = MultipleLocator(0.04) # minor y-axis ticks at multiples of 0.04
    fontsize = 14
    x = np.linspace(0.0, 5.0, endpoint=True)
    y_0 = n_0 * 1.0 * np.e ** (-ld * x * 1.0 / 25)
    y_1 = n_0 * 1.0 * np.e ** (-ld * x * 1.0 / 5)
    y_2 = n_0 * 1.0 * np.e ** (-ld * x * 1.0 / 1)
    y_3 = n_0 * 1.0 * np.e ** (-ld * x * 1.0 * 5)
    y_4 = n_0 * 1.0 * np.e ** (-ld * x * 1.0 * 25)
    plt.xlabel(r'$t$', fontsize=fontsize)
    plt.ylabel(r'$N(t)$', fontsize=fontsize)
    # plt.xlim(-0.01, 5.01)
    ax = subplot(111)
    # labelTex_0 = r'$N_0 e^{-\frac{1}{25} t}$'
    # labelTex_1 = r'$N_0 e^{-\frac{1}{5} t}$'
    labelTex_0 = r'$N_0 e^{-t / 25}$'
    labelTex_1 = r'$N_0 e^{-t / 5}$'
    labelTex_2 = r'$N_0 e^{- t}$'
    labelTex_3 = r'$N_0 e^{-5 t}$'
    labelTex_4 = r'$N_0 e^{-25 t}$'
    plt.plot(x,y_0,color = 'r',linestyle = '-', linewidth = 1.0, label=labelTex_0)
    plt.plot(x,y_1,color = 'g',linestyle = '-', linewidth = 1.0, label=labelTex_1)
    plt.plot(x,y_2,color = 'b',linestyle = '-', linewidth = 1.0, label=labelTex_2)
    plt.plot(x,y_3,color = 'c',linestyle = '-', linewidth = 1.0, label=labelTex_3)
    plt.plot(x,y_4,color = 'm',linestyle = '-', linewidth = 1.0, label=labelTex_4)
    # show legend
    plt.legend(loc='best', numpoints=1)
    leg = plt.gca().get_legend()
    ltext = leg.get_texts()
    plt.setp(ltext, fontsize=fontsize)
    ax.xaxis.set_minor_locator(xminorLocator)
    ax.yaxis.set_minor_locator(yminorLocator)
    # show coordinate line
    plt.grid(True)
    plt.show()
if __name__ == '__main__':
    # Demo driver: load five result files for one network/dataset pick.
    # The plotting calls are commented out; only the decay-curve demo runs.
    # path1 = '/home/superhy/文档/experiment/2017.1.7/2500vs/basic/RES_BiDirtGRU_Net_mat0_data2-1000.txt'
    # path2 = '/home/superhy/文档/experiment/2017.1.7/2500vs/att/RES_BiDirtGRU_Net_mat1_data2-1000.txt'
    # path3 = '/home/superhy/文档/experiment/2017.1.7/2500vs/att bicopy/RES_BiDirtGRU_Net_mat1_data2-1000.txt'
    # path4 = '/home/superhy/文档/experiment/2017.1.7/2500vs/att unidecay/RES_BiDirtGRU_Net_mat1_data2-1000.txt'
    # path5 = '/home/superhy/文档/experiment/2017.1.7/2500vs/att unicopy/RES_BiDirtGRU_Net_mat1_data2-1000.txt'
    # path1 = '/home/superhy/文档/experiment/2017.1.7/2500vs/basic/RES_BiDirtLSTM_Net_mat0_data2-1000.txt'
    # path2 = '/home/superhy/文档/experiment/2017.1.7/2500vs/att/RES_BiDirtLSTM_Net_mat1_data2-1000.txt'
    # path3 = '/home/superhy/文档/experiment/2017.1.7/2500vs/att bicopy/RES_BiDirtLSTM_Net_mat1_data2-1000.txt'
    # path4 = '/home/superhy/文档/experiment/2017.1.7/2500vs/att unidecay/RES_BiDirtLSTM_Net_mat1_data2-1000.txt'
    # path5 = '/home/superhy/文档/experiment/2017.1.7/2500vs/att unicopy/RES_BiDirtLSTM_Net_mat1_data2-1000.txt'
    network = {'cnns' : 'CNNs', 'gru' : 'GRU', 'lstm' : 'LSTM', 'bigru' : 'BiDirtGRU', 'bilstm' : 'BiDirtLSTM'}
    data = [(2500, 1000), (3500, 3500), (5000, 5000)]
    net = network['lstm']
    pick = data[0]
    path1 = 'D:/intent-exp/2017.1.7/{0}vs/basic/RES_{1}_Net_mat0_data2-{2}.txt'.format(pick[0], net, pick[1])
    path2 = 'D:/intent-exp/2017.1.7/{0}vs/att/RES_{1}_Net_mat1_data2-{2}.txt'.format(pick[0], net, pick[1])
    path3 = 'D:/intent-exp/2017.1.7/{0}vs/att bicopy/RES_{1}_Net_mat1_data2-{2}.txt'.format(pick[0], net, pick[1])
    path4 = 'D:/intent-exp/2017.1.7/{0}vs/att unidecay/RES_{1}_Net_mat1_data2-{2}.txt'.format(pick[0], net, pick[1])
    path5 = 'D:/intent-exp/2017.1.7/{0}vs/att unicopy/RES_{1}_Net_mat1_data2-{2}.txt'.format(pick[0], net, pick[1])
    hist1, res1 = loadHistFileData(path1)
    hist2, res2 = loadHistFileData(path2)
    hist3, res3 = loadHistFileData(path3)
    hist4, res4 = loadHistFileData(path4)
    hist5, res5 = loadHistFileData(path5)
    # plotLine(hist1, hist2, res1, res2)
    # plotMutilLines([hist1, hist2, hist3, hist4, hist5], [res1, res2, res3, res4, res5])
    showExpDecay(n_0=1.0, ld=1)
|
986,387 | 2aacbcbe31bdb9000bb0d097687c656f28db6517 | """440. Backpack III
"""
class Solution:
    """
    @param A: an integer array (item sizes)
    @param V: an integer array (item values)
    @param m: An integer (backpack capacity)
    @return: the maximum total value that fits
    """
    def backPackIII(self, A, V, m):
        """Unbounded knapsack: each item kind may be taken any number of
        times.

        dp[i][j] = best value using the first i item kinds at *exact*
        weight j; -inf marks unreachable weights. The answer is the best
        over all weights <= m.

        FIX: the original repeated the entire dp computation after the
        return statement -- that unreachable duplicate is removed.
        """
        n = len(A)
        dp = [[float('-inf')] * (m + 1) for _ in range(n + 1)]
        dp[0][0] = 0
        for i in range(1, n + 1):
            size, value = A[i - 1], V[i - 1]
            for j in range(m + 1):
                if j < size:
                    dp[i][j] = dp[i - 1][j]
                else:
                    # dp[i][j - size] uses the *same* row, which is what
                    # allows unlimited reuse of item i.
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - size] + value)
        return max(dp[-1])
|
986,388 | 15fc1f68f0cbfee2d768393e7b57f994e7eab3de | ##
## EPITECH PROJECT, 2021
## B-MAT-200-PAR-2-1-110borwein-jean-baptiste.debize
## File description:
## calc_fonc
##
from math import sin
from math import pi
def fonc_eval(n, x):
    """Evaluate the Borwein integrand: the product of sinc(x / (2k+1))
    for k = 0..n. Defined as 1 at x = 0 (the limit of sinc)."""
    if x == 0:
        return 1
    product = 1
    for k in range(n + 1):
        scaled = x / (2 * k + 1)
        product *= sin(scaled) / scaled
    return product
def midpoint(n):
    """Midpoint-rule approximation of integral_0^5000 of the Borwein
    integrand of order n, printed with its difference from pi/2.

    Step is 0.5, so f(midpoint)/2 == f(midpoint) * step.
    """
    b = 5000
    result = 0
    dec = 0.5
    for i in range(int(b / dec)):
        result += fonc_eval(n, i * dec + 0.25) / 2
    print("Midpoint:")
    print("I%d = %.10f" %(n, result))
    print("diff = %.10f" %(abs(result - pi/2)))
def trapezoids(n):
    """Trapezoidal-rule approximation of integral_0^5000 of the Borwein
    integrand of order n, printed with its difference from pi/2."""
    b = 5000
    dec = 0.5
    result = 0
    for i in range(int(b / dec)):
        # Sum of endpoint pairs; scaled by step/2 once at the end.
        result += fonc_eval(n, i * dec) + fonc_eval(n, (i + 1) * dec)
    result *= dec / 2
    print("Trapezoidal:")
    print("I%d = %.10f" %(n, result))
    print("diff = %.10f" %(abs(result - pi / 2)))
def simpson(n):
    """Simpson's-rule approximation of integral_0^5000 of the Borwein
    integrand of order n, printed with its difference from pi/2.

    NOTE(review): the loop reuses ``b`` for the subinterval endpoint after
    using it for the upper bound; range() has already been evaluated so
    this works, but the shadowing is fragile.
    """
    b = 5000
    result = 0
    dec = 0.5
    for i in range(int(b / dec)):
        a = i * dec
        b = i * dec + 0.5
        # Simpson's rule on [a, b]: (b-a)/6 * (f(a) + 4 f(mid) + f(b)).
        result += ((b - a) / 6) * (fonc_eval(n, a) + 4 * fonc_eval(n, (a + b) / 2) + fonc_eval(n , b))
    print("Simpson:")
    print("I%d = %.10f" % (n, result))
    print("diff = %.10f" % (abs(result - pi / 2)))
|
986,389 | e5c1c634f4057b981b6ab49304570961aced40d6 | """
Created by Juraj Lahvička, xlahvi00
"""
from definitions import *
first = {}
def init_first():
    """Reset the FIRST table to an empty list per non-terminal."""
    for symbol in non_terminals:
        first[symbol] = []
def check_first(non_terminal, line):
    """Extend FIRST(non_terminal) from one grammar rule line.

    If the rule starts with a terminal, add that terminal; if it starts
    with a non-terminal, add that symbol's current FIRST set.

    FIX: membership tests now use the ``in`` operator instead of calling
    ``__contains__`` directly.
    NOTE(review): the terminal branch strips the symbol for the lookup
    but appends the *unstripped* token, while the non-terminal branch
    does not strip at all -- preserved as-is, but worth confirming.
    """
    head = line.split(' ')[0]
    if head.strip() in terminals:
        first[non_terminal] += [head]
    elif head in non_terminals:
        first[non_terminal] += first[head]
def _get_first():
    """Run one propagation pass of every grammar rule through check_first."""
    rules_dict = get_rules_set_dict()
    for nt in non_terminals:
        for rule_line in rules_dict[nt]:
            check_first(nt, rule_line)
def remove_duplicates():
    """De-duplicate every FIRST list, preserving insertion order."""
    for key, symbols in first.items():
        first[key] = list(dict.fromkeys(symbols))
def get_first():
    """Compute all FIRST sets by iterating propagation to a fixed point.

    FIX: the original simply called _get_first() four times, which only
    converges for grammars whose dependency chains are at most four
    deep. Iterating (with de-duplication each pass) until the table
    stops changing is correct for any depth; the sets grow monotonically
    and are bounded, so this terminates.
    """
    while True:
        before = {key: list(symbols) for key, symbols in first.items()}
        _get_first()
        remove_duplicates()
        if first == before:
            break
|
986,390 | f7fe9c73fea1d64d80fd92d6b9c03ee0787bd56e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Find and delete Movies that have been watched by a list of users.
Deletion is prompted
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import input
from builtins import object
import requests
import sys
import os
import shutil
# ## EDIT THESE SETTINGS ##
TAUTULLI_APIKEY = 'xxxxxxxx' # Your Tautulli API key
TAUTULLI_URL = 'http://localhost:8181/' # Your Tautulli URL
LIBRARY_NAMES = ['My Movies'] # Whatever your movie libraries are called.
USER_LST = ['Joe', 'Alex'] # Name of users
class UserHIS(object):
    """One Tautulli history row, reduced to the field we need."""

    def __init__(self, data=None):
        row = data or {}
        # Raises KeyError if the row lacks 'rating_key', as before.
        self.rating_key = row['rating_key']
class METAINFO(object):
    """Metadata extract for a media item: its title and the file path of
    the first part of its first media-info entry."""

    def __init__(self, data=None):
        md = data or {}
        self.title = md['title']
        first_part = md['media_info'][0]['parts'][0]
        self.file = first_part['file']
def get_metadata(rating_key):
    # Get the metadata for a media item.
    # Returns a METAINFO when the item belongs to one of LIBRARY_NAMES;
    # returns None (implicitly) otherwise or on any request/parse error.
    payload = {'apikey': TAUTULLI_APIKEY,
               'rating_key': rating_key,
               'cmd': 'get_metadata',
               'media_info': True}
    try:
        r = requests.get(TAUTULLI_URL.rstrip('/') + '/api/v2', params=payload)
        response = r.json()
        res_data = response['response']['data']
        if res_data['library_name'] in LIBRARY_NAMES:
            return METAINFO(data=res_data)
    except Exception as e:
        # Best-effort: log to stderr and fall through to None.
        sys.stderr.write("Tautulli API 'get_metadata' request failed: {0}.".format(e))
        pass
def get_history(user, start, length):
    # Get the Tautulli history.
    # Returns a page of fully-watched movie rows (watched_status == 1)
    # as UserHIS objects, or None (implicitly) on a request/parse error.
    payload = {'apikey': TAUTULLI_APIKEY,
               'cmd': 'get_history',
               'user': user,
               'media_type': 'movie',
               'start': start,
               'length': length}
    try:
        r = requests.get(TAUTULLI_URL.rstrip('/') + '/api/v2', params=payload)
        response = r.json()
        res_data = response['response']['data']['data']
        return [UserHIS(data=d) for d in res_data if d['watched_status'] == 1]
    except Exception as e:
        sys.stderr.write("Tautulli API 'get_history' request failed: {0}.".format(e))
def delete_files(tmp_lst):
    """Prompt once, then recursively delete the parent directory of every
    file path in tmp_lst. Destructive: removes whole movie folders."""
    del_file = input('Delete all watched files? (yes/no)').lower()
    if del_file.startswith('y'):
        for x in tmp_lst:
            print("Removing {}".format(os.path.dirname(x)))
            shutil.rmtree(os.path.dirname(x))
    else:
        print('Ok. doing nothing.')
# ---- Script body: collect watched history for every listed user, then
# ---- offer to delete movies that ALL of them have watched.
movie_dict = {}
movie_lst = []
delete_lst = []
count = 25  # page size for the paginated history API

for user in USER_LST:
    start = 0
    while True:
        # Getting all watched history for listed users, one page at a time.
        history = get_history(user, start, count)
        try:
            if all([history]):
                for h in history:
                    # Getting metadata of what was watched
                    movies = get_metadata(h.rating_key)
                    if not any(d['title'] == movies.title for d in movie_lst):
                        movie_dict = {
                            'title': movies.title,
                            'file': movies.file,
                            'watched_by': [user]
                        }
                        movie_lst.append(movie_dict)
                    else:
                        for d in movie_lst:
                            if d['title'] == movies.title:
                                d['watched_by'].append(user)
                                continue
            elif not all([history]):
                break
            # BUG FIX: the original incremented `start` both here AND
            # inside the `if` branch above, advancing two pages per loop
            # and silently skipping every other page of history.
            # Increment exactly once per fetched page.
            start += count
        except Exception as e:
            print(e)
            pass

for movie_dict in movie_lst:
    # Only movies watched by every listed user are candidates.
    if set(USER_LST) == set(movie_dict['watched_by']):
        print(u"{} has been watched by {}".format(movie_dict['title'], " & ".join(USER_LST)))
        delete_lst.append(movie_dict['file'])

delete_files(delete_lst)
|
986,391 | fcea5f66c57924a9b1e2762a1240769aba94371d | size = int(input("Enter the no of items you want to add in Dictonary: "))
diction = dict()
for i in range(size):
key = input("Enter the key for item " + str(i + 1) + " in dictonary: ")
value = int(input("Enter the value for item " + str(i + 1) + " in dictonary: "))
diction[key] = value
print("The second largest value in the Dictonary is", list(sorted(diction.values()))[-2])
|
986,392 | f57ef34d896c7b832a94cca87f91ab425144a7d7 | """
归并排序
"""
def merge(left, right):
    """Merge two sorted lists into one sorted list.

    :param left: sorted list
    :param right: sorted list
    :return: new sorted list containing all elements of both inputs

    FIXES vs. the original: ``pop(0)`` made each merge O(n^2) and
    mutated (emptied) the caller's lists, and a leftover debug
    ``print(result)`` spammed stdout on every merge. This version is an
    O(n) index-based merge with no side effects.
    """
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # Exactly one of these is non-empty once the loop exits.
    result.extend(left[i:])
    result.extend(right[j:])
    return result
def mergeSort(arr):
    """Sort *arr* with top-down merge sort.

    Splits the list in half, recursively sorts each half, and merges the
    results. Lists of length 0 or 1 are returned unchanged.
    """
    if len(arr) < 2:
        return arr
    mid = len(arr) // 2
    return merge(mergeSort(arr[:mid]), mergeSort(arr[mid:]))
# Demo: sort a sample list (merge() also prints each intermediate merge).
lst = [111, 333, 555, 666, 77, 777, 1, 11, 4, 6, 7, 9]
print(mergeSort(lst))
|
986,393 | a2415072fb341976dbf516a3eeb1a3936440ffb1 | import pandas as pd
import numpy as np
from pandas import DataFrame
from scipy.stats import spearmanr
from texttable import Texttable
from CNN_processing import get_ref_and_cand
from bleu import BLEU
# Score each candidate summary against its reference with BLEU, then
# report the Spearman correlation between BLEU scores and human scores.
df = pd.read_csv('../data/demo.tsv', sep='\t')
n_gram = 4
candidate_corpus, reference_corpus, human_scores = get_ref_and_cand(df)
# print(reference_corpus[0:10])
# print(candidate_corpus[0:10])
bleu_scores = []
for i in range(len(candidate_corpus)):
    bleu = BLEU(reference_corpus[i], candidate_corpus[i], n_gram)
    # print(bleu)
    bleu_scores.append(bleu)
# print(max(bleu_scores))
# print(bleu_scores[0:500])
scorr = spearmanr(human_scores, bleu_scores)
corr = "{0:6.3f}".format(scorr.correlation)
# Fixed-point for readable p-values, scientific notation for tiny ones.
if (scorr.pvalue >= 0.001):
    pval = "{0:6.3f}".format(scorr.pvalue)
else:
    pval = "{0:10.3e}".format(scorr.pvalue)
correlations = [["Method", "Correlation", "P value"]]
correlations.append(['BLEU', corr, pval])
#print("\nCorrelations from BLEU metric\n")
t = Texttable()
t.add_rows(correlations)
print(t.draw())
# filename = "../results/CNN_DailyMail_results/BLEU_scores_CNN.txt"
# with open(filename, 'w') as f:
#     f.write("candidate_id\tsimilarity\tscore\n")
#     for i in range(len(candidate_corpus)):
#         f.write("{0}\t{1}\t{2}\n".format(i+1, bleu_scores[i], human_scores[i]))
|
986,394 | 331d7783f49328eaccaad5fffea5b411b8616584 | import _thread
import debugging
from machine import Pin
from time import sleep_ms
from maintenance import maint
class LEDs(object):
    """Controller for the board's good/warn/err status LEDs.

    BUG FIX: the original compared strings with ``is`` (object identity)
    throughout, which only works by accident of small-string interning
    in CPython and is a SyntaxWarning on modern Python; all such
    comparisons now use ``==``.
    """
    # These must be hard-coded to prevent a recursion issue where
    # config_class.py cannot load the config file and throws an error.
    # TODO Do I need this now? I don't think I'm running any err in
    # config_class.py. But I am pretty sure I will be soon. And/or maybe I
    # don't need it now that it's in a separate file?
    good = Pin('P10', mode = Pin.OUT)
    warn = Pin('P11', mode = Pin.OUT)
    err = Pin('P12', mode = Pin.OUT)

    # TODO Is this needed?
    def __init__(self, debug = False, debug_level = 0):
        debugging.enabled = debug
        debugging.default_level = debug_level
        self.debug = debugging.printmsg
        self.debug("Initialization complete", level = 1)
        # Which LEDs shine when no explicit LED/pattern is active.
        self.default = {'good': False, 'warn': False, 'err': False}

    def run_default(self):
        '''Runs the default LED'''
        self.debug("self.default: '" + str(self.default) + "'", level = 1)
        for LED_name, value in self.default.items():
            if LED_name == 'good':
                self.good(value)
            elif LED_name == 'warn':
                self.warn(value)
            elif LED_name == 'err':
                self.err(value)

    def LED(self, LED_name = None, default = False):
        '''Shine the LED we specify. If we set a default this is what shines
        even when we turn off the LED. That way for example if we are in a
        warning state and we run another LED, we want to return to a warning
        state when that is done. Calling default = True will set the last
        called LED as the default.
        '''
        if not LED_name or LED_name == 'default':
            if default:
                # Clearing the default means "nothing shines by default".
                self.default = {'good': False, 'warn': False, 'err': False}
            self.run_default()
            return
        if LED_name == 'good':
            self.good(True)
            self.warn(False)
            self.err(False)
        elif LED_name == 'warn':
            self.good(False)
            self.warn(True)
            self.err(False)
        elif LED_name == 'err':
            self.good(False)
            self.warn(False)
            self.err(True)
        if default:
            # Record the newly-lit LED as the one to return to later.
            for this_LED in self.default:
                if this_LED == LED_name:
                    self.default[this_LED] = True
                else:
                    self.default[this_LED] = False
            self.debug("self.default: '" + str(self.default) + "'", level = 1)

    def blink(self, run = True, pattern = None, default = False, id = None):
        '''Blink the LEDs on a pattern.
        NOT CURRENTLY WORKING. Use standard LEDs off and on (no blink) for now.
        Takes a run command and a pattern, which is a list of tuples:
        (LED, on/off bool, delay in milliseconds); a delay of None means
        always on. Examples:
            blink(run = True, pattern = [
                (self.warn, True, 500),
                (self.warn, False, 0),
                (self.err, True, 500),
                (self.err, False, 0)])
            blink(run = True, pattern = [(self.warn, True, None)])
            blink(run = True, pattern = [
                (self.good, True, 300),
                (self.good, False, 1700)],
                default = True)
            blink(run = False)  # stop and return to the default pattern
        Only one pattern can run at a time; starting a new one stops the
        current one. The default pattern (if any) is what resumes when a
        running pattern is stopped; calling blink() multiple times with
        default = True keeps the last such pattern as the default.
        '''
        maint()
        if not default:
            self.default_pattern = None
        else:
            self.default_pattern = pattern
        if run:
            self.debug("Running the _blink thread", level = 1)
            # Multithreading so we can get back to the next step in our process
            _thread.start_new_thread(self._blink, (True, pattern, id))
        else:
            self.debug("Stopping the _blink thread and returning to default",
                level = 1)
            global _run
            _run = False
            _thread.start_new_thread(self._blink, (True, self.default_pattern))

    def _blink(self, run, pattern, id = 0):
        '''The actual blink process.
        Don't run this directly, use blink() instead.
        '''
        self.debug("id: " + str(id), level = 1)
        self.debug("pattern: '" + str(pattern) + "'", level = 1)
        # TODO A kludge until Pycom fixes _thread.exit() from outside the
        # thread
        global _run
        # Stop anything that's currently running
        _run = False
        _run = run
        if not pattern:
            return
        # TODO What other internal variables do we use elsewhere that are not
        # prepended with underscore (private)?
        while _run:
            self.debug("Begin of the while loop, id: " + str(id), level = 0)
            for LED, state, delay in pattern:
                self.debug("LED: '" + str(LED) + "'", level = 2)
                self.debug("state: '" + str(state) + "'", level = 2)
                self.debug("delay: '" + str(delay) + "'", level = 2)
                if debugging.enabled and debugging.default_level > 0:
                    # Slow things down so debug output is readable.
                    sleep_ms(1000)
                maint()
                self.debug("Before setting, LED is '" + str(LED.value()) + "'",
                    level = 2)
                LED(state)
                self.debug("Now LED is set to '" + str(LED.value()) + "'",
                    level = 2)
                if not delay or delay < 1:
                    # Always have a little bit of delay. Don't want this to
                    # hammer our little system. 1ms should be imperceptible.
                    delay = 1
                self.debug("Now delay is set to '" + str(delay) + "'",
                    level = 0)
                # TODO Is it better maybe to setup a timer and callback?
                for i in range(delay):
                    self.debug("Delay count: " + str(i), level = 3)
                    if not _run:
                        # TODO Fails to reach here
                        self.debug("_thread.exit()")
                        _thread.exit()
                    sleep_ms(1)
            maint()
# End of class LEDs(object)
leds = LEDs() |
986,395 | 768dd4b4a9799600a6da33c8925d8d9ebd5aa16f | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-17 14:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Photograph.title to alt_text and drop its description field."""

    dependencies = [
        ("photography", "0002_auto_20160114_1613"),
    ]

    operations = [
        migrations.RenameField(model_name="photograph", old_name="title", new_name="alt_text",),
        migrations.RemoveField(model_name="photograph", name="description",),
    ]
|
986,396 | 9c1a7528213ac3e9ffe85f20f8ddc2b99d59eee3 | # from __future__ import print_function
import sys, datetime
from multiprocessing import Pool
from clone_simulation_hpc2hpc import clone_simulation_hpc2hpc
from COMPS import Client
from COMPS.Data import Experiment, SimulationFile, QueryCriteria, Configuration, Simulation
from COMPS.Data.Simulation import SimulationState
import pdb
compshost = 'https://comps.idmod.org'
def should_rerun_sim_custom(s):
    """
    Method to validate that this sim should be rerun: it ended in a
    failed/canceled state, or it finished without producing a
    MalariaSummaryReport_* file under its output/ directory.
    """
    print(s.id)
    if s.state == SimulationState.Failed or s.state==SimulationState.Canceled or s.state==SimulationState.CancelRequested:
        print("found failed or canceled sim to rerun")
        return True
    # Re-authenticate if the session expired (this runs in worker
    # processes, which may not share the parent's auth state).
    try:
        Client.auth_manager()
    except:
        Client.login(compshost)
    fi = s.retrieve_output_file_info(None)
    # Missing summary report => the sim didn't produce expected output.
    if not any(filter(lambda x: x.path_from_root == "output" and x.friendly_name.startswith("MalariaSummaryReport_"), fi)):
        print("found sim to rerun")
        return True
    return False
def should_rerun_sim(s):
    """Return True when the simulation ended in a failed or canceled state.

    Simplified from the original `if ...: return True else: return False`
    chain to a single membership test over the terminal-failure states.
    """
    return s.state in (
        SimulationState.Failed,
        SimulationState.Canceled,
        SimulationState.CancelRequested,
    )
if __name__ == "__main__":
    # Usage: script.py <experiment-id>. Finds sims in the experiment that
    # need rerunning, clones them into a (possibly new) experiment, and
    # recommissions it.
    if len(sys.argv) != 2:
        print('\r\nUsage:\r\n\t{0} (id-of-exp)'.format(sys.argv[0]))
        exit()
    Client.login(compshost)
    exp_id = sys.argv[1]
    exp = Experiment.get(exp_id)
    sims = exp.get_simulations()
    import pdb
    # pdb.set_trace()
    print(datetime.datetime.now())
    # Check sims in parallel; the check does per-sim API calls.
    with Pool() as p:
        results = p.map(should_rerun_sim_custom, sims)
    sims_to_rerun = [ sims[i] for i in filter(lambda x: results[x] == True, range(len(results))) ]
    print(datetime.datetime.now())
    if len(sims_to_rerun) == 0:
        print("Found no sims to rerun")
        exit(0)
    print("Found {} sims to rerun".format(len(sims_to_rerun)))
    expid = None
    for fs in sims_to_rerun:
        new_sim_id = clone_simulation_hpc2hpc(fs.id, expid)
        if expid == None:
            # First clone determines the destination experiment for the rest.
            new_sim = Simulation.get(new_sim_id)
            expid = new_sim.experiment_id
    print("Recommissioning experiment")
    if exp.id == expid:
        exp.commission()
        # (my) existing exp... retag the sims I'm rerunning so I can delete them later
        for fs in sims_to_rerun:
            fs.merge_tags({'ClonedToRerun': None})
    else:
        exp2 = Experiment.get(expid)
        exp2.commission()
    print("Done")
986,397 | e6e9e95c41dcb5a0e59be5247c83162e572cee01 | import random
def get_randomized_9_hands():
    """Shuffle a fresh 52-card deck and deal two cards to each hand.

    Returns (hands, remaining_deck); see deal_two_cards_to_each for the
    per-hand layout. NOTE(review): despite the name, six hands are dealt —
    confirm against callers.
    """
    return deal_two_cards_to_each(shuffle_a_deck_of_cards())
def shuffle_a_deck_of_cards():
    """Return a shuffled 52-card deck of [rank, suit] pairs.

    Ranks run 2-14 (14 = ace) and suits 1-4.
    """
    deck = [[rank, suit] for rank in range(2, 15) for suit in range(1, 5)]
    random.shuffle(deck)
    return deck
def deal_two_cards_to_each(deck):
    """Deal the top twelve cards of *deck* into six two-card hands.

    Each hand is a 4-element list (the original docstring wrongly said 3):
    [card1, card2, ranks_sorted_desc, suited] where card1/card2 are the
    dealt [rank, suit] pairs, ranks_sorted_desc is their ranks in
    descending order, and suited is True when both suits match.

    Returns (hands, remaining_deck). *deck* itself is not mutated.
    """
    hands = []
    # Cards are dealt pairwise off the top, same order as the original
    # six-way copy-pasted version.
    for i in range(0, 12, 2):
        first, second = deck[i], deck[i + 1]
        hands.append([
            first,
            second,
            sorted([first[0], second[0]], reverse=True),
            first[1] == second[1],
        ])
    return hands, deck[12:]
|
986,398 | ac2e5ad8a8667ca5d1e49f009092c6dcd4671973 | from .base import BaseModel
import numpy as np
import scipy as sp
import GPy
from GPy.util.linalg import pdinv, dpotrs
from scipy.optimize import fmin_l_bfgs_b
class GPModel(BaseModel):
    """
    Modified based on the GP model in GPyOpt.

    :param kernel: GPy kernel to use in the GP model.
    :param noise_var: value of the noise variance if known.
    :param exact_feval: whether noiseless evaluations are available. IMPORTANT to make the optimization work well in noiseless scenarios (default, False).
    :param optimizer: optimizer of the model. Check GPy for details.
    :param max_iters: maximum number of iterations used to optimize the parameters of the model.
    :param optimize_restarts: number of restarts in the optimization.
    :param sparse: whether to use a sparse GP (default, False). This is useful when many observations are available.
    :param num_inducing: number of inducing points if a sparse GP is used.
    :param verbose: print out the model messages (default, False).
    :param ARD: whether ARD is used in the kernel (default, False).
    :param seed: seed for NumPy's global RNG (default, 42).

    .. Note:: This model does Maximum likelihood estimation of the hyper-parameters.
    """

    analytical_gradient_prediction = True  # --- Needed in all models to check if the gradients of acquisitions are computable.

    def __init__(self, kernel=None, noise_var=None, exact_feval=False, optimizer='bfgs',
                 max_iters=1000, optimize_restarts=5, sparse=False, num_inducing=10,
                 verbose=False, ARD=False, seed=42):
        self.kernel = kernel
        self.noise_var = noise_var
        self.exact_feval = exact_feval
        self.optimize_restarts = optimize_restarts
        self.optimizer = optimizer
        self.max_iters = max_iters
        self.verbose = verbose
        self.sparse = sparse
        self.num_inducing = num_inducing
        self.model = None  # lazily created on the first _update_model() call
        self.ARD = ARD
        self.seed = seed
        # BUG FIX: the original called np.random.rand(self.seed), which merely
        # *draws* `seed` uniform samples and never seeds the generator. Seed it
        # so optimizer restarts and initializations are reproducible.
        np.random.seed(self.seed)

    def _create_model(self, X, Y):
        """
        Creates the model given some input data X and Y.
        """
        # --- define kernel
        self.input_dim = X.shape[1]
        if self.kernel is None:
            # Matern 5/2 is the GPyOpt default; ARD enables per-dimension lengthscales.
            kern = GPy.kern.Matern52(self.input_dim, variance=1., ARD=self.ARD)  # + GPy.kern.Bias(self.input_dim)
        else:
            kern = self.kernel
            # Consumed once: a later re-creation falls back to the default kernel.
            self.kernel = None

        # --- define model
        noise_var = Y.var() * 0.01 if self.noise_var is None else self.noise_var

        if not self.sparse:
            self.model = GPy.models.GPRegression(X, Y, kernel=kern, noise_var=noise_var)
        else:
            self.model = GPy.models.SparseGPRegression(X, Y, kernel=kern, num_inducing=self.num_inducing)

        # --- restrict variance if exact evaluations of the objective
        if self.exact_feval:
            self.model.Gaussian_noise.constrain_fixed(1e-6, warning=False)
        else:
            # --- We make sure we do not get ridiculously small residual noise variance
            self.model.Gaussian_noise.constrain_bounded(1e-9, 1e6, warning=False)  # constrain_positive(warning=False)

    def _update_model(self, X_all, Y_all):
        """
        Updates the model with new observations.
        """
        if self.model is None:
            self._create_model(X_all, Y_all)
        else:
            self.model.set_XY(X_all, Y_all)

        # WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...
        if self.max_iters > 0:
            # --- update the model maximizing the marginal likelihood.
            if self.optimize_restarts == 1:
                self.model.optimize(optimizer=self.optimizer, max_iters=self.max_iters, messages=False, ipython_notebook=False)
            else:
                self.model.optimize_restarts(num_restarts=self.optimize_restarts, optimizer=self.optimizer, max_iters=self.max_iters, verbose=self.verbose)

    def predict(self, X):
        """
        Predictions with the model. Returns posterior means and standard deviations at X. Note that this is different in GPy where the variances are given.
        """
        if X.ndim == 1:
            X = X[None, :]
        m, v = self.model.predict(X)
        # Clip to avoid negative/zero variances from numerical error before sqrt.
        v = np.clip(v, 1e-10, np.inf)
        return m, np.sqrt(v)

    def predict_full(self, X):
        """
        Predictions with the model using the full covariance matrix
        """
        mu, cov = self.model.predict(X, full_cov=True)
        return mu, cov

    def get_fmin(self):
        """
        Returns the location where the posterior mean takes its minimal value.
        """
        return self.model.predict(self.model.X)[0].min()

    def predict_withGradients(self, X):
        """
        Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
        """
        if X.ndim == 1:
            X = X[None, :]
        m, v = self.model.predict(X)
        v = np.clip(v, 1e-10, np.inf)
        dmdx, dvdx = self.model.predictive_gradients(X)
        dmdx = dmdx[:, :, 0]
        # d(sqrt(v))/dx = dv/dx / (2*sqrt(v)) by the chain rule.
        dsdx = dvdx / (2 * np.sqrt(v))
        return m, np.sqrt(v), dmdx, dsdx
|
986,399 | 8920524e92917641ce0ca919d48a28a7fa620be6 | # -*- coding: utf-8 -*-
import csv
from prepaid_agreements.models import PrepaidAgreement
def write_agreements_data(path=''):
    """Export prepaid-agreement metadata to two UTF-8 CSV files.

    Writes one row per agreement to prepaid_metadata_all_agreements.csv and
    one row per product (its most recent agreement only) to
    prepaid_metadata_recent_agreements.csv.

    :param path: optional location prefix prepended verbatim to both file
        names (include a trailing separator, e.g. '/tmp/').
    """
    agreements_fieldnames = [
        'issuer_name', 'product_name', 'product_id',
        'agreement_effective_date', 'agreement_id', 'most_recent_agreement',
        'created_date', 'current_status', 'withdrawal_date',
        'prepaid_product_type', 'program_manager_exists', 'program_manager',
        'other_relevant_parties', 'path', 'direct_download'
    ]
    products_fieldnames = [
        'issuer_name', 'product_name', 'product_id',
        'agreement_effective_date', 'agreement_id',
        'created_date', 'current_status', 'withdrawal_date',
        'prepaid_product_type', 'program_manager_exists', 'program_manager',
        'other_relevant_parties', 'path', 'direct_download'
    ]
    agreements_location = path + 'prepaid_metadata_all_agreements.csv'
    products_location = path + 'prepaid_metadata_recent_agreements.csv'
    # Context managers guarantee the files are closed even if a row fails.
    # newline='' is required by the csv module so it controls line endings
    # itself (avoids blank rows on Windows).
    with open(agreements_location, 'w', encoding='utf-8', newline='') as agreements_file, \
            open(products_location, 'w', encoding='utf-8', newline='') as products_file:
        # Write a BOM at the top of the file so Excel knows it's UTF-8
        agreements_file.write('\ufeff')
        products_file.write('\ufeff')
        agreements_writer = csv.DictWriter(
            agreements_file,
            fieldnames=agreements_fieldnames
        )
        agreements_writer.writeheader()
        products_writer = csv.DictWriter(
            products_file,
            fieldnames=products_fieldnames
        )
        products_writer.writeheader()
        # Stable ordering: issuer, product name, product id, creation time.
        agreements = sorted(
            PrepaidAgreement.objects.all(),
            key=lambda agreement: (
                agreement.product.issuer_name,
                agreement.product.name,
                agreement.product.pk,
                agreement.created_time
            )
        )
        for agreement in agreements:
            product = agreement.product
            most_recent = 'Yes' if agreement.is_most_recent else 'No'
            created_time = agreement.created_time.strftime('%Y-%m-%d %H:%M:%S')
            other_relevant_parties = product.other_relevant_parties
            if other_relevant_parties:
                # Flatten multi-line party lists so each CSV row stays one line.
                other_relevant_parties = other_relevant_parties.replace(
                    '\n', '; '
                )
            else:
                other_relevant_parties = 'No information provided'
            data = {
                'issuer_name': product.issuer_name,
                'product_name': product.name,
                'product_id': product.pk,
                'agreement_effective_date': agreement.effective_date,
                'created_date': created_time,
                'withdrawal_date': product.withdrawal_date,
                'current_status': product.status,
                'prepaid_product_type': product.prepaid_type,
                'program_manager_exists': product.program_manager_exists,
                'program_manager': product.program_manager,
                'other_relevant_parties': other_relevant_parties,
                'path': agreement.bulk_download_path,
                'direct_download': agreement.compressed_files_url,
                'agreement_id': agreement.pk
            }
            # Product-level CSV only includes data
            # for a product's most recent agreement,
            # such that there is one row per product ID
            if agreement.is_most_recent:
                products_writer.writerow(data)
            data['most_recent_agreement'] = most_recent
            agreements_writer.writerow(data)
def run(*args):
    """Script entry point; the optional first argument is the output path prefix."""
    target = args[0] if args else ''
    write_agreements_data(path=target)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.