index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
987,300 | ac806ebc71c159a119c078beac48856480f611fb | b = []
for i in [2,3,4]:
a = i *3
b.append(a)
print(sum(b)) |
987,301 | 8d185a319ae34754b7a0bd3ff1cc58a329a3ef6f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Replaces the scalar interest counter on IPTracker with a
    # many-to-many relation to posts.
    dependencies = [
        ('posts', '0011_posts_user_contacted'),
        ('accounts', '0014_iptracker_interest_count'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='iptracker',
            name='interest_count',
        ),
        migrations.AddField(
            model_name='iptracker',
            # NOTE(review): 'intersets' looks like a typo for 'interests',
            # but later migrations may depend on this name -- do not rename
            # here.  Also, null=True has no effect on ManyToManyField
            # (Django warns W340) -- confirm before cleaning up.
            name='intersets',
            field=models.ManyToManyField(to='posts.Posts', null=True, blank=True),
        ),
    ]
|
987,302 | 69492fbbc065381ee13bcf7b92e50b245a7e1250 | from .state import *
class Task:
def __init__(self,script,targets,timeout=5,state=WAITNG):
self.script = script
self.targets = [ [agent,WAITNG,''] for agent in targets]
self.state = state
self.timeout = timeout |
987,303 | 508acb60d047202c8ca9886c0dc7bb52af1136ca | import threading
import requests
from queue import Queue
from bs4 import BeautifulSoup
import time
class ThreadCrawl(threading.Thread):
    """Worker thread that pulls blog ids off blogQueue, fetches the page,
    and pushes the raw HTML onto dataQueue until CRAWL_EXIT is set."""

    def __init__(self, crawler, blogQueue, dataQueue, header):
        """
        crawler: display name used in the start/end log messages.
        blogQueue: Queue of blog path fragments to fetch.
        dataQueue: Queue that receives fetched HTML documents.
        header: dict of HTTP request headers (e.g. User-Agent).
        """
        super(ThreadCrawl, self).__init__()
        self.crawler = crawler
        self.blogQueue = blogQueue
        self.dataQueue = dataQueue
        self.header = header

    def run(self):
        # Local import: the module-level import only brings in Queue.
        from queue import Empty
        print("start " + self.crawler + "\n")
        while not CRAWL_EXIT:
            try:
                blog = self.blogQueue.get(block=False)
            except Empty:
                continue  # queue momentarily empty; re-check CRAWL_EXIT
            url = "http://localhost:8081/" + str(blog)
            try:
                # BUG FIX: headers must be passed via the `headers` keyword;
                # the old positional call requests.get(url, self.header)
                # sent them as query params (second positional is `params`).
                content = requests.get(url, headers=self.header).text
            except requests.RequestException:
                continue  # skip unreachable pages instead of killing the thread
            self.dataQueue.put(content)
        print("end " + self.crawler + "\n")
class ThreadParse(threading.Thread):
    """Worker thread that consumes raw HTML from dataQueue, extracts the
    post metadata and comments, and appends one dict-per-post line to a
    shared output file (guarded by `lock`) until PARSE_EXIT is set."""

    def __init__(self, parser, dataQueue, file, lock):
        # parser: display name used in the start/end log messages.
        # dataQueue: Queue of HTML documents produced by ThreadCrawl.
        # file: shared writable file object for the extracted records.
        # lock: threading.Lock serializing writes to `file`.
        super(ThreadParse, self).__init__()
        self.parser = parser
        self.dataQueue = dataQueue
        self.file = file
        self.lock = lock

    def run(self):
        print("start " + self.parser + "\n")
        while not PARSE_EXIT:
            # NOTE(review): the bare except below silently drops both the
            # expected queue.Empty and any real parsing error (e.g. a page
            # missing .post-title) -- consider narrowing it.
            try:
                data = self.dataQueue.get(block=False)
                soup = BeautifulSoup(data, "html.parser")
                # Title is the first <a> after the .post-title node.
                title = soup.find(class_="post-title").find_next("a").text
                # `data` is rebound here from raw HTML to the .post-data node.
                data = soup.find(class_="post-data")
                ptime = data.find_next("time").text
                # First following <a> is the category, second the comment count.
                category = data.find_all_next("a")[0].text
                cnum = data.find_all_next("a")[1].text
                # print(soup.find(class_='comment-list').find("li"))
                commentList = []
                for comment in soup.find(class_='comment-list').findAll("li"):
                    author = comment.find(class_="comment-author").text.replace("\n","")
                    commentText = comment.find(class_="comment-content").text.replace("\n","")
                    # (variable name 'commnetTime' is a long-standing typo, kept as-is)
                    commnetTime = comment.find(class_="comment-time").text.replace("\n","")
                    commentEntity = {"author":author,"content":commentText,"time":commnetTime}
                    print(commentEntity)
                    commentList.append(commentEntity)
                info = {"title": title, "public time": ptime, "category": category, "comment number": cnum,"comments":commentList}
                # Serialize writes: several parser threads share one file.
                with self.lock:
                    self.file.write(str(info) + "\n")
            except:
                pass
        print("end " + self.parser + "\n")
if __name__ == "__main__":
    # Shutdown flags read by the worker threads' run() loops.
    CRAWL_EXIT = False
    PARSE_EXIT = False
    header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) "
              "Chrome/87.0.4280.88 Safari/537.36"}
    # Stage 0: scrape the index page for the list of blog links to fetch.
    blogQueue = Queue()
    blogList = []
    rootUrl = "http://localhost:8081/index"
    rootContent = requests.get(rootUrl, headers=header).text
    rootSoup = BeautifulSoup(rootContent, "html.parser")
    searches = rootSoup.find_all("a", class_="item-thumb bg-deepgrey")
    for search in searches:
        href = search.attrs["href"]
        blogList.append(href)
    for blogUrl in blogList:
        blogQueue.put(blogUrl)
    dataQueue = Queue()
    # NOTE(review): file is opened without `with`; it is closed manually at
    # the bottom.  Append mode means repeated runs accumulate records.
    file = open("comments.txt", "a")
    #file = open("data.txt", "a")
    lock = threading.Lock()
    # Stage 1: five crawler threads drain blogQueue into dataQueue.
    crawlList = ["crawler 1", "crawler 2", "crawler 3", "crawler 4", "crawler 5"]
    crawlThreadList = []
    for crawler in crawlList:
        thread = ThreadCrawl(crawler, blogQueue, dataQueue, header)
        thread.start()
        crawlThreadList.append(thread)
    # NOTE(review): busy-wait burns a CPU core; also "queue empty" only means
    # all items were *taken*, not that in-flight fetches finished.
    while not blogQueue.empty():
        pass
    CRAWL_EXIT = True
    for crawlThread in crawlThreadList:
        crawlThread.join()
    print("blogQueue is empty, crawlThreads terminate")
    # Stage 2: five parser threads drain dataQueue into the output file.
    parseList = ["parser 1", "parser 2", "parser 3", "parser 4", "parser 5"]
    parseThreadList = []
    for parser in parseList:
        thread = ThreadParse(parser, dataQueue, file, lock)
        thread.start()
        parseThreadList.append(thread)
    while not dataQueue.empty():
        pass
    PARSE_EXIT = True
    for parseThread in parseThreadList:
        parseThread.join()
    print("dataQueue is empty, parseThreads terminate")
    # All parsers have joined, so the lock is free; taken here for symmetry.
    with lock:
        file.close()
    print("##### end of program #####")
|
987,304 | 7ae3c7fdb6eb8504e116fa6f727ecb4f277c5285 | from typing_extensions import Self
from stripe.api_resources.abstract.api_resource import APIResource as APIResource
class UpdateableAPIResource(APIResource):
    """Type stub for Stripe API resources that support updates."""
    @classmethod
    # Server-side update of the object identified by `sid`; returns the
    # refreshed resource.
    def modify(cls, sid: str, **params) -> Self: ...
    # Persists local attribute changes; returns the refreshed resource.
    def save(self, idempotency_key: str | None = ...) -> Self: ...
|
987,305 | 17a35d326afe42cba1756192d402e24ba1c36fed | from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.sum_basic import SumBasicSummarizer
from sumy.parsers.plaintext import PlaintextParser
from .normalization import STOP_WORDS, stemmer
def sum_basic(text, config=None):
    """Summarize *text* with sumy's SumBasic algorithm.

    Args:
        text: plain-text document to summarize.
        config: optional dict; key 'summary_length' gives the number of
            sentences to keep (defaults to 1).

    Returns:
        The selected sentences joined into a single string.
    """
    # IDIOM FIX: the old signature used a mutable dict as a default
    # argument (shared across calls); use the None sentinel instead.
    if config is None:
        config = {'summary_length': 1}
    summarizer = SumBasicSummarizer(stemmer.lemmatize)
    summarizer.stop_words = STOP_WORDS
    parser = PlaintextParser.from_string(text, Tokenizer('english'))
    summary = summarizer(parser.document, config['summary_length'])
    return ' '.join(str(sentence) for sentence in summary)
|
987,306 | a13f58aa8b24cc2796c1bda66f9cea19b5584c58 | import json
import re
import bottle
import sqlitedatastore as datastore
@bottle.route('/')
def index_html():
    """Serve the single-page annotation viewer."""
    return bottle.static_file('sample_06_09.html', root='./static')


@bottle.route('/file/<filename:path>')
def static(filename):
    """Serve supporting static assets (JS/CSS) from ./static."""
    return bottle.static_file(filename, root='./static')
@bottle.get('/get')
def get():
    """Return a document plus its annotations as brat-style JSON.

    Query params:
        id: document id to load from the datastore.
        names: whitespace-separated annotation layer names.

    The response carries entity_types, entities ("T1", "T2", ...) and
    relations ("R1", ...) in the structure the brat visualizer expects.
    """
    doc_id = bottle.request.params.id
    names = bottle.request.params.names.split()
    row = datastore.get(doc_id, fl=['content'])
    text = row['content']
    # text = re.sub(r'[。!]', '\n', text)
    data = {
        'collection': {
            'entity_types': [],
        },
        'annotation': {
            'text': text,
            'entities': [],
            'relations': [],
        },
    }
    # mapping: (layer name, annotation index) -> assigned "Tn" entity id,
    # so relations emitted in the second pass can reference entities.
    mapping = {}
    for name in names:
        annos = datastore.get_annotation(doc_id, name)
        for i, anno in enumerate(annos):
            # NOTE(review): one entity_types entry is appended per
            # annotation, producing duplicates per layer -- presumably
            # harmless to the viewer, but could be deduplicated.
            data['collection']['entity_types'].append({
                'type': name,
                'bgColor': '#7fa2ff',
                'borderColor': 'darken'
            })
            # Entity ids are 1-based and ordered by insertion.
            Ti = 'T{0:d}'.format(len(data['annotation']['entities']) + 1)
            data['annotation']['entities'].append([
                Ti,
                name,
                [[anno['begin'], anno['end']]]
            ])
            mapping[(name, i)] = Ti
    # Second pass: now that every entity has an id, emit the links.
    for name in names:
        annos = datastore.get_annotation(doc_id, name)
        for i, anno in enumerate(annos):
            if 'link' not in anno:
                continue
            name_linked, i_linked = anno['link']
            # Skip links whose endpoints are not in the requested layers.
            if (name, i) not in mapping or (name_linked, i_linked) not in mapping:
                continue
            data['annotation']['relations'].append([
                'R{0:d}'.format(len(data['annotation']['relations']) + 1),
                'arg',
                [['src', mapping[(name, i)]], ['tgt', mapping[(name_linked, i_linked)]]]
            ])
    return json.dumps(data, ensure_ascii=False)
if __name__ == '__main__':
    # Open the datastore for the server's lifetime, serve on all
    # interfaces, and close the connection when bottle.run() returns.
    datastore.connect()
    bottle.run(host='0.0.0.0', port='8702')
    datastore.close()
|
987,307 | 7567847552b7250bdbc0e009b58be14db1452733 | import numpy as np
import random
from random import randint
from random import seed
## TODO: Implement this function
##
## Input:
## - dmat (np.array): symmetric array of distances
## - K (int): Number of clusters
##
## Output:
## (np.array): initialize by choosing random number of points as medioids
def random_init(dmat, K):
    """Choose K distinct row indices of dmat uniformly at random as the
    initial medoids (uses the global `random` module state)."""
    n_points = dmat.shape[0]
    chosen = random.sample(range(0, n_points), K)
    return np.array(chosen)
## TODO: Implement this function
##
## Input:
## - dmat (np.array): symmetric array of distances
## - medioids (np.array): indices of current medioids
##
## Output:
## - (np.array): assignment of each point to nearest medioid
def assign(dmat, medioids):
    """Assign every point to its nearest medoid.

    Entry i of the returned array holds the *vertex index* of the chosen
    medoid; ties go to the smaller medoid index.  The entries of
    `medioids` are truncated to ints in place, matching the original
    behavior.
    """
    n_points = dmat.shape[0]
    for pos in range(len(medioids)):
        medioids[pos] = int(medioids[pos])
    assignment = np.zeros((n_points))
    for point in range(n_points):
        # min over (distance, medoid) pairs reproduces sorted(...)[0].
        _, nearest = min((dmat[point][med], med) for med in medioids)
        assignment[point] = nearest
    return assignment
## TODO: Implement this function
##
## Input:
## - dmat (np.array): symmetric array of distances
## - assignment (np.array): cluster assignment for each point
## - K (int): number of clusters
##
## Output:
## (np.array): indices of selected medioids
def get_medioids(dmat, assignment, K):
    """Recompute the medoid of each cluster.

    For each cluster label i in 0..K-1, picks the member whose summed
    distance to the other members is smallest.

    NOTE(review): this expects `assignment` to contain labels 0..K-1,
    but assign() stores the medoid's *vertex index* instead -- confirm
    the intended label convention; an empty cluster makes np.argmin
    raise on an empty array.

    Returns a float np.array of K vertex indices (callers may need to
    cast to int before indexing with them).
    """
    medioid = np.zeros(K)
    for i in range(K):
        index = np.where(assignment == i)
        index = np.array(index)
        # Fancy indexing with (1, k) and (k, 1) arrays broadcasts to the
        # k x k within-cluster submatrix (transposed, harmless when dmat
        # is symmetric as documented).
        med = dmat[index, index.transpose()]
        sums = med.sum(axis = 0)
        med_min = np.argmin(sums)
        medioid[i] = index[0, med_min]
    return medioid
## TODO: Finish implementing this function
##
## Input:
## - dmat (np.array): symmetric array of distances
## - K (int): number of clusters
## - niter (int): maximum number of iterations
##
## Output:
## - (np.array): assignment of each point to cluster
def kmedioids(dmat, K, niter=10):
    """K-medoids clustering by alternating assignment and medoid update.

    Args:
        dmat: symmetric (n, n) array of pairwise distances.
        K: number of clusters.
        niter: maximum number of iterations.

    Returns:
        np.array of length n; entry i is the vertex index of the medoid
        point i is assigned to.
    """
    num_vertices = dmat.shape[0]
    # Sentinel that cannot equal a valid vertex index so the first
    # convergence test always passes.  (dtype=int, not np.int: np.int
    # was removed from NumPy.)
    old_medioids = np.full((K,), -1, dtype=int)
    medioids = random_init(dmat, K)
    # Defined before the loop so it exists even if niter == 0.
    assignment = np.full((num_vertices,), np.inf)
    it = 0
    # BUG FIX: the old loop tested the misspelled name `old_mediods`,
    # which was never updated, so convergence was never detected.
    while np.any(old_medioids != medioids) and it < niter:
        it += 1
        old_medioids = medioids
        assignment = assign(dmat, medioids)
        # BUG FIX: assign() labels points with the medoid's vertex id,
        # while get_medioids() expects labels 0..K-1 -- remap before the
        # update step so clusters are found correctly.
        label_of = {int(m): j for j, m in enumerate(medioids)}
        labels = np.array([label_of[int(v)] for v in assignment])
        # Cast back to int so the next assign() can index dmat safely.
        medioids = get_medioids(dmat, labels, K).astype(int)
    return assignment
|
987,308 | 8407ae6aa3dabfe866ef7a16ef001ba7e097f399 | import pandas as pd
from pandas import read_csv
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn import preprocessing
# Label-encode helper (currently unused below -- presumably for a later
# modelling step; verify before removing).
le = preprocessing.LabelEncoder()
print('IMPLEMENTING NAIVE BAYES CLASSIFIER TO PREDICT THE RESULT OF ARREST IN A GIVEN SITUATION')
# cluster.csv maps an hour-of-day ('data_index') to a cluster id ('cluster').
cluster_map = read_csv('Data/cluster.csv')
# l0/l1/l2 collect the hours that fall in clusters 0, 1 and 2.
l0 = []
l1 = []
l2 = []
li1 = cluster_map['cluster'].tolist()
li2 = cluster_map['data_index'].tolist()
crimes = read_csv('Data/Chicago_Crimes_2012_to_2017.csv')
crimes = crimes.dropna()
crimes.Date = pd.to_datetime(crimes.Date, format = '%m/%d/%Y %I:%M:%S %p')
crimes.index = pd.DatetimeIndex(crimes.Date)
# New columns: the incident hour, and a letter code for its hour-cluster.
crimes.insert(3, 'Hour', 'NULL')
crimes['Hour'] = crimes.index.hour
crimes.insert(3, 'Value', 'NULL')
# Assumes cluster.csv has exactly one row per hour (24 rows) -- TODO confirm.
for i in range(24):
    if li1[i] == 0:
        l0.append(li2[i])
    if li1[i] == 1:
        l1.append(li2[i])
    if li1[i] == 2:
        l2.append(li2[i])
# Tag each crime with its hour-cluster letter: a/b/c for clusters 0/1/2.
for i in l0:
    crimes.loc[crimes['Hour'] == i, ['Value']] = 'a'
for i in l1:
    crimes.loc[crimes['Hour'] == i, ['Value']] = 'b'
for i in l2:
    crimes.loc[crimes['Hour'] == i, ['Value']] = 'c'
cr_a = crimes.loc[crimes['Value'] == 'a', :]
cr_b = crimes.loc[crimes['Value'] == 'b', :]
cr_c = crimes.loc[crimes['Value'] == 'c', :]
# Features of interest for the cluster-'a' subset.
df = cr_a.loc[:,['Arrest','Primary Type', 'Value']]
print(df)
|
987,309 | 6b3b0ac8c7790f70a9b991994106241f460dbbc5 | from fastai.vision.all import *
path = untar_data(URLs.PETS)/'images'
def is_cat(x):
    """Label function: in the Oxford-IIIT pets dataset, cat filenames
    start with an uppercase letter."""
    first_char = x[0]
    return first_char.isupper()
# Build train/valid loaders from filenames; 20% validation split with a
# fixed seed, every image resized to 224x224.
dls = ImageDataLoaders.from_name_func(
    path, get_image_files(path), valid_pct=0.2, seed=42,
    label_func=is_cat, item_tfms=Resize(224))
# Transfer learning: pretrained ResNet-34, fine-tuned for one epoch.
learn = cnn_learner(dls, resnet34, metrics=error_rate)
learn.fine_tune(1)
# SimpleNamespace stands in for the notebook upload widget.
uploader = SimpleNamespace(data = ['images/chapter1_cat_example.jpg'])
img = PILImage.create(uploader.data[0])
# NOTE(review): this rebinds the name `is_cat`, shadowing the label
# function above -- harmless here since it is no longer called.
is_cat, _, probs = learn.predict(img)
print(f"Is this a cat?: {is_cat}.")
print(f"Probability it's a cat: {probs[1].item():.6f}")
|
987,310 | b437e5f7ec4c6079953d66b47a1db21cfc6d531b | {
'targets': [
{
'target_name': 'lib_node_activex',
'type': 'static_library',
'sources': [
'src/utils.cpp',
'src/disp.cpp'
],
'defines': [
'BUILDING_NODE_EXTENSION',
],
'direct_dependent_settings': {
'include_dirs': ['include']
},
'dependencies': [
]
}
]
}
|
987,311 | bef39113f5058b36332bcfb113ad8276ff396604 | import os
import pathlib
from uuid import uuid4
import pytest
import panaetius
from panaetius.exceptions import InvalidPythonException, KeyErrorTooDeepException
# test config paths
def test_default_config_path_set(header):
    """A Config built without an explicit path falls back to ~/.config."""
    cfg = panaetius.Config(header)

    expected = pathlib.Path.home() / ".config"
    assert str(cfg.config_path) == str(expected)
def test_user_config_path_set(header, shared_datadir):
# arrange
config_path = str(shared_datadir / "without_logging")
# act
config = panaetius.Config(header, config_path)
# assert
assert str(config.config_path) == config_path
def test_user_config_path_without_header_dir_set(header, shared_datadir):
# arrange
config_path = str(shared_datadir / "without_header")
# act
config = panaetius.Config(header, config_path, skip_header_init=True)
# assert
assert str(config.config_path) == config_path
# test config files
def test_config_file_exists(header, shared_datadir):
# arrange
config_path = str(shared_datadir / "without_logging")
# act
config = panaetius.Config(header, config_path)
_ = config.config
# assert
assert config._missing_config is False
def test_config_file_without_header_dir_exists(header, shared_datadir):
# arrange
config_path = str(shared_datadir / "without_header")
# act
config = panaetius.Config(header, config_path, skip_header_init=True)
_ = config.config
# assert
assert config._missing_config is False
def test_config_file_contents_read_success(header, shared_datadir, testing_config_contents):
# arrange
config_path = str(shared_datadir / "without_logging")
# act
config = panaetius.Config(header, config_path)
config_contents = config.config
# assert
assert config_contents == testing_config_contents
@pytest.mark.parametrize(
"set_config_key,get_config_key,expected_value",
[
("some_top_string", "some_top_string", "some_top_value"),
("second.some_second_string", "second_some_second_string", "some_second_value"),
(
"second.some_second_list",
"second_some_second_list",
["some", "second", "value"],
),
(
"second.some_second_table",
"second_some_second_table",
{"first": ["some", "first", "value"]},
),
(
"second.some_second_table_bools",
"second_some_second_table_bools",
{"bool": [True, False]},
),
("second.third.some_third_string", "second_third_some_third_string", "some_third_value"),
],
)
def test_get_value_from_key(set_config_key, get_config_key, expected_value, header, shared_datadir):
"""
Test the following:
- keys are read from top level key
- keys are read from two level key
- inline arrays are read correctly
- inline tables are read correctly
- inline tables & arrays read bools correctly
"""
# arrange
config_path = str(shared_datadir / "without_logging")
config = panaetius.Config(header, config_path)
panaetius.set_config(config, set_config_key)
# act
config_value = getattr(config, get_config_key)
# assert
assert config_value == expected_value
def test_get_value_environment_var_override(header, shared_datadir):
# arrange
os.environ[f"{header.upper()}_SOME_TOP_STRING"] = "some_overridden_value"
config_path = str(shared_datadir / "without_logging")
config = panaetius.Config(header, config_path)
panaetius.set_config(config, "some_top_string")
# act
config_value = getattr(config, "some_top_string")
# assert
assert config_value == "some_overridden_value"
# cleanup
del os.environ[f"{header.upper()}_SOME_TOP_STRING"]
def test_key_level_too_deep(header, shared_datadir):
# arrange
config_path = str(shared_datadir / "without_logging")
config = panaetius.Config(header, config_path)
key = "a.key.too.deep"
# act
with pytest.raises(KeyErrorTooDeepException) as key_error_too_deep:
panaetius.set_config(config, key)
# assert
assert str(key_error_too_deep.value) == f"Your key of {key} can only be 3 levels deep maximum."
def test_get_value_missing_key_from_default(header, shared_datadir):
# arrange
config_path = str(shared_datadir / "without_logging")
config = panaetius.Config(header, config_path)
panaetius.set_config(
config,
"missing.key_from_default",
default=["some", "default", "value", 1.0, True],
)
# act
default_value = getattr(config, "missing_key_from_default")
# assert
assert default_value == ["some", "default", "value", 1.0, True]
def test_get_value_missing_key_from_env(header, shared_datadir):
# arrange
os.environ[f"{header.upper()}_MISSING_KEY"] = "some missing key"
config_path = str(shared_datadir / "without_logging")
config = panaetius.Config(header, config_path)
panaetius.set_config(config, "missing_key")
# act
value_from_key = getattr(config, "missing_key")
# assert
assert value_from_key == "some missing key"
# cleanup
del os.environ[f"{header.upper()}_MISSING_KEY"]
# test env vars
def test_config_file_does_not_exist(header, shared_datadir):
# arrange
config_path = str(shared_datadir / "nonexistent_folder")
# act
config = panaetius.Config(header, config_path)
config_contents = config.config
# assert
assert config._missing_config is True
assert config_contents == {}
def test_missing_config_read_from_default(header, shared_datadir):
# arrange
config_path = str(shared_datadir / "nonexistent_folder")
# act
config = panaetius.Config(header, config_path)
panaetius.set_config(config, "missing.key_read_from_default", default=True)
# assert
assert getattr(config, "missing_key_read_from_default") is True
@pytest.mark.parametrize(
"env_value,expected_value",
[
("a missing string", "a missing string"),
("1", 1),
("1.0", 1.0),
("True", True),
(
'["an", "array", "of", "items", 1, True]',
["an", "array", "of", "items", 1, True],
),
(
'{"an": "array", "of": "items", "1": True}',
{"an": "array", "of": "items", "1": True},
),
],
)
def test_missing_config_read_from_env_var(env_value, expected_value, header, shared_datadir):
# arrange
config_path = str(shared_datadir / str(uuid4()))
os.environ[f"{header.upper()}_MISSING_KEY_READ_FROM_ENV_VAR"] = env_value
# act
config = panaetius.Config(header, config_path)
panaetius.set_config(config, "missing.key_read_from_env_var")
# assert
assert getattr(config, "missing_key_read_from_env_var") == expected_value
# cleanup
del os.environ[f"{header.upper()}_MISSING_KEY_READ_FROM_ENV_VAR"]
@pytest.mark.skip(reason="No longer needed as strings are loaded without quotes")
def test_missing_config_read_from_env_var_invalid_python(header):
# arrange
os.environ[f"{header.upper()}_INVALID_PYTHON"] = "a string without quotes"
config = panaetius.Config(header)
# act
with pytest.raises(InvalidPythonException) as invalid_python_exception:
panaetius.set_config(config, "invalid_python")
# assert
assert str(invalid_python_exception.value) == "a string without quotes is not valid Python."
# cleanup
del os.environ[f"{header.upper()}_INVALID_PYTHON"]
|
987,312 | b13fbbf279a0763a6091b327fb027f44940ac64b | from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
from selenium import webdriver
# options = webdriver.ChromeOptions()
# options.add_argument('--ignore-certificate-errors')
# options.add_argument("--test-type")
# options.binary_location = "/usr/lib/chromium-browser/chromedriver"
driver = webdriver.Firefox("/usr/local/bin/")
driver.get('https://techcrunch.com/')
# def send_request(url):
# """sends get request to url and checks to see if the conten is html"""
# try:
# with closing(get(url, stream=True)) as resp:
# if is_good_response(resp):
# python_button = driver.find_element_by_class_name(
# "load-more ")
# python_button.click()
# return resp.content
# except RequestException as e:
# print(f'{url}:{str(e)}')
# return None
def send_request(url):
    """Load *url* in the shared Selenium driver, click the "load more"
    button once, and return the resulting page source.

    BUG FIX: the url parameter was previously ignored -- the TechCrunch
    front page was always fetched regardless of the argument.
    """
    driver.get(url)
    python_button = driver.find_element_by_class_name("load-more ")
    python_button.click()
    return driver.page_source
# def is_good_response(resp):
# """returns true if response is html"""
# conten_type = resp.headers['content-Type'].lower()
# return (resp.status_code == 200
# and conten_type is not None
# and conten_type.find('html') > -1)
def get_url(html):
    """Extract article links from a TechCrunch listing page.

    Args:
        html: page source markup.

    Returns:
        list of href strings from the post-title anchors.
    """
    # Name the parser explicitly: a bare BeautifulSoup(html) emits a
    # GuessedAtParserWarning and may pick different parsers per machine.
    soup = BeautifulSoup(html, "html.parser")
    anchors = soup.findAll('a', attrs={'class': 'post-block__title__link'})
    return [a.get("href") for a in anchors]
def save_to_file(urls):
    """Write each url on its own line to 'url.txt' (overwriting).

    I/O failures are reported rather than raised so a failed save does
    not abort the scrape.
    """
    try:
        with open('url.txt', 'w') as file:
            for url in urls:
                file.write(url + "\n")
    except OSError:
        # BUG FIX: narrowed from a bare except, which also swallowed
        # KeyboardInterrupt and non-I/O programming errors.
        print("ERROR SAVING FILE")
if __name__ == "__main__":
raw_html = send_request("https://techcrunch.com/")
print(f"lengh of raw html{len(raw_html)}")
urls = get_url(raw_html)
save_to_file(urls)
|
987,313 | 2731046b498b9f6ceab8bf0b2626dea31f53f8dc | casa=int(input('Qual o valor da casa? '))
salario=int(input('Qual o seu salario? '))
anos=int(input('Quantos anos pretende pagar? '))
prestação=casa/(anos*12)
if prestação>(salario*0.3):
print ('Empréstimo não aprovado')
else:
print ('Emprestimo Aprovado') |
987,314 | fe032c1df849e7c7b7a87b4a4eb09b50203d57f7 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-08-14 09:29
from __future__ import unicode_literals
from django.db import migrations
from django.utils.text import slugify
def slugify_title(apps, schema_editor):
    """Data migration: backfill News.slug from each headline.

    Fetches the historical model through the migration registry, as
    required inside RunPython operations.
    """
    news_model = apps.get_model('corp', 'News')
    for article in news_model.objects.all():
        article.slug = slugify(article.headline)
        article.save()
class Migration(migrations.Migration):
    # Runs after 0005; applies slugify_title forward and is a no-op on
    # reverse (the lambda makes the migration reversible).
    dependencies = [
        ('corp', '0005_auto_20170814_0432'),
    ]

    operations = [
        migrations.RunPython(slugify_title, lambda x, y: None),
    ]
|
987,315 | 526225c1637e07a4ef8b42d1298dc72ba7f5fcba | #!/usr/bin/env python3
import sys
from pprint import pprint
from src import get_weather
if __name__ == '__main__':
    # Usage: script CITY -- prints the weather dict or an error to stderr.
    if len(sys.argv) == 1:
        sys.stderr.write('Not enough arguments\n')
        sys.exit(1)
    city, *_ = sys.argv[1:]
    try:
        pprint(get_weather(city))
    except Exception:
        # BUG FIX: sys.exc_info() returns a 3-tuple (type, value,
        # traceback); the old 2-name unpacking raised ValueError and
        # masked the real error.
        exc_type, exc_value, _tb = sys.exc_info()
        sys.stderr.write(f'''Error occurred: {exc_type}
{exc_value}
''')
        sys.exit(1)
|
987,316 | e1bc1841cc5f8568599396e097a3f685b195c617 | import abc
from malib.utils.typing import Any
class BaseClient(abc.ABC):
    """Abstract interface for a logging/telemetry backend client.

    Subclasses implement transport of scalars, images, figures and
    arbitrary objects, keyed by (key, tag) and a node id `nid`.
    """

    def info(self, level: str, message: str, nid: str):
        """Log a text message at `level` for node `nid` (no-op default)."""
        pass

    @abc.abstractmethod
    def create_table(self, primary: str, secondary: str, nid: str):
        """Create a storage table identified by (primary, secondary)."""
        pass

    @abc.abstractmethod
    def send_scalar(self, key: int, tag: str, nid: str, content: Any):
        """Record a scalar value under (key, tag) for node `nid`."""
        pass

    @abc.abstractmethod
    def send_image(self, key, tag, image, serial):
        """Record an image; `serial` presumably selects the serializer -- confirm."""
        pass

    @abc.abstractmethod
    def send_figure(self, key, tag, nid, figure):
        """Record a plot/figure object for node `nid`."""
        pass

    @abc.abstractmethod
    def send_obj(self, key, tag, nid, obj, serial):
        """Record an arbitrary object, serialized per `serial`."""
        pass

    @abc.abstractmethod
    def send_binary_tensor(self, key, tag, nid, tensor):
        """Record a tensor in binary form for node `nid`."""
        pass
|
987,317 | d92d20be08ea8d497f557c14e794dd65e7e85dfb | import os
import cv2
path1 = './result_fullres_4channel/'
path2 = './result_fullres_3channel/'
save_path = './final_result_fullres'
if not os.path.exists(save_path):
os.makedirs(save_path)
for file in os.listdir(path1):
image_path1 = os.path.join(path1, file)
image_path2 = os.path.join(path2, file)
img1 = cv2.imread(image_path1)
img2 = cv2.imread(image_path2)
out = img1 / 2 + img2 / 2
save_path1 = os.path.join(save_path, file)
cv2.imwrite(save_path1, out) |
987,318 | 899d6f16fd5375c70b07de4377598108543cbfa8 | # -*- coding: utf-8 -*-
# @Time : 2019/10/14 0014 20:23
# @Author :
# @FileName: demo1.py
# @Software: PyCharm
import matplotlib.pylab as plt
import numpy as np
path = r'E:\bawei\DeepLearning_1\深度一\tensorflow补充'
img = plt.imread(path+r'\1.bmp')
gravity = np.array([1., 0., 0.])
greyimg = np.dot(255-img, gravity)/255
print(img.shape) |
987,319 | c0eeffa32a53840a8f8c75fc974908d9f4cbe227 | # from account import Account
# from current_account import CurrentAccount
# from negative_amount_error import NegativeAmountError # do we have to import all our errors to the client script?!
# from insufficient_funds_error import InsufficientFundsException
#
# barry = Account("barry", "jones", "f")
#
# # print(barry._full_name)
#
# try:
# print(barry.withdraw(-100))
# except NegativeAmountError as err:
# print(f"Withdrawal unsuccessful. {err}")
# else:
# print(f"Withdrawal successful: your new balance is £{barry.balance}.")
# finally:
# print("Would you like to make any further transactions?")
#
# print(barry) # overwritten __str__ so this returns balance
#
# michelle = CurrentAccount("michelle", "obama", 2000, 500)
#
# print(michelle.forename)
# print(michelle.surname)
#
# try:
# michelle.withdraw(3000)
# except NegativeAmountError as err:
# print(f"Withdrawal unsuccessful. {err}")
# except InsufficientFundsException as err:
# print(err)
# else:
# print(f"Withdrawal successful: your new balance is £{michelle.balance:,.2f}.")
# finally:
# print("Would you like to make any further transactions?") # make this a y/n
#
# email = "jackie.woodburn@gmail.com"
# email.split(".")
# from person import Person
# from person_errors import PhoneNumWrongLength, InvalidCountryName, InvalidDOB
#
#
# try:
# jackie = Person("Jackie", "woodburn", "jackie.woodburn@gmail.com", "1995-05-10", "France", "SW19 7AB",
# "07512625617")
# except InvalidDOB as err:
# print(err)
# exit()
# except InvalidCountryName as err:
# print(err)
# exit()
# except PhoneNumWrongLength as err:
# print(err)
# exit()
#
# print(jackie.email_address)
# print(jackie._int_phone_no)
from person_errors import PhoneNumWrongLength, InvalidCountryName, InvalidDOB, AgeRestrictedError
from customer import Customer
try:
suh = Customer("Suh", "Rashid", "hasuh@hotmail.co.uk", "1999-12-31", "Germany", "WD17 3AN", "07958523651", "female")
except InvalidDOB as err:
print(err)
exit()
except InvalidCountryName as err:
print(err)
exit()
except PhoneNumWrongLength as err:
print(err)
exit()
try:
jackie = Customer("Jackie", "woodburn", "jackie.woodburn@gmail.com", "1995-05-10", "France", "SW19 7AB",
"07512625617", "female")
except InvalidDOB as err:
print(err)
exit()
except InvalidCountryName as err:
print(err)
exit()
except PhoneNumWrongLength as err:
print(err)
exit()
print(suh.customer_num, jackie.customer_num)
from stock import stock
try:
jackie.is_customer_old_enough("rum",stock)
except AgeRestrictedError as err:
print(err)
else:
print("Purchase successful.")
|
987,320 | 2025acb95f9b0d79e7671f51ec1153a02b9fc81f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2018 Kevin Ottens <ervin@ipsquad.net>
#
# The authors license this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from argparse import ArgumentParser
from datetime import timedelta
from gitparsing import GitParser
from pandas import DataFrame, DatetimeIndex
from bokeh.plotting import figure, show
from bokeh.models import HoverTool, LinearAxis, Range1d
from bokeh.models.annotations import Legend
from bokeh.models.sources import ColumnDataSource
from bokeh.palettes import Category10
from bokeh.io import output_file
if __name__ == "__main__":
# Parse the args before all else
arg_parser = ArgumentParser(description="A tool for visualizing, month by month the team size and activity",
parents=[GitParser.get_argument_parser()])
arg_parser.add_argument("-t", "--title",
help="Title")
arg_parser.add_argument("-o", "--output",
help="Output file (default is 'result.html')")
args = arg_parser.parse_args()
start_date = args.start
end_date = args.end
output_filename = args.output or "result.html"
parser = GitParser()
parser.add_repositories(args.paths)
log = parser.get_log(start_date, end_date)
log['date'] = log['date'].apply(lambda x: x.date())
log['date'] = DatetimeIndex(log['date']).to_period("W").to_timestamp()
log['date'] = log['date'].apply(lambda x: x - timedelta(days=3))
log_by_date = log.groupby('date')
team_size = DataFrame()
team_size['commit_count'] = log_by_date['id'].count()
team_size['authors_count'] = log_by_date['author_name'].nunique()
smoothed = team_size.rolling(50, center=True, win_type="triang").mean()
team_size['commit_count_smooth'] = smoothed['commit_count']
team_size['authors_count_smooth'] = smoothed['authors_count']
output_file(output_filename)
p = figure(x_axis_type="datetime",
sizing_mode="stretch_both",
active_scroll="wheel_zoom",
title=args.title)
p.xaxis.axis_label = 'Date'
p.yaxis.axis_label = "Commit Count"
p.extra_y_ranges = {'team_range': Range1d(start=0, end=team_size['authors_count'].max())}
p.add_layout(LinearAxis(y_range_name="team_range", axis_label="Team Size"), "right")
p.add_layout(Legend(), "below")
p.add_tools(HoverTool(tooltips=[("Date", "@date{%Y-w%V}"),
("Team Size", "@authors_count"),
("Commit Count", "@commit_count")],
formatters={'date': 'datetime'},
point_policy='snap_to_data'))
p.circle("date", "commit_count", source=ColumnDataSource(team_size),
color=Category10[3][0], fill_alpha=0.1, line_alpha=0.2)
p.circle("date", "authors_count", source=ColumnDataSource(team_size), y_range_name="team_range",
color=Category10[3][1], fill_alpha=0.1, line_alpha=0.2)
p.line("date", "commit_count_smooth", source=ColumnDataSource(team_size),
line_width=2, color=Category10[3][0], legend="Commit Count")
p.line("date", "authors_count_smooth", source=ColumnDataSource(team_size), y_range_name="team_range",
line_width=2, color=Category10[3][1], legend="Team Size")
show(p)
|
987,321 | 00570bc6fabeb13df85974dc2099faf631cb35c6 |
import sys
import box
class MovieHeader(box.FullBox):
def parse(self, buf):
super(MovieHeader, self).parse(buf)
if self.version == 1:
self.creation_time = buf.readint64()
self.modification_time = buf.readint64()
self.timescale = buf.readint32()
self.duration = buf.readint64()
else:
self.creation_time = buf.readint32()
self.modification_time = buf.readint32()
self.timescale = buf.readint32()
self.duration = buf.readint32()
self.rate = buf.readint32()
self.volume = buf.readint16()
buf.skipbytes(2 + 8)
self.matrix = [[buf.readint32() for j in range(3)] for i in range(3)]
buf.skipbytes(24)
self.next_track_id = buf.readint32()
def generate_fields(self):
for x in super(MovieHeader, self).generate_fields():
yield x
from utils import get_utc_from_seconds_since_1904
yield ("creation time", self.creation_time, get_utc_from_seconds_since_1904(self.creation_time).ctime())
yield ("modification time", self.creation_time, get_utc_from_seconds_since_1904(self.modification_time).ctime())
yield ("timescale", self.timescale)
yield ("duration", self.duration)
yield ("rate", "0x%08X" %(self.rate))
yield ("volume", "0x%04X" %(self.volume))
yield ("matrix", self.matrix)
yield ("next track id", self.next_track_id)
class TrackHeader(box.FullBox):
def parse(self, buf):
super(TrackHeader, self).parse(buf)
if self.version == 1:
self.creation_time = buf.readint64()
self.modification_time = buf.readint64()
self.track_id = buf.readint32()
buf.skipbytes(4)
self.duration = buf.readint64()
else:
self.creation_time = buf.readint32()
self.modification_time = buf.readint32()
self.track_id = buf.readint32()
buf.skipbytes(4)
self.duration = buf.readint32()
buf.skipbytes(8)
self.layer = buf.readint16()
self.altgroup = buf.readint16()
self.volume = buf.readint16()
buf.skipbytes(2)
self.matrix = [[buf.readint32() for j in range(3)] for i in range(3)]
self.width = buf.readint32()
self.height = buf.readint32()
def generate_fields(self):
for x in super(TrackHeader, self).generate_fields():
yield x
from utils import get_utc_from_seconds_since_1904
yield ("creation time", self.creation_time, get_utc_from_seconds_since_1904(self.creation_time).ctime())
yield ("modification time", self.modification_time, get_utc_from_seconds_since_1904(self.modification_time).ctime())
yield ("track id", self.track_id)
yield ("duration", self.duration)
yield ("layer", "0x%04X" %(self.layer))
yield ("alternate group", "0x%04X" %(self.altgroup))
yield ("volume", "0x%04X" %(self.volume))
yield ("matrix", self.matrix)
yield ("width", self.width)
yield ("height", self.height)
class MediaHeader(box.FullBox):
    """'mdhd' box: media-level timing and language (ISO/IEC 14496-12 8.4.2)."""
    def parse(self, buf):
        super(MediaHeader, self).parse(buf)
        # Version 1 widens times and duration to 64 bits; version 0 is 32-bit.
        if self.version == 1:
            self.creation_time = buf.readint64()
            self.modification_time = buf.readint64()
            self.timescale = buf.readint32()
            self.duration = buf.readint64()
        else:
            self.creation_time = buf.readint32()
            self.modification_time = buf.readint32()
            self.timescale = buf.readint32()
            self.duration = buf.readint32()
        # Language is a packed 15-bit ISO-639-2/T code; mask off the pad bit.
        self.language = buf.readint16() & 0x7FFF
        buf.skipbytes(2)    # pre_defined
    def generate_fields(self):
        from utils import parse_iso639_2_15bit
        from utils import get_utc_from_seconds_since_1904
        for x in super(MediaHeader, self).generate_fields():
            yield x
        yield ("creation time", self.creation_time, get_utc_from_seconds_since_1904(self.creation_time).ctime())
        yield ("modification time", self.modification_time, get_utc_from_seconds_since_1904(self.modification_time).ctime())
        yield ("timescale", self.timescale)
        yield ("duration", self.duration)
        yield ("language", self.language, parse_iso639_2_15bit(self.language))
class VideoMediaHeader(box.FullBox):
    """'vmhd' box: graphics transfer mode and opcolor for video tracks."""
    def parse(self, buf):
        super(VideoMediaHeader, self).parse(buf)
        self.graphicsmode = buf.readint16()
        # opcolor is three 16-bit values (red, green, blue)
        self.opcolor = [buf.readint16() for _ in range(3)]
    def generate_fields(self):
        for field in super(VideoMediaHeader, self).generate_fields():
            yield field
        yield ("graphics mode", self.graphicsmode)
        yield ("opcolor", self.opcolor)
class SoundMediaHeader(box.FullBox):
    """'smhd' box: stereo balance for audio tracks."""
    def parse(self, buf):
        super(SoundMediaHeader, self).parse(buf)
        self.balance = buf.readint16()  # 8.8 fixed point
        buf.skipbytes(2)    # reserved
    def generate_fields(self):
        for field in super(SoundMediaHeader, self).generate_fields():
            yield field
        yield ("balance", self.balance)
class HintMediaHeader(box.FullBox):
    """'hmhd' box: PDU size and bitrate statistics for hint tracks
    (ISO/IEC 14496-12 8.4.5.2)."""
    def parse(self, buf):
        super(HintMediaHeader, self).parse(buf)
        self.max_pdu_size = buf.readint16()
        self.avg_pdu_size = buf.readint16()
        # Bug fix: maxbitrate/avgbitrate are 32-bit fields in the spec; the
        # old 16-bit reads truncated the values and left the buffer
        # misaligned for everything after this box.
        self.max_bitrate = buf.readint32()
        self.avg_bitrate = buf.readint32()
        buf.skipbytes(4)    # reserved
    def generate_fields(self):
        for x in super(HintMediaHeader, self).generate_fields():
            yield x
        yield ("Max PDU size", self.max_pdu_size)
        yield ("Average PDU size", self.avg_pdu_size)
        yield ("Max bitrate", self.max_bitrate)
        yield ("Average bitrate", self.avg_bitrate)
class HandlerBox(box.FullBox):
    """'hdlr' box: declares the media handler type of the enclosing track."""
    def parse(self, buf):
        super(HandlerBox, self).parse(buf)
        buf.skipbytes(4)    # pre_defined
        self.handler = buf.readstr(4)   # FOURCC, e.g. 'vide', 'soun', 'hint'
        buf.skipbytes(12)   # 3 x 32-bit reserved
        self.consumed_bytes += 20
        # Remainder of the box is a human-readable, NUL-terminated name.
        self.name = buf.read_cstring(self.size - self.consumed_bytes)[0]
    def generate_fields(self):
        for x in super(HandlerBox, self).generate_fields():
            yield x
        yield ("handler", self.handler)
        yield ("name", self.name if len(self.name) else '<empty>')
class SampleEntry(box.Box):
    """Common base for 'stsd' sample entries: six reserved bytes followed by
    a 16-bit data reference index (ISO/IEC 14496-12 8.5.2)."""
    def parse(self, buf):
        super(SampleEntry, self).parse(buf)
        buf.skipbytes(6)    # reserved
        self.data_ref_index = buf.readint16()
        self.consumed_bytes += 8    # 6 reserved + 2 index
    def generate_fields(self):
        for x in super(SampleEntry, self).generate_fields():
            yield x
        yield ("data reference index", self.data_ref_index)
class HintSampleEntry(SampleEntry):
    """Hint-track sample entry: common SampleEntry fields followed by
    protocol-specific data that is skipped, not parsed."""
    def parse(self, buf):
        # Bug fix: parse the common SampleEntry header first so
        # data_ref_index is set — the inherited generate_fields() yields it,
        # and the old override skipped straight past it, leaving the
        # attribute undefined (AttributeError when rendering fields).
        super(HintSampleEntry, self).parse(buf)
        buf.skipbytes(self.size - self.consumed_bytes)
class VisualSampleEntry(SampleEntry):
    """Video sample entry ('stsd' child): dimensions, resolution and
    compressor info (ISO/IEC 14496-12 8.5.2.2)."""
    def parse(self, buf):
        super(VisualSampleEntry, self).parse(buf)
        buf.skipbytes(2 + 2 + 3 * 4)    # pre_defined + reserved words
        self.width = buf.readint16()
        self.height = buf.readint16()
        self.hori_resolution = buf.readint32()  # 16.16 fixed point
        self.vert_resolution = buf.readint32()  # 16.16 fixed point
        buf.skipbytes(4)    # reserved
        self.frame_count = buf.readint16()  # frames per sample
        # Compressor name is a fixed 32-byte field: 1 length byte, then the
        # string, then padding up to 31 bytes.
        compressor_name_length = buf.readbyte()
        self.compressor_name = buf.readstr(compressor_name_length) if compressor_name_length else ''
        buf.skipbytes(32 - compressor_name_length - 1)
        self.depth = buf.readint16()
        buf.skipbytes(2)    # pre_defined
        # Codec-specific boxes (e.g. 'avcC') follow as children.
        self.has_children = True
    def generate_fields(self):
        for x in super(VisualSampleEntry, self).generate_fields():
            yield x
        yield ("width", self.width)
        yield ("height", self.height)
        yield ("horizontal resolution", "0x%08X" %(self.hori_resolution))
        yield ("vertical resolution", "0x%08X" %(self.vert_resolution))
        yield ("frame count", self.frame_count)
        yield ("compressor name", self.compressor_name)
        yield ("depth", self.depth)
class AudioSampleEntry(SampleEntry):
    """Audio sample entry ('stsd' child), tolerant of Apple QuickTime
    (MOV) version 1/2 extensions."""
    def parse(self, buf):
        super(AudioSampleEntry, self).parse(buf)
        # 14496-12 says first eight bits are reserved.
        # Apple QuickTime format (MOV) uses those bytes for version, revision and vendor
        # The size of this box in QT varies according to the version, so we need the version
        self.quicktime_version = buf.readint16()
        buf.skipbytes(6)    # rest of the reserved/vendor area
        self.channel_count = buf.readint16()
        self.sample_size = buf.readint16()
        buf.skipbytes(4)    # pre_defined + reserved
        self.sample_rate = buf.readint32()  # 16.16 fixed point
        if self.quicktime_version == 1:
            # QT version 1 appends four 32-bit compression parameters.
            self.samples_per_pkt = buf.readint32()
            self.bytes_per_pkt = buf.readint32()
            self.bytes_per_frame = buf.readint32()
            self.bytes_per_sample = buf.readint32()
        elif self.quicktime_version == 2:
            # QT version 2 appends a fixed 36-byte extension (not parsed).
            buf.skipbytes(36)
        self.has_children = True
    def generate_fields(self):
        for x in super(AudioSampleEntry, self).generate_fields():
            yield x
        yield ("channel count", self.channel_count)
        yield ("sample size", self.sample_size)
        yield ("sample rate", self.sample_rate, "%d, %d" %(self.sample_rate >> 16, self.sample_rate & 0xFFFF))
class SampleDescription(box.FullBox):
    """'stsd' box: list of codec sample entries, dispatched on the
    enclosing track's handler type."""
    def parse(self, buf):
        super(SampleDescription, self).parse(buf)
        # Walk up to the 'mdia' ancestor to find the 'hdlr' box, whose
        # handler FOURCC decides which entry class to instantiate.
        media = self.find_parent('mdia')
        hdlr = media.find_child('hdlr') if media else None
        handler = hdlr.handler if hdlr else None
        self.entry_count = buf.readint32()
        for i in range(self.entry_count):
            if handler == 'soun':
                self.children.append(AudioSampleEntry(buf))
            elif handler == 'vide':
                self.children.append(VisualSampleEntry(buf))
            elif handler == 'hint':
                self.children.append(HintSampleEntry(buf))
            else:
                # Unknown handler: keep the entry as an opaque box and skip
                # its payload so the buffer stays aligned.
                entry = box.Box(buf)
                self.children.append(entry)
                buf.skipbytes(entry.size - entry.consumed_bytes)
        if len(self.children) != 0:
            self.has_children = True
    def generate_fields(self):
        for x in super(SampleDescription, self).generate_fields():
            yield x
        yield ("entry count", self.entry_count)
class DataEntryUrnBox(box.FullBox):
    """'urn ' data entry: NUL-terminated name and location strings."""
    def parse(self, buf):
        super(DataEntryUrnBox, self).parse(buf)
        # read_cstring returns a tuple whose first element is the string
        self.name = buf.read_cstring()[0]
        self.location = buf.read_cstring()[0]
    def generate_fields(self):
        for field in super(DataEntryUrnBox, self).generate_fields():
            yield field
        for labelled in (("name", self.name), ("location", self.location)):
            yield labelled
class DataEntryUrlBox(box.FullBox):
    """'url ' data entry: a single NUL-terminated location string."""
    def parse(self, buf):
        super(DataEntryUrlBox, self).parse(buf)
        remaining = self.size - self.consumed_bytes
        self.location = buf.read_cstring(remaining)[0]
    def generate_fields(self):
        for field in super(DataEntryUrlBox, self).generate_fields():
            yield field
        yield ("location", self.location)
class DataReferenceBox(box.FullBox):
    """'dref' box: container of 'url '/'urn ' data entry children."""
    def parse(self, buf):
        super(DataReferenceBox, self).parse(buf)
        self.entry_count = buf.readint32()
        self.has_children = True
        # Each entry is itself a box; let the generic dispatcher parse it.
        for i in range(self.entry_count):
            self.children.append(box.Box.getnextbox(buf, self))
    def generate_fields(self):
        for x in super(DataReferenceBox, self).generate_fields():
            yield x
        yield ("entry count", self.entry_count)
class TimeToSampleBox(box.FullBox):
    """'stts' box: run-length table of (sample count, sample delta) pairs."""
    def parse(self, buf):
        super(TimeToSampleBox, self).parse(buf)
        self.entry_count = buf.readint32()
        # Tuple elements evaluate left-to-right, preserving the on-disk
        # count-then-delta order.
        self.entries = [(buf.readint32(), buf.readint32())
                        for _ in range(self.entry_count)]
    def generate_fields(self):
        for field in super(TimeToSampleBox, self).generate_fields():
            yield field
        yield ("entry count", self.entry_count)
        for sample_count, sample_delta in self.entries:
            yield ("sample count", sample_count)
            yield ("sample delta", sample_delta)
class SampleToChunkBox(box.FullBox):
    """'stsc' box: maps runs of chunks to a samples-per-chunk value and a
    sample description index."""
    def parse(self, buf):
        super(SampleToChunkBox, self).parse(buf)
        self.entry_count = buf.readint32()
        self.entries = []
        for _ in range(self.entry_count):
            # On-disk order: first_chunk, samples_per_chunk,
            # sample_description_index (left-to-right evaluation keeps it).
            self.entries.append(
                (buf.readint32(), buf.readint32(), buf.readint32()))
    def generate_fields(self):
        for field in super(SampleToChunkBox, self).generate_fields():
            yield field
        yield ("entry count", self.entry_count)
        for first_chunk, samples_per_chunk, sd_index in self.entries:
            yield ("first chunk", first_chunk)
            yield ("samples per chunk", samples_per_chunk)
            yield ("sample description index", sd_index)
class ChunkOffsetBox(box.FullBox):
    """'stco' box: 32-bit file offset of each chunk."""
    def parse(self, buf):
        super(ChunkOffsetBox, self).parse(buf)
        self.entry_count = buf.readint32()
        self.entries = []
        for _ in range(self.entry_count):
            self.entries.append(buf.readint32())
    def generate_fields(self):
        for field in super(ChunkOffsetBox, self).generate_fields():
            yield field
        yield ("entry count", self.entry_count)
        yield ("chunk offsets", self.entries)
class SyncSampleBox(box.FullBox):
    """'stss' box: sample numbers of the random-access (sync) samples."""
    def parse(self, buf):
        super(SyncSampleBox, self).parse(buf)
        count = buf.readint32()
        self.entry_count = count
        self.entries = [buf.readint32() for _ in range(count)]
    def generate_fields(self):
        for field in super(SyncSampleBox, self).generate_fields():
            yield field
        yield ("entry count", self.entry_count)
        yield ("sample numbers", self.entries)
class SampleSizeBox(box.FullBox):
    """'stsz' box: one constant sample size, or a per-sample size table."""
    def parse(self, buf):
        super(SampleSizeBox, self).parse(buf)
        self.sample_size = buf.readint32()
        self.sample_count = buf.readint32()
        # sample_size == 0 signals variable sizes; a per-sample table follows.
        if self.sample_size == 0:
            self.entries = [buf.readint32() for i in range(self.sample_count)]
        else:
            self.entries = []
    def generate_fields(self):
        for x in super(SampleSizeBox, self).generate_fields():
            yield x
        yield ("sample size", self.sample_size)
        yield ("sample count", self.sample_count)
        if self.sample_size == 0:
            yield ("sample sizes", self.entries)
class CompactSampleSizeBox(box.FullBox):
    """'stz2' box: compact sample-size table with 4-, 8- or 16-bit entries."""
    def parse(self, buf):
        super(CompactSampleSizeBox, self).parse(buf)
        buf.skipbytes(3)    # reserved
        self.field_size = buf.readbyte()    # bits per entry: 4, 8 or 16
        self.sample_count = buf.readint32()
        self.entries = [buf.readbits(self.field_size) for i in range(self.sample_count)]
        # skip padding bits: an odd number of 4-bit entries leaves half a byte
        if self.field_size == 4 and self.sample_count % 2 != 0:
            buf.readbits(4)
    def generate_fields(self):
        for x in super(CompactSampleSizeBox, self).generate_fields():
            yield x
        # Bug fix: this yielded self.sample_size, an attribute this class
        # never sets (AttributeError when rendering); report field_size.
        yield ("field size", self.field_size)
        yield ("sample count", self.sample_count)
        yield ("entries", self.entries)
class MovieExtendsHeader(box.FullBox):
    """'mehd' box: total duration of the fragmented presentation."""
    def parse(self, buf):
        super(MovieExtendsHeader, self).parse(buf)
        # 64-bit duration for version 1, 32-bit otherwise.
        reader = buf.readint64 if self.version == 1 else buf.readint32
        self.fragment_duration = reader()
    def generate_fields(self):
        for field in super(MovieExtendsHeader, self).generate_fields():
            yield field
        yield ("Fragment duration", self.fragment_duration)
class TrackExtendsBox(box.FullBox):
    """'trex' box: per-track defaults applied to movie fragments."""
    def parse(self, buf):
        super(TrackExtendsBox, self).parse(buf)
        # Five consecutive 32-bit fields, read in on-disk order.
        self.track_id = buf.readint32()
        self.default_sample_description_index = buf.readint32()
        self.default_sample_duration = buf.readint32()
        self.default_sample_size = buf.readint32()
        self.default_sample_flags = buf.readint32()
    def generate_fields(self):
        for field in super(TrackExtendsBox, self).generate_fields():
            yield field
        labelled = (
            ("Track ID", self.track_id),
            ("Default sample description index", self.default_sample_description_index),
            ("Default sample duration", self.default_sample_duration),
            ("Default sample size", self.default_sample_size),
            ("Default sample flags", self.default_sample_flags),
        )
        for field in labelled:
            yield field
class AvcCBox(box.Box):
    """'avcC' box: AVCDecoderConfigurationRecord carrying H.264 SPS/PPS
    parameter sets (ISO/IEC 14496-15).

    NOTE(review): uses xrange and str.encode('hex'), so this module is
    Python-2 only.
    """
    def parse(self, buf):
        super(AvcCBox, self).parse(buf)
        # Per 14496-15 these four bytes are configurationVersion,
        # AVCProfileIndication, profile_compatibility, AVCLevelIndication.
        self.configuration_level = buf.readbyte()
        self.profile = buf.readbyte()
        self.profile_compatibility = buf.readbyte()
        self.level = buf.readbyte()
        buf.readbits(6)     # reserved '111111'
        self.len_minus_1 = buf.readbits(2)  # NAL length field size minus one
        buf.readbits(3)     # reserved '111'
        self.sps = []
        num_of_sps = buf.readbits(5)
        # Each parameter set: 16-bit length followed by that many raw bytes.
        for x in xrange(num_of_sps):
            sps_len = buf.readint16()
            self.sps.append(buf.readstr(sps_len))
        self.pps = []
        num_of_pps = buf.readbyte()
        for x in xrange(num_of_pps):
            pps_len = buf.readint16()
            self.pps.append(buf.readstr(pps_len))
        self.has_children = False
    def generate_fields(self):
        for x in super(AvcCBox, self).generate_fields():
            yield x
        # NOTE(review): "Confiuration" typo kept — it is a displayed label,
        # so fixing it would change program output.
        yield ("Confiuration level", self.configuration_level)
        yield ("Profile", self.profile)
        yield ("Profile compatibility", self.profile_compatibility)
        yield ("Level", self.level)
        yield ("Length size minus 1", self.len_minus_1)
        for sps in self.sps:
            yield ("SPS", sps.encode('hex'))
        for pps in self.pps:
            yield ("PPS", pps.encode('hex'))
# Dispatch table: box FOURCC -> parser class. The generic box reader looks
# up a box's type here to instantiate the matching subclass.
boxmap = {
    'mvhd' : MovieHeader,
    'tkhd' : TrackHeader,
    'mdhd' : MediaHeader,
    'vmhd' : VideoMediaHeader,
    'smhd' : SoundMediaHeader,
    'hmhd' : HintMediaHeader,
    'hdlr' : HandlerBox,
    'stsd' : SampleDescription,
    'dref' : DataReferenceBox,
    'stts' : TimeToSampleBox,
    'stsc' : SampleToChunkBox,
    'stco' : ChunkOffsetBox,
    'stss' : SyncSampleBox,
    'stsz' : SampleSizeBox,
    'stz2' : CompactSampleSizeBox,
    'url ' : DataEntryUrlBox,
    'urn ' : DataEntryUrnBox,
    'mehd' : MovieExtendsHeader,
    'trex' : TrackExtendsBox,
    'avcC' : AvcCBox,
    }
|
987,322 | c8d837e06ec1372891e72ca8fa74f2ab85e1b363 | from django.contrib import admin
# Register your models here.
from weather_hour.models import City, WeatherHour
@admin.register(City)
class CityAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for City.
    list_display = ("code", "name", "url", "parent")
@admin.register(WeatherHour)
class WeatherHourAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for hourly weather records.
    list_display = ("create_at", "date", "time", "city", "temperature",
                    "humidity", "pressure", "windDirection", "windSpeed",
                    "rain1h", "rain24h", "rain12h", "rain6h")
|
987,323 | 9a72788b6cebd2c3e2dfc76bcdb3cb3b7f9992ff | import sys
import click
import requests
from fuzzywuzzy import process
VERSION = '0.0.5'
# -h로도 help를 볼 수 있도록 한다.
OPTIONS = dict(help_option_names=['-h', '--help'])
HOST_URL = 'https://gitignore.io/api/'
FUZZY_THRESHOLD = 80
@click.group(invoke_without_command=True, context_settings=OPTIONS)
@click.option('-v', '--version', is_flag=True, help='Show version')
def cli(version):
    """giig command-group entry point; -v/--version prints the version and exits."""
    if version:
        print_version()
        sys.exit(0)
def print_version():
    """Echo the tool's version string."""
    click.echo('giig version: ' + VERSION)
@cli.command(name='list', help='Show a list of the ignorable items')
def list_command():
    """Print the full list of ignorable items fetched from gitignore.io."""
    click.echo(get_list())
@cli.command(help='Fuzzy search in ignorable items')
@click.argument('keyword')
def search(keyword):
    """Fuzzy-search the gitignore.io item list for KEYWORD and print matches."""
    # NOTE(review): click makes KEYWORD mandatory, so this branch should be
    # unreachable; if it ever fires, execution still falls through below.
    if not keyword:
        click.echo('No args, args required')
    click.echo('Finding... ' + str(keyword))
    result_list = get_result_list(keyword)
    if len(result_list) == 0:
        click.echo('No results found')
        sys.exit(0)
    click.echo(result_list)
@cli.command(help='Show api results')
@click.argument('items', nargs=-1)
def show(items):
    """Print the gitignore content generated for the requested ITEMS."""
    items = validate_items(items)
    check_item_length(items)
    click.echo(get_gitignore(items))
@cli.command(help='Make .gitignore')
@click.argument('items', nargs=-1)
@click.option('-a', '--append', is_flag=True, help='append to .gitignore')
def make(items, append):
    """Write (or, with -a, append to) .gitignore for the validated ITEMS.

    Bug fix: validate *before* touching the file. The old code opened
    .gitignore in 'w' mode first, truncating an existing file even when
    validation failed and check_item_length() aborted the command. Also
    uses a context manager so the handle is closed on any error.
    """
    items = validate_items(items)
    check_item_length(items)
    click.echo('Making... ' + str(items))
    mode = 'a' if append else 'w'
    with open('.gitignore', mode) as file:
        file.write(get_gitignore(items))
    click.echo('Done!! ' + str(items))
def get_list():
    """Fetch the raw list of ignorable items from the gitignore.io API."""
    return requests.get(HOST_URL + 'list').text
def get_gitignore(items):
    """Fetch the generated gitignore text for the given item names."""
    return requests.get(HOST_URL + ','.join(items)).text
def get_result_list(keyword):
    """Return item names fuzzy-matching *keyword* above FUZZY_THRESHOLD."""
    search_list = get_search_list()
    return [result[0] for result in process.extract(keyword, search_list) if result[1] > FUZZY_THRESHOLD]
def get_search_list():
    """Flatten the API list (newline- and comma-separated) into item names."""
    return ','.join(get_list().split('\n')).split(',')
def validate_items(items):
    """Filter *items* down to the names known to gitignore.io."""
    known = get_search_list()
    return [item for item in items if item in known]
def check_item_length(items):
    """Exit with status 1 when no valid items remain after validation."""
    if not items:
        click.echo('Please check your input')
        sys.exit(1)
if __name__ == '__main__':
cli()
|
987,324 | b7711d0e7d856f5a9ab9b3529843a0bd9ba6dabd | #! /Users/tseccull/anaconda2/bin/python
# SCRIPT THAT USES THE EXTINCTION TABLE PROVIDED FOR THE FORS@ INSTRUMENT TO CORRECT ANY EXTICNTION IN THE SPECTRUM
# CAUSED BY AIRMASS. THIS SCRIPT REQUIRES THE PRESENCE OF THE 'fors2_extinct_table.fits'FILE IN THE DIRECTORY
# CONTAINING THE SPECTRA TO BE CORRECTED.
import astropy.io.fits as fits
import glob
import matplotlib.pyplot as plt
import scipy.interpolate as interp
with fits.open('fors2_extinct_table.fits') as han:
print han.info()
extab = han[1].data
files = glob.glob('*LSS*.fits')
for i in files:
with fits.open(i) as han:
head = han[0].header
wav = han[0].data[0]
flux = han[0].data[1]
if head['INSTRUME'] == 'FORS2' and head['HIERARCH GME UTC EXTRACTION DATE']:
quals = han[0].data[3]
interptab = interp.interp1d(extab['WAVE'], extab['EXTINCTION'])
interpextab = interptab(wav)
corrspec = flux * (10 ** (0.4 * head['HIERARCH ESO TEL AIRM START'] * interpextab))
plt.plot(wav, flux, wav, corrspec)
plt.show()
hdu = fits.PrimaryHDU([wav, corrspec, quals], header=head)
hdulist = fits.HDUList(hdu)
hdulist.writeto('_'.join(i.split('_')[0:-1]) + '_EXT_' + i.split('_')[-1])
hdulist.close()
|
987,325 | 490f762f16e7e268de10194d5dd04eba4a2423f3 |
def config_samples(samples) :
import ROOT
samples.AddSample('Data' , path='Data' ,legend_name='Data, background subtracted', isActive=True, plotColor=ROOT.kBlack, isData=True )
samples.AddSample('Wgg' , path='Wgg' ,legend_name='W#gamma#gamma Signal', isActive=True, plotColor=ROOT.kRed-3, scale=1.0, isSignal=False )
samples.AddSample('ZggFSR' , path='ZggFSR' ,legend_name='Z#gamma#gamma', isActive=False, plotColor=ROOT.kOrange-2, displayErrBand=True )
samples.AddSample('OtherDiPhoton' , path='OtherDiPhoton' ,legend_name='Other di-photon', isActive=False, plotColor=ROOT.kGray, displayErrBand=True )
#samples.AddSample('Zg' , path='Zg' ,legend_name='Z#gamma', isActive=True, plotColor=ROOT.kOrange+2, displayErrBand=True )
samples.AddSample('Top' , path='Top' ,legend_name='Top', isActive=False, plotColor=ROOT.kGreen, displayErrBand=True )
samples.AddSample('MultiBoson' , path='MultiBoson' ,legend_name='Multi Boson', isActive=False, plotColor=ROOT.kBlue, displayErrBand=True )
samples.AddSample('EleFake' , path='EleFake' ,legend_name='Electron fake estimate', isActive=False, plotColor=ROOT.kGreen+1 )
samples.AddSample('JetFake' , path='JetFake' ,legend_name='Jet fake estimate', isActive=False, plotColor=ROOT.kBlue-7, displayErrBand=True)
#samples.AddSample('MCBkg' , path='MCBkg' ,legend_name='MC background', isActive=True, isSignal=True, plotColor=ROOT.kGray+2 )
samples.AddSampleGroup( 'Background', legend_name='Background',
input_samples = ['ZggFSR', 'JetFake'],
plotColor = ROOT.kBlue,
isActive=False
)
def print_examples() :
pass
|
987,326 | 648b8fb6fa19346665c56500d12df1d574070b92 | import os
import datetime
from back_end.get_fault import faults_todb
from back_end.get_station import get_base_info
from back_end.get_status import delay_missing_to_db, get_delay_missing_list
from back_end.get_aws_value import get_aws_value, get_lastest_value
from AWSMonitor.models import MissingDelay
from back_end.get_status import get_online, get_online_status
from back_end.my_timer import my_timer_interface
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MySite.settings")
import django
if django.VERSION >= (1, 7):#自动判断版本
django.setup()
if __name__ == "__main__":
#获取站点 仪器 配件信息到数据库
#get_base_info()
# 获取错误站点列表 从10.69.10.61
#faults_todb()
#获取心调包时间和最后通讯时间
#get_online()
# 获取各站点的在线状态 ok:正常 ST:延迟 Off关闭 ‘’被动
#get_online_status()
get_delay_missing_list(datetime.datetime.now())
|
987,327 | e0b24143e33392c45e53239cc4afc3bcfe1bdfe9 | NUM_MONTHS = 12
def main():
    """Read one rainfall value per month, then report average, max and min.

    Bug fixes versus the original: the input used to overwrite the `rain`
    list with a single float each iteration, the average was computed
    inside the loop from one value, and max/min were taken over a
    one-element list holding only the last month's rainfall.
    """
    # collect one reading per month
    rain = []
    for index in range(NUM_MONTHS):
        print("Enter the rainfall for month ", index + 1, ": ", sep="", end="")
        rain.append(float(input()))

    ave_rain = sum(rain) / NUM_MONTHS
    print("The average rain fall for the year is: ", ave_rain)
    print('The highest month of rain ', max(rain))
    print('The lowest month of rain ', min(rain))

main()
|
987,328 | 2ab344d6e7f1421d50ba399e5fe1c4bb13df7117 |
##Tick Tac Toe##
from collections import Counter
from time import sleep
import random
import os
openSpaces=[]
Grid = [
0,0,0,
0,0,0,
0,0,0
]
winScenario = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[1, 4, 7],
[2, 5, 8],
[3, 6, 9],
[1, 5, 9],
[3, 5, 7],
]
##RESET##
def reset():
clear()
print"reseting..."
sleep(1)
count=0
for a in Grid:
Grid[count]=0
count+=1
clear()
gameStart()
def clear():
os.system( 'cls' )
def checkWin():
for a in winScenario:
if Grid[a[0]-1]==Grid[a[1]-1]==Grid[a[2]-1]==1:
print(" ")
print("Player one wins!")
sleep(3)
reset()
elif Grid[a[0]-1]==Grid[a[1]-1]==Grid[a[2]-1]==2:
print(" ")
print("Player two wins!")
sleep(3)
reset()
count=0
for a in Grid:
if a > 0:
count+=1
if count==9:
print"Nobody won! Ha!"
sleep(3)
reset()
def displayGrid(): ##Update the Grid
clear()
count = 0
print" "
print"TIC TAC TOE "
print("-------------")
for spot in (Grid):
count+=1
Display = [(count),"X","O"]
num = spot
if count == 3 or count == 6 or count == 9: ##If it's the last in a row, start new line
print("|" + " " + str((Display[num])) +" " + "| ")
print("-------------")
else:
print("| " + str((Display[num])) ),
checkWin()
def getOpenSpaces():
    """Return the 1-based board positions that are still empty."""
    return [pos for pos, cell in enumerate(Grid, 1) if cell == 0]
def compTurn(): ##Computer players turn
    # Bug fix: each compPlace() call below recurses into the game loop; the
    # original fell through afterwards and also made the random move at the
    # bottom, giving the computer extra discs. Each branch now returns.
    count = 0
    sleep(.5)
    for a in winScenario:
        count+=1
        ####Check if computer can win####
        if Grid[a[0]-1]==Grid[a[1]-1]==2 and Grid[a[2]-1]==0:
            placement = a[2]
            print (placement)
            compPlace(placement)
            return
        elif Grid[a[0]-1]==Grid[a[2]-1]==2 and Grid[a[1]-1]==0:
            placement = a[1]
            print (placement)
            compPlace(placement)
            return
        elif Grid[a[1]-1]==Grid[a[2]-1]==2 and Grid[a[0]-1]==0:
            placement = a[0]
            print (placement)
            compPlace(placement)
            return
        ####Check if enemy is about to win####
        elif Grid[a[0]-1]==Grid[a[1]-1]==1 and Grid[a[2]-1]==0:
            placement = a[2]
            print (placement)
            compPlace(placement)
            return
        elif Grid[a[0]-1]==Grid[a[2]-1]==1 and Grid[a[1]-1]==0:
            placement = a[1]
            print (placement)
            compPlace(placement)
            return
        elif Grid[a[1]-1]==Grid[a[2]-1]==1 and Grid[a[0]-1]==0:
            placement = a[0]
            print (placement)
            compPlace(placement)
            return
    ##otherwise, choose a random space##
    openSpaces = getOpenSpaces()
    placement = random.choice(openSpaces)
    sleep(.5)
    compPlace(placement)
def compPlace(placement):
Grid[int(placement)-1]=2
displayGrid()
print" "
print("Computer placed circle at " + (str(placement)))
sleep(1)
displayGrid()
turnStart(True,1)
def playerturn(player):
print" "
print ("It is player "+(str(player)) + "'s turn")
sleep(0.5)
placement = input("Where do you want to place a marker?")
try:
placementSpot = Grid[int(placement)-1]
if Grid[int(placement)-1]>0:
displayGrid()
print" "
print"You cannot place there! Pick again"
sleep(1.5)
displayGrid()
playerturn(player)
else:
Grid[int(placement)-1]=int(player)
displayGrid()
print" "
print("Player "+ str(player) + " placed at " + (str(placement)))
print" "
sleep(1)
displayGrid()
except:
checkResponse(placement)
playerturn(player)
def turnStart(computerbol, playerStart): ##computerbol is a boolean value that determines if you are playing with a cpu or not
if playerStart==1 and computerbol==False:
playerturn(1)
playerturn(2)
turnStart(computerbol, playerStart)
elif playerStart==2 and computerbol==False:
playerturn(2)
playerturn(1)
turnStart(computerbol, playerStart)
if computerbol == True: ##If computer player is selected
if playerStart==2:
print" "
print("It is the computer's turn")
print" "
compTurn()
playerturn(1)
elif playerStart==1:
playerturn(1)
print" "
print("It is the computer's turn")
print" "
compTurn()
turnStart(computerbol, playerStart)
def checkResponse(answer):
Commands = {"r" : reset, }
##Easter egg respones##
Responses = {"hello" : "hi", "hi" : "hello"}
if answer.lower() in Commands:
Commands[answer]()
elif answer.lower() in Responses:
print(Responses[answer])
sleep(3)
displayGrid()
else:
displayGrid()
print" "
print"Invalid input. Nice try."
sleep(1.5)
displayGrid()
def gameStart():
print" "
print"Tic Tac Toe"
print" "
print"In game, type r to reset"
print" "
answer = input("Do you want to play with a computer player? Answer Y/N")
playerStart = random.randint(1,2) ##Deterine who will start the game
if answer.lower() in ("y","yes"):
if playerStart == 2:
print("The Computer will start.")
sleep(1)
else:
print("Player 1 will start.")
sleep(1)
displayGrid()
turnStart(True, playerStart)
elif answer.lower() in ("n","no"):
clear()
print("Player "+str(playerStart)+" will start.")
sleep(1)
displayGrid()
turnStart(False, playerStart)
else:
clear()
gameStart()
gameStart()
|
987,329 | 5e8ec4c6a0c43901b3cc506a995684f690f9afe8 | from board import Board
import unittest
from random import choice
class Service:
'''Class that contains the method needed for performing the services required by the problem.'''
def __init__(self, board):
'''
Constructor for the Service class.
:param board: an instance of the Board class, representing the board of the game
'''
self._board = board
@property
def board(self):
'''
Property representing the getter for the board.
:return:
'''
return self._board
def __str__(self):
'''
Overriding the str method.
:return: the board represented as a string, for printing on the screen
'''
return str(self._board)
def player_move(self, column):
'''
Function for adding a disc on the board on a position chosen by the human player.
:param column: the column on which the player wishes to add a disc
:return:
'''
self._board.move(column, "X")
def computer_move(self):
'''Function for implementing the computer player. It is going to move to win
the game whenever possible and to block the human player’s attempts at 1-move victory,
whenever possible. Otherwise, it chooses a random column from those which are not full and places a disc.'''
possible_moves = self._board.get_possible_moves()
'''Tries to win'''
for move in possible_moves:
board_copy = self._board.copy()
board_copy.move(move, "O")
if board_copy.win() == True:
self._board.move(move, "O")
return
'''Blocks the player.'''
for move in possible_moves:
board_copy = self._board.copy()
board_copy.move(move, "X")
if board_copy.win() == True:
self._board.move(move, "O")
return
self._board.move(choice(possible_moves), "O")
class Test_Service(unittest.TestCase):
def setUp(self):
self.board = Board()
self.service = Service(self.board)
def tearDown(self):
self.board = None
self.service = None
def test_move_human_player(self):
self.service.player_move(1)
board = self.service.board
self.assertEqual(board._data[5][1], "X")
def test_computer_move(self):
for j in range(0, 3):
self.service.player_move(j)
self.service.computer_move()
board = self.service.board
self.assertEqual(board._data[5][3], "O")
board.move(4, "O")
board.move(5, "O")
self.service.computer_move()
self.assertTrue(board.win()) |
987,330 | 8c3fb69b57984d80b9266fe42fb9823471e081c8 | from timezone_field import TimeZoneFormField
from django.forms import BooleanField, CharField, PasswordInput, ModelMultipleChoiceField
from django.contrib.auth import get_user_model
from core.forms import ModalForm, create_form
from .models import DepartmentGroup
from core.forms import TagSelect
_user = get_user_model()
class DepartmentGroupForm(ModalForm):
local_name = CharField(label='Name')
class Meta:
model = DepartmentGroup
fields = ['local_name']
class UserGroupsField(ModelMultipleChoiceField):
widget = TagSelect
department_prefix = False
def __init__(self, *args, **kwargs):
kwargs['queryset'] = None
super(UserGroupsField, self).__init__(*args, **kwargs)
self.help_text = ''
def label_from_instance(self, obj):
if self.department_prefix:
return "%s %s" % (obj.department.name, obj.local_name)
else:
return obj.local_name
class UserForm(ModalForm):
email = CharField(required=True)
password = CharField(widget=PasswordInput(render_value=False), required=False, min_length=8)
name = CharField(label='Name')
groups = UserGroupsField()
def set_groups(self, groups):
self.fields['groups'].queryset = groups
class Meta:
model = _user
fields = ['email', 'password', 'name', 'groups']
class UserSystemForm(UserForm):
is_superuser = BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(UserSystemForm, self).__init__(*args, **kwargs)
self.fields.keyOrder = [
'email',
'password',
'name',
'groups',
'is_superuser']
class Meta:
model = _user
fields = ['email', 'password', 'name', 'groups', 'is_superuser']
class UserProfileForm(ModalForm):
email = CharField(required=True)
name = CharField(label='Name')
timezone = TimeZoneFormField()
class Meta:
model = _user
fields = ['email', 'name', 'timezone']
class UserPasswordForm(ModalForm):
    """Modal form for changing a user's password, with a confirmation field."""
    password = CharField(widget=PasswordInput(render_value=False), required=False, min_length=8)
    password2 = CharField(widget=PasswordInput(render_value=False), required=False, min_length=8,
                          label="Repeat password")

    class Meta:
        model = _user
        fields = ['password']

    def is_valid(self):
        """Validate the form, additionally requiring both passwords to match."""
        valid = super(UserPasswordForm, self).is_valid()
        if not valid:
            return valid
        if self.cleaned_data['password'] != self.cleaned_data['password2']:
            # Bug fix: use add_error() so the message is recorded as a
            # proper non-field error; assigning a bare string directly into
            # self._errors bypassed Django's ErrorList handling and broke
            # template rendering of errors.
            self.add_error(None, 'Passwords do not match')
            return False
        return True
def account_create_form(name, request, id, args={}):
form_objects = {
'group': DepartmentGroupForm,
'user': UserForm,
'usersystem': UserSystemForm,
'user_profile': UserProfileForm,
'user_password': UserPasswordForm
}
form = create_form(form_objects, name, request.POST, id, args)
return form |
987,331 | 0ebb6bc4994eaf14601d1004df8ef874a261795c | import pygame
import os
import sys
import imgctrl as im
sys.path.append('C:\\Users\\YoungWoo\\Desktop\\git\\python-study\\src\\yw')
import yw_keyboard as key
pygame.init()
current_path = os.path.dirname(__file__)
img_path = os.path.join(current_path, "images")
background = im.ImageControl(os.path.join(img_path, 'background.png'))
stage = im.ImageControl(os.path.join(img_path, 'stage.png'))
character = im.ImageControl(os.path.join(img_path, 'character.png'))
# init position
character.position[1] = background.height - character.height - stage.height
character.rect[1] = background.height - character.height - stage.height
stage.position[1] = background.height - stage.height
weapons = []
balloons = []
origin_ball = im.Balls(0, 0, 0)
balloons.append(origin_ball)
clock = pygame.time.Clock()
tick = 35
character.speed = 20
weapon_speed = 25
screen = pygame.display.set_mode((background.width, background.height))
v_accel = [0, 0]
running = True
input_x = 0
input_y = 0
# Font
game_font = pygame.font.Font(None, 40)
total_time = 30
start_tick = pygame.time.get_ticks()
game_result = 'Game Over'
while running:
dt = clock.tick(tick) # 초당 프레임
key_input = key.figure_keboard(input_x, input_y)
input_x = key_input[0]
input_y = key_input[1]
input_weapon = key_input[2]
if key_input == [999, 999]:
running = False
# 키보드 입력
if key_input[0] > 0:
if character.position[0] + character.speed < background.width - character.speed:
character.movement(key_input[0] * character.speed, 0)
elif key_input[0] < 0:
if character.position[0] > 0:
character.movement(key_input[0] * character.speed, 0)
# 무기
if key_input[2] == 1:
weapon = im.ImageControl(os.path.join(img_path, 'weapon.png'))
weapon.position[0] = character.position[0] + (character.width - weapon.width) / 2
weapon.position[1] = character.position[1]
weapons.append(weapon)
for i in weapons:
if i.position[1] < 0:
weapons.remove(i)
else:
i.movement(0, weapon_speed * -1)
# 공 위치 정의
for i in balloons:
i.process()
i.calc()
# 출력
screen.blit(background.image, background.position)
for i in weapons:
screen.blit(i.image, i.position)
for i in balloons:
screen.blit(i.image, i.position)
screen.blit(stage.image, stage.position)
screen.blit(character.image, character.position)
# 경과시간
elapsed_time = (pygame.time.get_ticks() - start_tick) / 1000
timer = game_font.render("Time : {}".format(int(total_time - elapsed_time)), True, (200, 0, 255))
screen.blit(timer, (10, 10))
if total_time - elapsed_time <= 0:
game_result = 'Time Over'
running = False
for i in balloons:
if i.rect.colliderect(character.rect):
running = False
for j in weapons:
if i.rect.colliderect(j.rect):
return_hits = i.hits()
if i.size >= 4:
balloons.remove(i)
else:
newballoon = im.Balls(i.position[0], i.position[1], i.size, i.speed_x)
balloons.append(newballoon)
weapons.remove(j)
else:
continue
break
if len(balloons) == 0:
game_result = 'Mission Complete'
running = False
pygame.display.update() # 게임화면 다시 그리기
# 충돌체크
msg = game_font.render(game_result, True, (255, 255, 0))
screen.blit(msg, (300, 150))
pygame.display.update()
pygame.time.delay(1000)
pygame.quit()
|
987,332 | 3386d69cc0095347e17817908d60835ca44a1014 | from math import exp
# the Poission distribution when:
# if we have an average of success in specific region "lambda"
# and we have specific number of scuccess in this specific region K
# them we easly apply this for our random var p(x) = (lambda ^ k) * exp(-lambda) / factorial(K)
#____________ Task _____________
# A random variable, X , follows Poisson distribution with mean of 2.5 . Find # the probability with which the random variable is equal to 5 .
def fact(k):
    """Return k! computed by simple recursion (base case: 0! == 1)."""
    return 1 if k == 0 else k * fact(k - 1)
lambda_ = float(input())
k = int(input())
# Bug fix: the module does `from math import exp`, so the name `math` is
# never bound — calling math.exp() raised NameError. Use the imported exp.
poisson_dist = (lambda_ ** k * exp(-lambda_)) / fact(k)
print(round(poisson_dist, 3))
987,333 | 60e03c8a7052daf7fc13488aaf642c54d0ac5f97 | DB_NAME = 'records.db' |
987,334 | 6934ebb305ddc3824dda2e832d58b1009560106f | import subprocess
import time
import requests
from subprocess import Popen
from urllib3.exceptions import MaxRetryError
from log import log
kubernetes_connection_process = None
def kubernetes_request(query):
    """GET `query` against the local Kubernetes API proxy.

    If the request fails, restart the `kubectl proxy` tunnel and retry,
    up to 100 attempts.

    Returns the `requests` response object, or None when every attempt
    failed.
    """
    global kubernetes_connection_process
    for _ in range(100):
        try:
            return requests.get(query)
        except requests.exceptions.RequestException as e:
            print(e)
            log.info("Kubernetes API connection died, reconnecting")
            # Bug fix: the original rebound a typo'd global named
            # `prometheus_connection_process`, so the module-level
            # `kubernetes_connection_process` handle was never updated.
            kubernetes_connection_process = Popen(["kubectl", "proxy", "--port=8080"])
            time.sleep(1)
    # all retries exhausted
    return None
|
987,335 | e028238c8da2f70c35693b198f61e6f6845c86e3 | from django.db import models
from django.conf import settings
from products.models import Product
from decimal import *
from django.db.models.signals import pre_save, post_save, m2m_changed
User = settings.AUTH_USER_MODEL
class CartManager(models.Manager):
    """Manager that resolves or creates the session-scoped Cart."""
    def new_or_get(self,request):
        """Return (cart, created) for this request.

        Looks the cart up via the `cart_id` stored in the session; creates a
        fresh cart (and stores its id in the session) when none is found.
        """
        cart_id = request.session.get("cart_id", None) # set the cart id based on request session
        qs = self.get_queryset().filter(id = cart_id)
        if qs.count() == 1:
            # Cart is found
            new_obj = False
            cart_obj = qs.first()
            # adopt an anonymous cart once the user logs in
            if request.user.is_authenticated and cart_obj.user is None:
                cart_obj.user = request.user
                cart_obj.save()
        else:
            # cart is not found so make a new one
            cart_obj = Cart.objects.new(user = request.user)
            new_obj = True
            print("New cart successfully created")
        request.session['cart_id'] = cart_obj.id
        return cart_obj, new_obj
    def new(self, user=None):
        """Create a Cart, attaching `user` only when it is an authenticated user."""
        user_obj = None
        if user is not None:
            if user.is_authenticated:
                user_obj = user
        return self.model.objects.create(user = user_obj)
class Cart(models.Model):
    """Shopping cart; totals are maintained by the signal receivers in this module."""
    # owner; nullable so anonymous sessions can hold a cart
    user = models.ForeignKey(User, null= True, blank = True,on_delete=models.DO_NOTHING,)
    products = models.ManyToManyField(Product, blank = True)
    # sum of product prices, recomputed by the m2m_changed receiver
    subtotal = models.DecimalField(default = 0.00, max_digits = 8, decimal_places = 2)
    # derived from subtotal by the pre_save receiver
    total = models.DecimalField(default = 0.00, max_digits = 8, decimal_places = 2)
    update = models.DateTimeField(auto_now = True)
    timestamp = models.DateTimeField(auto_now_add = True)
    objects = CartManager()
    def __str__(self):
        return str(self.id)
def m2m_changed_cart_receiver(sender, instance, action, *args, **kwargs):
    """Recompute Cart.subtotal whenever its products m2m relation changes.

    Only reacts to the post-change signals; saves the cart only when the
    subtotal actually differs, to avoid a redundant write.
    """
    if action in ('post_add', 'post_remove', 'post_clear'):
        new_subtotal = sum(item.price for item in instance.products.all())
        if instance.subtotal != new_subtotal:
            instance.subtotal = new_subtotal
            instance.save()
m2m_changed.connect(m2m_changed_cart_receiver, sender = Cart.products.through)
def pre_save_cart_receiver(sender, instance, *args, **kwargs):
    """Before saving a Cart, derive `total` from `subtotal`."""
    if instance.subtotal>0:
        # NOTE(review): this adds a flat 1.08 to the subtotal; if 1.08 was
        # meant as an 8% tax multiplier, this should be `* 1.08` -- confirm.
        instance.total = float(instance.subtotal) + float(1.08)
    else:
        instance.total = 0.00
pre_save.connect(pre_save_cart_receiver, sender = Cart)
|
987,336 | 2a15b910253852e899f21e898efcfe29ab56f3b4 | import json
from salesforce_bulk import SalesforceBulk
from salesforce_bulk.util import IteratorBytesIO
from time import sleep
from salesforce_bulk import CsvDictsAdapter
import pandas as pd
import unicodecsv
import config as cfg
#Authentication
bulk = SalesforceBulk(username=cfg.USERNAME,
                      password=cfg.PASSWORD,
                      security_token=cfg.SECURITY_KEY, sandbox=True)
#Source CSV File path for Account
input_file = "/home/baadmin/NCT_ETL/input_files/pg_extract_prd/PaymentProfileId_sf.csv"
#Target SFDC Object name
target_obj = "cpm__Payment_Profile__c"
# Mapping of Input csv Fields to SalesForce Fields
sf_fields = ['DirectDebitKey__c', 'Contact_Key__c', 'cpm__Contact__c', 'Payment_Profile_Key__c', 'Id']
# Extract the data from salesforce and save it to csv
job = bulk.create_query_job(target_obj, contentType='CSV')
sql = "SELECT " + ",".join(sf_fields) + " FROM " + target_obj
batch = bulk.query(job, sql)
bulk.close_job(job)
# poll until the asynchronous bulk query batch finishes
while not bulk.is_batch_done(batch):
    sleep(10)
# Append every result set to the local CSV.
# NOTE(review): header=True on every append writes a header row per result
# chunk, so a multi-chunk export contains repeated header lines -- confirm.
for result in bulk.get_all_results_for_query_batch(batch):
    reader = unicodecsv.DictReader(result, encoding='utf-8')
    with open(input_file, 'a') as f:
        pd.DataFrame(list(reader)).to_csv(f, header=True)
|
987,337 | 6d872acaca9cbd404cd949bd21ee61a358da4c9d | from torchvision.transforms import ToTensor, Compose, Grayscale, ToPILImage
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
##################
# simple composition of transforms
# NOTE(review): the leading ToTensor()+ToPILImage() pair is a round-trip
# back to PIL; Grayscale() can likely consume the original PIL image
# directly -- confirm before simplifying.
transforms = Compose([ToTensor(),
                      ToPILImage(),
                      Grayscale(),
                      ToTensor()])
cifar = CIFAR10(root='./../data', train=True, transform=transforms)
image_size = 1024  # NOTE(review): unused below -- presumably leftover
plt.ion()
# NOTE(review): ioff() immediately cancels ion(); interactive mode ends up off
plt.ioff()
dl = DataLoader(cifar, batch_size=100, shuffle=True)
# show a single 10x10 grid of grayscale CIFAR10 images, then stop
for images, labels in dl:
    fig, ax = plt.subplots(figsize=(16, 16))
    ax.set_xticks([])
    ax.set_yticks([])
    ax.imshow(make_grid(images[:100], 10).permute(1, 2, 0))
    fig.show()
    break
|
987,338 | a3510cbb14c9ea54d7e3786616b0edda621ca96b | from contextual_bandit import *
class ContextualEpsilonGreedy(ContextualBandit):
    """Epsilon-greedy contextual bandit with multiplicative epsilon decay."""
    def __init__(self, policy_args, model_args):
        super().__init__(policy_args=policy_args, model_args=model_args)
        # exploration probability and its per-update decay factor
        self.p = policy_args["p"]
        self.decay = policy_args["decay"]
        self.policy_name = "ContextualEpsilonGreedy" + "(p: {}, decay: {}, oracle: {})".format(self.p, self.decay, model_args["class_model_name"])
    def compute_score(self, arm, context, normalising_factor):
        """Score `arm` for `context` with its per-arm oracle model, then
        rescale via augment_prediction (provided by the ContextualBandit base)."""
        score = self.oracles[arm]["model"].predict(context)
        score = self.augment_prediction(arm=arm, prediction=score, normalising_factor=normalising_factor)
        return score
    def update(self, arm, event):
        """Forward the observation to the base class, then decay epsilon."""
        ContextualBandit.update(self, arm=arm, event=event)
        self.p *= self.decay
|
987,339 | dd1621a5815ee04cf0dd81293300f13c5ea22c72 | import random,time
from autocorrect import spell
# from __future__ import division
# from __future__ import print_function
import json
import sys
import argparse
import cv2,os
import editdistance
import numpy as np
import random
import pandas as pd
import tensorflow as tf
from flask import jsonify
from keras.models import Sequential,Model
from keras.layers import LSTM,Bidirectional,Dense,Activation,Lambda,Input
import keras.backend as K
from keras.optimizers import Adam, SGD, RMSprop,Adadelta
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import ModelCheckpoint
charset = u' !@#><~%&\$^*+,-./0123456789:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
maxstrokeslen = 500
outputtextlen = len(charset)+1
input_layer = Input((maxstrokeslen, 3))
X = Bidirectional( LSTM(units = 512,return_sequences = True) ) (input_layer)
X = Bidirectional( LSTM(units = 512,return_sequences = True) ) (X)
X = Dense(outputtextlen)(X)
X = Activation('softmax', name='softmax')(X)
test_model = Model(input_layer,X)#.summary()
test_model.load_weights('Modell00000010.hdf5')
graph = tf.get_default_graph()
def labels_to_text(labels, alphabet=None):
    """Map CTC label indices back to a string.

    Parameters:
        labels: iterable of integer class indices; an index equal to
            ``len(alphabet)`` is the CTC blank and maps to "".
        alphabet: character set to decode against. Defaults to the
            module-level ``charset`` (backward compatible).

    Returns:
        The decoded string.
    """
    if alphabet is None:
        alphabet = charset
    # blanks contribute nothing; every other index is a direct lookup
    return "".join("" if c == len(alphabet) else alphabet[c] for c in labels)
import itertools
#charset = u' !@#><~%&\$^*+,-./0123456789:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
def decode_label(out):
    """Greedy CTC decode of one softmax output.

    out: array of shape (1, timesteps, len(charset)+1).
    Returns the collapsed string: repeated indices merged, blanks dropped.
    """
    # out : (1, 32, 42)
    out_best = list(np.argmax(out[0, :], axis=-1)) # get max index -> len = 32
    #print(out_best)
    out_best = [k for k, g in itertools.groupby(out_best)] # remove overlap value
    #print(out_best)
    outstr = ''
    for i in out_best:
        # NOTE(review): `i > 0` also discards index 0 (the space character in
        # charset), not only the CTC blank (len(charset)) -- confirm intended.
        if i < len(charset) and i > 0:
            outstr += charset[i]
    return outstr
def levenshteinDistance(s1, s2):
    """Edit distance between s1 and s2 via the classic two-row DP."""
    # keep the shorter string as the DP row so the row stays small
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    previous = list(range(len(s1) + 1))
    for row, ch2 in enumerate(s2, start=1):
        current = [row]
        for col, ch1 in enumerate(s1):
            if ch1 == ch2:
                current.append(previous[col])
            else:
                # min over delete / insert / substitute, each costing 1
                current.append(1 + min(previous[col], previous[col + 1], current[-1]))
        previous = current
    return previous[-1]
def ctc_lambda_func(args):
    """Keras Lambda wrapper computing the CTC batch cost.

    args: (y_pred, labels, input_length, label_length), as Keras tensors.
    """
    y_pred, labels, input_length, label_length = args
    # the 2 is critical here since the first couple outputs of the RNN
    # tend to be garbage:
    y_pred = y_pred[:, 2:, :]
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
def text_to_labels(text, alphabet=None):
    """Map each character of `text` to its class index in the alphabet.

    alphabet defaults to the module-level ``charset`` (backward compatible).
    Characters absent from the alphabet map to -1 -- the behaviour of
    ``str.find``, preserved from the original implementation.
    """
    if alphabet is None:
        alphabet = charset
    return [alphabet.find(char) for char in text]
import math
def cubicBezierPoint(a0, a1, a2, a3, t):
    """Evaluate a cubic Bezier with control values a0..a3 at parameter t."""
    u = 1 - t
    # Bernstein basis weights for degree 3
    b0 = math.pow(u, 3)
    b1 = 3 * math.pow(u, 2) * t
    b2 = 3 * u * math.pow(t, 2)
    b3 = math.pow(t, 3)
    return b0 * a0 + b1 * a1 + b2 * a2 + b3 * a3
def bezier_curve(points):
    """Resample a stroke of (x, y, pen) triples through cubic Bezier segments.

    Midpoints of neighbouring samples are inserted as extra control points,
    then each group of four control points is evaluated as one cubic segment.
    """
    controlpoints = []
    renderpoints = []
    # walk the points two at a time, inserting midpoints before/after them
    for i in range(1,len(points)-1,2):
        cp = ((points[i-1][0]+points[i][0])/2,(points[i-1][1]+points[i][1])/2,(points[i-1][2]+points[i][2])/2)
        controlpoints.append(cp)
        controlpoints.append(points[i])
        controlpoints.append(points[i+1])
        if (i + 2 ) < (len(points) - 1) :
            cp1 = ((points[i+1][0]+points[i+2][0])/2,(points[i+1][1]+points[i+2][1])/2,(points[i+1][2]+points[i+2][2])/2)
            controlpoints.append(cp1)
    # NOTE(review): each segment is sampled only at t=0, so this downsamples
    # the stroke rather than producing a dense curve -- confirm intended.
    for i in range(0, len(controlpoints) - 3,4) :
        a0 = controlpoints[i]
        a1 = controlpoints[i+1]
        a2 = controlpoints[i+2]
        a3 = controlpoints[i+3]
        op = (cubicBezierPoint(a0[0], a1[0], a2[0], a3[0], 0),cubicBezierPoint(a0[1], a1[1], a2[1], a3[1], 0),cubicBezierPoint(a0[2], a1[2], a2[2], a3[2], 0))
        renderpoints.append(op)
    #print(renderpoints)
    return renderpoints
def beizerprocess(data):
    """Normalise raw strokes and resample them with bezier_curve.

    data: list of strokes, each stroke being ([x0, x1, ...], [y0, y1, ...]).
    Returns a flat list of [x, y, pen] rows where pen is 1 on the first point
    of each resampled stroke and 0 otherwise; x/y are min-max normalised
    per stroke.
    """
    builder = []
    for i in range(0,len(data)):
        x_cord = []
        y_cord = []
        for j in range(0,len(data[i][0])):
            x_cord.append(data[i][0][j])
            y_cord.append(data[i][1][j])
        # min-max normalise this stroke to [0, 1] on each axis
        # NOTE(review): raises ZeroDivisionError for a perfectly horizontal
        # or vertical stroke (max == min) -- confirm upstream guarantees.
        minX = min(x_cord)
        maxX = max(x_cord)
        Xvalue = maxX - minX
        x_cord_mean = [(elt - minX)/Xvalue for elt in x_cord]
        minY = min(y_cord)
        maxY = max(y_cord)
        Yvalue = maxY - minY
        y_cord_mean = [(elt - minY)/Yvalue for elt in y_cord]
        temptuple = []
        # NOTE(review): this loop reuses `i`, shadowing the stroke index;
        # harmless today (the outer for reassigns i), but fragile.
        for i in range(0,len(x_cord_mean)):
            temptuple.append( (x_cord_mean[i],y_cord_mean[i],0) )
        bezpoints = bezier_curve(temptuple)
        # first resampled point carries pen-down flag 1, the rest 0
        for l in range(0,len(bezpoints)):
            if (l == 0):
                templ = []
                templ.append(bezpoints[l][0])
                templ.append(bezpoints[l][1])
                templ.append(1)
            else:
                templ = []
                templ.append(bezpoints[l][0])
                templ.append(bezpoints[l][1])
                templ.append(0)
            builder.append(templ)
    return builder
def HWRmodel(text_data):
    """Run the handwriting model on one Bezier-resampled stroke sequence.

    Pads the sequence in place with (-1,-1,-1) rows up to maxstrokeslen
    (the caller's list is mutated), predicts with the module-level model,
    and returns the spell-corrected decoded string.
    """
    data = text_data
    # pad to fixed model input length
    if len(data) != maxstrokeslen:
        c = len(data)
        for j in range(c, maxstrokeslen):
            data.append([-1,-1,-1])
            c+=1
    num = 71  # NOTE(review): unused -- presumably leftover
    # add the batch dimension: (1, maxstrokeslen, 3)
    t = np.expand_dims(data,axis = 0)
    st = time.time()
    # use the module-level default graph so prediction works from Flask threads
    with graph.as_default():
        prediction = test_model.predict(t)
    et = time.time()
    # print(et - st)
    de = decode_label(prediction)
    sde = spell(de)
    print("prediction and spellprediction",de,sde)
    return sde
from flask import Flask, url_for, request
app = Flask(__name__)
# @app.route('/apitest')
# def apitest():
# return 'API working'
#@app.route('/hwrrecog', methods=['POST'])
def hwrrecog(data):
    """Full pipeline: raw strokes -> Bezier resampling -> model -> JSON response."""
    beizerdata = beizerprocess(data)
    text_out = HWRmodel(beizerdata)
    op1 = {'output':text_out}
    return jsonify(op1)
|
987,340 | 67b94a9a7dad6dea31be596d85751c8fd56d4cbd | # Generated by Django 3.0.2 on 2020-03-23 11:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds optional contact/locale columns
    (currency, description, email, language, phone, telegram, time_zone,
    vat, viber, whatsap) to the `company` model."""
    dependencies = [
        ('company', '0002_company_user'),
    ]
    operations = [
        migrations.AddField(
            model_name='company',
            name='currency',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='company',
            name='description',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='company',
            name='email',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='company',
            name='language',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='company',
            name='phone',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='company',
            name='telegram',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='company',
            name='time_zone',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='company',
            name='vat',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='company',
            name='viber',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='company',
            name='whatsap',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
|
def check_sum_before_and_after_index(int_list):
    """Determine if there exists an element in the array such that the sum of
    the elements on its left is equal to the sum of the elements on its right.
    If there are no elements to the left/right, then the sum is considered to
    be zero.

    Prints "YES" if such an index exists, otherwise "NO".

    >>> int_lst = [1, 2, 3, 3]
    >>> check_sum_before_and_after_index(int_lst)
    YES
    """
    # Fixes: the original used Python-2-only `print x` statements and
    # `xrange`, and allocated two O(n) prefix/suffix arrays. A single pass
    # with a running left sum needs O(1) extra memory: at each index the
    # right-hand sum is total - left_sum - value.
    total = sum(int_list)
    left_sum = 0
    for value in int_list:
        if left_sum == total - left_sum - value:
            # covers the one-item list too (0 == 0)
            print("YES")
            return
        left_sum += value
    # no index balances (including the empty list)
    print("NO")
    return
|
987,342 | 946db708e84c5ff80f8d6fe625b6392c63fc8552 |
# coding: utf-8
# In[2]:
import tensorflow as tf
from tensorflow.keras.layers import Dense,GlobalAvgPool2D,Input
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import Model
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.callbacks import EarlyStopping,CSVLogger,ModelCheckpoint,ReduceLROnPlateau
import string
import numpy as np
import os
from plot_model import plot_model
# In[3]:
# character set: all digits plus all ASCII letters, 62 in total
characters = string.digits + string.ascii_letters
# number of classes (62)
num_classes = len(characters)
# batch size
batch_size = 64
# number of training epochs
epochs=100
# training data: ~50000 images generated beforehand with the `captcha`
# module; every captcha is 4 characters long
train_dir = "./captcha/train/"
# test data: ~10000 images generated the same way
test_dir = "./captcha/test/"
# image width
width=160
# image height
height=60
# In[4]:
# Collect every captcha image path together with its one-hot encoded label.
def get_filenames_and_classes(dataset_dir):
    """Return (paths, labels) for all captcha images under dataset_dir.

    labels has shape (num_images, 4, num_classes): one one-hot row per
    captcha character, taken from the first 4 characters of the filename.
    """
    # image paths
    photo_filenames = []
    # image labels
    y = []
    for filename in os.listdir(dataset_dir):
        # full path of this file
        path = os.path.join(dataset_dir, filename)
        photo_filenames.append(path)
        # the first 4 characters of the filename are the captcha text
        captcha_text = filename[0:4]
        # empty label: one row per captcha character
        label = np.zeros((4, num_classes), dtype=np.uint8)
        # one-hot encode each character;
        # characters.find(ch) is ch's class index in the charset
        for i, ch in enumerate(captcha_text):
            label[i, characters.find(ch)] = 1
        y.append(label)
    return np.array(photo_filenames),np.array(y)
# In[5]:
# training-set image paths and labels
x_train,y_train = get_filenames_and_classes(train_dir)
# test-set image paths and labels
x_test,y_test = get_filenames_and_classes(test_dir)
# In[6]:
# Per-record image transform (runs before batching, one record at a time).
def image_function(filenames, label):
    """Load, decode and normalise one captcha image; label passes through."""
    # read the file at this path
    image = tf.io.read_file(filenames)
    # decode the JPEG into a 3-channel tensor
    image = tf.image.decode_jpeg(image, channels=3)
    # scale pixel values to [0, 1]
    image = tf.cast(image, tf.float32) / 255.0
    return image, label
# In[7]:
# Batch-level label transform (runs after batching, one whole batch at a time).
def label_function(image, label):
    """Split a batch of labels into the 4 per-character task targets."""
    # transpose reorders the dimensions: a batch of labels shaped
    # (64, 4, 62) -- (batch, captcha length, character classes) --
    # becomes (4, 64, 62) after tf.transpose(label,[1,0,2])
    label = tf.transpose(label,[1,0,2])
    # return the images and a 4-tuple of labels: the model defines 4 tasks,
    # so each element is one (batch, num_classes) one-hot tensor
    return image, (label[0],label[1],label[2],label[3])
# In[8]:
# Build the training dataset pipeline from image paths and labels.
dataset_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# shuffle with an arbitrary large buffer;
# reshuffle_each_iteration=True reshuffles on every epoch
dataset_train = dataset_train.shuffle(buffer_size=1000,reshuffle_each_iteration=True)
# map BEFORE batch: image_function processes one record at a time
dataset_train = dataset_train.map(image_function)
# repeat the data for 1 epoch
dataset_train = dataset_train.repeat(1)
# set the batch size
dataset_train = dataset_train.batch(batch_size)
# map AFTER batch: unlike the first map, label_function sees a whole batch
dataset_train = dataset_train.map(label_function)
# Build the identical pipeline for the test set.
dataset_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
dataset_test = dataset_test.shuffle(buffer_size=1000,reshuffle_each_iteration=True)
dataset_test = dataset_test.map(image_function)
dataset_test = dataset_test.repeat(1)
dataset_test = dataset_test.batch(batch_size)
dataset_test = dataset_test.map(label_function)
# In[13]:
# Pull one batch to sanity-check the data and label shapes.
x,y = next(iter(dataset_test))
print(x.shape)
print(np.array(y).shape)
# In[10]:
# Alternatively, iterate one full epoch batch by batch:
# for x,y in dataset_test:
#     pass
# In[11]:
# Load an ImageNet-pretrained ResNet50 without its classification head.
resnet50 = ResNet50(weights='imagenet', include_top=False, input_shape=(height,width,3))
# model input
inputs = Input((height,width,3))
# feature extraction with ResNet50
x = resnet50(inputs)
# global average pooling
x = GlobalAvgPool2D()(x)
# Treat the 4 captcha characters as 4 separate classification tasks:
# head i predicts character i.
x0 = Dense(num_classes, activation='softmax', name='out0')(x)
x1 = Dense(num_classes, activation='softmax', name='out1')(x)
x2 = Dense(num_classes, activation='softmax', name='out2')(x)
x3 = Dense(num_classes, activation='softmax', name='out3')(x)
# one model with four outputs
model = Model(inputs, [x0,x1,x2,x3])
# plot the architecture
plot_model(model,style=0)
# In[12]:
# One loss per task; loss_weights could weight the tasks differently,
# but all 4 captcha positions are equally important here.
model.compile(loss={'out0':'categorical_crossentropy',
                    'out1':'categorical_crossentropy',
                    'out2':'categorical_crossentropy',
                    'out3':'categorical_crossentropy'},
              loss_weights={'out0':1,
                            'out1':1,
                            'out2':1,
                            'out3':1},
              optimizer=SGD(lr=1e-2,momentum=0.9),
              metrics=['acc'])
# All callbacks monitor val_loss:
# - EarlyStopping: stop after 6 epochs without improvement
# - CSVLogger: save the training history
# - ModelCheckpoint: keep only the model with the lowest val_loss
# - ReduceLROnPlateau: multiply the lr by 0.1 after 3 stalled epochs
callbacks = [EarlyStopping(monitor='val_loss', patience=6, verbose=1),
             CSVLogger('Captcha_tfdata.csv'),
             ModelCheckpoint('Best_Captcha_tfdata.h5', monitor='val_loss', save_best_only=True),
             ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)]
# In[13]:
# Train, feeding the tf.data pipelines directly.
model.fit(x=dataset_train,
          epochs=epochs,
          validation_data=dataset_test,
          callbacks=callbacks)
|
987,343 | 6392cf728fb47d07f06a33ffc2ed67cc28e36bbb | # -----------------------------------------------------------------------------
# http://pythonhosted.org/django-auth-ldap/
# -----------------------------------------------------------------------------
from django_auth_ldap.config import LDAPSearch, PosixGroupType
import ldap
LDAP_BASE_DC = 'dc=dighum,dc=kcl,dc=ac,dc=uk'
LDAP_BASE_OU = 'ou=groups,' + LDAP_BASE_DC
# Baseline configuration
AUTH_LDAP_SERVER_URI = 'ldap://ldap1.cch.kcl.ac.uk'
# empty DN/password means the search binds anonymously
AUTH_LDAP_BIND_DN = ''
AUTH_LDAP_BIND_PASSWORD = ''
AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,ou=people,' + LDAP_BASE_DC
# Set up the basic group parameters
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
    LDAP_BASE_OU,
    ldap.SCOPE_SUBTREE,
    '(objectClass=posixGroup)'
)
AUTH_LDAP_GROUP_TYPE = PosixGroupType(name_attr='cn')
# Simple group restrictions
# TODO: Set this value in the project settings
AUTH_LDAP_REQUIRE_GROUP = ''
# Populate the Django user from the LDAP directory
AUTH_LDAP_USER_ATTR_MAP = {
    'first_name': 'givenName',
    'last_name': 'sn',
    'email': 'mail'
}
# map LDAP group membership onto Django user flags
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
    'is_active': 'cn=confluence-users,' + LDAP_BASE_OU,
    'is_staff': 'cn=kdl-staff,' + LDAP_BASE_OU,
    'is_superuser': 'cn=sysadmin,' + LDAP_BASE_OU
}
AUTH_LDAP_PROFILE_FLAGS_BY_GROUP = {}
# This is the default, but I like to be explicit
AUTH_LDAP_ALWAYS_UPDATE_USER = True
# Cache group memberships for an hour to minimize LDAP traffic
AUTH_LDAP_CACHE_GROUPS = True
AUTH_LDAP_GROUP_CACHE_TIMEOUT = 60 * 60
# try LDAP first, then fall back to Django's local user database
AUTHENTICATION_BACKENDS = (
    'django_auth_ldap.backend.LDAPBackend',
    'django.contrib.auth.backends.ModelBackend',
)
|
987,344 | c8e9bcc9d2557363139407595083471d5fa2f156 | from __future__ import print_function
import chainermn
def train(hyperparameters, num_gpus, hosts, current_host):
    """Failure-injection training entry point.

    Raises deliberately on a single-host cluster, and again on the node
    whose (simulated) rank matches hyperparameters['node_to_fail'].
    """
    if len(hosts) == 1:
        raise Exception('Exception on a single machine')
    default_comm = 'naive' if num_gpus == 0 else 'pure_nccl'
    comm = chainermn.create_communicator(hyperparameters.get('communicator', default_comm))
    # When running in local mode, 'inter_rank' simulates the node index of
    # multi-node training.
    rank = comm.inter_rank
    if hyperparameters.get('node_to_fail') == rank:
        raise Exception('exception from node {}'.format(rank))
|
987,345 | 3af817ae4c372794628d0f550c23148c16e6be54 | from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
'''
PART II
np.random.randn() generates samples from a standard normal distribution
with \sigma^2 = 1 and \mu = 0
'''
def cahistn(N, mu, corr_mat, BINS):
    """Draw N samples from a 2-D Gaussian and bin them into a 2-D histogram.

    Returns (H, xbins, ybins) exactly as numpy.histogram2d does.
    """
    samples = np.random.multivariate_normal(mu, corr_mat, int(N))
    return np.histogram2d(samples[:, 0], samples[:, 1], bins=BINS)
# LOAD CONSTANTS
N = 1e6
mu = [6, 6]
BINS = 25
# LOAD CORRELATION MATRICES
corr1 = [[1, 0], [0, 1]]
corr2 = [[1, 0], [0, 1]]
corr3 = [[5, 0], [0, 2]]
corr4 = [[1, 0.5], [0.5, 1]]
corr5 = [[5, 0.5], [0.5, 2]]
# NOTE(review): corr3 appears twice and corr2 is never used -- presumably a
# typo for [corr1, corr2, corr3, corr4, corr5]; confirm.
corr_mats = [corr1, corr3, corr3, corr4, corr5]
fig = plt.figure(figsize=(10, 10))
# one 3-D contour subplot per covariance matrix
for index, corr_mat in enumerate(corr_mats):
    pdf_xy, xbins, ybins = cahistn(N, mu, corr_mat, BINS)
    # NOTE(review): dividing the counts by N**2 does not normalise them to a
    # pdf (that would be /N, or /(N*bin_area)) -- confirm intended.
    pdf_xy = pdf_xy/(N**2)
    X, Y = np.meshgrid(xbins[:-1], ybins[:-1]) # create meshgrid from binedges
    ax = fig.add_subplot(3, 2, index+1, projection='3d')
    #surf = ax.plot_surface(X, Y, pdf_xy, rstride=1, cstride=1, linewidth=0,
    #                       antialiased=False) # plot ampltude of pdf_
    cset = ax.contour(X, Y, pdf_xy, zdir='z', offset=0, cmap=cm.coolwarm)
    cset = ax.contour(X, Y, pdf_xy, zdir='x', offset=min(xbins), cmap=cm.coolwarm)
    cset = ax.contour(X, Y, pdf_xy, zdir='y', offset=max(ybins), cmap=cm.coolwarm)
    ax.set_xlabel('X-Value')
    ax.set_ylabel('Y-Value')
    ax.set_zlabel('PDF_XY')
    str_title = 'Covariance Matrix ' \
                + str(index+1)
    plt.title(str_title)
fig.patch.set_facecolor('white')
plt.tight_layout()
987,346 | 0c0c47651de01c1e6e8f9c187725f087ed18856b | from django import forms
class LoginForm(forms.Form):
    """Username/password login form with Bootstrap-styled widgets."""
    username = forms.CharField(label="Username",required=True,max_length=200)
    username.widget = forms.TextInput(attrs={'class':'form-control'})
    password = forms.CharField(label="Password",required=True,max_length=200)
    password.widget = forms.PasswordInput(attrs={'class':'form-control'})
class SignUpForm(forms.Form):
    """Registration form with Bootstrap-styled widgets.

    NOTE(review): password/confirm_password equality is not validated here
    (no clean method) -- presumably checked in the view; confirm.
    """
    email = forms.EmailField(label='Email Id',required=True)
    email.widget = forms.TextInput(attrs={'class':'form-control',})
    username = forms.CharField(label="Username",required=True,max_length=200)
    username.widget = forms.TextInput(attrs={'class':'form-control',})
    password = forms.CharField(label="Password",required=True,max_length=200)
    password.widget = forms.PasswordInput(attrs={'class':'form-control',})
    confirm_password = forms.CharField(label="Confirm Password",required=True,max_length=200)
    confirm_password.widget = forms.PasswordInput(attrs={'class':'form-control',})
|
987,347 | 26d8d516af09cb46cb2f68b7bc7f13a0d159e733 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from geometry_msgs.msg import Twist
from geometry_msgs.msg import PoseWithCovarianceStamped
from sensor_msgs.msg import LaserScan
import tf
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import actionlib_msgs
import math
# Ref: https://hotblackrobotics.github.io/en/blog/2018/01/29/action-client-py/
from std_msgs.msg import Bool, String, Int32
import numpy as np
from std_msgs.msg import Int32MultiArray
PI = math.pi
class EnemyPointsFinder():
    """ROS node logic: detect a nearby enemy robot from 2-D lidar scans.

    Subscribes to `amcl_pose` (robot pose) and `scan` (lidar); classifies
    scan points that lie inside the field but off the known fixed obstacles
    as "enemy"; publishes `is_enemy_points` (Bool) and `enemy_direction`
    (Int32, degrees).
    """
    def __init__(self):
        # only consider lidar returns closer than this [m]
        self.max_distance = 0.8 # 0.7
        # self.thresh_corner = 0.6 # 0.25
        # self.thresh_center = 0.5 # 0.35
        # robot pose in the map frame (updated by poseCallback)
        self.pose_x = 0
        self.pose_y = 0
        self.th = 0
        # lidar scan
        self.scan = []
        self.is_near_wall = False
        # publisher
        self.is_enemy_points_pub = rospy.Publisher('is_enemy_points', Bool, queue_size=10)
        self.enemy_direction_pub = rospy.Publisher('enemy_direction', Int32, queue_size=10)
        # subscriber
        self.pose_sub = rospy.Subscriber('amcl_pose', PoseWithCovarianceStamped, self.poseCallback)
        self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)
        self.is_near_enemy = False
        self.enemy_direction = None
        self.enemy_dist = None
        # do not run enemy detection until the first amcl pose arrives
        self.is_initialized_pose = False
    def findEnemy(self, scan, pose_x, pose_y, th):
        '''
        input scan. list of lider range, robot locate(pose_x, pose_y, th)
        return is_near_enemy(BOOL), enemy_direction[rad](float)
        '''
        # NOTE(review): returns a bare False (not a 3-tuple) on a malformed scan
        if not len(scan) == 360:
            return False
        # update pose
        self.pose_x = pose_x
        self.pose_y = pose_y
        self.th = th
        # drop too big and small value ex) 0.0 , 2.0
        near_scan = [x if self.max_distance > x > 0.1 else 0.0 for x in scan]
        # 1 for every scan angle classified as an enemy point
        enemy_scan = [1 if self.is_point_enemy(x,i) else 0 for i,x in enumerate(near_scan)]
        is_near_enemy = sum(enemy_scan) > 6 # if less than 5 points, maybe noise
        if is_near_enemy:
            # take the middle of the enemy-point angles as the direction
            idx_l = [i for i, x in enumerate(enemy_scan) if x == 1]
            # NOTE(review): len(idx_l)/2 is integer division only on Python 2;
            # on Python 3 this float index raises TypeError -- confirm interpreter.
            idx = idx_l[len(idx_l)/2]
            enemy_direction_deg = idx
            enemy_direction = idx / 360.0 * 2*PI
            enemy_dist = near_scan[idx]
        else:
            enemy_direction_deg = None
            enemy_direction = None
            enemy_dist = None
        # print("Enemy: {}, Direction: {}, Direction[deg]: {}".format(is_near_enemy, enemy_direction, enemy_direction_deg))
        # print("enemy points {}".format(sum(enemy_scan)))
        self.is_enemy_points_pub.publish(is_near_enemy)
        self.enemy_direction_pub.publish(enemy_direction_deg)
        return is_near_enemy, enemy_direction, enemy_dist
    def is_point_enemy(self, dist, ang_deg):
        """Return True when the lidar return at (dist, ang_deg) lies inside
        the field but away from every known fixed obstacle."""
        if dist == 0:
            return False
        # project the scan point into field (map) coordinates
        ang_rad = ang_deg /360. * 2 * PI
        point_x = self.pose_x + dist * math.cos(self.th + ang_rad)
        point_y = self.pose_y + dist * math.sin(self.th + ang_rad)
        # check the point is inside the diamond-shaped field
        filed_size = 1.3 #1.53
        if point_y > (-point_x + filed_size):
            return False
        elif point_y < (-point_x - filed_size):
            return False
        elif point_y > ( point_x + filed_size):
            return False
        elif point_y < ( point_x - filed_size):
            return False
        else:
            # check the point is not one of the fixed field obstacles
            # the four small corner obstacles
            locate = 0.53 # 0.53
            radius = 0.3 # obstacle radius
            large_radius = 0.5 # radius (unused here)
            if (pow((point_x - locate), 2) + pow((point_y - locate), 2)) < pow(radius, 2):
                return False
            elif (pow((point_x - locate), 2) + pow((point_y + locate), 2)) < pow(radius, 2):
                return False
            elif (pow((point_x + locate), 2) + pow((point_y - locate), 2)) < pow(radius, 2):
                return False
            elif (pow((point_x + locate), 2) + pow((point_y + locate), 2)) < pow(radius, 2):
                return False
            # the large obstacle at the centre
            elif (pow(point_x, 2) + pow(point_y, 2)) < pow(radius, 2):
                return False
            else:
                return True
        """
        locate = 0.60 # 0.53
        len_p1 = math.sqrt(pow((point_x - locate), 2) + pow((point_y - locate), 2))
        len_p2 = math.sqrt(pow((point_x - locate), 2) + pow((point_y + locate), 2))
        len_p3 = math.sqrt(pow((point_x + locate), 2) + pow((point_y - locate), 2))
        len_p4 = math.sqrt(pow((point_x + locate), 2) + pow((point_y + locate), 2))
        len_p5 = math.sqrt(pow(point_x , 2) + pow(point_y , 2))
        if len_p1 < self.thresh_corner or len_p2 < self.thresh_corner or len_p3 < self.thresh_corner or len_p4 < self.thresh_corner or len_p5 < self.thresh_center:
            return False
        else:
            #print(point_x, point_y, self.pose_x, self.pose_y, self.th, dist, ang_deg, ang_rad)
            #print(len_p1, len_p2, len_p3, len_p4, len_p5)
            return True
        """
    def poseCallback(self, data):
        '''
        pose topic from amcl localizer
        update robot twist
        '''
        self.pose_x = data.pose.pose.position.x
        self.pose_y = data.pose.pose.position.y
        quaternion = data.pose.pose.orientation
        rpy = tf.transformations.euler_from_quaternion((quaternion.x, quaternion.y, quaternion.z, quaternion.w))
        # yaw angle of the robot
        self.th = rpy[2]
        self.is_initialized_pose = True
    def lidarCallback(self, data):
        '''
        lidar scan use for bumper , and find enemy
        controll speed.x
        '''
        scan = data.ranges
        self.scan = scan
        self.is_near_wall = self.isNearWall(scan)
        # enemy detection
        if self.is_initialized_pose:
            self.is_near_enemy, self.enemy_direction, self.enemy_dist = self.findEnemy(scan, self.pose_x, self.pose_y, self.th)
        # if self.is_near_enemy:
        #     self.updateNearEnemyTwist()
    def isNearWall(self, scan):
        """True when any valid return within +/-15 degrees ahead is < 0.2 m."""
        if not len(scan) == 360:
            return False
        forword_scan = scan[:15] + scan[-15:]
        # drop too small value ex) 0.0
        forword_scan = [x for x in forword_scan if x > 0.1]
        if min(forword_scan) < 0.2:
            return True
        return False
if __name__ == '__main__':
    try:
        rospy.init_node('enemy_points_finder', anonymous=False)
        finder = EnemyPointsFinder()
        rate = rospy.Rate(10) # 10hz
        # spin at 10 Hz; all work happens in the subscriber callbacks
        while not rospy.is_shutdown():
            rate.sleep()
    except rospy.ROSInterruptException:
        pass
987,348 | 0e3f4cc1ed2d2a81928acf6c9a9915968dfc88d3 | from torch.distributions.categorical import Categorical
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import namedtuple
from tensorboardX import SummaryWriter
import torch.multiprocessing as mp
import math
import os
def run_step(env, a):
    """Advance `env` by one action, resetting it on episode end.

    Returns a Transition whose `state` field is None -- the caller tracks
    the previous observation itself.
    """
    observation, reward, episode_done, _ = env.step(a)
    observation = env.reset() if episode_done else observation
    return Transition(None, a, reward, observation, episode_done)
class PGAgent():
    """Policy-gradient agent: wraps a logits-producing model and samples actions."""

    def __init__(self, model, device):
        """Store the policy network and the device inputs are moved to."""
        self.model = model
        self.device = device

    def get_action(self, state):
        """Sample one action for a batch-of-one `state` from the policy."""
        inputs = torch.FloatTensor(state).to(self.device)
        action_logits = self.model(inputs).detach()
        distribution = Categorical(logits=action_logits)
        return distribution.sample().cpu().data.numpy().tolist()[0]
def evaluate(env, agent, n_games=1):
    """ Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
    # NOTE(review): env.spec.timestep_limit is deprecated in newer gym
    # (use env.spec.max_episode_steps) -- confirm the gym version in use.
    t_max = env.spec.timestep_limit or 1000
    rewards = []
    for _ in range(n_games):
        s = env.reset()
        reward = 0.0
        for _ in range(t_max):
            # the agent expects a batch dimension, hence np.array([s])
            action = agent.get_action(np.array([s]))
            s, r, done, _ = env.step(action)
            reward += r
            if done: break
        rewards.append(reward)
    return np.mean(rewards)
def data_func(net,device, train_queue):
    """Worker-process entry point: run several environments round-robin and
    push N-step transitions onto train_queue forever."""
    # each process runs multiple instances of the environment, round-robin
    print("start work process:",os.getpid())
    envs = [gym.make(GAME) for _ in range(ENVS_PER_PROCESS)]
    agent = PGAgent(net,device)
    states= [env.reset() for env in envs]
    while True:
        for i,env in enumerate(envs):
            s0 = states[i]
            a0 = agent.get_action([s0])
            a = a0
            r_total = 0.0
            # roll the environment forward up to N_STEP steps, accumulating
            # the discounted reward credited to the first action a0
            for j in range(N_STEP):
                next_s,r,done, _ = env.step(a)
                r_total = r_total + r * GAMMA**j
                if done:
                    next_s = env.reset()
                    break
                a = agent.get_action([next_s])
            states[i] = next_s
            # unlike run_step above, `state` carries the start state s0 here
            train_queue.put(Transition(s0,a0,r_total,next_s, done))
# Experience tuple exchanged between worker processes and the learner.
Transition = namedtuple('Transition',('state', 'action', 'reward','next_state','done'))
# for training
STEPS = 20000            # number of learner updates
LEARNING_RATE = 0.0005
BATCH_SIZE = 32
BETA = 0.1               # entropy-bonus weight
GAMMA = 1.0              # discount factor (undiscounted)
TAU = 0.05               # Polyak factor for the target critic
N_STEP = 1               # n-step return length used by the workers
GAME = "LunarLander-v2"
#GAME = "CartPole-v0"
TOTAL_ENVS = 32
PROCESSES_COUNT = max((mp.cpu_count() - 2),1)
ENVS_PER_PROCESS = math.ceil(TOTAL_ENVS / PROCESSES_COUNT)
# NOTE(review): batch_envs is never used below — candidate for removal.
batch_envs = [gym.make(GAME) for _ in range(BATCH_SIZE)]
# for evaluation
eval_env = gym.make(GAME)
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
x, y = eval_env.observation_space.shape[0], eval_env.action_space.n
actor = torch.nn.Sequential(nn.Linear(x,128),nn.ReLU(),nn.Linear(128,256),nn.ReLU(),nn.Linear(256,y)).to(device)
critic = torch.nn.Sequential(nn.Linear(x,64),nn.ReLU(),nn.Linear(64,128),nn.ReLU(),nn.Linear(128,1)).to(device)
critic_target = torch.nn.Sequential(nn.Linear(x,64),nn.ReLU(),nn.Linear(64,128),nn.ReLU(),nn.Linear(128,1)).to(device)
# Workers read the actor's parameters directly via shared memory.
actor.share_memory()
agent = PGAgent(actor,device)
optimizer_actor = torch.optim.Adam(actor.parameters(), lr=LEARNING_RATE)
optimizer_critic = torch.optim.Adam(critic.parameters(), lr=LEARNING_RATE*5)
eval_env.seed(0)
torch.random.manual_seed(0)
# Bounded queue: workers block once the learner falls behind.
train_queue = mp.Queue(maxsize=PROCESSES_COUNT)
data_proc_list = []
# Spawn processes to run data_func
# NOTE(review): spawning at module import time without an
# `if __name__ == "__main__":` guard breaks under the 'spawn' start method.
for _ in range(PROCESSES_COUNT):
    data_proc = mp.Process(target=data_func, args=(actor, device,train_queue))
    data_proc.start()
    data_proc_list.append(data_proc)
batch = []
writer = SummaryWriter(comment="a3c")
try:
    for step_idx in range(STEPS):
        # Drain the queue until a full batch of worker transitions arrived.
        while len(batch) < BATCH_SIZE:
            train_entry = train_queue.get()
            batch.append(train_entry)
        # Transpose list-of-Transitions into Transition-of-lists.
        transitions = Transition(*zip(*batch))
        batch.clear()
        states_t = torch.FloatTensor(transitions.state).to(device)
        actions_t = torch.LongTensor(transitions.action).to(device)
        rewards_t = torch.FloatTensor(transitions.reward).to(device)
        next_states_t = torch.FloatTensor(transitions.next_state).to(device)
        done_t = torch.FloatTensor(transitions.done).to(device)
        logits_t = actor(states_t)
        m = Categorical(logits = logits_t)
        # critic loss: TD target from the slow-moving target critic, with
        # terminal next-states masked out via done_t.
        predicted_states_v = critic(states_t).squeeze()
        with torch.no_grad():
            predicted_next_states_v = critic_target(next_states_t).squeeze() * (1 - done_t)
            target_states_v = predicted_next_states_v * GAMMA + rewards_t
        L_critic = F.smooth_l1_loss(predicted_states_v, target_states_v)
        optimizer_critic.zero_grad()
        L_critic.backward()
        optimizer_critic.step()
        # actor loss: policy gradient weighted by the (pre-update) advantage
        log_probs_t = m.log_prob(actions_t)
        advantages_t = (target_states_v - predicted_states_v).detach()
        J_actor = (advantages_t * log_probs_t).mean()
        # entropy bonus (subtracted from the loss => maximized)
        entropy = m.entropy().mean()
        L_actor = -J_actor - entropy * BETA
        optimizer_actor.zero_grad()
        L_actor.backward()
        optimizer_actor.step()
        # smooth (Polyak) update of the target critic
        for target_param, new_param in zip(critic_target.parameters(), critic.parameters()):
            target_param.data = target_param.data * (1 - TAU) + new_param.data * TAU
        writer.add_scalar("Entropy", entropy, step_idx)
        writer.add_scalar("Critic_Loss", L_critic, step_idx)
        writer.add_scalar("Actor_Loss", L_actor, step_idx)
        writer.add_scalar("V",predicted_states_v.mean(),step_idx)
        if step_idx % 50 == 0:
            #critic_target.load_state_dict(critic.state_dict())
            score = evaluate(eval_env, agent, n_games=5)
            print("Step {}: with score : {:.3f}".format(step_idx, score))
            writer.add_scalar("Score", score, step_idx)
            if score>= 200:
                print("Reach the target score 200 of 5 games")
                break
finally:
    # Always tear the worker processes down, even on KeyboardInterrupt.
    for p in data_proc_list:
        p.terminate()
        p.join()
|
987,349 | da880aae0bb01a28ad0293f7b8b8168e89369c1d | import random
import string
from django.http import JsonResponse, HttpResponse
from django.shortcuts import render,redirect
from django.views.decorators.csrf import csrf_exempt
from index.models import TOrder, TAddress, TUser, TShop
# Create your views here.
#======================订单页面渲染======================
def show_order(request):
    """Render the order (checkout) page for a logged-in user.

    Anonymous visitors are redirected to the login page; this URL is stored
    in the session so they come back here after logging in.
    """
    if request.session.get('login'):
        user = request.session.get('username')
        user_inf = TUser.objects.get(username=user)
        addr = TAddress.objects.filter(user_id=user_inf.id)
        car_inf = request.session.get('car_inf')
        return render(request, 'indent.html', {'addr': addr, 'car_inf': car_inf})
    else:
        # Remember where to return after a successful login.
        request.session['red'] = '/dangdang/show_order'
        return redirect('log_reg:login')
#=====================订单逻辑处理=======================
#-----------订单提交逻辑------------
@csrf_exempt
def ordelogic(request):
    """Handle order submission.

    Optionally persists a newly entered address, generates an order id,
    clears the session cart and the user's TShop rows, then renders the
    confirmation page.
    """
    username = request.POST.get('username')
    addr = request.POST.get('address')
    zipcode = request.POST.get('zipcode')  # renamed: `zip` shadowed the builtin
    tel = request.POST.get('telphone')
    mob = request.POST.get('mobilephone')
    id_flag = request.POST.get('id_flag')
    user = request.session.get('username')
    print(username, addr, zipcode, tel)
    user_i = TUser.objects.get(username=user)
    if not id_flag:
        # No saved address selected: persist the freshly entered one.
        TAddress.objects.create(username=username, address=addr, zipcode=zipcode,
                                telephone=tel, mobilephone=mob, user_id=user_i.id)
    # Order id: a random permutation of the ten digits (sampled w/o replacement).
    code = random.sample(string.digits, 10)
    ord_id = ''.join(code)
    car_inf = request.session.get('car_inf')
    ord_price = car_inf.all_price
    ord_num = len(car_inf.car_item)
    del request.session['car_inf']
    username = request.session.get('username')
    user = TUser.objects.get(username=username)
    # Empty the user's cart rows now that the order is placed.
    TShop.objects.filter(user_id=user.id).delete()
    return render(request, 'indent ok.html',
                  {'ord_id': ord_id, 'ord_price': ord_price,
                   'ord_num': ord_num, 'username': username})
#-----------地址响应逻辑------------
def address(request):
    """AJAX endpoint: return one saved address as JSON by its primary key.

    On any failure (missing/invalid id, no matching row) responds with the
    plain string '1', which the front-end treats as "not found".
    """
    try:
        pk = request.GET.get('id')  # renamed: `id` shadowed the builtin
        addr_c = TAddress.objects.get(id=pk)

        def mydefault(u):
            # json serializer hook: flatten the TAddress model to a dict.
            if isinstance(u, TAddress):
                return {'username': u.username, 'address': u.address,
                        'zipcode': u.zipcode, 'tel': u.telephone,
                        'mobile': u.mobilephone}

        if addr_c:
            return JsonResponse(addr_c, safe=False,
                                json_dumps_params={'default': mydefault})
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; behavior for lookup errors is unchanged.
        return HttpResponse('1')
987,350 | 14a20a28b9144603a98dee5b543c042dc1dabae1 | import json#cPickle as pickle
import cv2
import numpy as np
from sys import stdout
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from scipy.misc import imsave
import time
from keras import backend as K
from sklearn.datasets import load_files
from keras.utils import np_utils
import numpy as np
from glob import glob
# define function to load train, test, and validation datasets
def load_dataset(path):
    """Load an image-folder dataset; return (file paths, one-hot targets)."""
    raw = load_files(path)
    file_paths = np.array(raw['filenames'])
    one_hot_targets = np_utils.to_categorical(np.array(raw['target']), 133)
    return file_paths, one_hot_targets
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
    """Load one image as a (1, 224, 224, 3) float tensor ready for Keras."""
    # PIL image -> (224, 224, 3) array -> prepend the batch axis.
    pil_img = image.load_img(img_path, target_size=(224, 224))
    arr = image.img_to_array(pil_img)
    return arr[np.newaxis, ...]
def paths_to_tensor(img_paths):
    """Stack per-image tensors into one (N, 224, 224, 3) array (with progress bar)."""
    return np.vstack([path_to_tensor(p) for p in tqdm(img_paths)])
def _make_setting(*layers):
    """Build one hyper-parameter dict from per-layer (kernel, stride, pool) specs.

    Depth-i filter counts are fixed at 16/32/64/64, padding is always
    'valid' and pool stride always equals pool size — exactly matching the
    original hand-written settings table this replaces.
    """
    filter_counts = [16, 32, 64, 64]
    setting = {}
    for i, (k_size, stride, pool) in enumerate(layers):
        p = 'l%d_' % (i + 1)
        setting[p + 'filters'] = filter_counts[i]
        setting[p + 'kSize'] = k_size
        setting[p + 'strides'] = stride
        setting[p + 'padding'] = 'valid'
        setting[p + 'poolSize'] = pool
        setting[p + 'poolStrides'] = pool
    return setting


def _default_hp_settings():
    """Return the eight hyper-parameter combinations originally listed inline."""
    return [
        _make_setting((2, 1, 2), (4, 1, 2), (8, 1, 2)),
        _make_setting((4, 1, 2), (4, 1, 2), (4, 1, 2)),
        _make_setting((4, 2, 2), (4, 2, 2), (4, 2, 2)),
        _make_setting((4, 4, 2), (4, 2, 2), (4, 1, 2)),
        _make_setting((8, 1, 4), (4, 1, 4), (2, 1, 4)),
        _make_setting((8, 1, 4), (8, 1, 4), (8, 1, 4)),
        _make_setting((8, 1, 4), (8, 1, 2), (8, 1, 2)),
        _make_setting((4, 1, 2), (4, 1, 2), (4, 1, 2), (4, 1, 2)),
    ]


def _build_model(setting, input_shape):
    """Assemble the Conv/Pool stack described by `setting`.

    Layer 1 is always present; layers 2-4 are added only when the setting
    defines their kernel size (mirrors the original conditional blocks).
    """
    model = Sequential()
    model.add(Conv2D(filters=setting['l1_filters'],
                     kernel_size=setting['l1_kSize'],
                     strides=setting['l1_strides'],
                     padding=setting['l1_padding'],
                     activation='relu',
                     input_shape=input_shape,
                     name='conv_1'))
    model.add(MaxPooling2D(pool_size=setting['l1_poolSize'],
                           strides=setting['l1_poolStrides'],
                           padding=setting['l1_padding'],
                           name='pool_1'))
    for depth in (2, 3, 4):
        p = 'l%d_' % depth
        if (p + 'kSize') not in setting:
            continue
        model.add(Conv2D(filters=setting[p + 'filters'],
                         kernel_size=setting[p + 'kSize'],
                         strides=setting[p + 'strides'],
                         padding=setting[p + 'padding'],
                         activation='relu',
                         name='conv_%d' % depth))
        model.add(MaxPooling2D(pool_size=setting[p + 'poolSize'],
                               strides=setting[p + 'poolStrides'],
                               padding=setting[p + 'padding'],
                               name='pool_%d' % depth))
    # Global pooling + softmax classifier over the 133 dog breeds.
    model.add(GlobalAveragePooling2D())
    model.add(Dense(133, activation='softmax'))
    return model


def _visualize_weights(model, i_setting, img_width, img_height):
    """Gradient-ascent filter visualization for every conv layer.

    Writes one stitched PNG per conv layer to weightImages/hp<i>_<layer>.png.
    """
    def deprocess_image(x):
        # normalize tensor: center on 0., ensure std is 0.1
        x -= x.mean()
        x /= (x.std() + K.epsilon())
        x *= 0.1
        # clip to [0, 1]
        x += 0.5
        x = np.clip(x, 0, 1)
        # convert to RGB array
        x *= 255
        if K.image_data_format() == 'channels_first':
            x = x.transpose((1, 2, 0))
        return np.clip(x, 0, 255).astype('uint8')

    def normalize(x):
        # utility function to normalize a tensor by its L2 norm
        return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())

    input_img = model.input
    layer_dict = dict([(layer.name, layer) for layer in model.layers])
    layer_names = [name for name in layer_dict if 'conv' in name]
    for layer_name in layer_names:
        print(' Creating weight image for layer {n:s}'.format(n=layer_name))
        n_filters = layer_dict[layer_name].filters
        kept_filters = []
        for filter_index in range(n_filters):
            print(' Processing filter %d' % filter_index)
            start_time = time.time()
            # Loss that maximizes the mean activation of this filter.
            layer_output = layer_dict[layer_name].output
            if K.image_data_format() == 'channels_first':
                loss = K.mean(layer_output[:, filter_index, :, :])
            else:
                loss = K.mean(layer_output[:, :, :, filter_index])
            grads = normalize(K.gradients(loss, input_img)[0])
            iterate = K.function([input_img], [loss, grads])
            step = 1.
            # Start from gray noise and run gradient ascent.
            if K.image_data_format() == 'channels_first':
                input_img_data = np.random.random((1, 3, img_width, img_height))
            else:
                input_img_data = np.random.random((1, img_width, img_height, 3))
            input_img_data = (input_img_data - 0.5) * 20 + 128
            for _ in range(30):
                loss_value, grads_value = iterate([input_img_data])
                input_img_data += grads_value * step
                stdout.write('{r:s} Current loss value: {n:2.2f}'.format(r='\r', n=loss_value))
                stdout.flush()
                if loss_value <= 0.:
                    # some filters get stuck to 0, we can skip them
                    break
            print('')
            kept_filters.append((deprocess_image(input_img_data[0]), loss_value))
            print(' Filter %d processed in %ds' % (filter_index, time.time() - start_time))
        # Grid side length: smallest configured n whose n*n holds the filters.
        n = 8
        for limit, size in ((36, 6), (25, 5), (16, 4), (9, 3), (4, 2)):
            if n_filters <= limit:
                n = size
        # Highest-loss filters are assumed best-looking; sort them first.
        kept_filters.sort(key=lambda kf: kf[1], reverse=True)
        # Black canvas with a 5px margin between filter tiles.
        margin = 5
        width = n * img_width + (n - 1) * margin
        height = n * img_height + (n - 1) * margin
        stitched_filters = np.zeros((width, height, 3))
        for i in range(n):
            for j in range(n):
                try:
                    img, _ = kept_filters[i * n + j]
                    stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
                                     (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img
                except IndexError:
                    # Fewer filters than grid cells: leave the tile black.
                    pass
        print(' Saving image.')
        cv2.imwrite('weightImages/hp{n:g}_{l:s}.png'.format(n=i_setting, l=layer_name), stitched_filters)


def run(train_tensors, train_targets, valid_tensors, valid_targets, test_tensors, test_targets):
    """Train, visualize and evaluate one CNN per hyper-parameter setting.

    For each setting: build the model, train 5 epochs with checkpointing,
    reload the best weights, render per-filter weight images, measure test
    accuracy, and dump the accumulated results to the 'results' json file.
    """
    hpHistory = _default_hp_settings()
    for iSetting in range(len(hpHistory)):
        print('Testing setting {n:g} ***************************************************************************'.format(n=iSetting))
        startTime = time.time()
        print('Setting up model.')
        model = _build_model(hpHistory[iSetting], train_tensors[0].shape)
        model.summary()
        model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
        print('')
        print('Training model.')
        epochs = 5
        checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5', verbose=1, save_best_only=True)
        model.fit(train_tensors, train_targets, validation_data=(valid_tensors, valid_targets), epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
        train_time = time.time() - startTime
        # Reload the best (lowest validation loss) weights before evaluating.
        model.load_weights('saved_models/weights.best.from_scratch.hdf5')
        print('')
        print('Creating weight images.')
        _visualize_weights(model, iSetting,
                           train_tensors[0].shape[0], train_tensors[0].shape[1])
        # Test accuracy: fraction of argmax predictions matching the targets.
        dog_breed_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]
        test_accuracy = 100 * np.sum(np.array(dog_breed_predictions) == np.argmax(test_targets, axis=1)) / len(dog_breed_predictions)
        print('')
        print('Test accuracy: %.4f%%' % test_accuracy)
        hpHistory[iSetting]['accuracy'] = test_accuracy
        hpHistory[iSetting]['time'] = train_time
        hpHistory[iSetting]['i'] = iSetting
        # Re-dump all results after every setting so a crash loses nothing.
        with open('results', 'w') as file:
            file.write(json.dumps(hpHistory))
        print('Done in {n:g} seconds.'.format(n=time.time() - startTime))
        print('')
        print('')
if __name__ == "__main__":
print('Loading data.')
# load train, test, and validation datasets
train_files, train_targets = load_dataset('dogImages/train')
valid_files, valid_targets = load_dataset('dogImages/valid')
test_files, test_targets = load_dataset('dogImages/test')
# load list of dog names
dog_names = [item[20:-1] for item in sorted(glob("dogImages/train/*/"))]
print('Preparing tensors.')
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# pre-process the data for Keras
train_tensors = paths_to_tensor(train_files).astype('float32')/255
valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
test_tensors = paths_to_tensor(test_files).astype('float32')/255
print('Running.')
run(train_tensors, train_targets, valid_tensors, valid_targets, test_tensors, test_targets) |
987,351 | 4c9ccdebe93bff970b17ae8c1f114f76928faabc | import numpy
from scipy import misc
def get_picture(paths):
    """Dump the lower-left triangle of each RGBA image in `paths` into a
    memory-init file named "texture".

    Each included pixel is written as 24 bits (R, G, B — 8 bits each) on one
    line, preceded by an @address marker; the start address of every image
    is echoed to stdout in hex.
    """
    address = 0
    # `with` replaces the original open()/close() pair so the file is closed
    # even if imread or the loops raise.
    with open("texture", "w") as fo:
        for path in paths:
            # NOTE(review): scipy.misc.imread was removed in modern scipy;
            # consider imageio.imread as a drop-in replacement.
            image = misc.imread(path, mode="RGBA")
            rows, cols = image.shape[0], image.shape[1]
            # Lower-triangular mask over the pixel grid.
            tri = numpy.tril(numpy.ones((rows, cols), dtype=int))
            for i in range(rows):
                for j in range(cols):
                    if i == 0 and j == 0:
                        # Echo this image's start address (hex, no 0x prefix).
                        print(str(hex(address))[2:])
                    if tri[i][j] == 1:
                        fo.write("@{0}\n".format(str(hex(address))[2:]))
                        address = address + 1
                        # Rows are flipped vertically; alpha channel dropped.
                        fo.write(numpy.binary_repr(image[rows - i - 1][j][0], width=8))
                        fo.write(numpy.binary_repr(image[rows - i - 1][j][1], width=8))
                        fo.write(numpy.binary_repr(image[rows - i - 1][j][2], width=8))
                        fo.write("\n")


if __name__ == "__main__":
    get_picture(["5_Target.png", "5_Target.png"])
987,352 | 6baf8eedbcbc433251641ec21e46b114415ebf5f | """The main module of Space Invaders game
"""
import os
import time
import random
from pathlib import Path
import pygame
from tools.asset_library import AssetsLibrary
class InputsTemp:
    """Temporary stand-in for a proper inputs/configuration source."""

    def __init__(self):
        # Window size as (width, height) in pixels.
        self.width_height = (750, 750)
def setup_display(width_height: tuple, caption: str="Space Invaders"):
    """Create the game window and set its title.

    Args:
        width_height (tuple): window size as (width, height).
        caption (str, optional): window title. Defaults to "Space Invaders".
    """
    surface = pygame.display.set_mode(width_height)
    pygame.display.set_caption(caption)
    return surface
def initialize():
    # One-time pygame subsystem setup (fonts only — used by the HUD labels).
    pygame.font.init()
def main():
    """Main function of Space Invaders game: setup plus the render loop."""
    initialize()
    inputs = InputsTemp()
    ui_font = pygame.font.SysFont("Comic Sans MS", 50)
    assets_library = AssetsLibrary((Path(__file__).parent / "Assets"))
    # todo: create display class to wrap display from pygame
    window = setup_display(inputs.width_height)
    background_img = assets_library.assets.bg_black
    run = True
    FPS = 60
    lives = 5   # NOTE(review): never decremented yet
    level = 1   # NOTE(review): never incremented yet
    clock = pygame.time.Clock()
    # HUD margins in pixels.
    ui_margin = {
        "left": 10,
        "right": 10,
        "top": 10,
        "bottom": 10,
    }

    def redraw_window():
        # Background first, then the HUD labels in the top corners.
        window.blit(background_img.get_image(inputs.width_height), (0, 0))
        lives_label = ui_font.render(f"lives: {lives}", 1, (255, 255, 255))
        level_label = ui_font.render(f"level: {level}", 1, (255, 255, 255))
        window.blit(lives_label, (ui_margin["left"], ui_margin["top"]))
        window.blit(level_label, (inputs.width_height[0] - level_label.get_width() - ui_margin["right"], ui_margin["top"]))
        pygame.display.update()

    while run:
        clock.tick(FPS)  # cap the frame rate
        redraw_window()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                print("Game ended")


if __name__ == "__main__":
    main()
|
987,353 | 8b74a182b773c829ee7f15aaeedf13a50ffc0752 | from django.contrib import admin
from Trainer.models import UserProfileInfo, Document
# Register your models here.
# Expose both models in the Django admin with default ModelAdmin options.
admin.site.register(UserProfileInfo)
admin.site.register(Document)
|
987,354 | 386de61934231cbdcad355fe86d9a1efffd4688d | from PIL import Image
import os
# Reference : https://note.nkmk.me/en/python-pillow-add-margin-expand-canvas/
def resizing():
    """Pad every image under archive/images/<breed>/ to a square, resize it
    to 256x256 and save it under dog_resize/<breed>/ as JPEG (quality 95).
    """
    dir_path = 'archive/images'
    for dir_name in os.listdir(dir_path):
        if dir_name == ".DS_Store":  # macOS metadata, not a breed folder
            continue
        file_path = f"{dir_path}/{dir_name}"
        for file_name in os.listdir(file_path):
            img = Image.open(f"archive/images/{dir_name}/{file_name}")
            rgb_img = img.convert("RGB")
            img_new = change2square(rgb_img, (0, 0, 0)).resize((256, 256))
            new_dir = f"dog_resize/{dir_name}"
            # makedirs also creates the missing dog_resize parent directory;
            # the original os.mkdir fails whenever the parent is absent, and
            # exist_ok avoids the isdir/mkdir race.
            os.makedirs(new_dir, exist_ok=True)
            img_new.save(f"{new_dir}/{file_name}", quality=95)
    # img_resize = img.resize((256, 256))
    # img_resize.save('dog_resize/resize2.jpg')
# img_resize = img.resize((256, 256))
# img_resize.save('dog_resize/resize2.jpg')
def change2square(pill_img, background_color):
    """Return `pill_img` centered on a square canvas of its longer side.

    Already-square images are returned unchanged (no copy is made).
    """
    width, height = pill_img.size
    if width == height:
        return pill_img
    side = max(width, height)
    canvas = Image.new(pill_img.mode, (side, side), background_color)
    if width > height:
        # Landscape: pad top and bottom equally.
        canvas.paste(pill_img, (0, (width - height) // 2))
    else:
        # Portrait: pad left and right equally.
        canvas.paste(pill_img, ((height - width) // 2, 0))
    return canvas
# NOTE(review): executes immediately at import time; consider guarding with
# `if __name__ == "__main__":`.
resizing()
|
987,355 | 909e52a35646a8207030d558c30c1e1255f582f0 | from microbit import *
def draw_counter(counter):
    """Light one LED (brightness 9) per counted unit, filling column by
    column: pixel (x, y) is lit when counter > 5*x + y, otherwise off."""
    for x in range(5):
        for y in range(5):
            brightness = 9 if counter > 5 * x + y else 0
            display.set_pixel(x, y, brightness)
# Event loop: each press of button A increments and redraws the counter.
counter = 0
while True:
    if button_a.was_pressed():
        counter += 1
        draw_counter(counter)
987,356 | 2a3b1cc04a215982e50b6829073bd13b09896b66 | from typing import Any, Optional
from httpx import AsyncClient, TimeoutException
from .....config import config
from .....exceptions import ConfigError
from ....utils import http_status_handler
from .....exceptions import HTTPException
def get_uri(uri: Optional[str]) -> str:
    """Validate the configured base URI and strip any trailing slashes.

    Raises ConfigError when the URI is missing or empty.
    """
    if uri:
        return uri.rstrip('/')
    raise ConfigError
async def get(uri: str, **params: Any) -> Any:
    """GET `uri` with `params` as the query string and return decoded JSON.

    `timeout` may be passed as a keyword: it configures the HTTP client and
    is *removed* from the query parameters (the original used params.get,
    which leaked `timeout` into the outgoing query string).
    Timeouts propagate as TimeoutException; any other failure is wrapped in
    a 500 HTTPException. Non-2xx responses are handled by
    http_status_handler.
    """
    timeout = params.pop('timeout', config.TIMEOUT)
    try:
        async with AsyncClient(timeout=timeout) as client:
            response = await client.get(uri, params=params)
    except TimeoutException:
        raise
    except Exception:
        # Narrowed from a bare `except:` so task cancellation and
        # SystemExit are no longer converted into a 500.
        raise HTTPException(
            status_code=500, detail='500 Server error')
    http_status_handler(response)
    return response.json()
async def userbest(code: str, songname: str, difficulty: int) -> Any:
    """Query /user/best for one player's score on a single chart."""
    endpoint = get_uri(config.BOTARCAPI_URI) + '/user/best'
    return await get(endpoint, usercode=code,
                     songname=songname, difficulty=difficulty)
async def userbest30(code: str) -> Any:
    """Query /user/best30 (slow endpoint; uses the extended b30 timeout)."""
    endpoint = get_uri(config.BOTARCAPI_URI) + '/user/best30'
    return await get(endpoint, usercode=code, timeout=config.TIMEOUT_B30)
async def userinfo(code: str, recent: int = 1) -> Any:
    """Query /user/info, optionally including recent-play data."""
    endpoint = get_uri(config.BOTARCAPI_URI) + '/user/info'
    return await get(endpoint, usercode=code, recent=int(recent))
async def songalias(songid: str) -> Any:
    """Query /song/alias for the aliases of one song id."""
    endpoint = get_uri(config.BOTARCAPI_URI) + '/song/alias'
    return await get(endpoint, songid=songid)
async def songinfo(songname: str) -> Any:
    """Query /song/info by (possibly fuzzy) song name."""
    endpoint = get_uri(config.BOTARCAPI_URI) + '/song/info'
    return await get(endpoint, songname=songname)
async def songrandom(start: Optional[int] = None, end: Optional[int] = None) -> Any:
    """Query /song/random; constrained to [start, end) when `start` is given.

    A missing `end` defaults to start + 1.
    """
    endpoint = get_uri(config.BOTARCAPI_URI) + '/song/random'
    if start is None:
        return await get(endpoint)
    upper = end if end is not None else start + 1
    return await get(endpoint, start=start, end=upper)
async def songrating(start: Optional[int] = None, end: Optional[int] = None) -> Any:
    """Query /song/rating; constrained to [start, end) when `start` is given.

    A missing `end` defaults to start + 1. The original annotated
    `start: int` (with no default) while still None-checking it; the
    signature is relaxed to Optional with a default so it matches the
    actual contract and mirrors songrandom. Existing callers that pass
    `start` are unaffected.
    """
    endpoint = get_uri(config.BOTARCAPI_URI) + '/song/rating'
    if start is not None:
        if end is None:
            end = start + 1
        return await get(endpoint, start=start, end=end)
    return await get(endpoint)
|
987,357 | 53c5eb907860eb7b4dc22eea30063f94a93a0a94 | """ Setup python project. """
|
987,358 | 05775d40a355affc0d469888ee93da9447189954 | #!/usr/bin/python3.5
gunicorn -b :5000 -w 1 -t 120 jtyd_spider_run:app >> service.log 2>&1 &
ps x | grep jtyd_spider_run | grep -v grep | cut -c 1-5 | xargs kill -9
nohup python -m celery worker -l INFO -c 5 -A apps.celery_init.celery -B &
ps x | grep celery | grep -v grep | cut -c 1-5 | xargs kill -9
# 创建虚拟环境
virtualenv jtyd_spider_env
source jtyd_spider_env/bin/activate
|
987,359 | f8fb703303876e8d2487102ae1a157ff29318c24 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
# NB: This file exists for convenience in development by catching the ModuleNotFoundError condition and
# appending to the system-path. This file mimics the automatically generated CLI wrapper that the
# package-build process creates, however it is not the actual file installed by the package.
# Prefer the installed package; during development fall back to the
# repository checkout one directory up.
try:
    from elasticsearch_kibana_cli.cli import entrypoints
except ModuleNotFoundError as e:
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    from elasticsearch_kibana_cli.cli import entrypoints

if __name__ == '__main__':
    # Strip setuptools wrapper suffixes so argv[0] matches the console name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(entrypoints.eskbcli())
987,360 | e3cb5838d70458198fd70b41ee1483c55e50f5ad | """ Setup script for package """
from setuptools import setup, find_packages
with open("requirements.txt") as reqs:
requirements = reqs.read().splitlines()
setup(
name="fleetmanagement",
version="0.1",
description='Predictive model for Fleet Management problem',
author='Pau Vilar',
author_email='pau.vilar.ribo@gmail.com',
packages=find_packages(),
python_requires=">=3.5",
install_requires=requirements
)
|
987,361 | 35a0c16f338ff62bd52037dea4f80eca1a71c20f | """
Tests for register()
"""
from auth import register, MAX_USERNAME, MIN_PASSWORD, MAX_NAME
from data import data
from error import InputError
from helpers import expect_error
def test_long_username():
    """Registration must reject usernames longer than MAX_USERNAME."""
    too_long = "a" * (MAX_USERNAME + 1)
    expect_error(register, InputError, too_long, "abcdef", "a", "a", "a")
def test_long_password():
    """Registration must reject passwords shorter than MIN_PASSWORD."""
    short_pw = "a" * (MIN_PASSWORD - 1)
    expect_error(register, InputError, "abcdef", short_pw, "a", "A", "a")
def test_empty_username():
    """Registration must reject an empty username."""
    expect_error(register, InputError, "", "abcdef", "A", "A", "A")
def test_long_name():
    """Registration must reject first or last names longer than MAX_NAME."""
    oversized = "a" * (MAX_NAME + 1)
    expect_error(register, InputError, "a", "abcdef", oversized, "a", "a")
    expect_error(register, InputError, "a", "abcdef", "a", oversized, "a")
def test_empty_name():
    """Registration must reject an empty first or last name."""
    expect_error(register, InputError, "a", "abcefw", "", "a", "a")
    expect_error(register, InputError, "a", "abcefw", "a", "", "a")
def test_empty_email():
    """Registration must reject an empty email address."""
    expect_error(register, InputError, "a", "abdkjjd", "a", "A", "")
def test_nonalpha_name():
    """Registration must reject names containing non-alphabetical characters."""
    expect_error(register, InputError, "a", "abcdef", "a1b2", "a", "a")
    expect_error(register, InputError, "a", "abcdef", "a", "a1b2", "a")
def test_username_not_unique(bot):
    """Registration must reject a username that is already taken."""
    expect_error(register, InputError, bot.username, "abcdef", "a", "a", "a")
def test_email_not_unique(bot):
    """Registration must reject an email address that is already in use."""
    expect_error(register, InputError, "a", "abcdef", "a", "a", bot.email)
def test_success_register():
    """A valid registration succeeds, stores the user, and logs them in."""
    assert not register("abc123", "qwerty123456", "Bob", "John", "abc@def.com")
    # The user record exists and is flagged as logged in.
    new_user = data.users.get("abc123")
    assert new_user
    assert new_user.logged_in == True
987,362 | 3b61138bf75faa65a905fcfd1760ad4eedff356d | import datetime
class Scroller(object):
    """A value that glides between min_value and max_value over time.

    While a scroll is active the effective value is extrapolated from the
    wall clock; stopping a scroll freezes the extrapolated value in place.
    """

    def __init__(self, min_value, max_value, speed=None):
        self.min_value = min_value
        self.max_value = max_value
        self._value = min_value
        # Default speed crosses the full range in ten seconds.
        self._scroll_speed = (max_value - min_value) / 10 if speed is None else speed
        self._scroll_start = None
        self._scroll_direction = 0

    def _value_at(self, t):
        """Value extrapolated to time `t`, clamped to the allowed range."""
        if self._scroll_direction == 0:
            return self._value
        elapsed = (t - self._scroll_start).total_seconds()
        candidate = self._value + elapsed * self._scroll_speed * self._scroll_direction
        return min(max(candidate, self.min_value), self.max_value)

    def _finish_scroll(self, t):
        """Bake the extrapolated value into _value if a scroll is active."""
        if self._scroll_direction != 0:
            self._value = self._value_at(t)

    def scroll_down(self, scroll=True):
        self.scroll(-1, scroll)

    def scroll_up(self, scroll=True):
        self.scroll(1, scroll)

    def scroll(self, direction, scroll=True):
        """Start scrolling in `direction` (+1/-1); `scroll=False` stops instead."""
        if not scroll:
            self.scroll_stop()
            return
        now = datetime.datetime.utcnow()
        self._finish_scroll(now)
        self._scroll_start = now
        self._scroll_direction = direction

    def scroll_stop(self):
        """Freeze the current extrapolated value and clear the scroll state."""
        now = datetime.datetime.utcnow()
        self._finish_scroll(now)
        self._scroll_start = None
        self._scroll_direction = 0

    def get_value(self):
        if self._scroll_direction == 0:
            return self._value
        return self._value_at(datetime.datetime.utcnow())

    def set_value(self, value):
        self._value = value
        self._scroll_start = datetime.datetime.utcnow()

    value = property(get_value, set_value)

    def get_speed(self):
        return self._scroll_speed

    def set_speed(self, speed):
        """Change speed without jumping: bake the current value first."""
        now = datetime.datetime.utcnow()
        self._value = self._value_at(now)
        self._scroll_start = now
        self._scroll_speed = speed

    speed = property(get_speed, set_speed)
|
987,363 | bde3ca1cd001e6ede7f5cf4f63e7cfdc99c92ad2 |
def parse_roman_numeral(num):
    """Convert a Roman numeral string to its integer value."""
    symbol_values = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    digits = [symbol_values[ch] for ch in num]
    digits.append(0)  # sentinel so the last real digit always has a successor
    total = 0
    for i in range(len(digits) - 1):
        if digits[i] >= digits[i + 1] or digits[i] == 0:
            total += digits[i]
        else:
            # Subtractive pair (e.g. IV): count the difference and zero out
            # the larger digit so it is not counted again.
            total += digits[i + 1] - digits[i]
            digits[i + 1] = 0
    return total
987,364 | a499b1f7f8dbd452391683b5ba2c4b37290340c1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Scrape ~1700 chocolate bar reviews from a static HTML table and analyse
them with BeautifulSoup + pandas + matplotlib.

The rating scale is 1-5: a 1 is "unpleasant" chocolate, while a 5 is a
bar that transcends "beyond the ordinary limits".

Questions explored: Where are the best cocoa beans grown?  Which
countries produce the highest-rated bars?  What is the relationship
between cocoa solids percentage and rating?
"""
from bs4 import BeautifulSoup
import requests
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Fetch the raw HTML of the review table (requires network access).
webpage = requests.get('https://s3.amazonaws.com/codecademy-content/courses/beautifulsoup/cacao/index.html')
# Parse the response body into a traversable soup object.
soup = BeautifulSoup(webpage.content, 'html.parser')
# --- Ratings histogram -------------------------------------------------
# Grab every cell tagged with the "Rating" CSS class.
ratings_tags = soup.find_all(attrs={'class': 'Rating'})
ratings = []
# The first Rating tag is the column header, so it is skipped; the rest
# are converted to float for numeric work later.
for tag in ratings_tags[1:]:
    tag_content = tag.get_text()
    tag_content = float(tag_content)
    ratings.append(tag_content)
plt.hist(ratings)
plt.title('Distribution of Chocolate Ratings')
plt.show()
plt.clf()  # clear the figure before the next plot
# --- Top-10 companies --------------------------------------------------
# Pair each company with its rating, then group by company and rank the
# mean ratings.
company_tags = soup.select('.Company')
company_names = []
# Skip the header cell here as well.
for tag in company_tags[1:]:
    company_names.append(tag.get_text())
dictionary = {'Company': company_names, 'Ratings': ratings}
df_1 = pd.DataFrame.from_dict(dictionary)
# The ten companies with the highest average rating.
ten_best = df_1.groupby('Company').Ratings.mean().nlargest(10)
print(ten_best)
# --- Rating vs cocoa percentage ---------------------------------------
# Collect the "Cocoa Percent" column as floats, stripping the trailing
# '%' character (header row skipped as before).
cocoa_percent_tags = soup.select('.CocoaPercent')
cocoa_percent = []
for tag in cocoa_percent_tags[1:]:
    percent = float(tag.get_text().strip('%'))
    cocoa_percent.append(percent)
# Add the percentages as a new DataFrame column.
df_1['CocoaPercentage'] = np.array(cocoa_percent)
# Scatter plot with a numpy degree-1 (linear) best-fit line overlaid to
# reveal any correlation between cocoa content and rating.
plt.scatter(df_1.CocoaPercentage, df_1.Ratings)
plt.title('Ratings vs Percentage of Cocoa')
plt.xlabel('Percentage')
plt.ylabel('Rating')
z = np.polyfit(df_1.CocoaPercentage, df_1.Ratings, 1)
line_function = np.poly1d(z)
plt.plot(df_1.CocoaPercentage, line_function(df_1.CocoaPercentage), "r--")
plt.show()
|
987,365 | 5bbce88095f718102431b165ef922d567cd48950 | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from dnc_msgs/Cmd_WF.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Cmd_WF(genpy.Message):
_md5sum = "d06405f78ad4d1752ebdd64f2ef906dc"
_type = "dnc_msgs/Cmd_WF"
_has_header = False #flag to mark the presence of a Header object
_full_text = """#
# control cmd for weifang qingsaoche
#
#########################
bool enable_steering
bool enable_braking
bool enable_driving
bool enable_Estop
bool enable_gear
#########################
float32 sw_deg # steering wheel angle
float32 sw_rad
float32 speed_ms
float32 speed_kms
float32 ax_ms2
float32 omega_rad
int32 gear_mode # 0: N, // 1: forward , // 2 backward, // 3: stop
#########################
float32 steering
float32 speed"""
__slots__ = ['enable_steering','enable_braking','enable_driving','enable_Estop','enable_gear','sw_deg','sw_rad','speed_ms','speed_kms','ax_ms2','omega_rad','gear_mode','steering','speed']
_slot_types = ['bool','bool','bool','bool','bool','float32','float32','float32','float32','float32','float32','int32','float32','float32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
enable_steering,enable_braking,enable_driving,enable_Estop,enable_gear,sw_deg,sw_rad,speed_ms,speed_kms,ax_ms2,omega_rad,gear_mode,steering,speed
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Cmd_WF, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.enable_steering is None:
self.enable_steering = False
if self.enable_braking is None:
self.enable_braking = False
if self.enable_driving is None:
self.enable_driving = False
if self.enable_Estop is None:
self.enable_Estop = False
if self.enable_gear is None:
self.enable_gear = False
if self.sw_deg is None:
self.sw_deg = 0.
if self.sw_rad is None:
self.sw_rad = 0.
if self.speed_ms is None:
self.speed_ms = 0.
if self.speed_kms is None:
self.speed_kms = 0.
if self.ax_ms2 is None:
self.ax_ms2 = 0.
if self.omega_rad is None:
self.omega_rad = 0.
if self.gear_mode is None:
self.gear_mode = 0
if self.steering is None:
self.steering = 0.
if self.speed is None:
self.speed = 0.
else:
self.enable_steering = False
self.enable_braking = False
self.enable_driving = False
self.enable_Estop = False
self.enable_gear = False
self.sw_deg = 0.
self.sw_rad = 0.
self.speed_ms = 0.
self.speed_kms = 0.
self.ax_ms2 = 0.
self.omega_rad = 0.
self.gear_mode = 0
self.steering = 0.
self.speed = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_5B6fi2f().pack(_x.enable_steering, _x.enable_braking, _x.enable_driving, _x.enable_Estop, _x.enable_gear, _x.sw_deg, _x.sw_rad, _x.speed_ms, _x.speed_kms, _x.ax_ms2, _x.omega_rad, _x.gear_mode, _x.steering, _x.speed))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 41
(_x.enable_steering, _x.enable_braking, _x.enable_driving, _x.enable_Estop, _x.enable_gear, _x.sw_deg, _x.sw_rad, _x.speed_ms, _x.speed_kms, _x.ax_ms2, _x.omega_rad, _x.gear_mode, _x.steering, _x.speed,) = _get_struct_5B6fi2f().unpack(str[start:end])
self.enable_steering = bool(self.enable_steering)
self.enable_braking = bool(self.enable_braking)
self.enable_driving = bool(self.enable_driving)
self.enable_Estop = bool(self.enable_Estop)
self.enable_gear = bool(self.enable_gear)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_5B6fi2f().pack(_x.enable_steering, _x.enable_braking, _x.enable_driving, _x.enable_Estop, _x.enable_gear, _x.sw_deg, _x.sw_rad, _x.speed_ms, _x.speed_kms, _x.ax_ms2, _x.omega_rad, _x.gear_mode, _x.steering, _x.speed))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 41
(_x.enable_steering, _x.enable_braking, _x.enable_driving, _x.enable_Estop, _x.enable_gear, _x.sw_deg, _x.sw_rad, _x.speed_ms, _x.speed_kms, _x.ax_ms2, _x.omega_rad, _x.gear_mode, _x.steering, _x.speed,) = _get_struct_5B6fi2f().unpack(str[start:end])
self.enable_steering = bool(self.enable_steering)
self.enable_braking = bool(self.enable_braking)
self.enable_driving = bool(self.enable_driving)
self.enable_Estop = bool(self.enable_Estop)
self.enable_gear = bool(self.enable_gear)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_5B6fi2f = None
def _get_struct_5B6fi2f():
global _struct_5B6fi2f
if _struct_5B6fi2f is None:
_struct_5B6fi2f = struct.Struct("<5B6fi2f")
return _struct_5B6fi2f
|
987,366 | 7e4ac3088bcbb1b73e28270fd1ce5ca9ac9a229b | #encoding: utf-8
__author__ = 'Jon'
import tornado.web
import tornado.ioloop
import tornado.escape
import requests
from tornado import gen
from tornado.options import options, define
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor
from bs4 import BeautifulSoup
from pprint import pprint
from tornadohttpclient import TornadoHTTPClient
# Command-line option: --port for the listen port (defaults later to 8060).
define('port')
tornado.options.parse_command_line()
##################################################################
# Setting
##################################################################
# Size of the shared thread pool used for off-loading blocking work.
POOL_COUNT = 10
# Target URL used by the SuperSyncService demo endpoint.
URL = 'http://www.baidu.com'
class BaseService(object):
    """Service base: one thread pool shared by all service subclasses."""
    executor = ThreadPoolExecutor(max_workers=POOL_COUNT)
class SyncService(BaseService):
    '''Log in to 51job ehire with blocking `requests` calls.

    NOTE(review): the account credentials below are hard-coded -- they
    should live in configuration/secrets and are now exposed in VCS.
    '''
    def find(self):
        pprint('Into requests')
        s = requests.session()
        pprint('Start login')
        # Fetch the login page first to harvest the hidden form tokens.
        f = s.get('http://ehire.51job.com')
        soup = BeautifulSoup(f.text, "html.parser")
        hidAccessKey = soup.find('input', {'name': 'hidAccessKey'})['value']
        fksc = soup.find('input', {'name': 'fksc'})['value']
        hidEhireGuid = soup.find('input', {'name': 'hidEhireGuid'})['value']
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'http://ehire.51job.com',
            'Referer': 'http://ehire.51job.com/MainLogin.aspx',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
        }
        data = {'ctmName': '大岂网络',
                'userName': 'dqwl805',
                'password': '64079603sj',
                'oldAccessKey': hidAccessKey,
                'langtype': 'Lang=&Flag=1',
                'sc': fksc,
                'ec': hidEhireGuid,
                'isRememberMe': 'True'
                }
        res = s.post('https://ehirelogin.51job.com/Member/UserLogin.aspx', data=data, headers=headers)
        pprint('End login')
        pprint('Start force')
        # Kick out any concurrent session already holding the account.
        try:
            soup = BeautifulSoup(res.text, "html.parser")
            viewState = soup.find('input', {'name': '__VIEWSTATE'})['value']
            partURL = soup.find('form', {'id': 'form1'})['action']
            URL = 'http://ehire.51job.com/Member/' + partURL
            headers = {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Content-Type': 'application/x-www-form-urlencoded',
                'Origin': 'http://ehire.51job.com',
                'Referer': URL,
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
            }
            data = {'__EVENTTARGET': 'gvOnLineUser',
                    '__EVENTARGUMENT': 'KickOut$0',
                    '__VIEWSTATE': viewState
                    }
            res = s.post(URL, data=data, headers=headers)
        # NOTE(review): bare except silences every error, not only the
        # expected "no session to kick out" case.
        except: pass  # sometimes no forced logout is needed
        pprint('End force')
        pprint('Start fetch remain')
        # Fetch the remaining-job-posts counter page and print the number.
        resp = s.get('http://ehire.51job.com/CommonPage/JobsPostNumbList.aspx')
        pprint('End fetch remain')
        bs = BeautifulSoup(resp.text, "html.parser")
        print(bs.find('b', {'class': 'info_att'}).text)
class AsyncService(BaseService):
    '''Log in to 51job ehire with TornadoHTTPClient (non-blocking).

    Characteristics (translated from the original notes):
    * carries cookies across requests
    * requests-style API
    NOTE(review): same hard-coded credentials as SyncService.
    '''
    @gen.coroutine
    def find(self):
        pprint('Into tornadohttpclient')
        s = TornadoHTTPClient(force_instance = True)
        pprint('Start login')
        # Harvest the hidden form tokens from the login page.
        f = yield s.get('http://ehire.51job.com')
        soup = BeautifulSoup(f.body, "html.parser")
        hidAccessKey = soup.find('input', {'name': 'hidAccessKey'})['value']
        fksc = soup.find('input', {'name': 'fksc'})['value']
        hidEhireGuid = soup.find('input', {'name': 'hidEhireGuid'})['value']
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'http://ehire.51job.com',
            'Referer': 'http://ehire.51job.com/MainLogin.aspx',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
        }
        data = {'ctmName': '大岂网络',
                'userName': 'dqwl805',
                'password': '64079603sj',
                'oldAccessKey': hidAccessKey,
                'langtype': 'Lang=&Flag=1',
                'sc': fksc,
                'ec': hidEhireGuid,
                'isRememberMe': 'True'
                }
        res = yield s.post('https://ehirelogin.51job.com/Member/UserLogin.aspx', data=data, headers=headers)
        pprint('End login')
        pprint('Start force')
        # Kick out any concurrent session already holding the account.
        try:
            soup = BeautifulSoup(res.body, "html.parser")
            viewState = soup.find('input', {'name': '__VIEWSTATE'})['value']
            partURL = soup.find('form', {'id': 'form1'})['action']
            URL = 'http://ehire.51job.com/Member/' + partURL
            headers = {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Content-Type': 'application/x-www-form-urlencoded',
                'Origin': 'http://ehire.51job.com',
                'Referer': URL,
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
            }
            data = {'__EVENTTARGET': 'gvOnLineUser',
                    '__EVENTARGUMENT': 'KickOut$0',
                    '__VIEWSTATE': viewState
                    }
            res = yield s.post(URL, data=data, headers=headers)
        # NOTE(review): bare except silences every error here too.
        except: pass  # sometimes no forced logout is needed
        pprint('End force')
        pprint('Start fetch remain')
        resp = yield s.get('http://ehire.51job.com/CommonPage/JobsPostNumbList.aspx')
        pprint('End fetch remain')
        bs = BeautifulSoup(resp.body, "html.parser")
        pprint(bs.find('b', {'class': 'info_att'}).text)
        pprint('###Cookie###')
        pprint(s.cookie)
        # Persist the session cookie for find_next().
        # NOTE(review): assumes s.cookie is a plain string -- confirm
        # against tornadohttpclient's API before relying on this.
        with open('cookie.txt', 'w') as f:
            f.write(s.cookie)
    @gen.coroutine
    def find_next(self):
        """Replay the cookie saved by find() instead of logging in again."""
        pprint('Start recover cookie')
        s = TornadoHTTPClient(force_instance = True)
        with open('cookie.txt') as f:
            cookie_str = f.read()
        s.set_global_headers({ 'Cookie': cookie_str })
        resp = yield s.get('http://ehire.51job.com/CommonPage/JobsPostNumbList.aspx')
        pprint('End recover cookie')
        bs = BeautifulSoup(resp.body, "html.parser")
        pprint(bs.find('b', {'class': 'info_att'}).text)
class SuperSyncService(BaseService):
    """Runs a blocking request on the shared thread pool.

    @run_on_executor returns a Future, so callers can `yield` it without
    blocking the IOLoop.
    """
    @run_on_executor
    def find(self):
        r = requests.get(URL)
        return r.status_code
##################################################################
# Handler
##################################################################
class BaseHandler(tornado.web.RequestHandler):
    """Shared handler base: builds one of each service per request."""
    def __init__(self, application, request, **kwargs):
        super(BaseHandler, self).__init__(application, request, **kwargs)
        self.sync_service = SyncService()
        self.async_service = AsyncService()
        self.super_sync_service = SuperSyncService()
    def write_json(self, data):
        """Serialise *data* to JSON and write it to the response body."""
        self.write(tornado.escape.json_encode(data))
class SyncHandler(BaseHandler):
    """Blocking demo endpoint -- ties up the IOLoop for the whole login."""
    def prepare(self):
        print('into sync')
    def get(self):
        # find() returns None, so the response is always {"data": null}.
        result = self.sync_service.find()
        self.write_json({'data': result})
class AsyncHandler(BaseHandler):
    """Non-blocking endpoint; /async/1 replays the saved cookie."""
    def prepare(self):
        print('into async')
    @gen.coroutine
    def get(self, n):
        # `n` is the optional digit group from the URL ('' when absent):
        # truthy -> reuse the stored cookie, falsy -> do a full login.
        if n:
            result = yield self.async_service.find_next()
        else:
            result = yield self.async_service.find()
        self.write_json({'data': result})
class SuperSyncHandler(BaseHandler):
    """Off-loads the blocking request to the service's thread pool."""
    def prepare(self):
        print('into super sync')
    @gen.coroutine
    def get(self):
        # find() runs on the executor; yielding its Future keeps the
        # IOLoop free while the HTTP request is in flight.
        result = yield self.super_sync_service.find()
        self.write_json({'data': result})
class FastHandler(BaseHandler):
    """Trivial endpoint used to show the IOLoop stays responsive."""
    def prepare(self):
        print('into fast')
    def get(self):
        self.write('fast')
##################################################################
# Application
##################################################################
class Application(tornado.web.Application):
    """Thin wrapper; exists only to host future app-wide settings."""
    def __init__(self, handlers, **settings):
        tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == '__main__':
    app = Application([
        (r'/sync', SyncHandler),            # blocking I/O
        (r'/async/?(\d*)', AsyncHandler),   # async I/O
        (r'/supersync', SuperSyncHandler),  # thread pool
        (r'/fast', FastHandler)
    ], debug=True)
    # --port from the command line, falling back to 8060.
    port = options.port or 8060
    app.listen(port)
    print('listen %s...' % port)
    tornado.ioloop.IOLoop.instance().start()
987,367 | 6561fd61fcf043196f076d4478de19d0f6c85a95 | from tkinter import *
from tkinter import ttk
from ttkthemes import themed_tk as tk
import mysql.connector
import tkinter.messagebox
# Themed root window for the login screen.
rootl = tk.ThemedTk()
rootl.get_themes()
rootl.set_theme("radiance")
rootl.title("Module Registration System")
rootl.iconbitmap(r'images/upes.ico')
# Tk variables bound to the e-mail and password entry widgets.
mailL = StringVar()
pwordL = StringVar()
def clear_entry(event, entry):
    """Click callback: wipe the placeholder text from *entry*."""
    entry.delete(0, END)
def log():
    """Validate the login form and check credentials against MySQL.

    Reads the e-mail/password Tk variables, requires an address in the
    ddn.upes.ac.in domain, compares the password with the stored one and
    reports the outcome in a message box.
    """
    import re  # BUGFIX: `re` is used below but was never imported anywhere.
    emailL = mailL.get()
    passwordL = pwordL.get()
    # NOTE(review): DB credentials are hard-coded and the user password is
    # stored/compared in plain text -- passwords should be hashed.
    conn = mysql.connector.connect(host='localhost', user='root', password='', db='erp')
    def valid_mail(emailL):
        # Accept only addresses whose domain is ddn.upes.ac.in.
        y = re.findall('@([^ ]*)', emailL)
        if not y:
            # BUGFIX: an input without '@' used to raise IndexError here.
            return False
        return y[0] == "ddn.upes.ac.in"
    cur = conn.cursor()
    cur.execute(
        'CREATE TABLE IF NOT EXISTS Faculty_Register(FirstName VARCHAR(50),LastName VARCHAR(50),Email VARCHAR(100), Password VARCHAR(50))')
    if emailL != '' and valid_mail(emailL):
        # TODO(review): the error message below promises a minimum length
        # of 8 characters, but only non-emptiness is actually checked.
        if passwordL != '':
            try:
                cur.execute(
                    'SELECT Password FROM Faculty_Register WHERE Email = %s',
                    (emailL,))
                pas = cur.fetchone()
                if pas[0] == passwordL:
                    cur.execute('SELECT FirstName FROM Faculty_Register WHERE Email = %s',
                                (emailL,))
                    user = cur.fetchone()
                    tkinter.messagebox.showinfo('Status',
                                                user[0] + ', you are successfully logged in')
                else:
                    tkinter.messagebox.showinfo('Status',
                                                'Incorrect Password, Please try again...')
            except TypeError:
                # fetchone() returned None -> the e-mail is not registered.
                tkinter.messagebox.showinfo('Status',
                                            'You are not registered\nPlease register yourself first...')
        else:
            tkinter.messagebox.showerror('Error', 'Please fill the password carefully\nMinnimum Length of 8 charachters!!!')
    else:
        tkinter.messagebox.showerror('Error', 'Please enter a valid email!!!\nDomain should be ddn.upes.ac.in')
# Login button triggers the credential check.
AddBtn = ttk.Button(rootl, text="LOGIN", width=20, command=log)
AddBtn.pack(side=BOTTOM, pady=10)
bottomframe = Frame(rootl)
bottomframe.pack(side=BOTTOM)
# Banner image above the form.
luser = PhotoImage(file="images/loguser.png")
labelphoto = Label(rootl, image=luser)
labelphoto.pack(pady=20, padx=150)
l1 = Label(bottomframe, text="Email ID", font="fixedsys 10 normal")
l1.grid(row=0, column=0, padx=20)
e1 = ttk.Entry(bottomframe, textvar=mailL, width=30)
placeholder_text = 'EMAIL ID'
e1.insert(0, placeholder_text)
# Clear the placeholder on first left click.
e1.bind("<Button-1>", lambda event: clear_entry(event, e1))
e1.grid(row=0, column=1, padx=20)
bullet = "\u2022"  # bullet character used to mask the password
l2 = Label(bottomframe, text="Password", font="fixedsys 10 normal")
l2.grid(row=1, column=0, pady=10, padx=20)
e2 = ttk.Entry(bottomframe, show=bullet, textvar=pwordL, width=30)
placeholder_text = '12345678'
e2.insert(0, placeholder_text)
# BUGFIX: this was bound to "<Button-2>" (middle click); the password
# placeholder should clear on a normal left click like the email field.
e2.bind("<Button-1>", lambda event: clear_entry(event, e2))
e2.grid(row=1, column=1, pady=10, padx=20)
rootl.mainloop()
987,368 | b065636756707b7f6af90ce47ad9fc781d416ffe | '''
roommanger to optimize user room mapping
'''
from models.roommap import Roommap
import datetime
class RoomManager():
    """Optimises the assignment of users to rooms for the next 30 days."""
    def __init__(self, database, sensorManager):
        print("init Roommanger")
        self.DB = database
        self.sensorManager = sensorManager
    def checkRoomManagerEntrys(self):
        """
        returns all Room User Mapping entry in the next 30 days
        Returns
        -------
        mongodb.cursors
            containing all room user mapping entrys in the next 30 days
        """
        roommaps = self.DB.getRoomMapsByDatum(datetime.datetime.now().strftime("%Y-%m-%d"),
                                              (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y-%m-%d"))
        return roommaps
    def optimizeRoomMaps(self):
        """
        starts the room user mapping optimization.
        Directly updates the Database and sets the Room leds
        """
        # Every ISO date from today through today+30 (31 days).
        relevantDates = [(datetime.datetime.now(
        ) + datetime.timedelta(days=i)).strftime("%Y-%m-%d") for i in range(31)]
        for date in relevantDates:
            # Users whose workplan does NOT already cover this date.
            relevantUsers = [
                user for user in self.DB.getAllUsers() if date not in user['workplan']]
            for room in self.DB.getAllRooms():
                roomUsers = []
                currentUserCount = 0
                # Fill the room up to its staff capacity, first come first served.
                while currentUserCount < room['maxStaff']:
                    if len(relevantUsers) == 0:
                        break
                    roomUsers.append(relevantUsers.pop(0)['key'])
                    currentUserCount += 1
                # A room is "active" only if at least one user was assigned.
                if len(roomUsers) == 0:
                    state = False
                else:
                    state = True
                self.DB.updateRoomMap(
                    Roommap(datum=date, room=room['key'], users=roomUsers, active=state).getDict())
                # Drive the status LED only for the currently relevant day.
                # NOTE(review): assumes DB.dayoffset selects that day -- confirm.
                if date == (datetime.datetime.now()+datetime.timedelta(days=self.DB.dayoffset)).strftime('%Y-%m-%d'):
                    self.sensorManager.publisher.publish(
                        'actuator/stateled', {'room': room['key'], 'state': int(state)})
|
def _star_row(stars, spaces):
    """Return one row: a star group, a gap, and a mirrored star group."""
    return '*' * stars + ' ' * spaces + '*' * stars

def star_pattern(number):
    """Return the hourglass-like star pattern for *number* top-half rows.

    The top half widens the star groups while the central gap narrows;
    the bottom half mirrors it, omitting the widest row.  Returns the
    rows joined by newlines ('' for number <= 0).
    """
    rows = []
    stars = 1
    spaces = (2 * number) - 2
    for _ in range(number):  # top half: gap narrows, stars grow
        rows.append(_star_row(stars, spaces))
        stars += 1
        spaces -= 2
    # Step back to the widest row before mirroring downwards.
    stars -= 1
    spaces += 2
    for _ in range(number - 1):  # bottom half: gap widens, stars shrink
        stars -= 1
        spaces += 2
        rows.append(_star_row(stars, spaces))
    return '\n'.join(rows)

if __name__ == '__main__':
    number = int(input("Input = "))
    pattern = star_pattern(number)
    if pattern:  # match the original: print nothing at all for n <= 0
        print(pattern)
987,370 | fd91e978aedfd296e55b298e403d355ddbd5673b | import torch.nn as nn
import MyResNet
import torch
import torch.nn.functional as F
class Model(nn.Module):
    """Wrapper around a pretrained ResNet-50 with a 100000-way head.

    NOTE(review): num_classes=100000 suggests instance-level labels;
    confirm against the dataset definition.
    """
    def __init__(self):
        super(Model, self).__init__()
        self.cnn = MyResNet.resnet50(pretrained=True, num_classes=100000)
    def forward(self, image):
        # image -> class logits, delegated entirely to the backbone.
        x = self.cnn(image)
        return x
class Model_Test(nn.Module):
    """Inference-time twin of Model.

    NOTE(review): this class is byte-for-byte identical to Model; if the
    duplication is intentional (e.g. separate checkpoints), say so here,
    otherwise it could simply subclass or alias Model.
    """
    def __init__(self):
        super(Model_Test, self).__init__()
        self.cnn = MyResNet.resnet50(pretrained=True, num_classes=100000)
    def forward(self, image):
        x = self.cnn(image)
        return x
987,371 | 9744874ef5c63383bfca50ab7e19e88a01452ea2 | #manipulando string
# String manipulation demo.
texto = 'Técnico em Desenvolvimento de Sistemas'
print(texto)
# Examples of slicing the content.
print(texto[3])
print(texto[0:15])
# Common string methods:
print(len(texto))  # number of characters
print(texto.upper())  # everything upper-case
print(texto.lower())  # everything lower-case
print(texto.split())  # list of words, split on whitespace
print(texto.capitalize())  # only the first letter upper-case
print(texto.replace('Sistemas','Internet'))  # swap one word for another
|
987,372 | ac63ef9f9156df5d90f8b2c4088035ee7706313d | import json
from collections import deque
from importlib import import_module
from CLE.config import ANCHOR_STATE_IDLE, ANCHOR_STATE_CONFIGURED, ANCHOR_STATE_WORKING
from CLE.ds.coap_client import set_config, set_state, popqueue
# from CLE.ds.sql import objects as db_objects, AnchorModel, DefaultConfigModel, AnchorReportModel, JobModel
from CLE.ds.decawave import AnchorConfigurationFrame, AnchorStateFrame, CPPTXFrame, CPPRXFrame, BlinkFrame
from CLE.ds.rabbit import publish as rabbit_publish
from CLE.ds.solver import solve
# from CLE.ds.influxdb import publish as influx_publish
class Job:
    """A unit of work executed by the ExecutionPlan.

    `latency` is the number of plan ticks to wait before `work` runs;
    subclasses implement the payload in `work`.
    """
    # Back-reference to the ExecutionPlan, shared by every Job subclass.
    plan = None

    def __init__(self, latency=1, parameters=None):
        self.latency = latency
        self.parameters = parameters
        self._counter = latency

    @classmethod
    def set_plan(cls, plan):
        """Register the plan that all jobs report back to."""
        cls.plan = plan

    def its_time(self):
        """Consume one tick; True exactly when the wait has elapsed."""
        self._counter -= 1
        return self._counter == 0

    def need_continue(self):
        """True while ticks remain, i.e. the job must stay queued."""
        return self._counter != 0

    def work(self):
        """Payload hook -- subclasses must override."""
        raise NotImplementedError
class RegularJob(Job):
    """A recurring Job.

    Unlike Job, `its_time` re-arms `_counter` to `latency` whenever it
    fires, so the job runs every `latency` ticks instead of once.
    """
    def its_time(self):
        if super().its_time():
            self._counter = self.latency
            return True
        # BUGFIX: the original fell through and implicitly returned None;
        # callers only test truthiness, so False is fully compatible.
        return False
class InitAnchors(Job):
    """One-shot job: load all anchors from the DB and queue the follow-up
    job that matches each anchor's current lifecycle state.
    """
    async def work(self):
        # NOTE(review): db_objects/AnchorModel come from the import that is
        # commented out at the top of the file -- this raises NameError as-is.
        anchor_list = await db_objects.execute(AnchorModel.select())
        for anchor in anchor_list:
            # Idle anchors need configuration first ...
            if anchor.state == ANCHOR_STATE_IDLE:
                self.plan.add_job(ConfigureAnchorJob(anchor))
            # ... configured anchors are switched on ...
            if anchor.state == ANCHOR_STATE_CONFIGURED:
                self.plan.add_job(SetStateAnchorJob(anchor, ANCHOR_STATE_WORKING))
            # ... working anchors get a recurring poll every tick.
            if anchor.state == ANCHOR_STATE_WORKING:
                self.plan.add_job(PopQueueJob(anchor, 1))
class AnchorJob(Job):
    """Base for jobs that operate on a single anchor."""
    def __init__(self, anchor, *args, **kwargs):
        self.anchor = anchor
        super().__init__(*args, **kwargs)
class ConfigureAnchorJob(AnchorJob):
    """Push the newest active default configuration to one anchor."""
    async def work(self):
        # Latest active default config wins (highest id first).
        default_db_config = await db_objects.execute(
            DefaultConfigModel.select()
            .where(DefaultConfigModel.active == 1)
            .order_by(DefaultConfigModel.id.desc()))
        default_db_config = default_db_config[0]
        # Copy the DB row field-by-field into the binary config frame.
        anchor_config = AnchorConfigurationFrame()
        anchor_config.chan = default_db_config.chan
        anchor_config.prf = default_db_config.prf
        anchor_config.txPreambLength = default_db_config.txPreambLength
        anchor_config.rxPAC = default_db_config.rxPAC
        anchor_config.txCode = default_db_config.txCode
        anchor_config.rxCode = default_db_config.rxCode
        anchor_config.nsSFD = default_db_config.nsSFD
        anchor_config.dataRate = default_db_config.dataRate
        anchor_config.phrMode = default_db_config.phrMode
        anchor_config.sfdTO = default_db_config.sfdTO
        anchor_config.my_master_ID = default_db_config.my_master_ID
        anchor_config.role = default_db_config.role
        anchor_config.master_period = default_db_config.master_period
        anchor_config.submaster_delay = default_db_config.submaster_delay
        # Send the frame to the anchor over CoAP.
        response = await set_config(self.anchor.ip_address, anchor_config.to_binary())
        print('Set config result : %s\n%r\n' % (response.code, response.payload))
        # Persist the applied config and queue the state transition.
        self.anchor.config = anchor_config.to_json()
        await db_objects.update(self.anchor)
        self.plan.add_job(SetStateAnchorJob(self.anchor, ANCHOR_STATE_CONFIGURED))
class SetStateAnchorJob(AnchorJob):
    """Move an anchor into *state* and chain the next lifecycle step."""
    def __init__(self, anchor, state, *args, **kwargs):
        self.state = state
        super().__init__(anchor, *args, **kwargs)
    async def work(self):
        response = await set_state(self.anchor.ip_address,
                                   AnchorStateFrame(bytes([self.state])).to_binary())
        print('Set state result : %s\n%r\n' % (response.code, response.payload))
        self.anchor.state = self.state
        await db_objects.update(self.anchor)
        # Lifecycle chain: CONFIGURED -> WORKING -> recurring polling.
        if self.state == ANCHOR_STATE_CONFIGURED:
            self.plan.add_job(SetStateAnchorJob(self.anchor, ANCHOR_STATE_WORKING))
        if self.state == ANCHOR_STATE_WORKING:
            self.plan.add_job(PopQueueJob(self.anchor, 5))
class PopQueueJob(RegularJob, AnchorJob):
    """Recurring poll: drain one frame from the anchor's CoAP queue and
    fan it out to the DB, the solver, RabbitMQ and InfluxDB.
    """
    async def work(self):
        response = await popqueue(self.anchor.ip_address)
        print('Popqueue result: %s\n%r' % (response.code, response.payload))
        # First payload byte is the ASCII frame type:
        # '0' = CPP TX, '1' = CPP RX, '2' = blink.
        if response.payload[0] == 48: # 0x30
            frame = CPPTXFrame(response.payload)
        if response.payload[0] == 49: # 0x31
            frame = CPPRXFrame(response.payload)
        if response.payload[0] == 50: # 0x32
            frame = BlinkFrame(response.payload)
        # NOTE(review): an unknown type byte leaves `frame` unbound and the
        # next line raises NameError; also db_objects and influx_publish
        # come from imports commented out at the top of the file.
        report = await db_objects.create(AnchorReportModel,
                                         anchor=self.anchor, record=frame.to_json())
        # include solver
        solved_data = solve(frame)
        data = json.loads(frame.to_json())
        data["anchor_id"] = self.anchor.id
        data["solver"] = solved_data
        # publish to rabbitmq (payload shape: "{\"parameter\": \"value\"}")
        rabbit_publish(json.dumps(data))
        # publish to influxdb
        influx_publish(data)
class EmptyJob(Job):
    """Debug job: prints its parameters and does nothing else."""
    async def work(self):
        print("Empty work processed!")
        print(self.parameters)
class EmptyAnchorJob(AnchorJob):
    """Debug job bound to an anchor: prints its IP and the parameters."""
    async def work(self):
        print("Empty anchor work processed!")
        print(self.anchor.ip_address)
        print(self.parameters)
class RemoteJob(RegularJob):
    """Recurring: pick up externally submitted jobs from the DB.

    Each JobModel row names a job class in this module; the row is
    instantiated (once per anchor when anchor_ids is set) and deleted.
    """
    async def work(self):
        job_list = await db_objects.execute(JobModel.select())
        for job in job_list:
            job_class = job.name
            # Resolve the class name against this module dynamically.
            try:
                module_path, class_name = "execplan", job_class
                module = import_module(module_path)
                job_class = getattr(module, class_name)
            except (ImportError, AttributeError) as e:
                # NOTE(review): `e` is dropped; `raise ... from e` would
                # preserve the original cause for debugging.
                raise ImportError(job_class)
            parameters = {}
            if job.parameters:
                parameters = json.loads(job.parameters)
            if job.anchor_ids:
                # Fan out: one job instance per comma-separated anchor id.
                for anchor_id in job.anchor_ids.split(","):
                    anchor = await db_objects.get(AnchorModel, id=anchor_id)
                    self.plan.add_job(job_class(anchor=anchor, parameters=parameters))
            else:
                self.plan.add_job(job_class(parameters=parameters))
            await db_objects.delete(job)
class ExecutionPlan:
    """Tick-driven job queue.

    Each call to `process` walks the jobs currently queued; jobs whose
    tick counter has elapsed run their payload, and jobs that still have
    ticks left are re-queued for the next tick.
    """
    def __init__(self):
        # BUGFIX: the queue used to be a *class* attribute, so every
        # ExecutionPlan instance shared one deque; it is per-instance now.
        self.queue = deque()
        Job.set_plan(self)

    def add_job(self, job):
        """Schedule *job* for the coming ticks."""
        self.queue.append(job)

    async def process(self):
        """Run one tick: give every currently queued job a chance to fire.

        The length is snapshotted so jobs queued *during* this tick are
        not processed until the next one.
        """
        for _ in range(len(self.queue)):
            await self._process_job()

    async def _process_job(self):
        # Pop one job; run it if due; re-queue it if it has ticks left.
        job = self.queue.popleft()
        if job.its_time():
            await job.work()
        if job.need_continue():
            self.queue.append(job)
# Module-level singleton used by the rest of the service; the bootstrap
# jobs stay commented out while their DB imports are disabled above.
execution_plan = ExecutionPlan()
# execution_plan.add_job(InitAnchors())
# execution_plan.add_job(RemoteJob(3))
|
987,373 | d65735afce594deb4fa7f08b4b338269c6cff59a | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pandas import DataFrame,Series
# 1000 samples from a standard normal distribution.
s1 = Series(np.random.randn(1000))
# Histogram + KDE; rug draws one tick per observation along the axis,
# visualising the sample density.
# NOTE(review): distplot is deprecated in seaborn >= 0.11 (use histplot/displot).
sns.distplot(s1, hist=True, kde=True, rug=True)
sns.kdeplot(s1, shade=True)  # shade fills the area under the density curve
|
987,374 | f3a6553a0567d2696c3e278c74d1b3e216f02738 | from appium.webdriver.common.mobileby import MobileBy
from page.base_page import BasePage
from page.edit_member_page import EditMemberPage
class AddManuallyPage(BasePage):
    """Page object for the "add member manually" screen (Appium)."""
    def click_add_manually(self):
        """Tap the manual-add entry and land on the member edit page."""
        self.find_and_click((MobileBy.XPATH, '//*[@text="手动输入添加"]'))
        return EditMemberPage(self.driver)
    def get_toast(self):
        """Return the text of the currently displayed toast message."""
        result = self.find((MobileBy.XPATH, '//*[@class="android.widget.Toast"]')).text
        return result
|
987,375 | 160bfce80d1a32a16451f229abe5e9ca2509eac0 | import torch
import torch.nn as nn
class LSTM(nn.Module):
    """Text classifier: embedding -> multi-layer LSTM -> dropout -> linear.

    Args:
        vocab_size: size of the embedding vocabulary.
        embedding_dim: dimensionality of each token embedding.
        embedding_weight: optional pre-trained weight tensor of shape
            (vocab_size, embedding_dim), copied into the embedding layer.
        hidden_size: LSTM hidden state size.
        num_layers: number of stacked LSTM layers.
        output_size: number of target classes.
    """
    def __init__(self,
                 vocab_size,
                 embedding_dim,
                 embedding_weight,
                 hidden_size,
                 num_layers,
                 output_size):
        super(LSTM, self).__init__()
        self.emb = nn.Embedding(num_embeddings=vocab_size,
                                embedding_dim=embedding_dim)
        # BUGFIX: `embedding_weight != None` invokes tensor comparison
        # semantics when a Tensor is passed; identity (`is not None`) is
        # the robust, idiomatic test (PEP 8 / E711).
        if embedding_weight is not None:
            self.emb.weight.data.copy_(embedding_weight)
        self.lstm = nn.LSTM(input_size=embedding_dim,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True)
        self.fc1 = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        """x: (batch, seq) int token ids -> (batch, output_size) logits."""
        x = self.emb(x)
        # Use the last layer's final hidden state as the sequence summary.
        out, (h_n, c_n) = self.lstm(x)
        x = h_n[-1, :, :].view(x.size(0), -1)
        x = self.dropout(x)
        return self.fc1(x)
|
987,376 | 4e6d9cb8e0c02cc2ead63e52a5e4cc08b3a8c057 | d = {100: "theja", 200: "ravi", 300: "shiva", 400: "ram", 500: "sri"}
print(d)
# `del d` removes the *name* d entirely (not just the dict's contents),
# so the second print below raises NameError -- demonstrating that is
# the whole point of this snippet.
del d
print(d)
# Expected output / traceback:
# {100: 'theja', 200: 'ravi', 300: 'shiva', 400: 'ram', 500: 'sri'}
# Traceback (most recent call last):
#   File "/Code/venv/dict/11DeleteDict.py", line 4, in <module>
#     print(d)
# NameError: name 'd' is not defined
987,377 | ac002386ab8ab92b9009c8fd715d8b172ba00cc8 | # Given 2 singly linked lists, return an intersecting node if they are intersecting (by reference)
# Example:
# Input: 5 -> 4 -> 12 -> 7 and 9 -> 12 -> 7
# Output: 12 (this node exists in both lists)
# Idea: If there is an intersection node, then it will have the same value and same next.
# That means all nodes after the intersecting node will be the same.
# All we have to do is traverse the nodes and check if a node matches up.
# First we have to "Align" the lists, meaning the tail of the first list is lined up with the tail of the second.
# We can align easily by first finding the length of each list.
from singlylinkedlist import Node
from singlylinkedlist import SinglyLinkedList as LL
def getIntersectingNode(ll1, ll2):
    """Return the first node shared (by reference) between two lists, or None.

    Advances a cursor past the extra leading nodes of the longer list so both
    cursors are equally far from their tails, then walks in lockstep until the
    cursors meet.
    """
    len1 = ll1.getLength()
    len2 = ll2.getLength()
    # Align the two cursors on the length difference.
    if len1 > len2:
        node1 = ll1.getNodeByIndex(len1 - len2)
        node2 = ll2.head
    elif len2 > len1:
        node1 = ll1.head
        node2 = ll2.getNodeByIndex(len2 - len1)
    else:
        node1, node2 = ll1.head, ll2.head
    # Lockstep walk: the first matching node is the intersection point.
    while node1 and node2:
        if node1 == node2:
            return node1
        node1, node2 = node1.next, node2.next
    return None
n1 = Node(5)
n2 = Node(4)
n3 = Node(9)
n4 = Node(12)
n5 = Node(7)
l1 = LL()
l1.head = n1
l1.insertMultipleNodes([n2, n4, n5])
l2 = LL()
l2.head = n3
l2.insertNode(n4)
assert n4 == getIntersectingNode(l1, l2)
|
987,378 | e7b4531753199cb09c5cb6e0963cc9c6e1e379bc | from collections import OrderedDict
import mock
from paperspace.commands.common import CommandBase
output_response = ""
class TestBaseClass(object):
    """Tests for CommandBase's recursive pretty-printing of nested dicts."""
    def test_json_print(self):
        """_print_dict_recursive should render nested keys with increasing indentation."""
        global output_response
        output_response = ""
        # Capture the logger's output into the module-level buffer instead of stdout.
        def log_to_var(message):
            global output_response
            output_response = "{}{}\n".format(output_response, message)
        logger_ = mock.MagicMock()
        logger_.log = log_to_var
        input_dict = {
            "foo": {
                'bar': {
                    "baz": "faz"
                }
            }
        }
        expected_string = """foo:
  bar:
    baz:
      faz
"""
        command = CommandBase(logger_=logger_)
        command._print_dict_recursive(OrderedDict(input_dict))
        assert output_response == expected_string
|
987,379 | 479de1d1a68273f72765c5dcc0cc5c3fa3e2c86a | # Usage:
# Run the script from a folder with file "all.mbox"
# Attachments will be extracted into subfolder "attachments"
# with prefix "n " where "n" is an order of attachment in mbox file.
import mailbox, pickle, traceback, os
from email.header import decode_header
mb = mailbox.mbox('all.mbox')
prefs_path = '.save-attachments'
save_to = 'attachments/'
# Map English month abbreviations (as they appear in RFC 2822 Date headers)
# to two-digit month numbers.
months = {
    "Jan": "01", "Feb": "02", "Mar": "03", "Apr": "04",
    "May": "05", "Jun": "06", "Jul": "07", "Aug": "08",
    "Sep": "09", "Oct": "10", "Nov": "11", "Dec": "12",
}
if not os.path.exists(save_to): os.makedirs(save_to)
prefs = dict(start=0)
total = 0
failed = 0
def save_attachments(mid):
    """Save every attachment of mbox message `mid` into `save_to`.

    File names are prefixed with the received date (YYYY-MM-DD) and the running
    attachment counter `total`; failures are counted in `failed` and extraction
    continues with the next part (best-effort by design).
    """
    msg = mb.get_message(mid)
    if msg.is_multipart():
        # Date header like "Mon, 2 Jan 2020 ...": split()[1]=day, [2]=month
        # abbreviation, [3]=year -- assumed header shape, TODO confirm for all mboxes.
        DateReceived = msg['Date'].split()[3]+'-'+months[msg['Date'].split()[2]]
        if int(msg['Date'].split()[1])<10 and len(msg['Date'].split()[1]) == 1:
            # zero-pad single-digit days so file names sort chronologically
            DateReceived = DateReceived+'-0'+msg['Date'].split()[1]
        else:
            DateReceived = DateReceived+'-'+msg['Date'].split()[1]
        for part in msg.get_payload():
            # parts without a filename are message bodies, not attachments
            if str(part.get_filename()) == 'None':
                continue
            global total
            total = total + 1
            print()
            try:
                # RFC 2047: the filename may arrive MIME-encoded
                decoded_name = decode_header(part.get_filename())
                print(decoded_name)
                if isinstance(decoded_name[0][0], str):
                    name = decoded_name[0][0]
                else:
                    name_encoding = decoded_name[0][1]
                    name = decoded_name[0][0].decode(name_encoding)
                name = '%s %s %s' % (DateReceived, total, name)
                print('Saving %s' % (name))
                with open(save_to + name, 'wb') as f:
                    f.write(part.get_payload(decode=True))
            except:
                # NOTE(review): deliberately broad -- any decode/IO failure just
                # counts toward `failed` and the loop moves on
                traceback.print_exc()
                global failed
                failed = failed + 1
for i in range(prefs['start'], 1000000):
try:
save_attachments(i)
except KeyError:
break
prefs['start'] = i
print()
print('Total: %s' % (total))
print('Failed: %s' % (failed)) |
987,380 | f4281692b8d6348b6b42957b3bebb5178cf4a70d | '''
Football Selection
The head coach for the football team requires several players for the upcoming tournament. He asked N students to stand in increasing height order to make his selection process easier. But the students had a different plan; some of the students were not interested in playing football so they placed themselves in such a manner that the coach would not see them. Display the positions of the students who tried to escape from the coach?
INPUT:
First line includes total number of students N
Second line includes the heights of the students
OUTPUT:
Display the positions of the students who tried to escape
CONSTRAINTS:
3<= N <= 10^3
SAMPLE INPUT:
4
3 9 2 1
SAMPLE OUTPUT:
3 4
EXPLANATION:
The heights of the students are 3 9 2 1
The 3rd student has placed himself behind 2nd student, who is taller than him so he won’t be visible,
Similarly the 4th person has placed himself behind 3rd person
Hence 3 4 is printed
SAMPLE INPUT 1:
5
4 2 6 1 3
SAMPLE OUTPUT1:
2 4 5
'''
N = int(input())
students = list(map(int, input().split()))
# A student is hidden when some earlier student is strictly taller.
# Track the running maximum instead of rescanning the whole prefix with
# max(students[:i]) on every step (O(N) instead of O(N^2)).
pos = []
tallest_so_far = students[0]
for i in range(1, N):
    if tallest_so_far > students[i]:
        pos.append(i + 1)  # positions are reported 1-based
    else:
        tallest_so_far = students[i]
print(*pos, sep=" ")
987,381 | d620fee8b9a521b77a285018801eca4a25bdfae2 | from scipy.io import loadmat
from matplotlib import pyplot as plt
from sklearn.preprocessing import normalize
# Load the MATLAB parameter file; expects a matrix stored under key 'U' -- TODO confirm.
data = loadmat('/tmp/params.mat')
cmap = 'coolwarm'
plt.figure(2)
# L1-normalize each row of U (rows sum to 1), then render it as a heatmap.
heatmap_U = plt.pcolor(normalize(data['U'], norm='l1', axis=1), cmap=cmap)
plt.show()
|
987,382 | ee35b05aabaa9cf8761fff0f9529d7320f4fc029 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 14:48:45 2019
@author: pengy10
"""
import pandas as pd
## Different sets of simulation runs
Model_list = ["ModelA", "ModelB", "ModelC", "ModelD", "ModelD_gromacs_run1", "ModelD_gromacs_run2"]

# (column slice start, column slice end, residue count) per histone tail.
# NOTE(review): H2A_C slices 10 columns (15:25) yet the original divided by 9;
# that divisor is preserved verbatim pending confirmation.
_TAILS = {
    "H2A_N": (1, 9, 8),
    "H2A_C": (15, 25, 9),
    "H2B": (1, 19, 18),
    "H3": (1, 34, 33),
    "H4": (1, 16, 15),
}


def _bound_ratio(row, col_start, col_end, n_residues):
    """Fraction of the tail's residues with at least one DNA contact in one frame."""
    in_contact = sum(1 for c in row.iloc[col_start:col_end] if c >= 1)
    return in_contact / n_residues


def _residence_times(contacts, col_start, col_end, n_residues, cut_off_ratio, ts):
    """Residence times (ns) of continuous tail-DNA binding stretches.

    A stretch is recorded when the tail unbinds on the next frame, at the
    junction between the two concatenated trajectory copies (half-way point),
    or at the last frame considered -- exactly as in the original per-tail
    loops, which this helper replaces (they were four copy-pasted variants).
    """
    times = []
    bound_frames = 0
    n_frames = len(contacts)
    for i in range(0, n_frames - 2):
        ratio_now = _bound_ratio(contacts.iloc[i], col_start, col_end, n_residues)
        ratio_next = _bound_ratio(contacts.iloc[i + 1], col_start, col_end, n_residues)
        if ratio_now > cut_off_ratio:  # bound during frame i
            bound_frames += 1
            if i == int(n_frames) / 2 or i == n_frames - 3:
                # Copy boundary or final frame considered: close the stretch.
                times.append(bound_frames * ts)
                bound_frames = 0
                continue
            elif ratio_next <= cut_off_ratio:  # unbinds at frame i+1
                times.append(bound_frames * ts)
        elif ratio_now <= cut_off_ratio:  # unbound: restart the counter
            bound_frames = 0
    return times


for file_name in Model_list:
    ## unbound state is defined if the percentage of tail residues maintaining
    ## contacts with the DNA molecule is no more than the "cut-off"
    for cut_off_ratio in [0, 0.1]:
        ## read the tail-DNA contact measurements; only the long runs
        ## (first 19000 frames of each chain copy) are used in calculations
        def _load(tag_a, tag_b):
            parts = [
                pd.read_csv("tail_DNA_mean_contacts_" + file_name + "_" + tag + ".dat",
                            sep="\t", header=0).iloc[0:19000, ]
                for tag in (tag_a, tag_b)
            ]
            return pd.concat(parts).reset_index().iloc[:, 1:]

        h2a_contacts = _load("h2a_C", "h2a_G")
        tables = {
            "H2A_N": h2a_contacts,
            "H2A_C": h2a_contacts,  # both H2A tails live in the same table
            "H2B": _load("h2b_D", "h2b_H"),
            "H3": _load("h3_A", "h3_E"),
            "H4": _load("h4_B", "h4_F"),
        }

        ts = 0.2  # ns per frame interval
        tail_residence_time = {
            tail: _residence_times(tables[tail], c0, c1, n_res, cut_off_ratio, ts)
            for tail, (c0, c1, n_res) in _TAILS.items()
        }

        # One long-format frame (residence_time, tail_type).  pd.concat replaces
        # the original chained DataFrame.append(), which pandas 2.x removed.
        df_residence_time = pd.concat(
            [pd.DataFrame({"residence_time": tail_residence_time[tail],
                           "tail_type": [tail] * len(tail_residence_time[tail])})
             for tail in ("H2A_N", "H2A_C", "H2B", "H3", "H4")]
        )

        ## filter out residence times < 10 ns, since full histone tails undergo
        ## very rapid fluctuations before retaining stable binding with DNA
        cut_off_time = 10
        kept = df_residence_time.loc[
            df_residence_time['residence_time'] >= cut_off_time].reset_index()
        with open(file_name + "_full_tail_residence_time" + str(cut_off_ratio) + ".csv", "w") as fwh:
            for i in range(0, len(kept)):
                # column 2 = tail_type, column 1 = residence_time (column 0 is
                # the pre-reset index) -- same output format as before
                fwh.write(str(kept.iloc[i, 2]) + "," + str(kept.iloc[i, 1]) + "\n")
|
def quick_sort(array):
    """Return a new ascending-sorted list using quicksort (last element as pivot).

    Bug fix: the original called array.pop(), which destructively removed
    elements from the CALLER's list on every recursion; the input is now
    left intact and a fresh list is always returned.
    """
    if len(array) <= 1:
        return list(array)
    pivot = array[-1]
    rest = array[:-1]
    lowest = [x for x in rest if x < pivot]
    highest = [x for x in rest if x >= pivot]
    return quick_sort(lowest) + [pivot] + quick_sort(highest)
if __name__ == "__main__":
    import time, random
    # time the sorting algorithm on one million random integers
    num_list = []
    for i in range(1000000):
        num = random.randint(1, 1000000)
        num_list.append(num)
    print(num_list[:10])  # first ten values before sorting
    t0 = time.time()
    print(quick_sort(num_list)[:10])  # first ten values after sorting
    t1 = time.time()
    print("time taken:", t1-t0)
|
987,384 | e8f3afd9bdb02341bafbc397932fdb6541d87b8b | def my_max(a,b):
return a if a>b else b
print('a'<'b')
def my_abs(a):
    """Return the absolute value of a."""
    return -a if a <= 0 else a
print(my_abs(-9))
def my_sum(a):
    """Return every ordered pair (i, j) with i + j == a, flattened into one list.

    For each i in 0..a the matching j is simply a - i, so the original nested
    O(a^2) search is replaced by a single O(a) pass that produces the identical
    flat list [0, a, 1, a-1, ..., a, 0].
    """
    t = []
    for i in range(a + 1):
        t.append(i)
        t.append(a - i)
    return t
# for i in range(len(my_sum(100))):
# print(my_sum(100)[i])
# print( my_sum(100)[i+1])
# i+=2
def my_pow(x, n=2):
    """Return x ** n, except values of x <= 1 are returned unchanged."""
    if x <= 1:
        return x
    return pow(x, n)
print(my_pow(8,3))
|
987,385 | 8710364d5051c84fd42839a0c3f7a4ed0e6c5378 | #!/usr/bin/env python
import pgp_survey
import re
surveys_directory = "surveys"
# Input trait surveys and list of traits.
trait_surveys, trait_survey_traits = pgp_survey.get_trait_surveys(surveys_directory)
# Input participant survey.
participant_survey = pgp_survey.get_participant_survey(surveys_directory)
# Print each participant's huID followed by their aggregated trait responses.
for huID in participant_survey.get_huIDs():
    print(huID)
    #for i in xrange(0, len(participant_survey.header_titles)):
    #    print("{0}: {1}".format(participant_survey.header_titles[i], participant_survey.get_latest_responses(huID)[i]))
    print(pgp_survey.get_traits_for(huID, participant_survey, trait_surveys, trait_survey_traits))
|
987,386 | 1962908d549cc0ee17cf7fab9d0dffaadaf08f7c |
class PreHelper(object):
    """Small pre-processing helpers."""

    @staticmethod
    def maxCV(array):
        # Bug fix: the original method had neither `self` nor @staticmethod,
        # so calling it on an instance raised TypeError.  Direct class calls
        # (PreHelper.maxCV(...)) keep working; behavior is unchanged:
        # return element at index 1.
        return array[1]
987,387 | 0a419a1912b50b81e59aa47b561cdbab7251d370 | import pandas as pd
from definitions import ROOT_DIR
def purchase_init(purchase_path):
    """Load sale records and build per-(store, day, goods) mean purchase features.

    Returns a DataFrame with columns
    ['storeid', 'rundate', 'goodscode', 'price', 'total_amount', 'total_discount'],
    with missing values replaced by 0.
    """
    df = pd.read_csv(purchase_path, parse_dates=['buytime'])
    # Truncate timestamps to day resolution so grouping is per calendar day.
    df['buytime'] = pd.to_datetime(df['buytime'].dt.strftime("%Y-%m-%d"))
    # get purchase feature
    df = df[['storeid', 'buytime', 'goodscode', 'price', 'totalamount', 'totaldiscount']]
    df = df.groupby(['storeid', 'buytime', 'goodscode'], as_index=False).mean()
    df.columns = ['storeid', 'rundate', 'goodscode', 'price', 'total_amount', 'total_discount']
    # Bug fix: fillna() is not in-place -- the original discarded its result,
    # so NaNs leaked through to callers.
    return df.fillna(0)
if __name__ == '__main__':
df_purchase_full = purchase_init('%s/%s' % (ROOT_DIR, 'sources/input/saledata.csv'))
df_purchase_full.to_csv('%s/%s' % (ROOT_DIR, 'sources/output/purchase_feature.csv'), index=False) |
987,388 | f12fc1cff528428916eca3404ad45db3f7e7d15e | # -*- coding:utf-8 -*-
from collections import Iterable,Iterator
class MyRange(object):
    """Python 2 style self-iterator over 0..n-1; exhausted after one pass."""
    def __init__(self, n):
        self.idx = 0  # next value to yield
        self.n = n  # exclusive upper bound
    def __iter__(self):
        # Returning self means the iterable and its iterator share state.
        return self
    def next(self):
        """Python 2 iterator protocol (named __next__ in Python 3)."""
        if self.idx < self.n:
            val = self.idx
            self.idx += 1
            return val
        else:
            raise StopIteration()
myRange = MyRange(3)
print isinstance([], Iterable) # True
print isinstance([], Iterator) # False
print isinstance(MyRange, Iterable) # False
print isinstance(MyRange, Iterator) # False
print 'type', isinstance(MyRange, type) # False
print isinstance(myRange, Iterable) # True
print isinstance(myRange, Iterator) # True
print [i for i in myRange]
print [i for i in myRange]
# 一个迭代器无法多次使用。为了解决这个问题,可以将可迭代对象和迭代器分开自定义:
class Zrange(object):
    """Re-iterable range 0..n-1: each __iter__ call hands out a fresh iterator,
    so the same object can be looped over multiple times."""
    def __init__(self, n):
        self.n = n
    def __iter__(self):
        # A new, independent iterator per loop -- contrast with MyRange above.
        return ZrangeIterator(self.n)
class ZrangeIterator:
    """The iterator companion of Zrange (Python 2 `next` protocol)."""
    def __init__(self, n):
        self.i = 0  # next value to yield
        self.n = n  # exclusive upper bound
    def __iter__(self):
        # Iterators must themselves be iterable.
        return self
    def next(self):
        if self.i < self.n:
            j = self.i
            self.i += 1
            return j
        else:
            raise StopIteration()
zrange = Zrange(3)
print zrange is iter(zrange)
print type(zrange)
print isinstance(zrange, Iterator)
print isinstance(zrange, Iterable)
print [i for i in zrange]
print [i for i in zrange]
|
class Solution:
    """Convert Roman numeral strings to integers (two approaches)."""

    def romanToInt(self, s: str) -> int:
        """Right-to-left scan: a symbol is subtracted when a larger one follows it."""
        values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
        total = 0
        right_value = 0
        for symbol in reversed(s):
            value = values[symbol]
            total += value if value >= right_value else -value
            right_value = value
        return total

    def romanToInt1(self, s: str) -> int:
        """Rewrite subtractive pairs additively (IV -> IIII, ...), then sum symbols."""
        values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
        for subtractive, additive in (("IV", "IIII"), ("IX", "VIIII"),   # 4, 9
                                      ("XL", "XXXX"), ("XC", "LXXXX"),   # 40, 90
                                      ("CD", "CCCC"), ("CM", "DCCCC")):  # 400, 900
            s = s.replace(subtractive, additive)
        return sum(values[symbol] for symbol in s)
987,390 | c0913078067ad5fb8efe70afa9bf14fc6eb6ba89 | import numpy as np
import cv2 as cv
def Threashold(img, l, r):
    """Binarize a grayscale image: 255 where l <= pixel <= r, else 0.

    Vectorized with NumPy instead of the original per-pixel Python loops:
    same result, orders of magnitude faster, and it now works for arrays of
    any shape (the original required exactly 2-D input).
    """
    img = np.asarray(img)
    return np.where((img >= l) & (img <= r), 255, 0)
def Change(val, l, r):
    """Map val to 255 when it lies inside [l, r], otherwise to 0."""
    return 255 if l <= val <= r else 0
img = cv.imread('question_2.png',0)
new = Threashold(img,59,255)
cv.imwrite('newquestion1.png',new)
|
987,391 | 4afb8bd6d1e98d5c119899f785c42804dec7126e | import numpy as np
from root_numpy import root2array, tree2array
import h5py
def x_arr(filename, tree, branch, cols):
    """Read one branch from a ROOT tree and return a squeezed float32 array
    whose innermost dimension has `cols` columns."""
    arr = root2array(filename, tree, branches=[branch])
    print arr.dtype.names
    # Each event holds jagged arrays: concatenate, cast, fold into `cols` columns.
    x = np.array([np.concatenate(x).astype(np.float32).reshape(-1,cols) for x in arr])
    x = np.squeeze(x)
    return x
hcal_rows = [17, 20]
h = h5py.File('ECALvHCAL.hdf5','w')
for r in hcal_rows:
x = x_arr('output_numEvent1_ieta%d.root'%r, 'fevt/RHTree', 'HBHEEnergy_EB', 72)
print x.shape
h.create_dataset('HBHE_%d'%r, data=x)
x = x_arr('output_numEvent1_ieta17.root', 'fevt/RHTree', 'EBenergyRed', 360)
print x.shape
h.create_dataset('EB', data=x)
|
987,392 | 9005586d670025a50fa46f9f4cf439e469830bbe | from FlatlandModel import FlatlandModel
import numpy as np
from enum import Enum
from pedsim_msgs.msg import Ped, InteractiveObstacle
class PedsimWaypointMode(Enum):
    """Waypoint traversal strategy for a pedsim agent."""
    LOOP = 0
    RANDOM = 1
class PedsimAgentType(Enum):
    """Category of simulated pedestrian/vehicle agent."""
    ADULT = 0
    CHILD = 1
    ELDER = 2
    VEHICLE = 3
    SERVICEROBOT = 4
class InteractiveObstacleType(Enum):
    """Kinds of interactive obstacles placeable in the arena."""
    SHELF = 0
# class InteractiveObstacle():
# def __init__(self) -> None:
# self.obstacleType = InteractiveObstacleType.SHELF
class PedsimAgent(Ped):
    """A pedsim `Ped` message extended with an optional flatland model
    (visual/physics description loaded from YAML)."""
    def __init__(self, name = "", flatlandModelPath = "") -> None:
        super().__init__()
        self.name = name
        self.flatlandModel = None # FlatlandModel
        if flatlandModelPath != "":
            self.loadFlatlandModel(flatlandModelPath)
        # self.position = np.zeros(2)
        # self.agentType = PedsimAgentType.ADULT
        # self.vMax = 1.0
        # self.chattingProbability = 0.01
        # self.waypoints = [] # list of 2D waypoints
        # self.waypointMode = PedsimWaypointMode.LOOP
        # # TODO rest of attributes...
    def loadFlatlandModel(self, path: str):
        """Load the flatland model YAML at `path` and attach it to this agent."""
        self.yaml_file = path
        model = FlatlandModel()
        model.load(path)
        self.flatlandModel = model
class ArenaScenario():
    """Description of one arena scenario: agents, obstacles, robot start/goal, map."""

    def __init__(self) -> None:
        self.pedsimAgents = []            # PedsimAgent objects
        self.interactiveObstacles = []    # InteractiveObstacle messages
        self.robotPosition = np.array([0.0, 0.0])  # robot starting position (x, y)
        self.robotGoal = np.array([0.0, 0.0])      # robot goal (x, y)
        self.mapPath = ""                 # path to the map file
def quarter_of(month):
    """Return the quarter (1-4) that the given month number falls in."""
    # Bug fix: the original ended with a stray `}` after `return 4`,
    # which made the whole file a SyntaxError.
    if month in range(4):
        return 1
    elif month in range(4, 7):
        return 2
    elif month in range(7, 10):
        return 3
    else:
        return 4
|
987,394 | 486389cc4c471a24839de6f4bcba225322a9601f | from cgi import escape
from collections import defaultdict
from boto.ses import SESConnection
def first_nonzero_index(input_list):
    """Index of the first non-zero element, or -1 when every element is zero."""
    return next((i for i, item in enumerate(input_list) if item != 0), -1)
def last_nonzero_index(input_list):
    """One past the index of the last non-zero element (an exclusive slice
    bound), or -1 when the list contains no non-zero entries."""
    for offset, item in enumerate(reversed(input_list)):
        if item != 0:
            return len(input_list) - offset
    return -1
def calculate_mean(data, start, step_size):
    """Weighted mean of a histogram, rounded to two decimals.

    data[i] counts the bin whose midpoint is (start + i) * step_size + step_size / 2.
    """
    weighted_total = sum(
        ((start + i) * step_size + (step_size / 2)) * count
        for i, count in enumerate(data)
    )
    return round(float(weighted_total) / float(sum(data)), 2)
def process_chart_data(data, output, prefix):
    """Fill `output` with chart series derived from one histogram.

    `data` starts with a header (index 0=start, 1=end, 2=step size; indices
    3-5 unused here) followed by bucket counts from index 6 on.  Writes
    '<prefix>_data' (comma-joined counts between the first and last non-zero
    bucket), '<prefix>_labels' (quoted bin labels) and '<prefix>_mean'.
    """
    start = data[0]
    end = data[1]  # NOTE(review): read but never used afterwards
    step_size = data[2]
    data = data[6:]  # drop the header; the remainder are bucket counts
    first_nonzero = first_nonzero_index(data)
    last_nonzero = last_nonzero_index(data)
    # Trim leading/trailing all-zero buckets from the plotted series.
    output[prefix+'_data'] = ','.join(map(str, data[first_nonzero:last_nonzero+1]))
    output[prefix+'_labels'] = str()
    for item in range(first_nonzero * step_size + start, last_nonzero * step_size + 1, step_size):
        output[prefix+'_labels'] += "'"+str(item)
        if (step_size > 1):
            # multi-unit bins are labelled as ranges, e.g. '10-19'
            output[prefix+'_labels'] += '-' + str(item + (step_size - 1))
        output[prefix+'_labels'] += "',"
    output[prefix+'_labels'] = output[prefix+'_labels'][:-1]  # drop trailing comma
    output[prefix+'_mean'] = calculate_mean(data, start, step_size)
def ses_email(config, to_address, subject, body):
    """Send a plain-text email through Amazon SES using credentials in `config`."""
    connection = SESConnection(aws_access_key_id=config['AWS_ACCESS_KEY_ID'],
                               aws_secret_access_key=config['AWS_SECRET_ACCESS_KEY_ID'])
    from_address = '"SolutionNet" <{0}>'.format(config['FROM_EMAIL_ADDRESS'])
    # NOTE(review): cgi.escape on the body suggests an HTML context downstream -- confirm.
    connection.send_email(from_address,
                          str(subject),
                          str(escape(body)),
                          str(to_address))
def process_solution(solution):
    """Render each reactor of `solution` into grid data for the web view.

    Returns a list of (cells, path, reactor_type) tuples: `cells` maps each
    (x, y) of the 10x8 grid to the members drawn there, and `path` maps each
    instruction color ('blue'/'red') to per-cell edge sets describing the
    traced instruction paths.
    """
    reactors = []
    path = {}
    # for each reactor, create a 10x8 2D dict of "cells"
    # each cell is a list of tuples, representing type, color/class, and optional text (for sensors) for each member in the cell
    for component in solution.components:
        if "reactor" in component.type:
            cells = {}
            path = {}
            paths_to_process = []
            for path_color in ('blue', 'red'):
                path[path_color] = {}
            for y in range(0, 8):
                for x in range(0, 10):
                    cells[(x, y)] = []
                    for path_color in ('blue', 'red'):
                        path[path_color][(x, y)] = {}
                        path[path_color][(x, y)]['edges'] = set()
                        path[path_color][(x, y)]['entry_edges'] = set()
                        path[path_color][(x, y)]['dir_change'] = ''
            # add all the instructions to the cell grid
            for member in component.members:
                # take note of any instructions that start a path, for path-building later
                if member.type in ('instr-start', 'instr-toggle', 'instr-sensor', 'instr-control'):
                    new_path = {}
                    new_path['start_type'] = member.type
                    new_path['start_pos'] = (member.x, member.y)
                    new_path['start_dir'] = member.ARROW_DIRS[member.arrow_dir]
                    new_path['color'] = member.color
                    paths_to_process.append(new_path)
                # take note of any direction changes (arrow)
                if member.type == 'instr-arrow':
                    path[member.color][(member.x, member.y)]['dir_change'] = member.ARROW_DIRS[member.arrow_dir]
                # set up the class to give to img and div tags, color unless it's a directional
                if member.type == 'instr-arrow':
                    member_class = member.color+'-arrow'
                elif member.type in ('instr-start', 'instr-toggle', 'instr-sensor', 'instr-control'):
                    member_class = member.color+" "+member.ARROW_DIRS[member.arrow_dir]
                else:
                    member_class = member.color
                # if it's a fuser/splitter, we need to add the other half to the cell to the right as well
                if member.type in ('feature-fuser', 'feature-splitter') and member.x < 9:
                    cells[(member.x+1, member.y)].append((member.image_name.replace('.png', '2.png'), member_class))
                if member.type == 'instr-sensor':
                    cells[(member.x, member.y)].append((member.image_name, member_class, member.element))
                else:
                    cells[(member.x, member.y)].append((member.image_name, member_class))
            # build the paths
            OPPOSITE_SIDE = {"l": "r",
                             "r": "l",
                             "u": "d",
                             "d": "u"}
            while len(paths_to_process) > 0:
                current_path = paths_to_process.pop(0)
                current_pos = list(current_path['start_pos'])
                # arrows in the same cell as a start instruction override its direction
                if current_path['start_type'] == 'instr-start':
                    current_dir = path[current_path['color']][tuple(current_pos)]['dir_change'] or current_path['start_dir']
                else:
                    current_dir = current_path['start_dir']
                path[current_path['color']][tuple(current_pos)]['edges'].add(current_dir)
                while True:
                    # move position
                    if current_dir == 'l':
                        current_pos[0] -= 1
                    elif current_dir == 'r':
                        current_pos[0] += 1
                    elif current_dir == 'u':
                        current_pos[1] -= 1
                    elif current_dir == 'd':
                        current_pos[1] += 1
                    # if we're now outside the graph, stop
                    if not (0 <= current_pos[0] <= 9 and 0 <= current_pos[1] <= 7):
                        break
                    # if we've already come into this cell from this direction, stop
                    if OPPOSITE_SIDE[current_dir] in path[current_path['color']][tuple(current_pos)]['entry_edges']:
                        break
                    # otherwise, mark the incoming edge
                    path[current_path['color']][tuple(current_pos)]['edges'].add(OPPOSITE_SIDE[current_dir])
                    path[current_path['color']][tuple(current_pos)]['entry_edges'].add(OPPOSITE_SIDE[current_dir])
                    # determine if we're changing direction or going straight through
                    if path[current_path['color']][tuple(current_pos)]['dir_change']:
                        current_dir = path[current_path['color']][tuple(current_pos)]['dir_change']
                    # mark outgoing edge
                    path[current_path['color']][tuple(current_pos)]['edges'].add(current_dir)
            reactors.append((cells, path, component.type))
    return reactors
def process_overview(solution):
    """Build the factory-overview grid for `solution`.

    Returns a defaultdict mapping (x, y) cells to render instructions:
    reactors, sized components with labels, pipes (colored per source
    component/output), and 'skip' markers for cells covered by a larger
    component.
    """
    def add_component(cells, type, start_x, start_y):
        """Place one sized component at (start_x, start_y), clamping at the grid origin."""
        size_x = COMPONENT_SIZES[type][0]
        size_y = COMPONENT_SIZES[type][1]
        if start_x < 0:
            size_x += start_x
            start_x = 0
        if start_y < 0:
            size_y += start_y
            start_y = 0
        cells[(start_x, start_y)] += ['component', type, size_x, size_y, COMPONENT_LABELS[type]]
        # every other covered cell is marked 'skip' so it is not rendered separately
        for x in range(0, size_x):
            for y in range(0, size_y):
                if x != 0 or y != 0:
                    cells[(start_x + x, start_y + y)].append('skip')
    # (width, height) in grid cells for each known component type
    COMPONENT_SIZES = {"drag-silo-input": (5, 5),
                       "drag-oceanic-input": (2, 2),
                       "drag-atmospheric-input": (2, 2),
                       "drag-mining-input": (3, 2),
                       "drag-storage-tank": (3, 3),
                       "drag-spaceship-input": (2, 3),
                       "drag-powerplant-input": (14, 15),
                       "cargo-freighter": (2, 3),
                       "oxygen-tank": (3, 3),
                       "recycler": (5, 5),
                       "control-center": (3, 3),
                       "particle-accelerator": (3, 3),
                       "rocket-launch-pad": (3, 3),
                       "hydrogen-laser": (5, 5),
                       "chemical-laser": (3, 3),
                       "ancient-pump": (2, 2),
                       "omega-missile-launcher": (3, 3),
                       "thruster-controls": (3, 6),
                       "teleporter-in": (3, 1),
                       "teleporter-out": (3, 1),
                       "internal-storage-tank": (2, 3),
                       "crash-canister": (4, 4)}
    # human-readable label rendered on each component type
    COMPONENT_LABELS = {"drag-silo-input": "input",
                        "drag-oceanic-input": "input",
                        "drag-atmospheric-input": "input",
                        "drag-spaceship-input": "input",
                        "drag-mining-input": "input",
                        "drag-storage-tank": "storage tank",
                        "drag-powerplant-input": "input",
                        "cargo-freighter": "cargo output",
                        "oxygen-tank": "oxygen tank",
                        "recycler": "recycler",
                        "control-center": "control center",
                        "particle-accelerator": "particle accelerator",
                        "rocket-launch-pad": "rocket launch pad",
                        "hydrogen-laser": "hydrogen laser",
                        "chemical-laser": "chemical laser",
                        "ancient-pump": "input",
                        "omega-missile-launcher": "omega missile launcher",
                        "thruster-controls": "thruster controls",
                        "teleporter-in": "teleporter in",
                        "teleporter-out": "teleporter out",
                        "internal-storage-tank": "tank output",
                        "crash-canister": "crash canister"}
    PIPE_COLORS = ['#fefe33', '#8601af',
                   '#FB9902', '#0247FE',
                   '#FE2712', '#66B032',
                   '#FABC02', '#3D01A4',
                   '#FD5308', '#0392CE',
                   '#A7194B', '#D0EA2B']
    cells = defaultdict(list)
    reactor_num = 1
    component_num = 1
    component_nums = {}
    for component in solution.components:
        base_x = component.x
        base_y = component.y
        component_nums[component.component_id] = component_num
        component_num += 1
        if component.type.endswith('-reactor'):
            # reactors are a fixed 4x4 footprint numbered in placement order
            cells[(base_x, base_y)] += ['reactor', reactor_num]
            reactor_num += 1
            for x in range(0, 4):
                for y in range(0, 4):
                    if x != 0 or y != 0:
                        cells[(base_x + x, base_y + y)].append('skip')
        elif component.type in COMPONENT_SIZES:
            add_component(cells, component.type, base_x, base_y)
        else:
            cells[(base_x, base_y)] += ['unknown', component.type]
        for pipe in component.pipes:
            # color is derived from the owning component number and output id
            pipe_color = PIPE_COLORS[((component_nums[pipe.component_id] - 1) * 2) % 6 + pipe.output_id]
            # if the cell already has a pipe in it
            if cells[(base_x + pipe.x, base_y + pipe.y)]:
                cells[(base_x + pipe.x, base_y + pipe.y)] += [pipe_color]
            else:
                cells[(base_x + pipe.x, base_y + pipe.y)] += ['pipe', pipe_color]
    # add "fixed" components if they're not already there
    for component in solution.level.fixedcomponents:
        add_component(cells, component.type, component.x, component.y)
    return cells
|
987,395 | a1d732908c75407f73a3db73985d8e50cadc2b61 | '''
Created on 12 nov. 2017
@author: mbl
'''
#from pyAudioTracking import pyAudioTracking
#from time import strftime, localtime
import sys
#import unicodedata
import locale
class pyAudioInputList(object):
    '''
    Open, parse and read lines from a text input list; all errors are
    reported through the supplied tracking object rather than raised.
    '''
    def __init__(self, trackingobject, inputlist ):
        '''
        Constructor.  `inputlist` is the path of the text file to read.
        '''
        self.filename = inputlist
        self.file = None
        # object that will manage errors, infos
        self.tracking = trackingobject
    def Open(self):
        '''
        Open the input list; return True on success, False on IOError.
        '''
        ret = True
        try:
            locale.getdefaultlocale()
            # NOTE(review): 'rU' mode was removed in Python 3.11; universal
            # newlines are the default, so plain 'r' would be equivalent.
            self.file = open(self.filename, 'rU')
        except IOError:
            self.tracking.SetError(type(self).__name__, sys._getframe().f_code.co_name, "cannot open input list" )
            ret = False
        return ret
    def GetLine(self):
        """
        Read a line from the input file without any processing beyond
        stripping '+' characters; read errors are reported via tracking.
        """
        retline = None
        outline = None
        try:
            retline= str(self.file.readline())
        except IOError:
            self.tracking.SetError(type(self).__name__, sys._getframe().f_code.co_name, "cannot read a line from" )
        finally:
            #outline1 = retline.replace("/","")
            #if( (retline !="") and (retline !="\n")) :
            outline = str(retline)
        return outline.replace("+","")
        #return unicodedata.normalize('NFKD', outline).encode('ascii','ignore')
    def GetLinePostProcess(self):
        """
        Read a line from the input file and post-process it: only letters,
        digits and spaces are kept; every other character is replaced by '_'.
        Returns None for empty lines / pure newlines.
        """
        retline = None
        outline = None
        try:
            retline= str(self.file.readline())
        except IOError:
            self.tracking.SetError(type(self).__name__, sys._getframe().f_code.co_name, "cannot read a line from" )
        finally:
            #outline1 = retline.replace("/","")
            if( (retline !="") and (retline !="\n")) :
                outline = str("")
                # allowed ASCII ranges: a-z, A-Z, 0-9 and the space character
                az_range=range(97,123)
                AZ_range = range (65, 91)
                val_range = range (48,58)
                space_range = range (32, 33)
                for i in range(len(retline)):
                    value = ord(retline[i] )
                    if ( (value in az_range) or (value in AZ_range) or (value in val_range) or (value in space_range) ):
                        outline = "".join([outline,retline[i]])
                    else:
                        outline = "".join([outline,"_"])
                '''
                if( (retline[i] != "/") and (retline[i] != "&") and (retline[i] != "\\") and (retline[i] != "%") and (retline[i] != "#") and (retline[i] != "_") and (retline[i] != '"') and (retline[i] != "@") and (retline[i] != ":") and (retline[i] != "\n")):
                    #charac = str(retline[i].encode('ascii','ignore'))
                    if(ord(retline[i]) < 128):
                        outline = "".join([outline,retline[i]])
                '''
        return outline
        #return unicodedata.normalize('NFKD', outline).encode('ascii','ignore')
    def GetNumberofLine (self):
        '''
        Count lines with real text (stripped length > 5), then rewind
        the file back to the beginning.
        '''
        nbline = 0
        try:
            while ( True):
                retline = self.GetLine()
                if ( (retline == None) or (retline == "")):
                    break
                realine = str(retline).strip()
                if(len(realine) > 5):
                    #if( (retline !="") and (retline !="\n") and (retline !=" ")) :
                    nbline = nbline + 1
        except IOError:
            self.tracking.SetError(type(self).__name__, sys._getframe().f_code.co_name, "cannot read a line from" )
        finally:
            self.file.seek(0,0)
        return nbline
    def Close(self):
        # Close the underlying file, reporting (not raising) any IOError.
        try:
            self.file.close()
        except IOError:
            self.tracking.SetError(type(self).__name__, sys._getframe().f_code.co_name, "cannot close file" )
        finally:
            pass
|
987,396 | f6bbbffb1ca7aee996717c68934b3195bab90319 | class Produto:
    def __init__(self, descricao, valor):
        # descricao: product description shown in listings; valor: unit price
        self.descricao = descricao
        self.valor = valor
class Cliente:
    """A store customer, identified only by name."""
    def __init__(self, nome):
        self.nome = nome
class Carrinho:
    """Shopping cart tied to one client, holding the chosen products."""

    def __init__(self, cliente):
        self.cliente = cliente
        self.produtos = []  # products added so far

    def adicionar_produto(self, produto):
        """Put one product into the cart (duplicates allowed)."""
        self.produtos.append(produto)

    def listar_produtos(self):
        """Print the description and price of every product in the cart."""
        for item in self.produtos:
            print("Descrição: ", item.descricao, "Valor:", item.valor)

    def calcular_total(self):
        """Sum of the prices of all products in the cart."""
        return sum(item.valor for item in self.produtos)

    def aplicar_desconto(self, porcentagem):
        """Cart total after applying a percentage discount."""
        base = self.calcular_total()
        return base - (base * porcentagem / 100)
# Demo: build a cart for one customer, add products, and show totals.
cliente1 = Cliente("Renata Monteiro")
produto1 = Produto("Monitor", 2000)
produto2 = Produto("Agenda", 35)
produto3 = Produto("Bolsa", 75)

# Associate the cart with cliente1 and add items; produto2 goes in twice.
carrinho1 = Carrinho(cliente1)
for item in (produto1, produto3, produto2, produto2):
    carrinho1.adicionar_produto(item)

carrinho1.listar_produtos()
print("Total sem Desconto: R$", carrinho1.calcular_total())
# aplicar_desconto takes the discount as a percentage (10 == 10%).
print("TOTAL COM DESCONTO: R$", carrinho1.aplicar_desconto(10))
|
987,397 | 2879db94dfd44d028f08cb8d5d05c31bfaf64c47 | # Generated by Django 3.0.4 on 2021-01-27 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a 'mode' (single/multi play) choice field to the draft model."""
    dependencies = [
        # Must run after the migration that introduced draft.code.
        ('draft', '0010_draft_code'),
    ]
    operations = [
        migrations.AddField(
            model_name='draft',
            name='mode',
            # verbose_name '모드' is Korean for "mode"; defaults to '1' (Single Play).
            field=models.CharField(choices=[('1', 'Single Play'), ('2', 'Multi Play')], default='1', max_length=20, verbose_name='모드'),
        ),
    ]
|
987,398 | af44bbe45f6e0ec217beeca79be59d0cb0f11478 | from torchvision import models
from backbones.octConv_ResNet import oct_resnet26, oct_resnet50, oct_resnet101, oct_resnet152
from backbones.SENet import senet154, se_resnet101, se_resnet152, se_resnext50_32x4d, se_resnext101_32x4d
from backbones.efficientnet import EfficientNet
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze every parameter of *model* when feature extracting.

    When feature_extracting is True, gradients are disabled for all
    parameters so backprop leaves the backbone untouched; otherwise the
    model is left as-is.
    """
    if not feature_extracting:
        return
    for param in model.parameters():
        # requires_grad=False: skip gradient computation for this tensor.
        param.requires_grad = False
# If use_pretrained=True, below models are using ImageNet pretrained weights.
def initialize_model(model_name, use_pretrained=False, input_channels=None, num_classes=None):
# If use_pretrained = True, These models are pretrained via ImageNet-1000 class
if model_name == "resnet101":
return models.resnet101(pretrained=use_pretrained, progress=True)
elif model_name == "resnet152":
return models.resnet152(pretrained=use_pretrained, progress=True)
# Constructs a Octave ResNet-152 model.(pretrained (bool): If True, returns a model pre-trained on ImageNet)
elif model_name == "oct_resnet26":
return oct_resnet26(input_channels=input_channels, num_classes=num_classes)
elif model_name == "oct_resnet50":
return oct_resnet50(input_channels=input_channels, num_classes=num_classes)
elif model_name == "oct_resnet101":
return oct_resnet101(input_channels=input_channels, num_classes=num_classes)
elif model_name == "oct_resnet152":
return oct_resnet152(input_channels=input_channels, num_classes=num_classes)
# 아래 5개의 모델은 pretrained=None이면 전이학습을 하지 않음.
elif model_name == "senet154":
if use_pretrained == False:
return senet154(num_classes=num_classes, pretrained=None)
else:
return senet154(num_classes=num_classes, pretrained='imagenet')
elif model_name == "se_resnet101":
if use_pretrained == False:
return se_resnet101(num_classes=num_classes, pretrained=None)
else:
return se_resnet101(num_classes=num_classes, pretrained='imagenet')
elif model_name == "se_resnet152":
if use_pretrained == False:
return se_resnet152(num_classes=num_classes, pretrained=None)
else:
return se_resnet152(num_classes=num_classes, pretrained='imagenet')
elif model_name == 'se_resnext50_32x4d':
if use_pretrained == False:
return se_resnext50_32x4d(num_classes=num_classes, pretrained=None)
else:
return se_resnext50_32x4d(num_classes=num_classes, pretrained='imagenet')
elif model_name == 'se_resnext101_32x4d':
if use_pretrained == False:
return se_resnext101_32x4d(num_classes=num_classes, pretrained=None)
else:
return se_resnext101_32x4d(num_classes=num_classes, pretrained='imagenet')
elif model_name == "resnext50_32x4d":
# Aggregated Residual Transformation for Deep Neural Networks<https://arxiv.org/pdf/1611.05431.pdf>`
# If progress=True, print pretrained model downloading status.
return models.resnext50_32x4d(pretrained=use_pretrained, progress=True)
elif model_name == "resnext101_32x8d":
return models.resnext101_32x8d(pretrained=use_pretrained, progress=True)
# ImageNet pretrained efficientnet-b3, b4
elif model_name == 'efficientnetb3':
return EfficientNet.from_scratch(model_name='efficientnet-b3', num_classes=num_classes)
elif model_name == 'efficientnetb4':
return EfficientNet.from_scratch(model_name='efficientnet-b4', num_classes=num_classes)
elif model_name == 'efficientnetb5':
return EfficientNet.from_scratch(model_name='efficientnet-b5', num_classes=num_classes)
else:
print("Wrong define model parameter input.")
raise ValueError |
987,399 | 8762ae3d53dc3e8a1d1766470935fc24635c430f | # 6.4.1.py | ian luna
# Print the multiples of 5 from 5 through 75, space-separated on one line.
for x in range(5, 80, 5):
    print(x, end =' ')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.