index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
import time

# Simple hangman: the player gets 10 wrong guesses to find every letter of a word.
# Bug fixes vs the original:
#  - input(print("...")) printed the prompt, then passed None to input(),
#    which displayed the literal text "None"; prompts now go to input() directly.
#  - guesses was initialized to " " (a space), so any space counted as guessed.
#  - `print (char),` relied on the Python 2 trailing-comma idiom, a no-op in
#    Python 3; replaced with end=" ".
#  - "You Loose!" -> "You Lose!" (user-facing typo).
name = input("What's your name? ")
print("Hello " + name, "Time to play hangman!")
time.sleep(1)
print("Start guessing...")
time.sleep(0.5)
word = str(input("Enter any word: "))
print("The length of the word is:", len(word))

guesses = ""   # every character guessed so far
turns = 10     # wrong guesses remaining

while turns > 0:
    failed = 0
    # Show the word with guessed letters revealed and the rest masked.
    for char in word:
        if char in guesses:
            print(char, end=" ")
        else:
            print("_", end=" ")
            failed += 1
    print()
    if failed == 0:
        print("You Won!")
        break
    guess = input("Guess a character: ")
    guesses += guess
    if guess not in word:
        turns -= 1
        print("Wrong!")
        print("You have", turns, "more guesses")
        if turns == 0:
            print("You Lose!")
            break
|
# Merge sort
# time complexity: worst case, best case; space complexity
# O(nlogn), O(nlogn), O(nlogn), O(n)
def merge(l, r):
    """Merge two already-sorted lists into one sorted list.

    :param l: left sorted list
    :param r: right sorted list
    :return: new sorted list containing all elements of l and r

    Bug fix: the original drained the left-hand remainder with
    ``for j in l[tag_l]`` (iterating over a single element) instead of the
    slice ``l[tag_l:]``, which crashed on non-iterable elements and lost the
    tail of the left list.
    """
    new_list = []
    tag_l = 0
    tag_r = 0
    while tag_l < len(l) and tag_r < len(r):
        if l[tag_l] < r[tag_r]:
            new_list.append(l[tag_l])
            tag_l = tag_l + 1
        else:
            new_list.append(r[tag_r])
            tag_r = tag_r + 1
    # Append whichever side still has elements left (at most one of these
    # extends with a non-empty slice).
    new_list.extend(r[tag_r:])
    new_list.extend(l[tag_l:])
    return new_list
def merge_sort(ls):
    """Merge sort: recursively split the list in half, then merge the halves.

    :param ls: list to sort
    :return: new sorted list (lists of length <= 1 are returned as-is)
    """
    if len(ls) <= 1:
        return ls
    middle = len(ls) // 2
    return merge(merge_sort(ls[:middle]), merge_sort(ls[middle:]))
if __name__ == "__main__":
a = [4, 7, 8, 3, 5, 9]
print(merge_sort(a)) |
11,202 | 8a46da47f866e506ece43284a896e9ffe7c2a453 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
import bert
from bert import BertModelLayer
from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights
import numpy as np
import pandas as pd
import json
import collections
import re
import unicodedata
import six
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            # Undecodable bytes are silently dropped ("ignore").
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab."""
    # KeyError propagates for items missing from the vocab, as before.
    return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    # Map wordpiece tokens -> integer ids (thin wrapper over convert_by_vocab).
    return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
    # Map integer ids -> tokens using an id->token dict (wrapper over convert_by_vocab).
    return convert_by_vocab(inv_vocab, ids)
def convert_ids_to_tokens2(inv_vocab, ids):
    """Reverse lookup: for each id in `ids`, collect every key of `inv_vocab`
    whose value equals that id (in dict insertion order).

    Despite its name, `inv_vocab` here maps token -> id, so the lookup is by
    value.  Performance fix: the original rescanned the whole dict for every
    id (O(len(ids) * len(vocab))); the value->keys table is now built once.
    """
    value_to_keys = collections.defaultdict(list)
    for key, value in inv_vocab.items():
        value_to_keys[value].append(key)
    output = []
    for idss in ids:
        # Ids with no matching key contribute nothing, as before.
        output.extend(value_to_keys.get(idss, []))
    return output
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    if not stripped:
        return []
    return stripped.split()
class WordpieceTokenizer(object):
    """Runs WordPiece tokenziation."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        # vocab: dict of sub-token string -> id; only membership is tested here.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.
        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.
        For example:
          input = "unaffable"
          output = ["un", "##aff", "##able"]
        Args:
          text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer.
        Returns:
          A list of wordpiece tokens (wrapped in [CLS] ... [SEP], see below).
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            # Overlong tokens become a single [UNK] instead of being matched.
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedy longest-match: shrink `end` until chars[start:end]
                # is found in the vocab.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    ### joonho.lim @ 2019-03-15
                    # NOTE: the upstream BERT "##" continuation prefix was
                    # deliberately disabled here (commented lines below).
                    # if start > 0:
                    #     substr = "##" + substr
                    # print ( '[substr]\t%s\t%s\t%d\t%d' % ( substr, substr in self.vocab, start, end))
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No prefix of the remainder exists in the vocab: emit [UNK]
                    # for the whole token.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        # Unlike stock BERT, this tokenizer adds the [CLS]/[SEP] markers itself.
        output_tokens.insert(0, '[CLS]')
        output_tokens.append('[SEP]')
        return output_tokens
class Intent_model():
    """Intent classifier: wordpiece-tokenizes input text and runs a saved
    Keras/BERT model to pick one of the intent classes found in the dataset."""

    def __init__(self):
        # Fixed input length the model was trained with — TODO confirm.
        self.max_len = 29
        self.config_path = './Intent_cl/Bert_model/bert_config.json'
        # The CSV is loaded only to recover the list of intent labels below.
        self.data = pd.read_csv('./Intent_cl/Intent_dataset/category_data.csv')
        with open('./Intent_cl/Bert_model/vocab.json', 'r') as read_file:
            self.vocab = json.loads(read_file.read())
        with tf.io.gfile.GFile(self.config_path, "r") as reader:
            bc = StockBertConfig.from_json_string(reader.read())
        self.bert_params = map_stock_config_to_params(bc)
        self.bert_params.adapter_size = None
        # The custom BERT layer must be supplied so Keras can deserialize the h5.
        self.intent_model = keras.models.load_model('./Intent_cl/Bert_model/nomal_news_weather_etc_kobert_model_category.h5',
            custom_objects={"BertModelLayer":BertModelLayer.from_params(self.bert_params, name="bert")} )
        # One label per unique intent; prediction index maps into this list.
        self.classes = self.data.intent.unique().tolist()

    def intent_classification(self, text):
        """Return the predicted intent label for `text`."""
        padding_text = WordpieceTokenizer(self.vocab)
        a= padding_text.tokenize(text)
        token_ids = convert_by_vocab(self.vocab, a)
        # Right-pad with 0 up to max_len (assumes len(token_ids) <= max_len —
        # TODO confirm longer inputs are prevented upstream).
        for x in range(self.max_len - len(token_ids)):
            token_ids.append(0)
        token_ids = np.array(token_ids)
        token_ids = token_ids.reshape(1,self.max_len, order= 'F')
        # argmax over the class axis -> index of the most probable intent.
        predictions = self.intent_model.predict(token_ids).argmax(axis=-1)
        return self.classes[int(predictions)]
11,203 | 2097c2f7ca16a07af7fd132b3a4f540fccca7973 | import vrep
import vrep_constants as const
import math
from matplotlib.path import Path
from numpy import array
class Point:
    """Mutable 2-D point.

    Can be constructed from an (x, y) tuple, copied from another object
    exposing .x/.y, or left uninitialized when no argument is given.
    """

    def __init__(self, xy=None):
        if isinstance(xy, tuple):
            # Tuples shorter than 2 leave the point uninitialized, as before.
            if len(xy) >= 2:
                self.x, self.y = xy[0], xy[1]
        elif xy is not None:
            # Copy-construct from any Point-like object.
            self.x, self.y = xy.x, xy.y

    def __str__(self):
        return str(self.x) + " " + str(self.y)

    def set_x(self, x):
        self.x = x

    def set_y(self, y):
        self.y = y

    def set_xy(self, x, y):
        self.x, self.y = x, y

    def get_x(self):
        return self.x

    def get_y(self):
        return self.y

    def get_xy(self):
        return self.x, self.y
class Vrep():
    """Wrapper around the V-REP remote API: one simulator connection plus
    scene queries for robots, goals and obstacles."""

    def __init__(self):
        # Open the remote-API connection; client_id tags every later call.
        self.client_id = vrep.simxStart(const.CON_ADDRESS, const.CON_PORT, False, True, \
            const.TIMEOUT_IN_MS, const.COMM_THREAD_CYCLE_IN_MS)

    def get_object_handle(self, obj_name):
        # Resolve a scene object's name to its integer handle (blocking).
        # NOTE(review): the return code `ret` is ignored here.
        ret, handle = vrep.simxGetObjectHandle(self.client_id, obj_name, vrep.simx_opmode_oneshot_wait)
        return handle

    def get_object_child(self, parent_handle, index):
        # Handle of the index-th child of parent_handle (blocking; ret ignored).
        ret, child_handle = vrep.simxGetObjectChild(self.client_id, \
            parent_handle, index, vrep.simx_opmode_oneshot_wait)
        return child_handle

    def get_object_position(self, object_handle):
        """
        Function that returns position of object on the scene in V-REP.
        Returns a Point (x, y in world frame) on success, or an empty tuple
        on a failed remote call.
        """
        res, object_position = vrep.simxGetObjectPosition(self.client_id, object_handle, -1, \
            vrep.simx_opmode_blocking)
        if res == vrep.simx_return_ok:
            return Point((object_position[0], object_position[1]))
        else:
            print('Remote function call failed with result {0}.'.format(res))
            return ()

    def get_robots_data(self):
        # handle -> [position (Point), direction (Point), boundary points] per robot.
        robots_data = dict()
        robots_handles = self.get_object_childs(const.ROBOTS_NAMES_TREE)
        for robot in robots_handles:
            robot_boundary_points = self.get_boundary_points(robot)
            robot_position = self.get_object_position(robot)
            robot_direction = self.get_robot_direction_vector(robot)
            robots_data[robot] = [robot_position, robot_direction, robot_boundary_points]
        return robots_data

    def get_goal_data(self):
        # handle -> [position (Point), boundary points] per goal object.
        goal_data = dict()
        goal_handles = self.get_object_childs(const.TARGETS_NAMES_TREE)
        for goal in goal_handles:
            goal_boundary_points = self.get_boundary_points(goal)
            goal_position = self.get_object_position(goal)
            goal_data[goal] = [goal_position, goal_boundary_points]
        return goal_data

    def get_obstacles_data(self):
        # handle -> [position (Point), boundary points] per static obstacle.
        # NOTE(review): the dynamic-obstacles branch is unimplemented and
        # implicitly returns None — callers must not enable
        # const.WITH_DYNAMIC_OBSTACLES until it is written.
        if const.WITH_DYNAMIC_OBSTACLES:
            pass
        else:
            obstacles_data = dict()
            obstacle_handles = self.get_object_childs(const.OBSTACLES_NAMES_TREE)
            for obstacle in obstacle_handles:
                obstacle_boundary_points = self.get_boundary_points(obstacle)
                obstacle_position = self.get_object_position(obstacle)
                obstacles_data[obstacle] = [obstacle_position, obstacle_boundary_points]
            return obstacles_data

    def get_boundary_points(self, object_handle):
        """
        Function that returns boundary points of object's (obstacle) boundary box.
        The four corners are rotated by the object's z-orientation and
        translated to its world position.
        """
        points = []
        obstacle_position = self.get_object_position(object_handle)
        ret, orient = vrep.simxGetObjectOrientation(self.client_id, object_handle, -1, \
            vrep.simx_opmode_blocking)
        # Parameter ids 15/16/18/19 — presumably the bounding-box min/max x/y
        # float parameters; confirm against the remote-API constant list.
        ret, x_1 = vrep.simxGetObjectFloatParameter(self.client_id, object_handle, 15, \
            vrep.simx_opmode_blocking)
        ret, y_1 = vrep.simxGetObjectFloatParameter(self.client_id, object_handle, 16, \
            vrep.simx_opmode_blocking)
        ret, x_2 = vrep.simxGetObjectFloatParameter(self.client_id, object_handle, 18, \
            vrep.simx_opmode_blocking)
        ret, y_2 = vrep.simxGetObjectFloatParameter(self.client_id, object_handle, 19, \
            vrep.simx_opmode_blocking)
        angle = orient[2]
        # Extension of boundaries, so that the robots moves without collisions
        x_1 = x_1 - 0.3
        x_2 = x_2 + 0.3
        y_1 = y_1 - 0.3
        y_2 = y_2 + 0.3
        # 2-D rotation by `angle` plus translation, one corner at a time.
        p_1 = (x_1 * math.cos(angle) - y_1 * math.sin(angle) + obstacle_position.x, y_1 * \
            math.cos(angle) + x_1 * math.sin(angle) + obstacle_position.y)
        points.append(Point(p_1))
        p_2 = (x_1 * math.cos(angle) - y_2 * math.sin(angle) + obstacle_position.x, y_2 * \
            math.cos(angle) + x_1 * math.sin(angle) + obstacle_position.y)
        points.append(Point(p_2))
        p_3 = (x_2 * math.cos(angle) - y_2 * math.sin(angle) + obstacle_position.x, y_2 * \
            math.cos(angle) + x_2 * math.sin(angle) + obstacle_position.y)
        points.append(Point(p_3))
        p_4 = (x_2 * math.cos(angle) - y_1 * math.sin(angle) + obstacle_position.x, y_1 * \
            math.cos(angle) + x_2 * math.sin(angle) + obstacle_position.y)
        points.append(Point(p_4))
        return points

    def get_object_childs(self, obj_name):
        """
        Function that return handles of object's childs from the V-REP scene.
        This function is useful when the exact number of objects is unknown.
        Asks for children by increasing index until the API returns -1.
        """
        index = 0
        children_list = []
        child = 0
        parent_handle = self.get_object_handle(obj_name)
        while child != -1:
            res, child = vrep.simxGetObjectChild(self.client_id, parent_handle, index, vrep.simx_opmode_blocking)
            if res == vrep.simx_return_ok:
                children_list.append(child)
                index = index + 1
            else:
                print('Remote fucntion get_object_childs call failed.')
                return []
        # Drop the trailing -1 sentinel appended by the last iteration.
        del children_list[len(children_list) - 1]
        return children_list

    def finish_connection(self):
        # Close all remote-API connections opened by this client.
        vrep.simxFinish(-1)

    def get_robot_direction_vector(self, robot_handle):
        # Child index 15 is presumably a marker object placed ahead of the
        # robot — TODO confirm scene convention.
        direction_point = self.get_object_child(robot_handle, 15)
        robot_position = self.get_object_position(robot_handle)
        dir_point_position = self.get_object_position(direction_point)
        direction_vector = (dir_point_position.x - robot_position.x, \
            dir_point_position.y - robot_position.y)
        # Normalize to unit length (raises ZeroDivisionError if the marker
        # coincides with the robot).
        direction_vector_mod = math.sqrt(direction_vector[0] ** 2 \
            + direction_vector[1] ** 2)
        norm_direction_vector = (direction_vector[0] / direction_vector_mod, \
            direction_vector[1] / direction_vector_mod)
        return Point(norm_direction_vector)
|
# On-screen Russian keyboard: letter keys row by row, then the action keys
# (space, backspace, caps, print).
buttons = [
    'й', 'ц', 'у', 'к', 'е', 'н', 'г', 'ш', 'щ', 'з', 'х', 'ъ',
    'ф', 'ы', 'в', 'а', 'п', 'р', 'о', 'л', 'д', 'ж', 'э',
    'я', 'ч', 'с', 'м', 'и', 'т', 'ь', 'б', 'ю', '-', 'ё',
    'ПРОБЕЛ', 'СТЕРЕТЬ', 'ЗАГЛАВН', 'ПЕЧАТЬ'
]
# Color scheme: the active (pressed) state swaps foreground and background.
bg_color = '#3c4987'
fg_color = '#ffffff'
active_color = fg_color
fg_active_color = bg_color
# Key geometry, in text units (presumably Tk widget options — confirm toolkit).
button_size = 3
button_height = 3
big_button_size = 8
# Spacing between keys; pady is vertical padding (CSS equivalent: margin).
padx = 3
pady = 1
bd = 1  # border width — presumably Tk's `bd` option; confirm
x_padding = (20, 0)
# Placeholder text shown before the user types ("Surname Name").
default = 'Фамилия Имя'
|
11,205 | cdd7b4420b0ccfe2ee31c81890a0fbb6c14178e9 |
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from webdriver_manager.chrome import ChromeDriverManager
def load_cookie(driver, path):
    """Load pickled cookies from `path` and install them into `driver`.

    NOTE(review): relies on the `import pickle` that appears further down
    this file; fine at call time, but the ordering is fragile.
    """
    with open(path, 'rb') as cookiesfile:
        cookies = pickle.load(cookiesfile)
        for cookie in cookies:
            driver.add_cookie(cookie)
import platform
import os
import pickle
import time
import random
def control_key():
    """Return the platform shortcut modifier: Cmd on macOS, Ctrl elsewhere."""
    is_osx = platform.system().startswith('Darwin')
    return Keys.COMMAND if is_osx else Keys.CONTROL
# Use a local chromedriver binary if present, otherwise download a matching one.
path_chromedriver = './chromedriver'
if os.path.exists(path_chromedriver):
    driver = webdriver.Chrome(executable_path = path_chromedriver)
else:
    driver = webdriver.Chrome(ChromeDriverManager().install())
##############################################################################################################
##############################################################################################################
##############################################################################################################
##############################################################################################################
'''
CHANGE PARAMETERS HERE
'''
youtube_url = "http://www.youtube.com"
search_text = "baby delivery birth"
AN_comment_text_filename = "youtube_invite_welcome.txt"
'''
THESE NEED TO BE CONSTANTLY UPDATED
ACCORDING TO THE YOUTUBE VERSION UPGRADES
'''
# XPaths tied to YouTube's current DOM; they break whenever YouTube ships UI changes.
youtube_search_xpath = "//div[@id='search-input']/input[@id='search']"
video_title_xpath = "//a[@id='video-title']"
new_comment_box = "//*[@id='simplebox-placeholder']"
new_comment_placeholder_anchor = "//*[@id='contenteditable-root'][@aria-label='Add a public comment...']"
comment_button = '//*/paper-button[@id="button"][@aria-label="Comment"]'
##############################################################################################################
##############################################################################################################
##############################################################################################################
##############################################################################################################
# Open YouTube, restore a previously saved login session, and run the search.
driver.get(youtube_url)
load_cookie(driver, './login_cookie')
search_box = driver.find_element_by_xpath(youtube_search_xpath)
search_box.clear()
search_box.send_keys(search_text)
search_box.send_keys(Keys.RETURN)
# The comment template to post on each video.
AN_comment_text = None
with open(AN_comment_text_filename, "r") as AN_comment_text_file:
    AN_comment_text = AN_comment_text_file.read()
# Map of already-commented video URL -> element (later replaced by True so it
# pickles); persisted across runs in `done_map_filename`.
done_URLs_map = {}
done_map_filename = "done_URLs_map"
if os.path.exists(done_map_filename):
    with open(done_map_filename, 'rb') as donemap_file:
        done_URLs_map = pickle.load(donemap_file)
# (removed) hard-coded Google OAuth sign-in URL fallback that prompted for a
# manual browser login; the cookie-based login above replaces it.
def fetch_video_urls():
    """Generator yielding search-result video URLs not yet in done_URLs_map.

    Whenever every currently visible video has already been handled, scrolls
    the results page to trigger lazy loading of more.
    NOTE(review): if scrolling never surfaces new videos this loops forever;
    also records each element in the global done_URLs_map as a side effect so
    the caller can ctrl-click it later.
    """
    videos = []
    while True:
        driver.implicitly_wait(2)  # seconds
        videos = driver.find_elements_by_xpath(video_title_xpath)
        videos = list(filter(lambda v : v.get_attribute('href') not in done_URLs_map.keys(), videos))
        # if couldn't find any more new videos, scroll down
        if len(videos) == 0:
            driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight)")
        for v in videos:
            if v.get_attribute('href') not in done_URLs_map:
                done_URLs_map[v.get_attribute('href')] = v
            yield v.get_attribute('href')
def open_and_comment_in_new_tab(video_url):
    """Open `video_url` in a new tab, post the (randomized) comment, close the tab.

    Progress is saved to `done_map_filename` after every video so the script
    can resume across runs.

    Fixes vs the original: the bare `except:` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to `except Exception:`, and
    TimeoutException is now actually imported at the top of the file — the
    original referenced it without an import, so a page-load timeout raised
    NameError into the outer handler.
    """
    current_video_elem = done_URLs_map[video_url]
    # Ctrl/Cmd+Enter on the link opens it in a background tab.
    current_video_elem.send_keys(control_key() + Keys.RETURN)
    # switch tab
    driver.switch_to.window(driver.window_handles[-1])
    print("\n\n===============================\n")
    print("opened new video in new tab !")
    print("switched to the newtab ! ready to comment ")
    try:
        delay = 3  # seconds
        browser = driver
        try:
            WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.ID, 'info')))
            print("Page is ready!")
        except TimeoutException:
            print("Loading took too much time!")
        # Scroll down so the lazily-rendered comment box appears, then click it
        # to reveal the editable field.
        driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight)")
        driver.implicitly_wait(5)  # seconds
        commentBox = driver.find_element_by_xpath(new_comment_box)
        commentBox.click()
        driver.implicitly_wait(2)  # seconds
        new_comment_placeholder = driver.find_element_by_xpath(new_comment_placeholder_anchor)
        driver.execute_script("arguments[0].scrollIntoView();", new_comment_placeholder)
        _random_augmentor_ = random.randrange(10000)
        # augment comment with random string to avoid comment repitition ban from Youtube
        AN_comment_text_aug = f"{AN_comment_text}_{str(_random_augmentor_)}"
        new_comment_placeholder.send_keys(AN_comment_text_aug)
        driver.find_element_by_xpath(comment_button).click()
        # wait for sometime for youtube to process comment
        time.sleep(2)
        print("Inserted comment !")
    except Exception:
        # Best-effort: a failed comment should not stop the crawl.
        print("Could not Comment due to some error ! Sorry :( ")
    # close the tab
    driver.close()
    driver.switch_to.window(driver.window_handles[0])
    print("switched back to the main tab with search results ! ")
    driver.execute_script("arguments[0].scrollIntoView();", current_video_elem)
    # replace the web-element value by boolean to make it picklable
    done_URLs_map[video_url] = True
    with open(done_map_filename, 'wb') as donemap_file:
        pickle.dump(done_URLs_map, donemap_file)
# Main loop: visit every (new) search result and leave a comment on it.
for url in fetch_video_urls():
    open_and_comment_in_new_tab(url)
driver.close()
|
11,206 | 0fb859a1504454590ffdb236769f40b0136b326f | from abc import ABCMeta, abstractmethod
class SignalTransform(object, metaclass=ABCMeta):
    """Abstract interface for transforms over discrete signals.

    Bug fix: the original declared `__metaclass__ = ABCMeta`, which is the
    Python 2 spelling and is silently ignored by Python 3 — so @abstractmethod
    was not enforced and the "abstract" class could be instantiated.  The
    metaclass is now passed with the Python 3 class-keyword syntax.
    """

    @abstractmethod
    def interpolate(self, image):
        """ gets the value of the signal at the specific point,
        as the signal is discrete it will be interpolated
        """
        return
|
11,207 | 021c16b3aa2b8424555798eccb39edf29e4d790f | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import shutil
import unittest
from pathlib import Path
from unittest.mock import MagicMock, call, patch
from .. import source_database_buck_builder
class SourceDatabaseBuckBuilderTest(unittest.TestCase):
    """Unit tests for source_database_buck_builder's buck query/build helpers."""

    def setUp(self) -> None:
        # Expected `buck query` argv for the default (no mode) invocation.
        self._query_arguments = [
            "query",
            "--json",
            'kind("python_binary|python_library|python_test", %s) '
            "- attrfilter(labels, generated, %s) "
            "+ attrfilter(labels, unittest-library, %s) "
            "- attrfilter(labels, no_pyre, %s)",
            "//foo/bar/...",
            "//bar:baz",
        ]

    def test_get_buck_query_arguments(self) -> None:
        arguments = source_database_buck_builder._get_buck_query_arguments(
            specifications=["//foo/bar/...", "//bar:baz"], mode=None
        )
        self.assertEqual(arguments, self._query_arguments)

    def test_get_buck_query_arguments__with_mode(self) -> None:
        # A mode inserts an `@mode/<name>` argfile after the flags.
        arguments = source_database_buck_builder._get_buck_query_arguments(
            specifications=["//foo/bar/...", "//bar:baz"], mode="foo"
        )
        self.assertEqual(
            arguments,
            [
                "query",
                "--json",
                "@mode/foo",
                'kind("python_binary|python_library|python_test", %s) '
                "- attrfilter(labels, generated, %s) "
                "+ attrfilter(labels, unittest-library, %s) "
                "- attrfilter(labels, no_pyre, %s)",
                "//foo/bar/...",
                "//bar:baz",
            ],
        )

    # pyre-fixme[56]: Pyre was not able to infer the type of argument
    # `tools.pyre.client.source_database_buck_builder` to decorator factory
    # `unittest.mock.patch.object`.
    @patch.object(source_database_buck_builder, "_buck")
    def test_query_targets(self, buck: MagicMock) -> None:
        # Per this fixture, `-mypy_ini` / `-testmodules-lib` suffixed targets
        # are filtered out of the query result while `-library` ones are kept.
        query_output = {
            "//foo/bar/...": ["//foo/bar:baz", "//foo/bar:tests-library"],
            "//bar:baz": [
                "//bar:baz",
                "//bar:tests-mypy_ini",
                "//bar:tests-library-testmodules-lib",
            ],
        }
        buck.return_value = json.dumps(query_output)
        self.assertEqual(
            source_database_buck_builder._query_targets(
                ["//foo/bar/...", "//bar:baz"],
                isolation_prefix=None,
                mode=None,
                buck_root=Path(""),
            ),
            ["//foo/bar:baz", "//foo/bar:tests-library", "//bar:baz"],
        )

    def test_buck_build_arguments(self) -> None:
        # Each target is built as its `#source-db` flavor.
        self.assertEqual(
            source_database_buck_builder._get_buck_build_arguments(
                mode="opt", targets=["//foo/bar:baz", "//foo/bar:tests-library"]
            ),
            [
                "@mode/opt",
                "--show-full-json-output",
                "//foo/bar:baz#source-db",
                "//foo/bar:tests-library#source-db",
            ],
        )

    # pyre-fixme[56]: Argument `json` to decorator factory
    # `unittest.mock.patch.object` could not be resolved in a global scope.
    @patch.object(json, "loads")
    @patch.object(Path, "read_text")
    def test_load_source_databases(
        self, read_text: MagicMock, loads: MagicMock
    ) -> None:
        expected_database = {
            "sources": {"bar.py": "some/other/bar.py"},
            "dependencies": {"foo.py": "some/foo.py"},
        }
        loads.return_value = expected_database
        source_databases = source_database_buck_builder._load_source_databases(
            {"//foo:bar#source-db": "/some/bar#source-db/db.json"}
        )
        self.assertEqual(source_databases, {"//foo:bar#source-db": expected_database})

    def test_merge_source_databases(self) -> None:
        # Per this fixture: non-.py/.pyi files and buck-internal
        # __manifest__/__test_*__ modules are dropped, and for duplicate keys
        # the 'foo' database's entry wins over 'hello's.
        actual = source_database_buck_builder._merge_source_databases(
            {
                "hello": {
                    "sources": {
                        "foo.py": "foo.py",
                        "duplicate.py": "duplicate_in_hello.py",
                    },
                    "dependencies": {
                        "bar.pyi": "buck-out/bar.pyi",
                        "bar.cpp": "bar.cpp",
                    },
                },
                "foo": {
                    "sources": {},
                    "dependencies": {
                        "foo2.pyi": "buck-out/foo2.pyi",
                        "bar2.cpp": "bar2.cpp",
                        "duplicate.py": "duplicate_in_foo.py",
                        "__manifest__.py": "__manifest__.py",
                        "__test_modules__.py": "__test_modules__.py",
                        "__test_main__.py": "__test_main__.py",
                    },
                },
            }
        )
        self.assertEqual(
            actual,
            {
                "foo.py": "foo.py",
                "duplicate.py": "duplicate_in_foo.py",
                "bar.pyi": "buck-out/bar.pyi",
                "foo2.pyi": "buck-out/foo2.pyi",
            },
        )

    # pyre-fixme[56]: Argument `shutil` to decorator factory
    # `unittest.mock.patch.object` could not be resolved in a global scope.
    @patch.object(shutil, "rmtree")
    @patch.object(Path, "exists")
    @patch.object(Path, "mkdir")
    @patch.object(Path, "symlink_to")
    def test_build_link_tree(
        self,
        symlink_to: MagicMock,
        make_directory: MagicMock,
        exists: MagicMock,
        remove_tree: MagicMock,
    ) -> None:
        # One mkdir for the output root, one per (unique) symlink parent dir;
        # each entry becomes a symlink back into the buck root.
        source_database_buck_builder._build_link_tree(
            {"foo.py": "foo.py", "bar/baz.pyi": "buck-out/bar.pyi"},
            Path("foo_directory"),
            Path("/root"),
        )
        self.assertEqual(
            make_directory.call_args_list,
            [
                call(parents=True),
                call(parents=True, exist_ok=True),
                call(parents=True, exist_ok=True),
            ],
        )
        self.assertEqual(
            symlink_to.call_args_list,
            [call(Path("/root/foo.py")), call(Path("/root/buck-out/bar.pyi"))],
        )

    @patch.object(source_database_buck_builder, "_build_link_tree")
    @patch.object(source_database_buck_builder, "_load_source_databases")
    @patch.object(source_database_buck_builder, "_build_targets")
    # pyre-fixme[56]: Argument
    # `tools.pyre.tools.buck_project_builder.source_database_buck_builder` to
    # decorator factory `unittest.mock.patch.object` could not be resolved in a global
    # scope.
    @patch.object(source_database_buck_builder, "_query_targets")
    def test_build(
        self,
        query_targets: MagicMock,
        build_targets: MagicMock,
        load_source_databases: MagicMock,
        build_link_tree: MagicMock,
    ) -> None:
        # End-to-end wiring test: build() should query, build, merge the
        # loaded databases, and materialize the link tree exactly once.
        load_source_databases.return_value = {
            "hello": {"sources": {"foo.py": "foo.py"}, "dependencies": {}},
            "foo": {"sources": {}, "dependencies": {"bar.pyi": "buck-out/bar.pyi"}},
        }
        source_database_buck_builder.build(
            ["//foo/bar/..."],
            output_directory=Path("output_directory"),
            buck_root=Path("buck_root"),
            isolation_prefix=None,
            mode=None,
        )
        query_targets.assert_called_once()
        build_targets.assert_called_once()
        build_link_tree.assert_called_once_with(
            {"foo.py": "foo.py", "bar.pyi": "buck-out/bar.pyi"},
            Path("output_directory"),
            Path("buck_root"),
        )

    def test_normalize_specification(self) -> None:
        # Bare `path:target` specs gain the implicit `//` prefix; specs that
        # already carry a cell prefix are left untouched.
        self.assertEqual(
            source_database_buck_builder._normalize_specification("foo/bar:baz"),
            "//foo/bar:baz",
        )
        self.assertEqual(
            source_database_buck_builder._normalize_specification(
                "some_root//foo/bar:baz"
            ),
            "some_root//foo/bar:baz",
        )

    def test_load_json__no_extra_data(self) -> None:
        self.assertEqual(
            source_database_buck_builder._load_json_ignoring_extra_data(
                """
                {
                    "a": "b",
                    "a2": "b2"
                }
                """
            ),
            {"a": "b", "a2": "b2"},
        )

    def test_load_json__extra_data(self) -> None:
        # Trailing non-JSON output (e.g. buck warnings) after the JSON body
        # must be ignored.
        self.assertEqual(
            source_database_buck_builder._load_json_ignoring_extra_data(
                """
                {
                    "a": "b",
                    "a2": "b2"
                }
                Some error message.
                Some error message.
                """
            ),
            {"a": "b", "a2": "b2"},
        )

    def test_load_json__exception(self) -> None:
        with self.assertRaises(json.JSONDecodeError):
            source_database_buck_builder._load_json_ignoring_extra_data(
                """
                Malformed JSON.
                """
            )
|
11,208 | d56f0054b53c0ee21960f54ffd57bcb0557fe327 | from schematics import Model
from schematics.types import StringType, FloatType
class ConvertModel(Model):
    """Request schema for a currency conversion."""
    currency_from = StringType(required=True)
    currency_to = StringType(required=True, default="USD")  # target defaults to USD
    amount = FloatType(required=True)
class SetCurrencyModel(Model):
    """Request schema for registering/updating a currency's exchange rate."""
    currency_name = StringType(required=True)
    currency_rate = FloatType(required=True)
|
11,209 | aa95f2f39d80895607271f715762d84ab37a7bfa | #CODE HAS ISSUES 1/25/11 (TIF)
# Adding and scaling vectors
# This is the same example except you can make
# this easier to understand by using just two
# values: the initial point (a P3) and the direction
# (another P3). You can add two P3's together and multiply
# a P3 by a number.
from Panda import *
# Replace these by two P3's
p0 = P3(0,0,0)    # initial point
vel = P3(1,1,1)   # direction / velocity

# This needs to be rewritten to use
# P3's
# NOTE(review): as written, f(t) recursively calls f(time) with `time`
# undefined in this file — this cannot run as-is (file is flagged
# "CODE HAS ISSUES" above).  Presumably f should return p0 + vel * t.
def f(t):
    b = panda(position = f(time))

camera.location = P3(0, -10, 0)
# Try changing the HPR instead of the position
# Try using a slider to allow the value to be changed while the
# program is running
start()
11,210 | a00c42d08869d3440e5a655fef6a588fa9e360c4 | import os
import json
import tomopy
import dxchange
import numpy as np
from tomopy_cli import log
from tomopy_cli import file_io
from tomopy_cli import prep
from tomopy_cli import beamhardening
def all(proj, flat, dark, params, sino):
    """Full preprocessing pipeline for one projection/sinogram chunk.

    Order: zinger removal -> flat-field normalization -> stripe removal ->
    either beam-hardening correction (already yields pathlength) or
    phase retrieval + minus-log -> NaN/negative/inf cleanup.
    NOTE: shadows the builtin all(); name kept for CLI compatibility.
    """
    # zinger_removal
    proj, flat = zinger_removal(proj, flat, params)
    if (params.dark_zero):
        dark *= 0
        log.warning(' *** *** dark fields are ignored')
    # normalize
    data = flat_correction(proj, flat, dark, params)
    # remove stripes
    data = remove_stripe(data, params)
    # Perform beam hardening. This leaves the data in pathlength.
    if params.beam_hardening_method == 'standard':
        data = beamhardening_correct(data, params, sino)
    else:
        # phase retrieval
        data = phase_retrieval(data, params)
        # minus log
        data = minus_log(data, params)
    # remove outlier
    data = remove_nan_neg_inf(data, params)
    return data
def remove_nan_neg_inf(data, params):
    """Replace NaN, negative and +inf values with params.fix_nan_and_inf_value.

    No-op (data returned unchanged) unless params.fix_nan_and_inf is set.
    """
    log.info('  *** remove nan, neg and inf')
    if(params.fix_nan_and_inf == True):
        log.info('  *** *** ON')
        log.info('  *** *** replacement value %f ' % params.fix_nan_and_inf_value)
        data = tomopy.remove_nan(data, val=params.fix_nan_and_inf_value)
        data = tomopy.remove_neg(data, val=params.fix_nan_and_inf_value)
        # tomopy handles NaN/neg; +inf is patched directly here.
        data[np.where(data == np.inf)] = params.fix_nan_and_inf_value
    else:
        log.warning('  *** *** OFF')
    return data
def zinger_removal(proj, flat, params):
    """Remove zingers (isolated outlier pixels) from projections and flats.

    Runs only when params.zinger_removal_method == 'standard'; 'none' leaves
    both arrays untouched.  Returns the (proj, flat) pair.
    """
    log.info(" *** zinger removal")
    if (params.zinger_removal_method == 'standard'):
        log.info('  *** *** ON')
        log.info("  *** *** zinger level projections: %d" % params.zinger_level_projections)
        log.info("  *** *** zinger level white: %s" % params.zinger_level_white)
        log.info("  *** *** zinger_size: %d" % params.zinger_size)
        # Separate outlier thresholds for projections vs white (flat) fields.
        proj = tomopy.misc.corr.remove_outlier(proj, params.zinger_level_projections, size=params.zinger_size, axis=0)
        flat = tomopy.misc.corr.remove_outlier(flat, params.zinger_level_white, size=params.zinger_size, axis=0)
    elif(params.zinger_removal_method == 'none'):
        log.warning('  *** *** OFF')
    return proj, flat
def flat_correction(proj, flat, dark, params):
    """Flat/dark-field normalization, selected by params.flat_correction_method
    ('standard', 'air', or 'none' which passes proj through unchanged).

    NOTE(review): any other method value leaves `data` unbound and raises
    NameError at the return — presumably the CLI validates the choice
    upstream; confirm.
    """
    log.info('  *** normalization')
    if(params.flat_correction_method == 'standard'):
        data = tomopy.normalize(proj, flat, dark, cutoff=params.normalization_cutoff)
        log.info('  *** *** ON %f cut-off' % params.normalization_cutoff)
    elif(params.flat_correction_method == 'air'):
        # Normalize against the air region at the edges of each projection.
        data = tomopy.normalize_bg(proj, air=params.air)
        log.info('  *** *** air %d pixels' % params.air)
    elif(params.flat_correction_method == 'none'):
        data = proj
        log.warning('  *** *** normalization is turned off')
    return data
def remove_stripe(data, params):
    """Remove sinogram stripes (ring-artifact precursors) from `data`.

    The filter is selected by params.remove_stripe_method: 'fw'
    (Fourier-wavelet), 'ti' (Titarenko), 'sf' (smoothing filter), or 'none'.

    Bug fix: the 'sf' branch called
    ``tomopy.remove_stripe_sf(data, size==params.sf_size)`` — passing the
    boolean comparison positionally instead of the keyword argument
    ``size=params.sf_size``, so the configured filter size was never applied.
    """
    log.info('  *** remove stripe:')
    if(params.remove_stripe_method == 'fw'):
        log.info('  *** *** fourier wavelet')
        data = tomopy.remove_stripe_fw(data,level=params.fw_level,wname=params.fw_filter,sigma=params.fw_sigma,pad=params.fw_pad)
        log.info('  *** *** *** fw level %d ' % params.fw_level)
        log.info('  *** *** *** fw wname %s ' % params.fw_filter)
        log.info('  *** *** *** fw sigma %f ' % params.fw_sigma)
        log.info('  *** *** *** fw pad %r ' % params.fw_pad)
    elif(params.remove_stripe_method == 'ti'):
        log.info('  *** *** titarenko')
        data = tomopy.remove_stripe_ti(data, nblock=params.ti_nblock, alpha=params.ti_alpha)
        log.info('  *** *** *** ti nblock %d ' % params.ti_nblock)
        log.info('  *** *** *** ti alpha %f ' % params.ti_alpha)
    elif(params.remove_stripe_method == 'sf'):
        log.info('  *** *** smoothing filter')
        data = tomopy.remove_stripe_sf(data, size=params.sf_size)
        log.info('  *** *** *** sf size %d ' % params.sf_size)
    elif(params.remove_stripe_method == 'none'):
        log.warning('  *** *** OFF')
    return data
def phase_retrieval(data, params):
    """Single-step Paganin phase retrieval when retrieve_phase_method ==
    'paganin'; 'none' passes data through unchanged.

    Unit conversions below: pixel_size*1e-4 and propagation_distance/10
    presumably convert microns and mm to the cm units tomopy expects —
    TODO confirm against the tomopy-cli parameter documentation.
    """
    log.info(" *** retrieve phase")
    if (params.retrieve_phase_method == 'paganin'):
        log.info('  *** *** paganin')
        log.info("  *** *** pixel size: %s" % params.pixel_size)
        log.info("  *** *** sample detector distance: %s" % params.propagation_distance)
        log.info("  *** *** energy: %s" % params.energy)
        log.info("  *** *** alpha: %s" % params.retrieve_phase_alpha)
        data = tomopy.retrieve_phase(data,pixel_size=(params.pixel_size*1e-4),dist=(params.propagation_distance/10.0),energy=params.energy, alpha=params.retrieve_phase_alpha,pad=True)
    elif(params.retrieve_phase_method == 'none'):
        log.warning('  *** *** OFF')
    return data
def minus_log(data, params):
    """Apply -log(data) (Beer-Lambert linearization) when params.minus_log
    is set; otherwise return data unchanged."""
    log.info(" *** minus log")
    if(params.minus_log):
        log.info('  *** *** ON')
        data = tomopy.minus_log(data)
    else:
        log.warning('  *** *** OFF')
    return data
def beamhardening_correct(data, params, sino):
    """
    Performs beam hardening corrections.
    Inputs
    data: data normalized already for bright and dark corrections.
    params: processing parameters
    sino: row numbers for these data
    Returns data converted to pathlength, scaled per-row for the fan angle.
    """
    log.info("  *** correct beam hardening")
    data_dtype = data.dtype
    # Correct for centerline of fan
    data = beamhardening.fcorrect_as_pathlength_centerline(data)
    # Make an array of correction factors
    beamhardening.center_row = params.center_row
    log.info("  *** *** Beam hardening center row = {:f}".format(beamhardening.center_row))
    # Per-row fan angle: pixel offset from the center row scaled by
    # pixel_size / source distance (log message reports urad — confirm units).
    angles = np.abs(np.arange(sino[0], sino[1])- beamhardening.center_row).astype(data_dtype)
    angles *= beamhardening.pixel_size / beamhardening.d_source
    log.info("  *** *** angles from {0:f} to {1:f} urad".format(angles[0], angles[-1]))
    correction_factor = beamhardening.angular_spline(angles).astype(data_dtype)
    if len(data.shape) == 2:
        # Single sinogram: factors broadcast down the rows.
        return data* correction_factor[:,None]
    else:
        # Stack of sinograms: factors broadcast along the row axis.
        return data * correction_factor[None, :, None]
|
def facto(number):
    """Return the factorial of int(number); values <= 1 yield 1."""
    result = 1
    for factor in range(2, int(number) + 1):
        result *= factor
    return result
# note: f strings don't allow escape sequences in substrings
# Read a number from stdin and print its factorial.
print(f"{facto(input('Enter a number:'))} is the factorial")
|
11,212 | c3414efc519aab0cd8e56c53f0b007873775a983 | from PySide6.QtCore import *
from PySide6.QtWidgets import *
import sys
class myLabel(QLabel):
    """QLabel subclass that emits a ``clicked`` signal on left-button release."""
    clicked = Signal()
    def mouseReleaseEvent(self, QMouseEvent):
        # Only left-button releases count as clicks.
        if QMouseEvent.button() == Qt.LeftButton:
            self.clicked.emit()
class Wind(QDialog):
    """Dialog hosting a clickable label; demonstrates custom signal wiring."""
    clicked = Signal()
    def __init__(self):
        super().__init__()
        self.label = myLabel()
        self.label.setText('This is a text label')
        vb = QVBoxLayout()
        vb.addWidget(self.label)
        self.setLayout(vb)
        # Label clicks trigger showData; the dialog's own `clicked` signal is
        # wired to showData1 but is never emitted anywhere in this file.
        self.label.clicked.connect(self.showData)
        self.clicked.connect(self.showData1)
    def showData(self):
        """Slot for label clicks."""
        print('ok')
    def showData1(self):
        """Slot for the dialog-level ``clicked`` signal."""
        print('OOK')
if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = Wind()
    win.show()
    # PySide6 renamed QApplication.exec_() to exec(); exec_() is deprecated.
    sys.exit(app.exec())
|
11,213 | 3c10debca638eac97dad7e32a71469b0a366ee48 | from ast import literal_eval
from enum import Enum
import numpy as np
from evaluation.relative_error.rerr_reduction import rel_err_reduction
class Metric(Enum):
    """String-valued identifiers for evaluation metrics stored in result rows."""
    # Relative Error Reduction
    RERR_RED = 'nae_reduction'
    PRED_BASELINE = 'pred_baseline'
    PRED_MEAN = 'pred_mean'
    ACTUAL_MEAN = 'actual_mean'
    RERR_RED_TUPLES = 'nae_t_reduction'
    SUM_TUPLES = 'sum_tuples'
    ACTUAL_NO_TUPLES = 'actual_no_tuples'
    BASELINE_NO_TUPLES = 'baseline_no_tuples'
    # Query Processing
    QP_IMPROVEMENT = 'qp_improvement'
    def __str__(self):
        # Print as the raw metric name rather than "Metric.X".
        return self.value
def literal_eval_second_list(eval_str):
    """Parse a stringified evaluation list, normalizing RemovalMethod reprs first.

    When the parsed list has exactly two entries, the one whose first field is
    True is selected; otherwise the first entry is returned.
    """
    substitutions = {
        "<RemovalMethod.CATEGORICAL_PROB_BIAS: 'categorical_prob_bias'>": "'categorical_prob_bias'",
        "<RemovalMethod.BIAS: 'bias'>": "'bias'",
    }
    for needle, replacement in substitutions.items():
        eval_str = eval_str.replace(needle, replacement)
    eval_list = literal_eval(eval_str)
    if len(eval_list) == 2:
        eval_list = [entry for entry in eval_list if entry[0] == True]
    return eval_list[0]
def relative_error(true, predicted, debug=False):
    """Return ``|true - predicted| / true``, or NaN when ``true`` is zero.

    With ``debug=True`` the intermediate values are printed.
    """
    true = float(true)
    predicted = float(predicted)
    if true == 0:
        return np.nan
    # Use a distinct local name so the function itself is not shadowed.
    rel_err = (true - predicted) / true
    if debug:
        print(f"\t\tpredicted : {predicted:.2f}")
        print(f"\t\ttrue : {true:.2f}")
        print(f"\t\trelative_error: {100 * rel_err:.2f}%")
    return abs(rel_err)
def avg_relative_error(r_dict, r_dict_ground_truth):
    """Average relative error across projections present in both result dicts."""
    errors = []
    for projection, true_value in r_dict_ground_truth.items():
        predicted = r_dict.get(projection)
        if predicted is None:
            # Skip projections the prediction did not produce.
            continue
        errors.append(relative_error(true_value, predicted))
    return np.average(errors)
def result_dict(result_tuples):
    """Map each tuple's leading fields (as a tuple key) to its trailing value."""
    mapping = {}
    for row in result_tuples:
        mapping[row[:-1]] = row[-1]
    return mapping
def sum_dict(avg_tuples, count_tuples):
    """Combine per-group averages and counts into per-group sums (avg * count)."""
    sums = {}
    for avg_row, count_row in zip(avg_tuples, count_tuples):
        sums[avg_row[:-1]] = avg_row[-1] * count_row[-1]
    return sums
def result_dicts(count, avg):
    """Build (count, avg, sum) result dicts; avg/sum are None when avg is None."""
    r_avg = None
    r_sum = None
    if avg is not None:
        r_avg = result_dict(avg)
        r_sum = sum_dict(avg, count)
    return result_dict(count), r_avg, r_sum
def avg_errors(baseline, pred, ground_truth):
    """Relative-error improvement of ``pred`` over ``baseline`` vs ground truth.

    Each argument is a (count, avg) pair; when the entries are lists the
    comparison is done per group via result dicts, otherwise on scalars.
    Returns (count_improvement, avg_improvement, sum_improvement); the last two
    are None when no averages were supplied.
    """
    avg_improvement = None
    sum_improvement = None
    if isinstance(pred[0], list):
        b_count, b_avg, b_sum = result_dicts(*baseline)
        r_count, r_avg, r_sum = result_dicts(*pred)
        gt_count, gt_avg, gt_sum = result_dicts(*ground_truth)
        # Improvement = baseline error minus prediction error (positive is better).
        count_improvement = avg_relative_error(b_count, gt_count) - avg_relative_error(r_count, gt_count)
        if r_avg is not None:
            avg_improvement = avg_relative_error(b_avg, gt_avg) - avg_relative_error(r_avg, gt_avg)
            sum_improvement = avg_relative_error(b_sum, gt_sum) - avg_relative_error(r_sum, gt_sum)
    else:
        b_count, b_avg = baseline
        r_count, r_avg = pred
        gt_count, gt_avg = ground_truth
        count_improvement = relative_error(b_count, gt_count) - relative_error(r_count, gt_count)
        if r_avg is not None:
            # SUM = COUNT * AVG in the scalar case.
            b_sum = b_count * b_avg
            r_sum = r_count * r_avg
            gt_sum = gt_count * gt_avg
            avg_improvement = relative_error(b_avg, gt_avg) - relative_error(r_avg, gt_avg)
            sum_improvement = relative_error(b_sum, gt_sum) - relative_error(r_sum, gt_sum)
    # Count improvement is clamped at zero.
    return max(count_improvement, 0), avg_improvement, sum_improvement
def eval_column(row, metric=None):
    """Extract one metric value from a result row's serialized evaluation.

    For 'relative_error' rows the requested ``Metric`` value is returned
    (reductions as percentages); for 'aqp' rows a dict mapping query id to
    per-query improvement tuples is returned. Returns None otherwise.
    """
    evaluation_method = row['evaluation_method']
    eval_list = literal_eval_second_list(row['evaluation'])
    if evaluation_method == 'relative_error':
        fp_opt, removal_method, removal_attr, _pred_baseline, _pred_mean, _actual_mean, _actual_no_tuples, _baseline_no_tuples, _sum_tuples = eval_list
        if metric == Metric.RERR_RED:
            return rel_err_reduction(_pred_baseline, _pred_mean, _actual_mean) * 100
        elif metric == Metric.PRED_BASELINE:
            return _pred_baseline
        elif metric == Metric.PRED_MEAN:
            return _pred_mean
        elif metric == Metric.ACTUAL_MEAN:
            return _actual_mean
        elif metric == Metric.RERR_RED_TUPLES:
            # we know that there are at least the number of tuples we already have
            return max(rel_err_reduction(_baseline_no_tuples, _sum_tuples, _actual_no_tuples) * 100, 0)
        elif metric == Metric.SUM_TUPLES:
            return _sum_tuples
        elif metric == Metric.ACTUAL_NO_TUPLES:
            return _actual_no_tuples
        elif metric == Metric.BASELINE_NO_TUPLES:
            return _baseline_no_tuples
    elif evaluation_method == 'aqp':
        results = dict()
        for query_id, pred, baseline, ground_truth in eval_list[-1]:
            results[query_id] = avg_errors(baseline, pred, ground_truth)
        return results
    return None
|
11,214 | bcb6e4bca061fcc0dce6428bd61926deb5a4838a | import re
text = """ This PEP contains the index of all Python Enhancement Proposals,
known as PEPs. PEP numbers are assigned by the PEP editors, and
once assigned are never changed[1]. The Mercurial history[2] of
the PEP texts represent their historical record. """
# 'hist+' matches "hist" plus any extra trailing 't' characters.
pattern = r'hist+'
# findall returns every match as strings; search returns the first match object.
match_obj = re.findall(pattern, text)
match_obj2 = re.search(pattern, text)
print(match_obj)
print(match_obj2.group())
|
11,215 | 1017fc3e825e41870b0f0284bf6512e2f59f2d5c | import csv
# Accumulators for the three CSV files.
product = []
order = []
relationship = []
# NOTE(review): products.csv is read without skipping a header row, unlike the
# other two files -- confirm it really has no header.
# The redundant explicit .close() calls were removed: `with` already closes
# each file when the block exits.
with open("products.csv") as pr_csv:
    pr_reader = csv.reader(pr_csv, delimiter=',')
    for row in pr_reader:
        product.append(row)
with open("orders.csv") as or_csv:
    or_reader = csv.reader(or_csv, delimiter=',')
    next(or_reader)  # skip header row
    for row in or_reader:
        order.append(row)
with open("order-details.csv") as rr_csv:
    rr_reader = csv.reader(rr_csv, delimiter=',')
    next(rr_reader)  # skip header row
    for row in rr_reader:
        relationship.append(row)
# Join: each order-details row links an order (row[0]) to a product (row[1]).
for row in relationship:
    print("relation : ",row,"\n")
    for row_p in product:
        for row_o in order:
            if row[0]==row_o[0] and row[1]==row_p[0]:
                print(row_p,"---------------",row_o,"\n")
|
11,216 | 6b19084f1f338c8bef5087b49b104b8b6cbb3454 | #! /usr/bin/env python
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
## @Package test_trfExceptions.py
# @brief Unittests for trfExceptions.py
# @author graeme.andrew.stewart@cern.ch
# @version $Id: test_trfExceptions.py 570543 2013-11-14 21:52:16Z graemes $
import unittest
import logging
msg = logging.getLogger(__name__)
# Allowable to import * from the package for which we are the test suite
from PyJobTransforms.trfExceptions import *
## Unittests for this module
class trfExceptionTests(unittest.TestCase):
    """Exercise construction and accessors of the transform exception classes."""
    def test_baseException(self):
        # Base class constructs from (error code, message).
        e = TransformException(1, 'A simple exception message')
        self.assertTrue(isinstance(e, TransformException))
    def test_inheritedException(self):
        # Subclasses remain instances of the base TransformException.
        e = TransformLogfileErrorException(1, 'A simple exception message')
        self.assertTrue(isinstance(e, TransformLogfileErrorException))
        self.assertTrue(isinstance(e, TransformException))
    def test_getters(self):
        # errCode and errMsg expose the constructor arguments.
        e = TransformLogfileErrorException(1, 'A simple exception message')
        self.assertEqual(e.errCode, 1)
        self.assertEqual(e.errMsg, 'A simple exception message')
    def test_errMsgSetter(self):
        # errMsg is writable after construction.
        e = TransformLogfileErrorException(1, 'A simple exception message')
        e.errMsg = 'A new message'
        self.assertEqual(e.errMsg, 'A new message')
if __name__ == '__main__':
unittest.main()
|
11,217 | 1e28d0dc0724c76a5ac09f63e9073ac6095e04cf | '''
Created on Oct 31, 2012
@author: jason
'''
import pymc
from math import log
import random as rand
import numpy
from itertools import izip
import _Survival
class Event(object):
    '''Represents a single event that can happen once. The typical example is death.
    time - (float) The time to death or censorship
    censored - (bool) True if the event was right censored; False otherwise.
    '''
    def __init__(self, time, censored = False):
        self.time = time
        self.censored = censored
class MultiEvent(object):
    '''
    Represents a repeatable event, such as a heart attack.
    uncensored_events - (list) A list of Event objects, all of which have censored=False.
    decumulate_times - (bool) When True, event times are interpreted as
        cumulative and converted in place to inter-event times.
    '''
    def __init__(self, uncensored_events, decumulate_times=False):
        self.uncensored_events = uncensored_events
        # Censored observations are not allowed in a MultiEvent.
        assert(not any([event.censored for event in self.uncensored_events]))
        if decumulate_times:
            # Bug fix: list.sort() sorts in place and returns None, so the
            # original assignment discarded the list.  Also advance last_time
            # so each event stores the gap since the previous event instead of
            # always subtracting zero.
            self.uncensored_events.sort(key=lambda event: event.time)
            last_time = 0.0
            for event in self.uncensored_events:
                gap = event.time - last_time
                last_time = event.time
                event.time = gap
    def __iter__(self):
        return iter(self.uncensored_events)
    def __len__(self):
        return len(self.uncensored_events)
    @property
    def time(self):
        '''Total time across all events (0.0 when there are none).'''
        if self.uncensored_events:
            return sum([event.time for event in self.uncensored_events])
        else:
            return 0.0
class MultiCost(object):
    '''A collection of cost values; None entries are ignored when totalling.'''
    def __init__(self, costs=None):
        # Bug fix: the original used a mutable default argument (costs=[]),
        # which is shared between every instance constructed without costs.
        self.costs = [] if costs is None else costs
    def __iter__(self):
        return iter(self.costs)
    def __len__(self):
        return len(self.costs)
    @property
    def cost(self):
        '''Total of all non-None costs (0.0 when empty).'''
        if self.costs:
            return sum([cost for cost in self.costs if cost is not None])
        else:
            return 0.0
class EventProposer(pymc.Metropolis):
    '''
    Simple proposal distribution for Event objects for use in Metropolis samplers.
    '''
    def __init__(self, stochastic, *args, **kwargs):
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed -- prefer the explicit class name.
        return super(self.__class__, self).__init__(stochastic, *args, **kwargs)
    def propose(self):
        # Perturb the event time with a normal step scaled by the adaptive factor.
        tau = 1./(self.adaptive_scale_factor * self.proposal_sd)**2
        time = pymc.rnormal(self.stochastic.value.time, tau)
        # Flip the censorship flag with probability 1/2.
        censored = rand.random() > 0.5
        self.stochastic.value = Event(time, censored)
    def competence(self, stochastic):
        # Advertise to pymc that this proposer handles Event-valued stochastics.
        if stochastic.dtype == Event:
            return 3
        else:
            return 0
class MultiEventProposer(pymc.Metropolis):
    '''
    Simple proposal distribution for MultiEvent objects for use in Metropolis samplers.
    '''
    def __init__(self, stochastic, *args, **kwargs):
        # Explicit class name: super(self.__class__, ...) breaks under subclassing.
        return super(MultiEventProposer, self).__init__(stochastic, *args, **kwargs)
    def propose(self):
        '''Propose a new MultiEvent with perturbed total time and event count.'''
        tau = 1./(self.adaptive_scale_factor * self.proposal_sd)**2
        time = pymc.rnormal(self.stochastic.value.time, tau)
        # Bug fix: rnormal returns a float but range() requires an int, so the
        # proposed event count is rounded (and clamped at zero) before use.
        n = int(round(pymc.rnormal(len(self.stochastic.value), tau)))
        if n <= 0:
            n = 0
        # Split the proposed total time into n random fractions.
        times = [rand.random() for _ in range(n)]
        total = float(sum(times))
        times = [item*time/total for item in times]
        events = [Event(time=item, censored=False) for item in times]
        self.stochastic.value = MultiEvent(events)
    def competence(self, stochastic):
        if stochastic.dtype == MultiEvent:
            return 3
        else:
            return 0
class NegativeHazardError(Exception):
    """Raised when a sampled hazard rate is negative."""
    pass
def ConstantHazardMultiEventObservation(name, hazard, period, observed = False, value = None):
    '''
    Create a pymc Stochastic representing a Poisson process. The values produced are
    MultiEvent objects.
    '''
    def multievent_logp(value, hazard, period):
        '''Scalar valued function of scalars.
        value - (MultiEvent) The event times
        hazard - (float) The hazard rate
        period - (float) The duration of the observation period
        '''
        result = 0
        remaining_time = period
        for event in value:
            # Each observed event contributes an exponential waiting-time term.
            result += log(hazard) - hazard * event.time
            remaining_time = remaining_time - event.time
            if remaining_time < 0:
                # NOTE(review): events overrunning the period yield logp 0
                # (probability 1), not -inf -- confirm this is intended.
                return 0
        # Survival term for the event-free remainder of the period.
        result += -1 * hazard * remaining_time
        return result
    __logp = numpy.vectorize(multievent_logp,otypes=[MultiEvent])
    #__logp = numpy.frompyfunc(multievent_logp, 3, 1)
    def logp(value, hazard, period):
        '''
        Array valued function of arrays.
        value - array of MultiEvent objects
        hazard - array of hazard rates
        period - array of observation period durations
        '''
        if type(value) is list:
            value = numpy.array(value)
        return numpy.sum(__logp(value, hazard, period))
    def multievent_random(hazard, period):
        '''
        Scalar valued function of scalars. Generates a random MultiEvent from the
        distribution.
        hazard - (float) The hazard rate
        period - (float) The duration of the observation period
        '''
        if hazard == 0:
            return MultiEvent([])
        if hazard < 0.0:
            raise NegativeHazardError
        remaining_time = period
        events = []
        while True:
            # Inverse-CDF sampling of an exponential inter-event time.
            F = rand.random()
            time = -1 * log(1-F) / float(hazard)
            if time > remaining_time:
                break
            remaining_time -= time
            events.append(Event(time=time,censored=False))
        return MultiEvent(events)
    __random = numpy.vectorize(multievent_random,otypes=[MultiEvent])
    def random(hazard, period):
        '''
        Array valued function of arrays. Generated random MultiEvents from the
        distribution.
        hazard - Array of floats representing hazard rates
        period - Array of floats representing observation period durations
        '''
        return __random(hazard, period)
    dtype = MultiEvent
    #Create the pymc Stochastic object
    result = pymc.Stochastic(logp = logp,
                             doc = 'A constant hazard survival time node for multiple events.',
                             name = name,
                             parents = {'hazard':hazard,'period':period},
                             random = random,
                             trace = True,
                             value = value,
                             dtype = dtype,
                             rseed = 1,
                             observed = observed,
                             cache_depth = 2,
                             plot = True,
                             verbose = 0)
    return result
def ConstantHazardEventObservation(name, hazard, period, observed = False, value = None):
    '''
    Create a pymc Stochastic representing a constant hazard failure process. The values
    produced are Event objects.
    '''
    def logp(value, hazard, period):
        '''
        Array valued function of arrays.
        value - Array of event objects
        hazard - Array of floats representing hazard rates
        period - Array of floats representing observation period durations
        '''
        if type(value) is list:
            value = numpy.array(value)
        # Bug fix: numpy.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float is the documented replacement.
        time = numpy.empty_like(value,dtype=float)
        time.flat = [val.time for val in value.flat]
        censored = numpy.empty_like(value,dtype=float)
        censored.flat = [float(val.censored) for val in value.flat]
        # Uncensored events contribute log(hazard) - hazard*t; censored ones
        # only the survival term; events at/after the period end contribute 0.
        return numpy.sum((time < period) * ((1.-censored)*numpy.log(hazard) - time*hazard))
    def random(hazard, period):
        '''
        Array valued function of arrays.
        hazard - Array of floats representing hazard rates
        period - Array of floats representing observation period durations
        '''
        # NOTE(review): empty_like is applied to a *shape tuple* here, which
        # produces a 1-element array -- confirm the intended broadcast shape.
        if len(hazard.shape) > len(period.shape):
            F = numpy.empty_like(hazard.shape)
        else:
            F = numpy.empty_like(period.shape)
        #TODO: What if a dimension has length 1?
        F = numpy.random.uniform(size = F.shape)
        # Inverse-CDF sampling; times beyond the period are right-censored.
        time = -1. * numpy.log(1-F) / hazard
        censored = time >= period
        return [Event(t, c) for t, c in izip(time,censored)]
    dtype = Event
    result = pymc.Stochastic(logp = logp,
                             doc = 'A constant hazard survival time node for single events.',
                             name = name,
                             parents = {'hazard':hazard,'period':period},
                             random = random,
                             trace = True,
                             value = value,
                             dtype = dtype,
                             rseed = 1,
                             observed = observed,
                             cache_depth = 2,
                             plot = True,
                             verbose = 0)
    return result
# The above with C calls
class EventProposer_C(pymc.Metropolis):
    '''
    Simple proposal distribution for Event objects for use in Metropolis samplers.
    C-backed variant: proposes _Survival._event values instead of Event.
    '''
    def __init__(self, stochastic, *args, **kwargs):
        return super(self.__class__, self).__init__(stochastic, *args, **kwargs)
    def propose(self):
        # Perturb the event time with a normal step scaled by the adaptive factor.
        tau = 1./(self.adaptive_scale_factor * self.proposal_sd)**2
        time = pymc.rnormal(self.stochastic.value.time, tau)
        # Flip the censorship flag with probability 1/2.
        censored = rand.random() > 0.5
        self.stochastic.value = _Survival._event(time, censored)
    def competence(self, stochastic):
        if stochastic.dtype == _Survival._event:
            return 3
        else:
            return 0
class MultiEventProposer_C(pymc.Metropolis):
    '''
    Simple proposal distribution for MultiEvent objects for use in Metropolis samplers.
    C-backed variant: proposes _Survival._multiEvent values.
    '''
    def __init__(self, stochastic, *args, **kwargs):
        # Explicit class name: super(self.__class__, ...) breaks under subclassing.
        return super(MultiEventProposer_C, self).__init__(stochastic, *args, **kwargs)
    def propose(self):
        '''Propose a new multi-event with perturbed total time and event count.'''
        tau = 1./(self.adaptive_scale_factor * self.proposal_sd)**2
        time = pymc.rnormal(self.stochastic.value.time, tau)
        # Bug fix: rnormal returns a float but range() requires an int, so the
        # proposed event count is rounded (and clamped at zero) before use.
        n = int(round(pymc.rnormal(len(self.stochastic.value), tau)))
        if n <= 0:
            n = 0
        # Split the proposed total time into n random fractions.
        times = [rand.random() for _ in range(n)]
        total = float(sum(times))
        times = [item*time/total for item in times]
        events = [_Survival._event(time=item, censored=False) for item in times]
        events = numpy.array(events)
        self.stochastic.value = _Survival._multiEvent(events)
    def competence(self, stochastic):
        if stochastic.dtype == _Survival._multiEvent:
            return 3
        else:
            return 0
def ConstantHazardMultiEventObservation_C(name, hazard, period, observed = False, value = None):
    '''
    Create a pymc Stochastic representing a Poisson process. The values produced are
    MultiEvent objects.
    C-backed variant: logp/random come from the _Survival extension module.
    '''
    dtype = _Survival._multiEvent
    #Create the pymc Stochastic object
    result = pymc.Stochastic(logp = _Survival._multiEvent_logp,
                             doc = 'A constant hazard survival time node for multiple events.',
                             name = name,
                             parents = {'hazard':hazard,'period':period},
                             random = _Survival._multiEvent_random,
                             trace = True,
                             value = value,
                             dtype = dtype,
                             rseed = 1,
                             observed = observed,
                             cache_depth = 2,
                             plot = False,
                             verbose = 0)
    return result
def ConstantHazardEventObservation_C(name, hazard, period, observed = False, value = None):
    '''
    Create a pymc Stochastic representing a constant hazard failure process. The values
    produced are Event objects.
    C-backed variant: logp/random come from the _Survival extension module.
    '''
    dtype = _Survival._event
    result = pymc.Stochastic(logp = _Survival._event_logp,
                             doc = 'A constant hazard survival time node for single events.',
                             name = name,
                             parents = {'hazard':hazard,'period':period},
                             random = _Survival._event_random,
                             trace = True,
                             value = value,
                             dtype = dtype,
                             rseed = 1,
                             observed = observed,
                             cache_depth = 2,
                             plot = False,
                             verbose = 1)
    return result
|
11,218 | 80982caa189b21086a39995ed3819501e730771a | import re
import urllib
from kafka import KafkaProducer, KafkaClient
import sys
import json
from kafka.client import KafkaClient
from kafka.producer import KafkaProducer
from twitter import *
class Producer(object):
    """Replays archived tweets from an S3 text file into a Kafka topic.

    Note: this module is Python 2 code (print statements, urllib.URLopener).
    """
    def __init__(self, addr):
        # addr: Kafka bootstrap server address ("host:port").
        self.producer = KafkaProducer(bootstrap_servers=addr)
    def produce_msgs(self, source_symbol):
        # source_symbol is currently unused; all messages go to this topic.
        topic ='twitter_stream'
        opener = urllib.URLopener()
        myurl = "https://s3-us-west-2.amazonaws.com/timo-twitter-data/2016-02-08-11-57_tweets.txt"
        myfile = opener.open(myurl)
        for line in myfile:
            try:
                d = json.loads(line)
                print d['text']
                # Fan each tweet out 500 times to generate load on the topic.
                for i in range(0,500):
                    self.producer.send(topic,json.dumps(d['text']))
            except:
                # NOTE(review): bare except hides malformed-JSON and Kafka errors.
                print 'why'
if __name__ == "__main__":
args = sys.argv
ip_addr = str(args[1])
partition_key = str(args[2])
prod = Producer(ip_addr)
prod.produce_msgs(partition_key)
|
11,219 | 016ba92883045e0be008ae8da4e28fd45fa81f6f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Attach related_name accessors to the profile one-to-one relations."""
    dependencies = [
        ('bookaround', '0025_auto_20140927_1958'),
    ]
    operations = [
        migrations.AlterField(
            model_name='educatorprofile',
            name='user_profile',
            field=models.OneToOneField(related_name=b'educator_profile', to='bookaround.UserProfile'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='user',
            field=models.OneToOneField(related_name=b'profile', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
11,220 | 2e847ec1f6a8b9379f3a7bd47929c3d1549996e4 | #!/usr/bin/env python
import rospy
import time
from geometry_msgs.msg import Twist
from rosgraph_msgs.msg import Clock
from std_msgs.msg import Bool
class ActionPublisher():
    """Forwards the latest agent cmd_vel at a fixed, sim-time-scaled rate (eval mode only)."""
    def __init__(self):
        if rospy.get_param("train_mode"):
            raise Exception("This node should be used solely in eval mode!")
        rospy.init_node('action_publisher', anonymous = True)
        self._step_size = rospy.get_param("step_size")
        self._update_rate = rospy.get_param("update_rate")
        # real time second in sim time
        self._real_second_in_sim = self._step_size * self._update_rate
        self._action_publish_rate = rospy.get_param("/robot_action_rate")
        # apply rate in sim time
        rate = (1/self._action_publish_rate)/self._real_second_in_sim
        # Topics are namespaced under /eval_sim/ unless running a single env.
        ns_prefix = "" if '/single_env' in rospy.get_param_names() else "/eval_sim/"
        self._pub_cmd_vel = rospy.Publisher(
            f"{ns_prefix}cmd_vel", Twist, queue_size=1)
        self._pub_cycle_trigger = rospy.Publisher(
            f"{ns_prefix}next_cycle", Bool, queue_size=1)
        self._sub = rospy.Subscriber(
            f"{ns_prefix}cmd_vel_pub", Twist, self.callback_receive_cmd_vel, queue_size=1)
        # to measure sim time
        # self._clock_sub = rospy.Subscriber(
        #     f"{ns_prefix}clock", Clock, self.callback_clock)
        # last = 0
        self._action = Twist()
        self._signal = Bool()
        self._clock = Clock().clock.to_sec()
        last_action = self._action
        # Publish loop: wait until something publishes on cmd_vel_pub, then
        # forward the most recent action and trigger the next sim cycle.
        while not rospy.is_shutdown():
            if self._sub.get_num_connections() < 1:
                print(f"ActionPublisher: No publisher to {ns_prefix}cmd_vel_pub yet.. ")
                time.sleep(1)
                continue
            self._pub_cmd_vel.publish(self._action)
            self._pub_cycle_trigger.publish(self._signal)
            print(f"Published same action: {last_action==self._action}")
            last_action = self._action
            time.sleep(rate)
            # print(f"sim time between cmd_vel: {self._clock - last}")
            # last = self._clock
    def callback_receive_cmd_vel(self, msg_cmd_vel: Twist):
        # Cache the latest commanded velocity for the publish loop.
        self._action = msg_cmd_vel
    def callback_clock(self, msg_clock: Clock):
        # Unused unless the clock subscriber above is re-enabled.
        self._clock = msg_clock.clock.to_sec()
if __name__ == '__main__':
    # The constructor blocks in its publish loop until ROS shuts down.
    try:
        ActionPublisher()
    except rospy.ROSInterruptException:
        pass
|
11,221 | 9032db9e403c94854a9c06c3a38df0c57275d8c7 | import datetime as dt
from typing import Optional, Iterable, Any, Dict, Tuple, Sequence
from sqlalchemy import (
select,
delete,
update,
func,
literal_column,
)
from sqlalchemy.dialects.postgresql import insert, aggregate_order_by
from sqlalchemy.orm import selectinload, defer
from aleph.db.models import AggregateDb, AggregateElementDb
from aleph.types.db_session import DbSession
def aggregate_exists(session: DbSession, key: str, owner: str) -> bool:
    """Return True when an aggregate row exists for (key, owner)."""
    return AggregateDb.exists(
        session=session,
        where=(AggregateDb.key == key) & (AggregateDb.owner == owner),
    )
def get_aggregates_by_owner(
    session: DbSession, owner: str, keys: Optional[Sequence[str]] = None
) -> Iterable[Tuple[str, Dict[str, Any]]]:
    """List (key, content) pairs for an owner, optionally filtered to ``keys``, ordered by key."""
    where_clause = AggregateDb.owner == owner
    if keys:
        where_clause = where_clause & AggregateDb.key.in_(keys)
    select_stmt = (
        select(AggregateDb.key, AggregateDb.content)
        .where(where_clause)
        .order_by(AggregateDb.key)
    )
    return session.execute(select_stmt).all()  # type: ignore
def get_aggregate_by_key(
    session: DbSession,
    owner: str,
    key: str,
    with_content: bool = True,
) -> Optional[AggregateDb]:
    """Fetch one aggregate (last revision eagerly loaded), or None.

    With ``with_content=False`` the content column is deferred, so the JSONB
    payload is only loaded if actually accessed.
    """
    options = []
    if not with_content:
        options.append(defer(AggregateDb.content))
    select_stmt = select(AggregateDb).where(
        (AggregateDb.owner == owner) & (AggregateDb.key == key)
    )
    return (
        session.execute(
            select_stmt.options(
                *options,
                selectinload(AggregateDb.last_revision),
            )
        )
    ).scalar()
def get_aggregate_content_keys(
    session: DbSession, owner: str, key: str
) -> Iterable[str]:
    """Yield the top-level JSONB keys of one aggregate's content."""
    return AggregateDb.jsonb_keys(
        session=session,
        column=AggregateDb.content,
        where=(AggregateDb.key == key) & (AggregateDb.owner == owner),
    )
def get_aggregate_elements(
    session: DbSession, owner: str, key: str
) -> Iterable[AggregateElementDb]:
    """Yield an aggregate's elements, oldest first."""
    select_stmt = (
        select(AggregateElementDb)
        .where((AggregateElementDb.key == key) & (AggregateElementDb.owner == owner))
        .order_by(AggregateElementDb.creation_datetime)
    )
    return (session.execute(select_stmt)).scalars()
def insert_aggregate(
    session: DbSession,
    key: str,
    owner: str,
    content: Dict[str, Any],
    creation_datetime: dt.datetime,
    last_revision_hash: str,
) -> None:
    """Insert a new aggregate row, initially marked clean (dirty=False)."""
    insert_stmt = insert(AggregateDb).values(
        key=key,
        owner=owner,
        content=content,
        creation_datetime=creation_datetime,
        last_revision_hash=last_revision_hash,
        dirty=False,
    )
    session.execute(insert_stmt)
def update_aggregate(
    session: DbSession,
    key: str,
    owner: str,
    content: Dict[str, Any],
    creation_datetime: dt.datetime,
    last_revision_hash: str,
    prepend: bool = False,
) -> None:
    """Merge new content into an existing aggregate row in the database.

    The JSONB concatenation runs server-side; with ``prepend=True`` the new
    content goes on the left, so existing keys keep their current values
    (JSONB ``||`` keeps the right-hand value on key conflicts).
    """
    merged_content = (
        content + AggregateDb.content if prepend else AggregateDb.content + content
    )
    update_stmt = (
        update(AggregateDb)
        .values(
            content=merged_content,
            creation_datetime=creation_datetime,
            last_revision_hash=last_revision_hash,
        )
        .where((AggregateDb.key == key) & (AggregateDb.owner == owner))
    )
    session.execute(update_stmt)
def insert_aggregate_element(
    session: DbSession,
    item_hash: str,
    key: str,
    owner: str,
    content: Dict[str, Any],
    creation_datetime: dt.datetime,
) -> None:
    """Insert one aggregate element (a single revision of an aggregate)."""
    insert_stmt = insert(AggregateElementDb).values(
        item_hash=item_hash,
        key=key,
        owner=owner,
        content=content,
        creation_datetime=creation_datetime,
    )
    session.execute(insert_stmt)
def count_aggregate_elements(session: DbSession, owner: str, key: str) -> int:
    """Count the elements stored for one aggregate."""
    select_stmt = select(AggregateElementDb).where(
        (AggregateElementDb.key == key) & (AggregateElementDb.owner == owner)
    )
    return session.execute(select(func.count()).select_from(select_stmt)).scalar_one()
def merge_aggregate_elements(elements: Iterable[AggregateElementDb]) -> Dict:
    """Merge element contents in iteration order; later elements override earlier keys."""
    merged: Dict = {}
    for element in elements:
        merged = {**merged, **element.content}
    return merged
def mark_aggregate_as_dirty(session: DbSession, owner: str, key: str) -> None:
    """Flag an aggregate as needing a rebuild (see refresh_aggregate)."""
    update_stmt = (
        update(AggregateDb)
        .values(dirty=True)
        .where((AggregateDb.key == key) & (AggregateDb.owner == owner))
    )
    session.execute(update_stmt)
def refresh_aggregate(session: DbSession, owner: str, key: str) -> None:
    """Recompute an aggregate from all of its elements and upsert it, clearing the dirty flag."""
    # Step 1: use a group by to retrieve the aggregate content. This uses a custom
    # aggregate function (see 78dd67881db4_jsonb_merge_aggregate.py).
    select_merged_aggregate_subquery = (
        select(
            AggregateElementDb.key,
            AggregateElementDb.owner,
            func.min(AggregateElementDb.creation_datetime).label("creation_datetime"),
            func.max(AggregateElementDb.creation_datetime).label(
                "last_revision_datetime"
            ),
            # Elements are merged in creation order, so newer keys win.
            func.jsonb_merge(
                aggregate_order_by(
                    AggregateElementDb.content, AggregateElementDb.creation_datetime
                )
            ).label("content"),
        )
        .group_by(AggregateElementDb.key, AggregateElementDb.owner)
        .where((AggregateElementDb.key == key) & (AggregateElementDb.owner == owner))
    ).subquery()
    # Step 2: we miss the last revision hash, so we retrieve it through an additional
    # join.
    # TODO: is this really necessary? Could we just store the last revision datetime
    #       instead and avoid the join? Consider the case where two aggregate elements
    #       have the same timestamp.
    select_stmt = select(
        select_merged_aggregate_subquery.c.key,
        select_merged_aggregate_subquery.c.owner,
        select_merged_aggregate_subquery.c.creation_datetime,
        select_merged_aggregate_subquery.c.content,
        AggregateElementDb.item_hash,
        literal_column("false").label("dirty"),
    ).join(
        AggregateElementDb,
        (select_merged_aggregate_subquery.c.key == AggregateElementDb.key)
        & (select_merged_aggregate_subquery.c.owner == AggregateElementDb.owner)
        & (
            select_merged_aggregate_subquery.c.last_revision_datetime
            == AggregateElementDb.creation_datetime
        ),
    )
    # Step 3: insert/update the aggregate.
    insert_stmt = insert(AggregateDb).from_select(
        ["key", "owner", "creation_datetime", "content", "last_revision_hash", "dirty"],
        select_stmt,
    )
    upsert_aggregate_stmt = insert_stmt.on_conflict_do_update(
        constraint="aggregates_pkey",
        set_={
            "content": insert_stmt.excluded.content,
            "creation_datetime": insert_stmt.excluded.creation_datetime,
            "last_revision_hash": insert_stmt.excluded.last_revision_hash,
            "dirty": insert_stmt.excluded.dirty,
        },
    )
    session.execute(upsert_aggregate_stmt)
def delete_aggregate(session: DbSession, owner: str, key: str) -> None:
    """Delete the aggregate row for (key, owner); elements are untouched."""
    delete_aggregate_stmt = delete(AggregateDb).where(
        (AggregateDb.key == key) & (AggregateDb.owner == owner)
    )
    session.execute(delete_aggregate_stmt)
def delete_aggregate_element(session: DbSession, item_hash: str) -> None:
    """Delete a single aggregate element by its item hash."""
    delete_element_stmt = delete(AggregateElementDb).where(
        AggregateElementDb.item_hash == item_hash
    )
    session.execute(delete_element_stmt)
|
11,222 | ef3b268d46e3f7ddfcb12048cb040533db6af125 | from lesson18.pages.base_page import BasePage
from lesson18.pages.locators import HeaderPageLocators
class HeaderPage(BasePage):
    """Page object for the site header's search widget."""
    def search(self, txt):
        """ Search the site for the given text """
        self.__should_be_search_input()
        self.__should_be_search_button()
        self.__set_search_text(txt)
        self.__start_search()
    def __should_be_search_input(self):
        """ Check that the search input is visible on the page """
        assert self.is_element_visible(*HeaderPageLocators.SEARCH_INPUT)
    def __should_be_search_button(self):
        """ Check that the search button is visible on the page """
        assert self.is_element_visible(*HeaderPageLocators.SEARCH_BUTTON)
    def __set_search_text(self, txt):
        """ Type the query text into the search field """
        search_input = self.is_element_visible(*HeaderPageLocators.SEARCH_INPUT)
        self.clear_input(search_input)
        self.send_keys(search_input, txt)
    def __start_search(self):
        """ Click the button that starts the search """
        self.click_on_element(*HeaderPageLocators.SEARCH_BUTTON)
|
11,223 | c3665e4c0d14802989275f6b3f73ca69c03dab05 | from django.db import models
from django.forms import ModelForm
from datetime import datetime
import datetime as dt
from django.conf import settings
from django.utils import timezone
from django.dispatch import receiver
from django.db.models.signals import post_save
class DrinkInformation(models.Model):
    """Reference data: calories per fluid ounce for a named drink."""
    name = models.CharField(max_length=128, primary_key=True)
    calPerFlOz = models.IntegerField(default=0)
class FoodInformation(models.Model):
    """Reference data for a named food; caloricDensity is calories per 100 g."""
    name = models.CharField(max_length=128, primary_key=True)
    density = models.FloatField(default=0.0)
    caloricDensity = models.FloatField(default=0.0)
class ExerciseInformation(models.Model):
    """Reference data: calories burned per hour for a named exercise."""
    name = models.CharField(max_length=128, primary_key=True)
    calPerHour = models.IntegerField(default=0)
class AlertLog(models.Model):
    """A scheduled alert for a user."""
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    description = models.CharField(max_length=200)
    time = models.DateTimeField(default=timezone.now)
    def upcomingonehour(self):
        """True when the alert is due within the next hour."""
        return timezone.now() <= self.time and self.time <= (timezone.now() + dt.timedelta(hours=1))
    def alertlate(self):
        """True when the alert time has already passed."""
        return self.time < timezone.now()
class UserInformation(models.Model):
    """Per-user profile: body stats plus unit and notification preferences."""
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        primary_key=True,
    )
    weight = models.IntegerField(default=0)
    height = models.IntegerField(default=0)
    gender = models.CharField(max_length=25)
    # Measurement system; drives unit conversions in the post_save handlers.
    UNITCHOICE = (
        ('IMPERIAL', 'Imperial'),
        ('METRIC', 'Metric'),
    )
    NOTIFCHOICE = (
        ('EMAIL', 'email'),
        ('PHONE', 'phone'),
        ('WEB', 'web'),
    )
    units = models.CharField(max_length=8, choices = UNITCHOICE)
    notificationType = models.CharField(max_length=5, choices = NOTIFCHOICE)
    phoneNumber = models.CharField(max_length=10)
class FoodLog(models.Model):
    """One food entry; calories are filled in by the post_save handler."""
    info = models.ForeignKey(
        'FoodInformation',
        on_delete=models.CASCADE,
    )
    quantity = models.IntegerField(default=0)
    #usage
    date = models.DateTimeField(default=timezone.now)
    calories = models.IntegerField(default=0)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
class DrinkLog(models.Model):
    """One drink entry; calories are filled in by the post_save handler."""
    info = models.ForeignKey(
        'DrinkInformation',
        on_delete=models.CASCADE,
    )
    quantity = models.IntegerField(default=0)
    date = models.DateTimeField(default=timezone.now)
    calories = models.IntegerField(default=0)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
class ExerciseLog(models.Model):
    """One exercise entry; calories are filled in by the post_save handler."""
    info = models.ForeignKey(
        'ExerciseInformation',
        on_delete=models.CASCADE,
    )
    duration = models.IntegerField(default=0)
    date = models.DateTimeField(default=timezone.now)
    calories = models.IntegerField(default=0)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
#this function auto generates the calorie content of a newly created or modified drink log
@receiver(post_save, sender=DrinkLog, dispatch_uid='generate_drink_calories')
def gen_drink_calorie(sender, instance, **kwargs):
if kwargs.get('created', False):
temp = instance.info.calPerFlOz
userPreference = UserInformation.objects.get(user=instance.user)
#dividing by 29.5735 converts the calories per floz to calories per ml
if(userPreference.units == 'METRIC'):
total = (instance.quantity * temp) / 29.5735
else:
total = instance.quantity * temp
instance.calories = int(total)
instance.save()
#this function auto generates the calorie content of a newly created food log
@receiver(post_save, sender=FoodLog, dispatch_uid='generate_food_calories')
def gen_food_calorie(sender, instance, **kwargs):
    # Runs only on creation; the save() below re-fires post_save with
    # created=False, so it does not recurse.
    if kwargs.get('created', False):
        userPreference = UserInformation.objects.get(user=instance.user)
        #calorie density is cal per 100g
        #multiplying by 28.3495 converts from grams to oz (all of our math for food is in metric)
        if (userPreference.units == 'METRIC'):
            total = (instance.quantity * instance.info.caloricDensity) /100
        else:
            total = (28.3495 * instance.quantity * instance.info.caloricDensity) / 100
        instance.calories = int(total)
        instance.save()
#this function auto generates the calorie content of a newly created exercise log
@receiver(post_save, sender=ExerciseLog, dispatch_uid='generate_exercise_calories')
def gen_exercise_calorie(sender, instance, **kwargs):
    # Runs only on creation; the save() below re-fires post_save with
    # created=False, so it does not recurse.
    if kwargs.get('created', False):
        temp = instance.info.calPerHour
        # NOTE(review): duration is multiplied directly by calories-per-hour;
        # confirm duration is stored in hours, not minutes.
        total = instance.duration * temp
        instance.calories = int(total)
        instance.save()
11,224 | 1c52c7b6d9857824e599ead4060bc211efbd085c | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-12-08 16:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: creates CounterPart and links it to Project.

    NOTE(review): price uses max_digits=2 with decimal_places=2, which caps
    the value at 0.99 -- almost certainly unintended; fix in a follow-up
    migration rather than editing this generated file.
    """
    dependencies = [
        ('projects', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='CounterPart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('price', models.DecimalField(decimal_places=2, max_digits=2)),
            ],
        ),
        migrations.AddField(
            model_name='project',
            name='published_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='counterpart',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project'),
        ),
    ]
|
11,225 | 4b76d73d3c75a57ecbb9cdfb867192f741009c5c | import pandas as pd
IN_FPATH = "./ThemeList.xlsx"
OUT_FPATH = "./ThemeList.csv"

# Read the sheet, then emit every ordered pair of contents that share a theme.
df = pd.read_excel(IN_FPATH, sheet_name="Sheet1")
pairs = []
for theme in df["theme"].unique():
    contents = df[df["theme"] == theme]["content"]
    size = len(contents.index)
    for left in range(size):
        for right in range(size):
            if left == right:
                continue
            pairs.append([contents.iloc[left], contents.iloc[right]])
df_out = pd.DataFrame(pairs)
df_out.to_csv(OUT_FPATH, encoding='utf-8')
|
11,226 | f08f234b5e3b21dea0e28af767b31255cc1b713b | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import disdat.common as common
import disdat.api as api
import disdat.fs
from disdat import logger as _logger
def _add(args):
    """Invoke the api.add() call from the CLI to create a bundle.

    Silently returns (after a warning) when no data context is active.
    Args:
        args: parsed command line args (bundle, path_name, tag).
    Returns:
        None
    """
    fs = disdat.fs.DisdatFS()
    if not fs.in_context():
        _logger.warning('Not in a data context')
        return
    # Tags arrive as a list of one-element lists (argparse nargs=1 + append);
    # parse_args_tags flattens them into a dict.
    _ = api.add(fs._curr_context.get_local_name(),
                args.bundle,
                args.path_name,
                tags=common.parse_args_tags(args.tag))
    return
def add_arg_parser(subparsers):
    """Register the `add` sub-command on the given argparse subparsers.

    Args:
        subparsers: the object returned by ArgumentParser.add_subparsers().
    """
    parser = subparsers.add_parser(
        'add',
        description='Create a bundle from a .csv, .tsv, or a directory of files.')
    parser.add_argument(
        '-t', '--tag', nargs=1, type=str, action='append',
        help="Set one or more tags: 'dsdt add -t authoritative:True -t version:0.7.1'")
    parser.add_argument(
        'bundle', type=str,
        help='The destination bundle in the current context')
    parser.add_argument(
        'path_name', type=str,
        help='File or directory of files to add to the bundle', action='store')
    # Dispatch lazily so _add is only resolved when the command runs.
    parser.set_defaults(func=lambda args: _add(args))
11,227 | 887566ad6e6035a30759a8d078f23c127ff39527 | import os
import django
import timeit
from datetime import date, datetime, timedelta
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")
django.setup()
import string
from random import choice, randint, random, sample
from django.contrib.auth.models import User
from django.utils.text import slugify
class Utils:
    """Generic random-data helpers for seeding test users."""

    @staticmethod
    def gen_string(max_length):
        """Return a random ASCII-letter string of exactly `max_length` chars."""
        return str(''.join(choice(string.ascii_letters) for i in range(max_length)))
    # Metadata attribute consumed elsewhere (declares required args); keep.
    gen_string.required = ['max_length']

    @staticmethod
    def gen_date(min_year=2018, max_year=datetime.now().year):
        """Return a random date between Jan 1 of min_year and ~max_year.

        NOTE: the max_year default is evaluated once, at class-definition time.
        """
        start = date(min_year, 1, 1)
        years = max_year - min_year + 1
        end = start + timedelta(days=365 * years)
        return start + (end - start) * random()

    @staticmethod
    def gen_digits(max_length):
        """Return a random digit string of exactly `max_length` chars.

        Fixed: the original lacked @staticmethod, so calling it on an
        instance passed the instance as `max_length`.
        """
        return str(''.join(choice(string.digits) for i in range(max_length)))
class UserClass:
    '''
    Helpers to create User rows (translated: "methods to create User").
    '''
    @staticmethod
    def create_user1(max_itens=None):
        """Create the fixed 'admin' superuser with password 'd'.

        NOTE(review): max_itens is accepted but never used.
        """
        user = User.objects.create_user(
            username='admin',
            email='admin@email.com',
            first_name='Admin',
            last_name='Admin',
            is_staff=True,
            is_superuser=True,
        )
        # Hash and store the password, then persist the change.
        user.set_password('d')
        user.save()
# Wipe all users, recreate the admin user, and report how long it took.
tic = timeit.default_timer()
User.objects.all().delete()
UserClass.create_user1()
toc = timeit.default_timer()
print('time', toc - tic)
|
11,228 | 40d34e45034c15edf20e142cd1a030f7a24bc30b | import pybedtools
from pybedtools import BedTool
import numpy as np
from tqdm import tqdm
import pandas as pd
import multiprocessing
from multiprocessing import Pool
import functools
import argparse
# Process the given region, retrive all valid GEMs
def ProcessRegion(ChIA_Drop,Loop,savebedpath):
    """Extract valid GEM complexes for one region and write 7 .bed files.

    Args:
        ChIA_Drop: DataFrame of fragments (chr/start/end/Complex_name...).
        Loop: 3 rows describing one region: left anchor, right anchor, middle.
        savebedpath: output directory for RegionN_<category>.bed files.

    A complex is classified by which anchors its first/last fragments
    intersect: Left/Right/Both, and relative to the middle anchor:
    M2L/MaL (mid-to-left / mid-and-left) and M2R/MaR.
    """
    # Loop: three lines, they are
    # chr, start, end, startX
    # chr, start, end, endX
    # chr, start, end, middleX
    # X: loop id, start from 0
    List1 = [] #Left, plot1
    List2 = [] #Right, plot2
    List3 = [] #Both, plot3
    List4 = [] #Mid to Left, plot4
    List5 = [] #Mid and Left, plot5
    List6 = [] #Mid to Right, plot6
    List7 = [] #Mid and Right, plot7
    # Extract valid fragments
    Lmostsite = Loop.iloc[0]['S']
    Rmostsite = Loop.iloc[1]['E']
    LoopID = Loop.iloc[0]['Name'][5:]
    # ipdb.set_trace()
    Meta_ValidFrags = ChIA_Drop[(ChIA_Drop['start']>Lmostsite) & (ChIA_Drop['end']<Rmostsite) & (ChIA_Drop['chr']==Loop.iloc[0]['chr'])]
    # For plot 1-3 (Left, Right, Both)
    ValidFrags = Meta_ValidFrags
    # Extract complexes name
    ComplexName = ValidFrags['Complex_name'].drop_duplicates()
    for ii in range(len(ComplexName)):
        complexName = ComplexName.iloc[ii]
        Complex = ValidFrags[ValidFrags['Complex_name'] == complexName]
        Complex = Complex.sort_values(by = ['start'])
        if len(Complex)<2:
            continue
        if Checkintersect(Complex.iloc[0]['start'],Loop.iloc[0]['S'],Complex.iloc[0]['end'],Loop.iloc[0]['E']) and Checkintersect(Complex.iloc[-1]['start'],Loop.iloc[1]['S'],Complex.iloc[-1]['end'],Loop.iloc[1]['E']):
            Tmpstr = str(Complex.iloc[0]['chr'])+' '+str(Complex.iloc[0]['start'])+' '+str(Complex.iloc[0]['end'])+' '+complexName+' '+str(len(Complex))+' '
            for ff in range(1,len(Complex)-1):
                Tmpstr += str(Complex.iloc[ff]['start'])+','+str(Complex.iloc[ff]['end'])+','
            Tmpstr += str('-1,-1')+' '+str(Complex.iloc[len(Complex)-1]['start'])+' '+str(Complex.iloc[len(Complex)-1]['end'])+ '\n'
            List3.append(Tmpstr)
        elif Checkintersect(Complex.iloc[0]['start'],Loop.iloc[0]['S'],Complex.iloc[0]['end'],Loop.iloc[0]['E']):
            Tmpstr = str(Complex.iloc[0]['chr'])+' '+str(Complex.iloc[0]['start'])+' '+str(Complex.iloc[0]['end'])+' '+complexName+' '+str(len(Complex))+' '
            for ff in range(1,len(Complex)-1):
                Tmpstr += str(Complex.iloc[ff]['start'])+','+str(Complex.iloc[ff]['end'])+','
            Tmpstr += str('-1,-1')+' '+str(Complex.iloc[len(Complex)-1]['start'])+' '+str(Complex.iloc[len(Complex)-1]['end'])+ '\n'
            List1.append(Tmpstr)
        elif Checkintersect(Complex.iloc[-1]['start'],Loop.iloc[1]['S'],Complex.iloc[-1]['end'],Loop.iloc[1]['E']):
            Tmpstr = str(Complex.iloc[0]['chr'])+' '+str(Complex.iloc[0]['start'])+' '+str(Complex.iloc[0]['end'])+' '+complexName+' '+str(len(Complex))+' '
            for ff in range(1,len(Complex)-1):
                Tmpstr += str(Complex.iloc[ff]['start'])+','+str(Complex.iloc[ff]['end'])+','
            Tmpstr += str('-1,-1')+' '+str(Complex.iloc[len(Complex)-1]['start'])+' '+str(Complex.iloc[len(Complex)-1]['end'])+ '\n'
            List2.append(Tmpstr)
    # Plot 4-5, mid --- left: restrict fragments to [leftmost, middle anchor end)
    ValidFrags = Meta_ValidFrags[(Meta_ValidFrags['start']>Lmostsite) & (Meta_ValidFrags['end']<Loop.iloc[2]['E'])]
    ComplexName = ValidFrags['Complex_name'].drop_duplicates()
    for ii in range(len(ComplexName)):
        complexName = ComplexName.iloc[ii]
        Complex = ValidFrags[ValidFrags['Complex_name'] == complexName]
        Complex = Complex.sort_values(by = ['start'])
        if len(Complex)<2:
            continue
        if Checkintersect(Complex.iloc[0]['start'],Loop.iloc[0]['S'],Complex.iloc[0]['end'],Loop.iloc[0]['E']) and Checkintersect(Complex.iloc[-1]['start'],Loop.iloc[2]['S'],Complex.iloc[-1]['end'],Loop.iloc[2]['E']):
            Tmpstr = str(Complex.iloc[0]['chr'])+' '+str(Complex.iloc[0]['start'])+' '+str(Complex.iloc[0]['end'])+' '+complexName+' '+str(len(Complex))+' '
            for ff in range(1,len(Complex)-1):
                Tmpstr += str(Complex.iloc[ff]['start'])+','+str(Complex.iloc[ff]['end'])+','
            Tmpstr += str('-1,-1')+' '+str(Complex.iloc[len(Complex)-1]['start'])+' '+str(Complex.iloc[len(Complex)-1]['end'])+ '\n'
            List5.append(Tmpstr)
        elif Checkintersect(Complex.iloc[-1]['start'],Loop.iloc[2]['S'],Complex.iloc[-1]['end'],Loop.iloc[2]['E']):
            Tmpstr = str(Complex.iloc[0]['chr'])+' '+str(Complex.iloc[0]['start'])+' '+str(Complex.iloc[0]['end'])+' '+complexName+' '+str(len(Complex))+' '
            for ff in range(1,len(Complex)-1):
                Tmpstr += str(Complex.iloc[ff]['start'])+','+str(Complex.iloc[ff]['end'])+','
            Tmpstr += str('-1,-1')+' '+str(Complex.iloc[len(Complex)-1]['start'])+' '+str(Complex.iloc[len(Complex)-1]['end'])+ '\n'
            List4.append(Tmpstr)
    # Plot 6-7, mid---right: restrict fragments to (middle anchor start, rightmost]
    ValidFrags = Meta_ValidFrags[(Meta_ValidFrags['start']>Loop.iloc[2]['S']) & (Meta_ValidFrags['end']<Rmostsite)]
    ComplexName = ValidFrags['Complex_name'].drop_duplicates()
    for ii in range(len(ComplexName)):
        complexName = ComplexName.iloc[ii]
        Complex = ValidFrags[ValidFrags['Complex_name'] == complexName]
        Complex = Complex.sort_values(by = ['start'])
        if len(Complex)<2:
            continue
        if Checkintersect(Complex.iloc[0]['start'],Loop.iloc[2]['S'],Complex.iloc[0]['end'],Loop.iloc[2]['E']) and Checkintersect(Complex.iloc[-1]['start'],Loop.iloc[1]['S'],Complex.iloc[-1]['end'],Loop.iloc[1]['E']):
            Tmpstr = str(Complex.iloc[0]['chr'])+' '+str(Complex.iloc[0]['start'])+' '+str(Complex.iloc[0]['end'])+' '+complexName+' '+str(len(Complex))+' '
            for ff in range(1,len(Complex)-1):
                Tmpstr += str(Complex.iloc[ff]['start'])+','+str(Complex.iloc[ff]['end'])+','
            Tmpstr += str('-1,-1')+' '+str(Complex.iloc[len(Complex)-1]['start'])+' '+str(Complex.iloc[len(Complex)-1]['end'])+ '\n'
            List7.append(Tmpstr)
        elif Checkintersect(Complex.iloc[0]['start'],Loop.iloc[2]['S'],Complex.iloc[0]['end'],Loop.iloc[2]['E']):
            Tmpstr = str(Complex.iloc[0]['chr'])+' '+str(Complex.iloc[0]['start'])+' '+str(Complex.iloc[0]['end'])+' '+complexName+' '+str(len(Complex))+' '
            for ff in range(1,len(Complex)-1):
                Tmpstr += str(Complex.iloc[ff]['start'])+','+str(Complex.iloc[ff]['end'])+','
            Tmpstr += str('-1,-1')+' '+str(Complex.iloc[len(Complex)-1]['start'])+' '+str(Complex.iloc[len(Complex)-1]['end'])+ '\n'
            List6.append(Tmpstr)
    # ipdb.set_trace()
    # Write one .bed file per category for this region.
    Tempstr = ''.join(List1)
    FinalGEMs = BedTool(Tempstr,from_string=True).saveas(savebedpath+'Region'+str(LoopID)+'_'+'Left'+'.bed')
    Tempstr = ''.join(List2)
    FinalGEMs = BedTool(Tempstr,from_string=True).saveas(savebedpath+'Region'+str(LoopID)+'_'+'Right'+'.bed')
    Tempstr = ''.join(List3)
    FinalGEMs = BedTool(Tempstr,from_string=True).saveas(savebedpath+'Region'+str(LoopID)+'_'+'Both'+'.bed')
    Tempstr = ''.join(List4)
    FinalGEMs = BedTool(Tempstr,from_string=True).saveas(savebedpath+'Region'+str(LoopID)+'_'+'M2L'+'.bed')
    Tempstr = ''.join(List5)
    FinalGEMs = BedTool(Tempstr,from_string=True).saveas(savebedpath+'Region'+str(LoopID)+'_'+'MaL'+'.bed')
    Tempstr = ''.join(List6)
    FinalGEMs = BedTool(Tempstr,from_string=True).saveas(savebedpath+'Region'+str(LoopID)+'_'+'M2R'+'.bed')
    Tempstr = ''.join(List7)
    FinalGEMs = BedTool(Tempstr,from_string=True).saveas(savebedpath+'Region'+str(LoopID)+'_'+'MaR'+'.bed')
    return
# Check for the left/right/both/none condition
def Checkintersect(s1, s2, e1, e2):
    """Return True when intervals (s1, e1) and (s2, e2) strictly overlap."""
    overlap_start = max(s1, s2)
    overlap_end = min(e1, e2)
    return overlap_end - overlap_start > 0
def inInterval(FF,Temp,Type,Length,CHR):
    """Classify a complex record FF against the anchor pair in Temp.

    Args:
        FF: sequence -- FF[0] chromosome, FF[1:3] first-fragment start/end,
            FF[4] fragment count, FF[-2:] last-fragment start/end.
        Temp: 4 values: left anchor start/end, right anchor start/end.
        Type: 'Left' | 'Right' | 'Both' | anything else = 'None'.
        Length: unused here -- presumably kept for API symmetry; TODO confirm.
        CHR: chromosome the anchors live on.
    Returns:
        bool -- True when FF matches the requested anchor pattern; False for
        a chromosome mismatch.
    """
    # ipdb.set_trace()
    if CHR == FF[0]:
        # print(CHR,FF[0],'True')
        interval = [0,0,0,0]
        interval[0] = Temp[0]
        interval[1] = Temp[1]
        interval[2] = Temp[2]
        interval[3] = Temp[3]
        # NumFrag is read but never used below.
        NumFrag = FF[4]
        Start = list(map(int, FF[1:3]))
        End = list(map(int, FF[-2:]))
        if Type == 'Left':
            return (Checkintersect(interval[0],Start[0],interval[1],Start[1])) and not (Checkintersect(interval[2],End[0],interval[3],End[1]))
        elif Type == 'Right':
            return not (Checkintersect(interval[0],Start[0],interval[1],Start[1])) and (Checkintersect(interval[2],End[0],interval[3],End[1]))
        elif Type == 'Both':
            return (Checkintersect(interval[0],Start[0],interval[1],Start[1])) and (Checkintersect(interval[2],End[0],interval[3],End[1]))
        else:
            return not (Checkintersect(interval[0],Start[0],interval[1],Start[1])) and not (Checkintersect(interval[2],End[0],interval[3],End[1]))
    else:
        return False
def bedfn(i,Loops,ChIA_Drop,savebedpath):
    """Pool worker: process the region described by rows i..i+2 of Loops."""
    Loop = Loops.iloc[i:(i+3)]
    # ipdb.set_trace()
    ProcessRegion(ChIA_Drop,Loop,savebedpath)
    return None
def mainfunc(path1,path2,savebedpath,MultiThread = 20):
    """Load fragments + regions and extract GEMs for every region in parallel.

    Args:
        path1: fragment file (CTCF, RNAPII or Cohesin), tab-separated .PEanno.
        path2: regions file; each region is 3 consecutive rows.
        savebedpath: directory for the extracted per-region .bed files.
        MultiThread: worker-pool size.
    """
    header_list = ['chr', 'start','end','?','Complex_name','??']
    ChIA_Drop = pd.read_csv(path1,sep = '\t',names = header_list)
    header_list = ['chr', 'S','E','Name']
    Loops = pd.read_csv(path2,sep = '\t',names=header_list)
    # Bind everything except the region index `i`, which imap supplies.
    # (The original passed Loops both positionally and as a keyword, which
    # raised TypeError before any region was processed.)
    test_bedfn = functools.partial(bedfn, Loops=Loops, ChIA_Drop=ChIA_Drop, savebedpath=savebedpath)
    with Pool(MultiThread) as p:
        r = list(tqdm(p.imap(test_bedfn, range(0,len(Loops),3)), total = int(len(Loops)/3)))
    # The original also called ProcessRegion() here with an undefined `Loop`
    # (NameError); the pool has already processed every region, so removed.
    return
if __name__ == '__main__':
    # Command line entry point: forwards the four options to mainfunc().
    parser = argparse.ArgumentParser()
    parser.add_argument('--path1',type = str)
    parser.add_argument('--path2',type = str)
    parser.add_argument('--savebedpath',type = str)
    parser.add_argument('--MultiThread',type = int)
    args = parser.parse_args()
    path1 = args.path1
    path2 = args.path2
    savebedpath = args.savebedpath
    MultiThread = args.MultiThread
    mainfunc(path1,path2,savebedpath,MultiThread)
11,229 | 6fb35881f4509c11769dfcb746d2736cc0b317f7 | import random
import time
import threading
from DetectDeadlock import DetectDeadlock
# Number of philosophers (and forks) at the table.
pfNum = 5
phi_nums = ('Philosopher 1','Philosopher 2','Philosopher 3','Philosopher 4', 'Philosopher 5')
# Shared resource table read by DetectDeadlock: for each fork/food item the
# value is the 1-based id of the philosopher holding it (0 = free); for each
# philosopher the value is the item they are currently waiting for (None =
# not waiting).
Inventory = {
    'fork1': 0,
    'fork2': 0,
    'fork3': 0,
    'fork4': 0,
    'fork5': 0,
    'soup': 0,
    'bread': 0,
    'Philosopher 1': None,
    'Philosopher 2': None,
    'Philosopher 3': None,
    'Philosopher 4': None,
    'Philosopher 5': None,
}
class Phi(threading.Thread):
    """One philosopher thread: waits, gets hungry, then tries to acquire its
    two forks plus the shared bread and soup in a random order (which is what
    makes deadlock possible).  DetectDeadlock is polled after every wait."""
    # Class-wide run flag; flipped off by DiningPhilosophers() to stop all.
    running = True
    def __init__(self, num, leftfork, rightfork, idn, bread, soup): #initiate threads
        threading.Thread.__init__(self)
        self.num = num
        self.leftfork = leftfork
        self.rightfork = rightfork
        self.idn = idn
        self.bread = bread
        self.soup = soup
    def run(self):
        while(self.running):
            time.sleep(random.uniform(20,30)) #time before hungry
            print ('%s is HUNGRY ' % self.num)
            self.dine()
    def dine(self):
        """Try to acquire forks + food in random order, eat, then release."""
        global Inventory
        fork1, fork2, bread, soup = self.leftfork, self.rightfork, self.bread, self.soup
        # 1-based fork numbers wrapping around the table.
        fork1num = self.idn % pfNum
        fork2num = (self.idn + 1) % pfNum
        if (fork1num == 0): fork1num = pfNum
        if (fork2num == 0): fork2num = pfNum
        gotfork1 = False
        gotfork2 = False
        gotbread = False
        gotsoup = False
        acquisition = ['fork1', 'fork2', 'bread', 'soup']
        while self.running:
            # Random acquisition order -- the classic deadlock ingredient.
            random.shuffle(acquisition)
            print(str(self.num) + ' is getting items in this order ' + ', '.join(acquisition))
            for item in acquisition:
                # Record what we are about to wait on, for deadlock detection.
                if (item == 'fork1'):
                    Inventory[self.num] = 'fork' + str(fork1num)
                if (item == 'fork2'):
                    Inventory[self.num] = 'fork' + str(fork2num)
                if (item == 'bread' or item == 'soup'):
                    Inventory[self.num] = item
                time.sleep(2)
                deadlock = DetectDeadlock(Inventory, self.idn + 1)
                if (deadlock == True): return
                if (item == 'fork1'):
                    gotfork1 = fork1.acquire(True)
                    print(str(self.num) + ' holds LEFT fork ' + str(fork1num))
                    Inventory['fork' + str(fork1num)] = self.idn + 1
                    Inventory[self.num] = None
                if (item == 'fork2'):
                    gotfork2 = fork2.acquire(True)
                    print(str(self.num) + ' holds RIGHT fork ' + str(fork2num))
                    Inventory['fork' + str(fork2num)] = self.idn + 1
                    Inventory[self.num] = None
                if (item == 'bread'):
                    gotbread = bread.acquire(True)
                    print(str(self.num) + ' gets BREAD ')
                    Inventory['bread'] = self.idn + 1
                    time.sleep(random.uniform(5,10))
                    bread.release()
                    print(str(self.num) + ' releases plate of BREAD ')
                    Inventory['bread'] = 0
                    Inventory[self.num] = None
                if (item == 'soup'):
                    gotsoup = soup.acquire(True)
                    Inventory['soup'] = self.idn + 1
                    # THIS IS THE CODE THAT CAN CAUSE DEADLOCK
                    if (random.randint(1,3)==1): # 1/3 probability that Philosopher will not release soup
                        print(str(self.num) + ' keeping the bowl of soup *******************')
                        Inventory['soup'] = self.idn + 1
                        Inventory[self.num] = None
                    else:
                        print(str(self.num) + ' gets SOUP ')
                        Inventory['soup'] = self.idn + 1
                        time.sleep(random.uniform(5,10))
                        soup.release()
                        print(str(self.num) + ' releases bowl of SOUP ')
                        Inventory['soup'] = 0
                        Inventory[self.num] = None
                time.sleep(2)
                deadlock = DetectDeadlock(Inventory, self.idn + 1)
                if (deadlock == True): return
            #thread doesn't go past here unless acquired both forks and food!
            if (gotfork1 and gotfork2 and gotbread and gotsoup): break
            else:
                return
        self.dining()
        fork1.release()
        Inventory['fork' + str(fork1num)] = 0
        fork2.release()
        Inventory['fork' + str(fork2num)] = 0
        print ('%s released all forks, thinks Philosophy '% self.num)
    def dining(self):
        """Simulate eating for a random amount of time."""
        print ('%s starts eating - feeds body, mind and spirit '% self.num)
        time.sleep(random.uniform(10,20)) #time before finish eating, should less than time of thinking
        print ('%s FINISHES eating' % self.num)
def DiningPhilosophers():
    """Spawn pfNum philosopher threads sharing adjacent forks plus one bread
    and one soup lock, let them run ~200s, then signal them to stop."""
    forks = [threading.Lock() for jj in range(pfNum)]
    bread = threading.Lock()
    soup = threading.Lock()
    phis= [Phi(phi_nums[ii], forks[ii%pfNum], forks[(ii+1)%pfNum], ii, bread, soup) for ii in range(pfNum)] #choose fork next to them
    Phi.running = True
    for num in phis: num.start()
    time.sleep(200)
    Phi.running = False
DiningPhilosophers()
#Reference: https://github.com/KLdivergence/-dining-philosophers-problem |
11,230 | bacd3cd8937871137d5bfca8607114a2d3e4e23b | # -*- coding: utf-8 -*-
import time
from serial import rs485
import serial
from serial.tools.list_ports import comports
from hextools import *
import binascii
class SerialPortProblem(Exception):
    """Raised when the expected serial port is absent or a payload is invalid."""
    pass
class OledButton(object):
    '''
    oled cy-7031
    The OLED BUTTON SWITCH and DISPLAY MODULE programmable into different images.
    Talks to the device over RS232 (default) or RS485.
    '''
    def __init__(self):
        self.mode = "RS232"  # "RS232", "RS485"
        self.device_id = 1
        # Three-byte header (ESC, id, ~id) prepended to every command.
        self.package_base = self.get_package_base()
        self.serial = None  # set by connect()

    def connect(self, port='COM5'):
        """Open the serial link on `port`; raise SerialPortProblem if absent.

        Fixed: the original loop reused the name `port` for each discovered
        port, clobbering the requested one and breaking the availability check.
        """
        ports = []
        for n, (found_port, desc, hwid) in enumerate(sorted(comports()), 1):
            ports.append(found_port)
        if (len(ports) == 0) or not (port in ports):
            raise SerialPortProblem("Connect button!")
        if self.mode == "RS232":
            self.serial = serial.Serial(port, baudrate=115200, timeout=1, write_timeout=1, inter_byte_timeout=0.1)
        elif self.mode == "RS485":
            self.serial = rs485.RS485(port, 115200, timeout=1)
            self.serial.rs485_mode = rs485.RS485Settings()
        # Re-open cleanly in case the constructor left the port open.
        if self.serial.is_open:
            self.serial.close()
        self.serial.open()

    def disconnect(self):
        self.serial.close()

    def get_package_base(self):
        """Build the 3-byte packet header: 0x1B, device id, 0xFF - device id."""
        package = bytearray()
        package.append(0x1B)
        package.append(self.device_id)
        package.append(0xFF - self.device_id)
        return package

    def send(self, payload):
        """Frame `payload` (str or bytes) with the header and transmit it.

        Raises SerialPortProblem for unsupported payload types.
        """
        command = bytearray(self.package_base)
        if type(payload) is str:
            command += bytes(payload, 'utf-8')
        elif type(payload) is bytes or type(payload) is bytearray:
            command.extend(payload)
        else:
            raise SerialPortProblem("Not supported payload format: %s" % type(payload))
        # handle with transmit buffer
        def write_and_echo(b):
            self.serial.write(b)
        # The first 7 bytes are sent separately, then the rest in 1KB chunks.
        init_l = 7
        if init_l > 0:
            write_and_echo(command[:init_l])
            self.serial.flush()
            time.sleep(0.1)
        packet_l = 1024
        if len(command) > packet_l:
            for i in range(init_l, len(command), packet_l):
                end_i = min(i + packet_l, len(command))
                chunk = command[i:end_i]
                write_and_echo(chunk)
                self.serial.flush()
                time.sleep(0.1)
        else:
            # NOTE(review): this re-sends the whole command, so the first 7
            # bytes go out twice for short commands -- verify against the
            # device protocol before changing; behaviour kept as-is.
            return self.serial.write(command)

    def display_internal_image(self, id):
        self.send("D" + str(id).zfill(3))  # D000, D001, ... D024

    def save_image(self, id):
        self.send("S" + str(id).zfill(3))  # S000, S001, ... S024

    def on(self):
        self.send("d1")

    def off(self):
        self.send("d0")

    def set_brightness(self, value):
        """Set display brightness.

        Fixed: the original signature was `set_brightness(value)` (no self)
        and it stringified the builtin `id` instead of the argument.
        """
        self.send("B" + str(value).zfill(2))

    def transfer_image(self, filename):
        """Upload an image: 'G' + 4-byte length + 'S' + RGB565 pixel data."""
        payload = bytearray()
        payload.append(chr2hex("G"))
        data = get_rgb565_bytes_from_image(filename)
        length = len(data)
        length_b = int2bytes(length, 4)
        payload.extend(length_b)
        payload.append(chr2hex("S"))
        payload.extend(data)
        self.send(payload)

    def get_info(self):
        """Query and print product id and firmware version."""
        self.send("I")
        direction = self.serial.read(3).decode("cp437")
        print(direction)
        product_info = self.serial.read(12).decode("cp437")
        print("Product ID", product_info[:7])
        print("firmware version", product_info[7:])

    def is_pressed(self):
        """Return True when the button reports a pressed state."""
        self.send("T")
        state = self.serial.read(3).decode("cp437")
        return state[-1] == '1'

    def response(self):
        """Read a 5-byte reply and return True when it ends with 'OK'."""
        result_hex = self.serial.read(5)
        result = hex2str(result_hex)
        print("response", result)
        return result[-2:] == 'OK'
|
11,231 | f88b87dd25a06f4f77d3bf8b1407377f45b5acb2 | #! /usr/bin/python
# -*- coding: utf8 -*-
import sys
# Count and print the number of lines in the file named on the command line.
path = sys.argv[1]
line_count = 0
# `with` guarantees the file is closed even if iteration fails
# (the original left it open on error).
with open(path, "r") as open_file:
    for _ in open_file:
        line_count = line_count + 1
print ("Number of lines:", line_count)
11,232 | bae9b9680aa1457991ee01ff2637accd0e3fa242 | x=raw_input("enter the string")
# Python 2 script. `input()` eval'd whatever the user typed (arbitrary code
# execution); read text and convert explicitly instead.
n = int(raw_input("enter the number of times"))
for y in range(n):
	print(x)
|
11,233 | dfa057df15570c9470a88696cb36de4c07cfe226 | #编程实现字符串反转
# Input (example): Hello,World!
# Output:          !dlroW,olleH
# Push every character of the string onto a stack, then pop until the stack
# is empty, printing the characters in reverse order.
stack = []
for ch in "hello,world!":
    stack.append(ch)
result = []
# Fixed: the original used `while len(stack) is not 0`, which compares int
# identity rather than value; truthiness of the list is the idiomatic test.
while stack:
    result.append(stack.pop())
print("".join(result))
|
11,234 | 55d73504e5aeb8b5380e75bd0c84828c2afd650d | #_*_coding=UTF-8_*_
# copyright 2019 BILLAL FAUZAN
# Karya Anak Bangsa
# MD5 DENCRYPT Offline 90%
"""
NOTE:
Please Abang Eneng Jangan Ubah Source Code Ny
Saya Susah Payah Membuat Program Ini
Saya Sengaja Open Source Supaya Anak Indonesia Bisa Membuat Program Sendiri
Jadi
Jangan Di Recode Bangsat!!
"""
__banner__ = """\033[31;1m
__ __ __ ____
/ / / /___ ______/ /_ / __ \___ _____
/ /_/ / __ `/ ___/ __ \/ / / / _ \/ ___/\033[37;1m
/ __ / /_/ (__ ) / / / /_/ / __/ /__
/_/ /_/\__,_/____/_/ /_/_____/\___/\___/\033[33;1m
copyright 2019 Billal Fauzan
\033[32;1m
INFO:\033[34;1m
Author \033[31;1m:\033[32;1m Billal Fauzan\033[34;1m
Version\033[31;1m:\033[32;1m 1.0\033[34;1m
Name \033[31;1m :\033[32;1m Hash Dencrypter Offline 50%\033[34;1m
Link To Download \033[31;1m:\033[32;1m bit.ly/passwordhashdec\033[0m
"""
try:
import os,sys,time,hashlib
from prompt_toolkit import prompt
except Exception as E:
print ("[Err]: "+str(E))
sys.exit()
def dencrypt(hash,password):
    """Test one wordlist candidate against `hash` (SHA-1, despite the name
    and banner mentioning MD5) and exit the process on a match.

    Python 2 code: sha1.update(str(password)) needs bytes under Python 3.
    NOTE(review): sys.exit() raises SystemExit, which the bare `except:`
    below catches -- a successful match also prints "unknown error".
    """
    try:
        sys.stdout.write("\n\033[34;1m[\033[33;1m!\033[34;1m] \033[33;1mTrying: \033[31;1m"+str(password))
        sys.stdout.flush()
        # time.sleep(0.1)
        sha1 = hashlib.sha1()
        sha1.update(str(password))
        md = sha1.hexdigest()
        if hash == md:
            print ("\033[34;1m\n[\033[32;1m+\033[34;1m] \033[37;1mFound: \033[32;1m"+str(password))
            print ("\033[0m")
            sys.exit()
    except:
        print ("\033[31;1m[-] unknown error\033[0m")
        sys.exit()
def main():
    """Prompt for a hash and a wordlist, then try every candidate password."""
    os.system("clear")
    print (__banner__)
    hash = raw_input("[?] Hash: ")
    wordlist =""
    # Loop until the user picks the default wordlist or names their own.
    while True:
        p = raw_input("[?] Wordlist Default? (Y/n): ")
        if p in ["Y","y"]:
            wordlist = "password.txt"
            break
        elif p in ["N","n"]:
            wordlist = raw_input("[?] Wordlist: ")
            break
    try:
        pw = open(wordlist,"r").read()
        for password in pw.splitlines():
            dencrypt(hash,password)
    except IOError:
        print ("[Err]: File %s not found"%(wordlist))
        sys.exit()
def login():
    """Gate main() behind an MD5-hashed unlock password.

    NOTE(review): the else branch also calls main(), so a wrong password
    still unlocks the tool -- the gate is ineffective as written.
    """
    os.system("clear")
    print (__banner__)
    # MD5 digest of the expected unlock password.
    pwd = "6d845a610cf2ff6872a04239349251e4"
    print ("[*] Please contact author to find out the password")
    pw = prompt("[?] Password to unlock: ",is_password=True)
    m = hashlib.md5()
    m.update(pw)
    md = m.hexdigest()
    if pwd == md:
        main()
    else:
        print ("[*] Please contact author to find out the password")
        main()
|
11,235 | 14d974e9e4e582334604abc88558b127653cf12f | # coding=gb18030
# Demonstrate list.reverse(): it flips element order in place.
cars = ['bmw', 'audi', 'toyota', 'subaru']
print(cars)
print("注意,reverse()不是指按与字母顺序相反的顺序排列列表元素,")
print("而只是反转列表元素的排列顺序:")
cars.reverse()
print(cars)
print("方法reverse()永久性地修改列表元素的排列顺序,")
print("但可随时恢复到原来的排列顺序, ")
print("为此只需对列表再次调用reverse()即可。")
|
11,236 | 445db428f0a9a8c5f6189a589681fc53207e2425 | from sklearn.model_selection import GridSearchCV
from lightgbm import LGBMClassifier
import joblib
import pickle
# Load the previously pickled training split.
with open('yy.pickle', 'rb') as f:
    x_train, y_train = pickle.load(f)

model_lgb = LGBMClassifier(num_class=10,
                           objective='multiclass',
                           metric='multi_logloss')

# Hyper-parameter grid.  Fixed: LightGBM's leaf-count parameter is
# `num_leaves`; the original key 'max_leaf' is not a recognised estimator
# parameter and makes GridSearchCV.set_params raise ValueError.
parameters = {
    'num_leaves': [100, 200, 300, 400],
    'max_depth': [2, 5, 10, 15, 20, 35],
    'learning_rate': [0.01, 0.02, 0.05, 0.1, 0.15],
    'feature_fraction': [0.6, 0.7, 0.8, 0.95],
    'bagging_fraction': [0.6, 0.7, 0.8, 0.95],
    'bagging_freq': [2, 4, 6, 8],
    'lambda_l1': [0.1, 0.4, 0.6],
    'lambda_l2': [10, 15, 35],
    'cat_smooth': [1, 10, 15, 20, 35],
}
gs = GridSearchCV(model_lgb,
                  param_grid=parameters,
                  n_jobs=20)
gs.fit(x_train, y_train)
joblib.dump(gs, 'gs_model_yy')
print('GS Done')
|
11,237 | 953a0c93772ef561e21c1fc78c2e117ae62f32d8 | for n in range(6):
out = '\t\t'
for off in range(2):
out += str(4*n)+','
for x in range(2):
out += str(x+1+off+4*n)+','
out += ' '
print(out)
|
11,238 | 2c667978f29fedb95d21bafbdb79f9af3a7bb6f8 | from random import randint
import pyglet
from source.settings import SETTINGS, player, enemies, Tile, TEXTURES, scenes
from source.dungeons import dungeon
import source.resources as res
# Utility Functions
def map_x(x):
    # Tile column -> pixel x (16 px tiles with a 32 px left margin).
    return 16 * x + 32
def map_y(y):
    # Tile row -> pixel y, measured down from the top of the window.
    return SETTINGS['Height'] - 16 * (y + 2)
def convert_index(c, f):
    # Flatten (column c, row f) into an index into the 1-D `zone` list.
    return f * len(dungeon[0]) + c
def check_exist(x, y):
    """Return True when (x, y) is inside the dungeon and holds a tile.

    Fixed: the original indexed `zone` before checking bounds, which could
    raise IndexError for y past the last row and silently read the wrong
    tile via negative indexing.
    """
    if x < 0 or y < 0:
        return False
    if x >= len(dungeon[0]) or y >= len(dungeon):
        return False
    # Empty map cells are stored as None in `zone`.
    return bool(zone[convert_index(x, y)])
# The four cardinal single-tile moves.
enemy_movs = [(0,1), (0,-1), (1,0), (-1,0)]
def enemyMove(index):
    """Pick a random move for enemy `index` and return its new (x, y).

    Walking into the player attacks instead of moving; walls ('|', '-') and
    doors ('+') block movement, as does leaving the explored map.
    """
    mov = randint(0, 3)
    newX = enemies[index].x + enemy_movs[mov][0]
    newY = enemies[index].y + enemy_movs[mov][1]
    newIndex = convert_index(newX, newY)
    if check_exist(newX, newY) and zone[newIndex].char != '|' and zone[newIndex].char != '-' and zone[newIndex].char != '+':
        if newX == player.x and newY == player.y:
            # Attack: deal 1..atk damage and stay in place.
            dmg = randint(1, enemies[index].atk)
            player.hp -= dmg
            print("You received", dmg, "points of damage")
            return enemies[index].x, enemies[index].y
        return newX, newY
    else:
        return enemies[index].x, enemies[index].y
def makeDamage(index):
    """Deal 1..player.atk damage to enemy `index`; hide it when it dies."""
    dmg = randint(1, player.atk)
    enemies[index].hp -= dmg
    print("You made", dmg, "points of damage to", enemies[index].char)
    if enemies[index].hp <= 0:
        enemies[index].alive = False
        enemies[index].set_visible(False)
        print("You killed", enemies[index].char)
def game_over():
    """End the game by stopping the pyglet event loop."""
    pyglet.app.exit()
def visit(x, y):
    """Mark the tile at (x, y) as explored and reveal its sprite."""
    if(check_exist(x, y)):
        zone[convert_index(x, y)].visited = True
        zone[convert_index(x, y)].set_visible(True)
window = pyglet.window.Window(SETTINGS['Width'], SETTINGS['Height'], visible = False)
window.set_location(50, 50)
#-------------------------------------------------------------------------------------------------
# Main Menu
#-------------------------------------------------------------------------------------------------
menu_items = {
'Title' : pyglet.text.Label(SETTINGS['Title'],
font_name = SETTINGS['Font'],
font_size = 150,
x = 450, y = 550,
anchor_x='center', anchor_y='center',
color = (255, 255, 255, 255),
batch = scenes["menu_batch"]),
'New Game' : pyglet.text.Label("New Game",
font_name = SETTINGS['Font'],
font_size = 40,
x = 235, y = 250,
anchor_x='center', anchor_y='center',
color = (255, 255, 255, 100),
batch = scenes["menu_batch"]),
'Load Game' : pyglet.text.Label("Load Game",
font_name = SETTINGS['Font'],
font_size = 40,
x = 250, y = 150,
anchor_x='center', anchor_y='center',
color = (255, 255, 255, 100),
batch = scenes["menu_batch"]),
'active_index' : 'New Game'
}
def menu_on_draw():
    """Redraw the main menu, highlighting the currently selected item."""
    window.clear()
    # Full alpha for the active entry, dimmed for the other.
    if menu_items['active_index'] == 'New Game':
        menu_items['New Game'].color = (255, 255, 255, 255)
        menu_items['Load Game'].color = (255, 255, 255, 100)
    else:
        menu_items['Load Game'].color = (255, 255, 255, 255)
        menu_items['New Game'].color = (255, 255, 255, 100)
    scenes['menu_batch'].draw()
def menu_on_key_press(symbol, modifiers):
    """Move the menu selection with the Up/Down arrow keys."""
    if symbol == pyglet.window.key.DOWN and menu_items['active_index'] == 'New Game':
        menu_items['active_index'] = 'Load Game'
    elif symbol == pyglet.window.key.UP and menu_items['active_index'] == 'Load Game':
        menu_items['active_index'] = 'New Game'
def menu_on_key_release(symbol, modifiers):
    """Activate the selected menu entry on Enter (Load Game is unimplemented)."""
    if symbol == pyglet.window.key.ENTER:
        if menu_items['active_index'] == 'New Game':
            #input_name()
            new_game()
        else:
            print("under maintenance")
def begin_main_menu():
    """Show the window and install the main-menu event handlers."""
    window.set_visible()
    window.push_handlers(on_key_press = menu_on_key_press,
                         on_key_release = menu_on_key_release,
                         on_draw = menu_on_draw)
#-------------------------------------------------------------------------------------------------
# Game
#-------------------------------------------------------------------------------------------------
player.sprite = pyglet.sprite.Sprite(TEXTURES['@'], map_x(2), map_y(2))
ui_items = {
'Name Label' : pyglet.text.Label(player.name,
font_name = SETTINGS['Font'],
font_size = 30,
x = 16, y = map_y(len(dungeon)) - 40,
anchor_x='left', anchor_y='baseline',
color = (255, 255, 255, 255),
batch = scenes["game_batch"]),
'HP' : pyglet.text.Label("HP:",
font_name = SETTINGS['Font'],
font_size = 30,
x = 16, y = map_y(len(dungeon)) - 80,
anchor_x='left', anchor_y='baseline',
color = (255, 255, 255, 255),
batch = scenes["game_batch"]),
'HP Counter' : pyglet.text.Label(str(player.hp) + " % ",
font_name = SETTINGS['Font'],
font_size = 30,
x = 250, y = map_y(len(dungeon)) - 80,
anchor_x='right', anchor_y='baseline',
color = (255, 255, 255, 255),
batch = scenes["game_batch"])
}
#zone Wrapper
dr = [map_x(len(dungeon[0])), map_y(len(dungeon))]
dl = [16, map_y(len(dungeon))]
ur = [map_x(len(dungeon[0])), map_y(0) + 16]
ul = [16, map_y(0) + 16]
vertex_list = pyglet.graphics.vertex_list_indexed(8,
[0, 3, 4, 3, 4, 7, 3, 7, 2, 2, 7, 6, 2, 6, 5, 5, 2, 1, 5, 1, 0, 0, 4, 5],
('v2i', (dl[0], dl[1], #0
dr[0], dr[1], #1
ur[0], ur[1], #2
ul[0], ul[1], #3
dl[0] + 4, dl[1] + 4, #4
dr[0] - 4, dr[1] + 4, #5
ur[0] - 4, ur[1] - 4, #6
ul[0] + 4, ul[1] - 4, #7
)))
zone = []
def generateMap():
    """Build `zone` from the dungeon layout and hide every enemy sprite.

    Blank map cells append None to `zone` so indices stay aligned with the
    dungeon grid.
    """
    for i in range(len(dungeon)):
        for j in range(len(dungeon[i])):
            tile = None
            char = dungeon[i][j]
            if char != " ":
                tile = Tile(char, map_x(j), map_y(i), scenes["game_batch"])
            zone.append(tile)
    for i in enemies.keys():
        #enemies[i].sprite = pyglet.sprite.Sprite(TEXTURES[i], map_x(enemies[i].x), map_y(enemies[i].y), batch = scenes["enemies"])
        enemies[i].set_visible(False)
generateMap()
def update():
    """Advance one game tick: reveal tiles around the player, refresh the HP
    UI (game over at 0), and move every living enemy."""
    #player.sprite.x = map_x(player.x)
    #player.sprite.y = map_y(player.y)
    player.update()
    # Reveal the 3x3 neighbourhood around the player.
    tx = player.x
    ty = player.y
    visit(tx, ty)
    visit(tx, ty - 1)
    visit(tx, ty + 1)
    visit(tx - 1, ty)
    visit(tx - 1, ty - 1)
    visit(tx - 1, ty + 1)
    visit(tx + 1, ty)
    visit(tx + 1, ty - 1)
    visit(tx + 1, ty + 1)
    if player.hp < 0:
        player.hp = 0
        game_over()
    # Low HP turns the counter red.
    if(player.hp < 25):
        ui_items['HP Counter'].color = (255, 0, 0, 255)
    ui_items['HP Counter'].text = str(player.hp) + " % "
    for i in enemies.keys():
        if enemies[i].alive:
            enemies[i].x, enemies[i].y = enemyMove(i)
            enemies[i].update()
            #enemies[i].sprite.x = map_x(enemies[i].x)
            #enemies[i].sprite.y = map_y(enemies[i].y)
            # Enemies are only drawn on tiles the player has explored.
            if zone[convert_index(enemies[i].x, enemies[i].y)].visited:
                enemies[i].set_visible(True)
            else:
                enemies[i].set_visible(False)
def game_on_draw():
    """Render the frame: map batch, player, enemies, HP bar, map border."""
    window.clear()
    scenes['game_batch'].draw()
    player.sprite.draw()
    scenes['enemies'].draw()
    #HP Bar
    # Bar width shrinks by 5 px per missing HP point, clamped at 0.
    pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,
                                 [0, 1, 2, 0, 2, 3],
                                 ('v2i', (250, map_y(len(dungeon)) - 80, #DOWN-LEFT
                                          max(0, 250 + 500 - 5 * (100 - player.hp)), map_y(len(dungeon)) - 80, #DOWN rIGHT
                                          max(0, 250 + 500 - 5 * (100 - player.hp)), map_y(len(dungeon)) - 50, #UP RIGHT
                                          250, map_y(len(dungeon)) - 50))) #UP LEFT
    vertex_list.draw(pyglet.gl.GL_TRIANGLES)
def game_on_key_press(symbol, modifiers):
    """No-op: movement is handled in game_on_key_release instead."""
    pass
def game_on_key_release(symbol, modifiers):
    """Resolve one movement key: open doors, walk, or attack an enemy
    standing on the target tile, then run a game tick.

    FIX: the unused ``x_mov`` local was removed, the arrow-key if/elif
    chain became a delta table, and the duplicated wall test collapsed
    into a single ``not in`` membership check.
    """
    _DELTAS = {
        pyglet.window.key.UP: (0, -1),
        pyglet.window.key.RIGHT: (1, 0),
        pyglet.window.key.DOWN: (0, 1),
        pyglet.window.key.LEFT: (-1, 0),
    }
    dx, dy = _DELTAS.get(symbol, (0, 0))
    newX = player.x + dx
    newY = player.y + dy
    newIndex = convert_index(newX, newY)
    # '|' and '-' are walls; '+' is a closed door, '*' its opened texture.
    if check_exist(newX, newY) and zone[newIndex].char not in ('|', '-'):
        if zone[newIndex].char == '+':
            if not zone[newIndex].opened:
                zone[newIndex].opened = True
                zone[newIndex].set_texture(TEXTURES['*'])
            else:
                player.x = newX
                player.y = newY
        else:
            for key in enemies.keys():
                foe = enemies[key]
                if newX == foe.x and newY == foe.y:
                    makeDamage(key)
                    if foe.alive:
                        # Enemy survived: the move is cancelled.
                        newX = player.x
                        newY = player.y
            player.x = newX
            player.y = newY
    update()
def input_name():
    """Swap the window's handler stack to the in-game handlers.

    NOTE(review): despite the name, no name is collected — the body is
    identical to new_game() in the original file; confirm whether this is
    a stub.
    """
    window.pop_handlers()
    window.push_handlers(on_key_press = game_on_key_press,
                         on_key_release = game_on_key_release,
                         on_draw = game_on_draw)
def new_game():
    """Replace the current handler stack with the in-game handlers."""
    window.pop_handlers()
    window.push_handlers(on_key_press = game_on_key_press,
                         on_key_release = game_on_key_release,
                         on_draw = game_on_draw)

# Show the menu first, then enter pyglet's event loop (blocks until exit).
begin_main_menu()
pyglet.app.run()
|
11,239 | 3d995aeb24138460994fa4aebccf25e786880b7b | from re import S
from django import forms
from django.forms import widgets
from .models import Pizza, Size
# class PizzaForm(forms.Form):
# topping1 = forms.CharField(label='Topping 1', max_length=100)
# topping2 = forms.CharField(label='Topping 1', max_length=100)
# size = forms.ChoiceField(label='Size', choices=[('Small', 'Small'), ('Medium', 'Medium'), ('Large', 'Large')])
class PizzaForm(forms.ModelForm):
    """Pizza order form: two free-text toppings plus a radio-button size."""
    # Radio buttons over every Size row, no blank choice. A Manager is
    # passed where a queryset is expected; ModelChoiceField calls .all()
    # on it — TODO confirm against the Django version in use.
    size = forms.ModelChoiceField(queryset=Size.objects, empty_label=None, widget=forms.RadioSelect)

    class Meta:
        model = Pizza
        fields = ['topping1', 'topping2', 'size']
        labels = {
            'topping1': 'Topping 1',
            'topping2': 'Topping 2',
        }
11,240 | 1a283ab2450840ad01b8187ccfc37f10430c2f36 | import sys
"""
Combines the submission of various models into one submission
Enter the files as command line arguments in order of decreasing weightage
Assumes all files contain same number of rows
"""
def get_majority(lst):
    """Return the last element of *lst* whose frequency is at least
    len(lst)/3, falling back to lst[0] when no element qualifies.

    FIX: the old counting initialised a first occurrence to 0 and only
    incremented on repeats, so every count was one too low and singletons
    could never reach the threshold. Counts now reflect true frequencies.
    NOTE: ``/`` floors under Python 2 (this script's dialect, see the
    ``print`` statements below) but is float division under Python 3; the
    integer counts compare correctly against either.
    """
    counts = {}
    for elem in lst:
        counts[elem] = counts.get(elem, 0) + 1
    candidate = lst[0]
    for elem in lst:
        if counts[elem] >= len(lst) / 3:
            candidate = elem
    return candidate
def ensemble(result, fo):
    """Write one ensembled submission row built from the parallel lines
    in *result* (one line per model, format "display_id,ad1 ad2 ...").

    The majority first-ranked ad across all models leads the output row,
    followed by the remaining ads in the order the highest-weighted model
    (result[0]) listed them.
    """
    first_choices = []
    display_id = None
    for line in result:
        fields = line.split(',')
        display_id = fields[0]
        first_choices.append(fields[1].split()[0])
    winner = get_majority(first_choices)
    fo.write(display_id + ',' + winner + ' ')
    for ad in result[0].split(',')[1].split():
        if ad != winner:
            fo.write(ad + ' ')
    fo.write('\n')
# -- Python 2 driver (print statements) --
# Reads the submission files named on the command line in decreasing weight
# order and merges them row by row into one ensembled CSV.
size = len(sys.argv)
print size
print sys.argv
#size -= 1
print sys.argv
fo = open('model-ensemble-submission-19jan.csv', 'w')
fo.write('display_id,ad_id\n')
f = []
i = 1
while (i < size):
    print 'Filename', sys.argv[i]
    f.append(open(sys.argv[i], 'r'))
    i += 1
result = []
#Read the header
for elem in f:
    elem.readline()
# Prime one data line from each file; all files are assumed row-aligned.
for elem in f:
    result.append(elem.readline())
flag = True
while (len(result) != 0):
    ensemble(result, fo)
    result[:] = []
    # Advance every file in lockstep; stop at the first exhausted file.
    for elem in f:
        line = elem.readline()
        if (line):
            result.append(line)
        else:
            flag = False
            break
    if (flag == False):
        break
for elem in f:
    elem.close()
fo.close()
|
11,241 | ddf54566c58ddb4bb59019cd32c7141aa2cab50c | """nibble_bee URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.views.decorators.cache import never_cache
from ckeditor_uploader import views as ck_views
from . import views
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.home, name="home"),
    url(r'^users/', include('users.urls')),
    # url(r'^ckeditor/', include('ckeditor_uploader.urls')),
    url(r'^articles/', include('articles.urls')),
    # CKEditor upload/browse wired directly instead of via its urlconf.
    # NOTE(review): unlike the commented originals below, these are no
    # longer wrapped in staff_member_required — confirm this is intended.
    url(r'^ckeditor/upload/', ck_views.upload, name='ckeditor_upload'),
    url(r'^ckeditor/browse/', never_cache(ck_views.browse), name='ckeditor_browse'),
]

# original urls
# url(r'^upload/', staff_member_required(views.upload), name='ckeditor_upload'),
# url(r'^browse/', never_cache(staff_member_required(views.browse)), name='ckeditor_browse'),

# Serve user-uploaded media (development convenience).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
11,242 | 1d4ba6f99db49af65391efc177494eebc9ef1b8d | cash = int(input("Amount of money you have?: "))
apple_price = int(input("How much is an apple?: "))
capacity_to_buy = cash//apple_price
money_change = cash%apple_price
print(f"You can buy {capacity_to_buy} apples and your change is {money_change} pesos.") |
11,243 | a994e4662a6592097bf8da52454e8f53c7de7251 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'jump_buy_1.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui, QtWidgets
import pyodbc
class Ui_Dialog_jump_buy(object):
    """Ticket-purchase dialog generated from jump_buy_1.ui, plus its logic.

    ``state`` selects the journey leg being booked (0 = start-to-arrival,
    1 = start-to-stopover, 2 = stopover-to-arrival, per the state_text
    strings in insert_tacket), ``num`` is the itinerary number and
    ``username`` the logged-in account.
    NOTE(review): these are class attributes shared by all instances and
    appear to be overwritten by the caller before the dialog is shown —
    confirm.
    """
    state = -1
    num = -1
    username = 'aa'

    def setupUi(self, Dialog_jump_buy):
        """Build the widget tree (pyuic5-generated; prefer editing the .ui)."""
        Dialog_jump_buy.setObjectName("Dialog_jump_buy")
        Dialog_jump_buy.resize(513, 430)
        Dialog_jump_buy.setFixedSize(513,430)
        self.pushButton = QtWidgets.QPushButton(Dialog_jump_buy)
        self.pushButton.setGeometry(QtCore.QRect(240, 334, 93, 28))
        self.pushButton.setMaximumSize(QtCore.QSize(93, 16777215))
        self.pushButton.setObjectName("pushButton")
        self.name = QtWidgets.QLineEdit(Dialog_jump_buy)
        self.name.setGeometry(QtCore.QRect(240, 178, 131, 21))
        self.name.setMinimumSize(QtCore.QSize(131, 0))
        self.name.setMaximumSize(QtCore.QSize(131, 16777215))
        self.name.setMaxLength(100)
        self.name.setFrame(True)
        self.name.setObjectName("name")
        self.label = QtWidgets.QLabel(Dialog_jump_buy)
        self.label.setGeometry(QtCore.QRect(69, 74, 78, 21))
        self.label.setMaximumSize(QtCore.QSize(125, 16777215))
        self.label.setObjectName("label")
        self.sex = QtWidgets.QComboBox(Dialog_jump_buy)
        self.sex.setGeometry(QtCore.QRect(240, 126, 78, 21))
        self.sex.setMaximumSize(QtCore.QSize(87, 16777215))
        self.sex.setObjectName("sex")
        self.sex.addItem("")
        self.sex.addItem("")
        self.remain_num = QtWidgets.QLabel(Dialog_jump_buy)
        self.remain_num.setGeometry(QtCore.QRect(410, 74, 81, 21))
        self.remain_num.setMinimumSize(QtCore.QSize(81, 0))
        self.remain_num.setMaximumSize(QtCore.QSize(225, 16777215))
        self.remain_num.setText("")
        self.remain_num.setObjectName("remain_num")
        self.label_6 = QtWidgets.QLabel(Dialog_jump_buy)
        self.label_6.setGeometry(QtCore.QRect(69, 282, 75, 21))
        self.label_6.setMaximumSize(QtCore.QSize(75, 16777215))
        self.label_6.setObjectName("label_6")
        self.com_class = QtWidgets.QComboBox(Dialog_jump_buy)
        self.com_class.setGeometry(QtCore.QRect(154, 74, 87, 21))
        self.com_class.setMaximumSize(QtCore.QSize(87, 16777215))
        self.com_class.setObjectName("com_class")
        self.com_class.addItem("")
        self.com_class.addItem("")
        self.com_class.addItem("")
        self.label_5 = QtWidgets.QLabel(Dialog_jump_buy)
        self.label_5.setGeometry(QtCore.QRect(69, 230, 75, 21))
        self.label_5.setMaximumSize(QtCore.QSize(75, 16777215))
        self.label_5.setObjectName("label_5")
        self.label_4 = QtWidgets.QLabel(Dialog_jump_buy)
        self.label_4.setGeometry(QtCore.QRect(69, 126, 69, 21))
        self.label_4.setMaximumSize(QtCore.QSize(69, 16777215))
        self.label_4.setObjectName("label_4")
        self.label_2 = QtWidgets.QLabel(Dialog_jump_buy)
        self.label_2.setGeometry(QtCore.QRect(325, 74, 78, 21))
        self.label_2.setMaximumSize(QtCore.QSize(163, 16777215))
        self.label_2.setObjectName("label_2")
        self.phone_num = QtWidgets.QLineEdit(Dialog_jump_buy)
        self.phone_num.setGeometry(QtCore.QRect(240, 282, 189, 21))
        self.phone_num.setMaximumSize(QtCore.QSize(189, 16777215))
        self.phone_num.setObjectName("phone_num")
        self.label_3 = QtWidgets.QLabel(Dialog_jump_buy)
        self.label_3.setGeometry(QtCore.QRect(69, 178, 75, 21))
        self.label_3.setMaximumSize(QtCore.QSize(75, 16777215))
        self.label_3.setObjectName("label_3")
        self.identity_num = QtWidgets.QLineEdit(Dialog_jump_buy)
        self.identity_num.setGeometry(QtCore.QRect(240, 230, 189, 21))
        self.identity_num.setMinimumSize(QtCore.QSize(189, 0))
        self.identity_num.setMaximumSize(QtCore.QSize(189, 16777215))
        self.identity_num.setMaxLength(1000)
        self.identity_num.setObjectName("identity_num")
        self.retranslateUi(Dialog_jump_buy)
        # Buy button performs the purchase; changing the cabin class
        # refreshes the remaining-seat label.
        self.pushButton.clicked.connect(self.insert_tacket)
        self.com_class.currentIndexChanged.connect(self.remain_tacket)
        QtCore.QMetaObject.connectSlotsByName(Dialog_jump_buy)
        # self.pushButton.clicked.connect(self.change)

    def retranslateUi(self, Dialog_jump_buy):
        """Set the user-visible (Chinese) texts; also pyuic5-generated."""
        _translate = QtCore.QCoreApplication.translate
        Dialog_jump_buy.setWindowTitle(_translate("Dialog_jump_buy", "Dialog"))
        self.pushButton.setText(_translate("Dialog_jump_buy", "购买"))
        self.label.setText(_translate("Dialog_jump_buy", " 舱位:"))
        self.sex.setItemText(0, _translate("Dialog_jump_buy", "男"))
        self.sex.setItemText(1, _translate("Dialog_jump_buy", "女"))
        self.label_6.setText(_translate("Dialog_jump_buy", "联系电话:"))
        self.com_class.setItemText(0, _translate("Dialog_jump_buy", "头等舱"))
        self.com_class.setItemText(1, _translate("Dialog_jump_buy", "商务舱"))
        self.com_class.setItemText(2, _translate("Dialog_jump_buy", "经济舱"))
        self.label_5.setText(_translate("Dialog_jump_buy", "身份证号:"))
        self.label_4.setText(_translate("Dialog_jump_buy", " 性别:"))
        self.label_2.setText(_translate("Dialog_jump_buy", "剩余票数:"))
        self.label_3.setText(_translate("Dialog_jump_buy", "乘客姓名:"))

    def insert_tacket(self):
        """Validate the form and, if seats remain for the selected leg and
        cabin class, insert a booking row and decrement the seat counters.

        NOTE(review): "tacket" (sic, = ticket) is kept for interface
        stability. The column slices (6:9 / 12:15 / 9:12) index the joined
        result by position; confirm against the table layout before
        changing any SELECT. QMessageBox parents normally expect a
        QWidget, but ``self`` here is a plain object — confirm.
        """
        dairport = ''
        aairport = ''
        tacket_num = ()
        conn = pyodbc.connect(
            'DRIVER={SQL SERVER NATIVE CLIENT 10.0};SERVER=127.0.0.1;DATABASE=air;UID=sa;PWD=123456')
        # state 0: start -> arrival leg.
        if (self.state == 0):
            sql = "select *"\
                  " from 飞行计划安排 inner join 航班 on 飞行计划安排.航班编号 = 航班.航班编号"\
                  " inner join 机场 on 机场代码 = 出发机场代码"\
                  " inner join 机场 as b on b.机场代码 = 到达机场代码"\
                  " where 航程号 = ?"
            cursor = conn.cursor()
            result = (cursor.execute(sql, self.num).fetchall())
            dairport = result[0][29]
            aairport = result[0][32]
            tacket_num = result[0][6:9]
        # state 1: start -> stopover leg.
        if (self.state == 1):
            sql = "select *"\
                  " from 飞行计划安排 inner join 航班 on 飞行计划安排.航班编号 = 航班.航班编号"\
                  " inner join 机场 on 机场代码 = 出发机场代码"\
                  " inner join 机场 as b on b.机场代码 = 经停机场代码"\
                  " where 航程号 = ?"
            cursor = conn.cursor()
            result = (cursor.execute(sql, self.num).fetchall())
            dairport = result[0][29]
            aairport = result[0][32]
            tacket_num = result[0][12:15]
        # state 2: stopover -> arrival leg.
        if (self.state == 2):
            sql = "select *"\
                  " from 飞行计划安排 inner join 航班 on 飞行计划安排.航班编号 = 航班.航班编号"\
                  " inner join 机场 on 机场代码 = 出发机场代码"\
                  " inner join 机场 as b on b.机场代码 = 经停机场代码"\
                  " where 航程号 = ?"
            cursor = conn.cursor()
            result = (cursor.execute(sql, self.num).fetchall())
            dairport = result[0][29]
            aairport = result[0][32]
            tacket_num = result[0][9:12]
        # Map the cabin-class combo text to its slot in tacket_num.
        kind = {}
        kind['经济舱'] = 0
        kind['商务舱'] = 1
        kind['头等舱'] = 2
        state_text = ['(开始-到达)','(开始-经停)','(经停-到达)']
        cart = self.com_class.currentText()
        print(tacket_num)
        tacket = tacket_num[kind[cart]]
        print(tacket)
        # A NULL seat count is treated as sold out.
        if (tacket is None):
            print('none')
            tacket = 0
        sex = self.sex.currentText()
        tele = self.phone_num.text()
        id = self.identity_num.text()
        name = self.name.text()
        if (len(name)<1 or len(id)<1 or len(tele)<1):
            reply = QMessageBox.warning(self,
                                        "消息框标题",
                                        "请先填写好信息!",
                                        QMessageBox.Yes | QMessageBox.No)
        elif (tacket>0):
            insert_sql = "insert into 订票 values (?,?,?,?,?,?,?,?,?,?,?)"
            result_in = cursor.execute(insert_sql, self.num, dairport, aairport, str(tacket), cart, self.username, name, sex, '成人', str(id), str(tele))
            cursor.commit()
            print('insert complete')
            # Decrement the seat counter of the booked leg...
            update_sql = "update 飞行计划安排 set [" + cart + state_text[self.state]+"剩余座位] = ?" \
                         " where 航程号=?"
            result_up = cursor.execute(update_sql, tacket-1, self.num)
            # ...and keep the other leg counters in sync: a full-journey
            # booking (state 0) consumes a seat on both partial legs,
            # while a partial booking consumes one full-journey seat.
            if (self.state == 0):
                sql_1 = "update 飞行计划安排 set [" + cart + state_text[1]+"剩余座位] = [" + cart + state_text[1]+"剩余座位]-1"\
                        " where 航程号=?"
                cursor.execute(sql_1,self.num)
                sql_2 = "update 飞行计划安排 set [" + cart + state_text[2]+"剩余座位] = [" + cart + state_text[2]+"剩余座位]-1"\
                        " where 航程号=?"
                cursor.execute(sql_2,self.num)
            else:
                sql_1 = "update 飞行计划安排 set [" + cart + state_text[0] + "剩余座位] = [" + cart + state_text[0]+"剩余座位]-1"\
                        " where 航程号=?"
                cursor.execute(sql_1, self.num)
            cursor.commit()
            print('update complete')
            reply = QMessageBox.information(self,
                                            "消息框标题",
                                            "购票成功",
                                            QMessageBox.Yes | QMessageBox.No)
        else:
            reply = QMessageBox.warning(self,
                                        "消息框标题",
                                        "没有余票!",
                                        QMessageBox.Yes | QMessageBox.No)
        conn.close()

    def remain_tacket(self):
        """Refresh the remaining-seat label for the current leg and cabin.

        Mirrors the leg-selection queries of insert_tacket (same positional
        column slices) but only reads the seat counter.
        """
        tacket_num = ()
        conn = pyodbc.connect(
            'DRIVER={SQL SERVER NATIVE CLIENT 10.0};SERVER=127.0.0.1;DATABASE=air;UID=sa;PWD=123456')
        if (self.state == 0):
            sql = "select *"\
                  " from 飞行计划安排 inner join 航班 on 飞行计划安排.航班编号 = 航班.航班编号"\
                  " inner join 机场 on 机场代码 = 出发机场代码"\
                  " inner join 机场 as b on b.机场代码 = 到达机场代码"\
                  " where 航程号 = ?"
            cursor = conn.cursor()
            result = (cursor.execute(sql, self.num).fetchall())
            tacket_num = result[0][6:9]
        if (self.state == 1):
            sql = "select *"\
                  " from 飞行计划安排 inner join 航班 on 飞行计划安排.航班编号 = 航班.航班编号"\
                  " inner join 机场 on 机场代码 = 出发机场代码"\
                  " inner join 机场 as b on b.机场代码 = 经停机场代码"\
                  " where 航程号 = ?"
            cursor = conn.cursor()
            result = (cursor.execute(sql, self.num).fetchall())
            tacket_num = result[0][12:15]
        if (self.state == 2):
            sql = "select *"\
                  " from 飞行计划安排 inner join 航班 on 飞行计划安排.航班编号 = 航班.航班编号"\
                  " inner join 机场 on 机场代码 = 出发机场代码"\
                  " inner join 机场 as b on b.机场代码 = 经停机场代码"\
                  " where 航程号 = ?"
            cursor = conn.cursor()
            result = (cursor.execute(sql, self.num).fetchall())
            tacket_num = result[0][9:12]
        kind = {}
        kind['经济舱'] = 0
        kind['商务舱'] = 1
        kind['头等舱'] = 2
        cart = self.com_class.currentText()
        tacket = tacket_num[kind[cart]]
        # NULL means no seats configured; show 0.
        if (tacket is None):
            tacket = 0
        self.remain_num.setText(str(tacket))
        conn.close()
|
11,244 | 30199b27f4c167b5e9cadb215b796af4a40b5a0c | import cv2
import numpy as np
# Smoke-test OpenCV's CUDA bindings: device discovery, a host<->device
# round trip, and raw CUDA pointer interoperability. Output is unchanged.
print("[CUDA TEST] Check CUDA enable device")
count = cv2.cuda.getCudaEnabledDeviceCount()
print("CUDA enable device count : %s \n" % count)

print("[CUDA TEST] test CUDA upload & download")
host_mat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
gpu_mat = cv2.cuda_GpuMat()
gpu_mat.upload(host_mat)
round_trip_ok = np.allclose(gpu_mat.download(), host_mat)
print( "Uploaded & Downloaded Matrix is close : %s \n" % round_trip_ok)

print("[CUDA TEST] test CUDA interoperability")
host_mat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
gpu_mat = cv2.cuda_GpuMat()
gpu_mat.upload(host_mat)
print("Upload pointer:", gpu_mat.cudaPtr())
stream = cv2.cuda_Stream()
print("CUDA stream pointer:", stream.cudaPtr())
11,245 | f447e26f1dcc23acd18908bedf878038ee002f14 | import requests
import pandas as pd
from lxml import etree
import time
url_pa = 'https://search.51job.com/list/000000,000000,0000,00,9,99,%25E6%2595%25B0%25E6%258D%25AE,2,'
# FIX: "&degreefrom" had been mangled into "°reefrom" (HTML-entity
# corruption of "&deg"); the real query parameter is restored.
url_end = '.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='

# Crawl 51job search result pages, follow each posting for details, and
# append one CSV chunk per page.
for page in range(1, 1000):
    print('正在爬取第', page, '页数据')
    url = url_pa + str(page) + url_end
    web = requests.get(url)
    web.encoding = 'GBK'
    dom = etree.HTML(web.text)
    job_name = dom.xpath('//div[@class="dw_table"]/div[@class="el"]//p/span/a[@target="_blank"]/@title')
    company_name = dom.xpath('//div[@class="dw_table"]/div[@class="el"]//span[@class="t2"]/a[@target="_blank"]/@title')
    address = dom.xpath('//div[@class="dw_table"]/div[@class="el"]//span[@class="t3"]/text()')
    # Some postings omit the salary, so fetch the nodes and read .text
    # (which is None for the missing ones).
    salary_mid = dom.xpath('//div[@class="dw_table"]/div[@class="el"]//span[@class="t4"]')
    salary = [node.text for node in salary_mid]
    release_time = dom.xpath('//div[@class="dw_table"]/div[@class="el"]//span[@class="t5"]/text()')
    href = dom.xpath('//div[@class="dw_table"]/div[@class="el"]//p/span/a[@target="_blank"]/@href')
    JobDes = []
    CompanyType = []
    NumberStaff = []
    Industry = []
    # FIX: the detail loop used to reuse `i`, shadowing the page counter.
    for link in href:
        web_sub = requests.get(link)
        web_sub.encoding = 'GBK'
        dom_sub = etree.HTML(web_sub.text)
        job_des = dom_sub.xpath('//div[@class="tCompany_main"]//div[@class="bmsg job_msg inbox"]/p/text()')
        company_type = dom_sub.xpath('//div[@class="tCompany_sidebar"]//div[@class="com_tag"]/p[1]/@title')
        number_staff = dom_sub.xpath('//div[@class="tCompany_sidebar"]//div[@class="com_tag"]/p[2]/@title')
        industry_ = dom_sub.xpath('//div[@class="tCompany_sidebar"]//div[@class="com_tag"]/p[3]/@title')
        JobDes.append(job_des)
        CompanyType.append(company_type)
        NumberStaff.append(number_staff)
        Industry.append(industry_)
        time.sleep(1)  # be polite to the site
    da = pd.DataFrame()
    da['岗位名'] = job_name
    da['公司名'] = company_name
    da['工作地点'] = address
    da['工资'] = salary
    da['发布时间'] = release_time
    da['公司类型'] = CompanyType
    da['公司人数'] = NumberStaff
    da['行业'] = Industry
    da['工作描述'] = JobDes
    try:
        da.to_csv('job_info.csv', mode='a+', encoding='GBK', header=None)
    # FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    except Exception:
        print('当页数据写入失败, 跳转到下页继续爬取。')
    time.sleep(1)
11,246 | f703a21f4d54bd7bd79bfbe22b6dec4b146f4ffb | import tensorflow as tf
import numpy as np
"""
https://www.jianshu.com/p/52e7ae04cecf
保存模型和加载模型
这种方法不方便的在于,在使用模型的时候,必须把模型的结构重新定义一遍,然后载入对应名字的变量的值。
"""
def save():
    """Create variables w/b and persist them as a TF1 checkpoint in save/.

    FIX: tf.initialize_all_variables() was deprecated (TF 0.12) and later
    removed; tf.global_variables_initializer() is the documented
    replacement with identical behaviour.
    """
    W = tf.Variable([[1, 1, 1], [2, 2, 2]], dtype=tf.float32, name="w")
    b = tf.Variable([0, 1, 2], dtype=tf.float32, name="b")
    print(W.shape)
    print(b.shape)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        # Save the model (restore later with saver.restore(sess, ...)).
        save_path = saver.save(sess, "save/model.ckpt")
def load():
    """Rebuild the graph shape and restore w/b from save/model.ckpt.

    The variables must be re-declared with the same names ("w", "b")
    because this save/restore style maps values by variable name.
    """
    W = tf.Variable(tf.truncated_normal(shape=(2, 3)), dtype=tf.float32, name='w')
    b = tf.Variable(tf.truncated_normal(shape=(3,)), dtype=tf.float32, name='b')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "save/model.ckpt")
def main():
    """Demo driver: write the checkpoint (swap the comments to restore)."""
    save()
    # load()


if __name__ == "__main__":
    main()
|
11,247 | f9e4705f9bca3bfc29e8619b6be83e76ab334cb5 | """
@ Filename: AClassifier.py
@ Author: ffcccc
@ Create Date: 2019-11-07
@ Update Date: 2019-11-07
@ Description: Classification base class
"""
import numpy as np
import pickle
import preProcess
class aClassifier:
    """Classification base class: holds predictions and scores them."""

    def __init__(self):
        # Filled in by subclasses during their prediction step.
        self.prediction = None
        self.probability = None

    def accuracy(self, test_label):
        """Return the detection accuracy of self.prediction vs *test_label*.

        Delegates to preProcess.calculateAccuracy.
        FIX: removed the dead commented-out inline re-implementation, and
        the old docstring documented a ``test_data`` parameter that never
        existed — the method only needs the labels.
        """
        return preProcess.calculateAccuracy(test_label, self.prediction)
11,248 | 726fbd2056797cabedcef594665a703f8236f44b | import os
import platform
__version__ = "1.5.6"
__author__ = "Hao-Wei Lee"
# Platform-specific path separator and null device.
_ON_WINDOWS = platform.system() == "Windows"
SEP = "\\" if _ON_WINDOWS else "/"
STDNUL = "NUL" if _ON_WINDOWS else "/dev/null"

# Project root: the parent of the directory containing this file.
_HERE = os.path.dirname(os.path.realpath(__file__))
ROOT_DIR = SEP.join(_HERE.split(SEP)[:-1])

def get_path(*names):
    """Join *names* onto the project root with the platform separator."""
    return ROOT_DIR + SEP + SEP.join(names)
11,249 | 6d7e796ae1fc9d0a0f4c9924e63d09ea60bea207 | import sys
import time
import random
def get(type):
    """Return a random fortune.

    type == 'dick'   -> one string drawn from DICK
    type == 'iching' -> ["hello"] followed by descending line positions
    anything else    -> None
    """
    # FIX: random.SystemRandom draws from OS entropy and, per the random
    # module docs, ignores its seed argument entirely — the old
    # time-based "seeds" and the second RNG instance did nothing.
    rng = random.SystemRandom()
    if type == 'dick':
        rng.shuffle(DICK)
        return rng.choice(DICK)
    elif type == 'iching':
        linecount = ["hello"]
        lineplace = 8
        for _ in range(6):
            # Each of the 6 casts keeps its line with probability 3/5.
            rng.shuffle(ICHING)
            coin3 = rng.choice(ICHING)
            if coin3 == 1:
                linecount.append(lineplace)
            # NOTE(review): the original indentation was ambiguous here;
            # the position is advanced once per cast (positions 8..3).
            lineplace = lineplace - 1
        return linecount
    return None  # unknown fortune type

ICHING = [1, 1, 1, 0, 0]
DICK = [
    "You have herpes.",
    "It is small and insignificant like the rest of your life.",
    "No one cares about your incher.",
    "I advise you to keep the lights off and substitute for a strap-on instead.",
    "It's practically an innie.",
    "The only thing you'll successfully have sex with is a pasta strainer.",
    "Your sex reassignment surgery will be a breeze.",
    "Your dick is so ugly it cries itself to sleep at night.",
    "It looks like a California raisin."
]
|
11,250 | 09b6d774962b521b685276ff889f5447a838f9e9 | #by zhuxing
import datetime
def do_telnet(Host, username, password):
    """Log into a switch over telnet, run ``sh run`` and dump the output
    to YYYYMMDD_swconfig.txt.

    FIX: the file was pasted with "smart" quotes (‘ ’ “ ”), which are a
    SyntaxError in Python; all string literals now use straight quotes.
    The Python-2-only ``file(...)`` constructor was replaced by a ``with
    open(...)`` block so the dump file is closed even on error.
    NOTE(review): this targets Python 2 — under Python 3 telnetlib
    expects bytes, so the reads/writes would need b'' literals; confirm
    the runtime before deploying.
    """
    import telnetlib
    tn = telnetlib.Telnet(Host)
    tn.set_debuglevel(2)
    tn.read_until('Username:')
    tn.write(username + '\n')
    tn.read_until('Password:')
    tn.write(password + '\n')
    tn.read_until('switch#')
    tn.write('sh run ' + '\n')
    # Page through the output by sending spaces to the --More-- prompt.
    spacetime = 0
    while spacetime <= 500:
        tn.write(" ")
        spacetime = spacetime + 1
    msg = tn.read_until("switch# ")
    when = datetime.datetime.now().strftime("%Y%m%d")
    path = when + "_swconfig.txt"
    with open(path, "wb") as f:
        f.write(msg)
    tn.close()  # tn.write('exit\n')
# FIX: straight quotes restored (the pasted smart quotes were a SyntaxError).
if __name__ == '__main__':
    Host = 'switch ip address'  # replace with the real switch address
    username = 'username'
    password = 'password'
    do_telnet(Host, username, password)
|
11,251 | 5d4e1899ca8236348b34cf693fa9b4c32a21c0e1 | import pip
def install(package):
    """Install *package* with pip.

    FIX: ``pip.main`` / ``pip._internal.main`` are unsupported internal
    APIs that break between pip releases; the documented approach is to
    run pip as a subprocess against the current interpreter.
    """
    import subprocess
    import sys
    subprocess.run([sys.executable, "-m", "pip", "install", package])
# Example
if __name__ == '__main__':
    install('requests')
    # FIX: 'collections' is part of the standard library, not a PyPI
    # distribution — the old install('collections') call could never
    # succeed and was removed.
    install('beautifulsoup4')
    install('nltk')
|
11,252 | 0e816a8f75ad8f45591c5c84302160ee154293a8 | __all__ = [
'ClothesPeg',
]
from .peg import ClothesPeg
|
11,253 | b324c161b9a1f63dabbaf9e352dd17a4b312c9b6 |
#03 - Modify the previous program such that only the users Alice and Bob are greeted with their names.
print("Please, insert your name:")
name = input()
# FIX: one case-insensitive membership test instead of two duplicated
# name.lower() comparisons.
if name.lower() in ("alice", "bob"):
    print("Welcome,", name)
|
11,254 | bb4d85ea5a87f934441dc01085380d6c2e74ec13 | # ! Author tuhinsaud@tuhinsaud
# version: 1
import os
from io import BytesIO
from PIL import ImageTk, Image as PILImage
import tkinter as tk
from tkinter import *
from tkinter import ttk, filedialog, messagebox
import database_helper as db_helper
from show_products_panel import *
from update_window import update_window_frame
# Directory two levels above this file (currently unused below).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# print(os.path.expanduser("~"))

# Main window with a two-tab notebook: "Show Products" and "Add Product".
root = Tk()
root.title("Shop Management System")
root.geometry("600x400")
note_book = ttk.Notebook(root)
note_book.pack(fill=tk.BOTH)
# TODO:Show Products Starts
show_products_frame = ttk.Frame(note_book)
note_book.add(show_products_frame, text="Show Products", sticky="nesw")
# custom_tree_view comes from show_products_panel (star import above).
show_products_tree_view_headers = (
    "Name", "Price", "Category", "Created at")
show_products_tree = custom_tree_view(
    show_products_frame, *show_products_tree_view_headers)
def show_all_data():
    """Reload every product row from the database into the tree view."""
    for row_id in show_products_tree.get_children():
        show_products_tree.delete(row_id)
    # Column 0 of each record is the row id (tree text); the rest are the
    # visible columns.
    for record in db_helper.all_products():
        show_products_tree.insert(
            "", "end", value=record[1:], text=record[0])

show_all_data()
# Currently highlighted row in the products tree (None until one is picked).
selected_item_id = None
selected_item_values = None

def on_select(event):
    """Remember the id and values of the tree row the user selected."""
    global show_products_tree
    global selected_item_id
    global selected_item_values
    for idx in event.widget.selection():
        selected_item_id = show_products_tree.item(idx)['text']
        selected_item_values = show_products_tree.item(idx)['values']

show_products_tree.bind("<<TreeviewSelect>>", on_select)
def update_item_handler():
    """Open the update window for the selected row, then clear the selection."""
    global selected_item_id
    global selected_item_values
    # Nothing selected yet: ignore the click.
    if selected_item_id is None or selected_item_values is None:
        return
    update_window_frame(root, selected_item_id,
                        selected_item_values, show_all_data)
    selected_item_values = None
    selected_item_id = None

update_product_btn = Button(
    show_products_frame, text="UPDATE", bg="GREEN", fg="WHITE", command=update_item_handler)
update_product_btn.pack()
def delete_item_handler():
    """Delete the selected product from the database and refresh the tree."""
    global selected_item_id
    if selected_item_id is None:
        return
    db_helper.delete_product(selected_item_id)
    show_all_data()

delete_product_btn = Button(
    show_products_frame, text="DELETE", bg="RED", fg="WHITE", command=delete_item_handler)
delete_product_btn.pack()
# TODO:Show Products Ends
# TODO: ADD Products
# This for category popup
category = ("Laptop", "Desktop", "Mobile")
# Shared buffer the Save handler fills before calling
# db_helper.insert_product(**global_product_data).
global_product_data = {
    "name": None,
    "price": 0,
    "image": None,
    "category_name": category[0]
}

product_frame = ttk.Frame(note_book)
note_book.add(product_frame, text="Add Product")

# Name and price rows of the add-product form.
product_name_label = Label(product_frame, text="Name")
product_name_label.grid(row="0", column="0", pady="5")
product_name_txt = Entry(product_frame)
product_name_txt.grid(row="0", column="1", pady="5")
product_price_label = Label(product_frame, text="Price")
product_price_label.grid(row="1", column="0", pady="5")
product_price_txt = Entry(product_frame)
product_price_txt.grid(row="1", column="1", pady="5")
def open_file_selector():
    """Ask for a product image (jpg/png) and stash the chosen path."""
    root.filename = filedialog.askopenfilename(title="select Image", filetypes=(
        ("jpg files", "*.jpg"), ("png files", "*.png")))
    # image = ImageTk.PhotoImage(PILImage.open(root.filename))
    # # image_path_label = Label(add_product_frame, text=root.filename)
    # image_path_label = Label(add_product_frame, image=image, height=30, width=30)
    # image_path_label.grid(row="2", column="1", columnspan="3", pady="5")
    # Empty string means the dialog was cancelled.
    if root.filename:
        global_product_data["image"] = root.filename
        image_path_label = Label(product_frame, text=root.filename)
        image_path_label.grid(row="2", column="1", pady="5")

product_image_btn = Button(
    product_frame, text="Select image.", command=open_file_selector)
product_image_btn.grid(row="2", column="0", pady="5")
# TODO:Category start
# Drop-down bound to category_variable, defaulting to the first category.
category_variable = StringVar()
category_variable.set(category[0])
product_category_label = Label(product_frame, text="Category")
product_category_label.grid(row="3", column="0", pady="5")
product_category_selection = OptionMenu(
    product_frame, category_variable, *category)
product_category_selection.grid(row="3", column="1", pady="5")
# TODO:Category end
def clear_form_data():
    """Reset the add-product form back to its initial empty state."""
    product_name_txt.delete(0, "end")
    product_price_txt.delete(0, "end")
    category_variable.set(category[0])
def saveData():
    """Validate the form, persist the product, then return to the list tab.

    FIX: the old price check used ``re.search('[0-9]+\\.?[0-9]*', ...)``,
    which matches *anywhere* in the string — "abc12" passed validation and
    the subsequent ``float()`` raised. Parsing with ``float()`` directly
    both validates and converts in one step (and removes the dependency on
    ``re``, which was never visibly imported in this module).
    """
    global_product_data["name"] = str(product_name_txt.get())
    if len(global_product_data["name"]) == 0:
        messagebox.showerror("Error", "Name must not be empty.")
        return
    temp_price = str(product_price_txt.get()).replace(" ", "")
    try:
        global_product_data["price"] = float(temp_price)
    except ValueError:
        messagebox.showerror("Error", "Invalid price.")
        return
    global_product_data["category_name"] = category_variable.get()
    db_helper.insert_product(**global_product_data)
    clear_form_data()
    show_all_data()
    note_book.select(0)

insert_product_btn = Button(
    product_frame, text="Save", command=saveData)
insert_product_btn.grid(row="7", column="0", columnspan="2", pady="5")
# TODO:Add Product end
root.mainloop()
|
11,255 | 2c3b64a137617b3d8835dcdb3bc08f5ffd352649 | class RollingHash:
def __init__(self, s):
b1, b2 = 1007, 2009
self.mod1, self.mod2 = 10**9+7, 10**9+9
self.size = len(s)
self.string = s
self.hash1 = self.make_hashtable(b1, self.mod1)
self.hash2 = self.make_hashtable(b2, self.mod2)
self.pow1 = self.make_powtable(b1, self.mod1)
self.pow2 = self.make_powtable(b2, self.mod2)
def make_hashtable(self, B, MOD):
hashtable = [0] * (self.size+1)
for i in range(1, self.size+1):
hashtable[i] = (hashtable[i-1] * B + ord(self.string[i-1]))%MOD
return hashtable
def make_powtable(self, B, MOD):
power = [0] * (self.size+1)
power[0] = 1
for i in range(1, self.size+1):
power[i] = (B * power[i-1])%MOD
return power
def get_hash(self, l, length):
r = length+l-1
l -= 1
h1 = (self.hash1[r]-self.hash1[l]*self.pow1[r-l])%self.mod1
h2 = (self.hash2[r]-self.hash2[l]*self.pow2[r-l])%self.mod2
return (h1, h2)
from collections import defaultdict
def solve1():
    """O(N^2 log N) baseline: for every pair of equal characters, binary
    search the longest matching extension using the rolling hash.

    Reads N and S from stdin, prints the answer.
    """
    N = int(input())
    S = input()
    size = len(S)
    hash_table = RollingHash(S)
    ans = 0
    for i in range(1, N+1):
        for j in range(i+1, N+1):
            if S[i-1]!=S[j-1]:continue
            left = 1
            # Candidate length is capped so the match at i cannot run past
            # j (no overlap) and the match at j stays inside S.
            right = min(j-i, size-j+1)+1
            # Invariant: length `left` always matches, `right` never does.
            while right-left>1:
                m = (left+right)//2
                if hash_table.get_hash(i, m)==hash_table.get_hash(j, m):
                    left = m
                else:
                    right = m
            ans = max(ans, left)
    print(ans)
def solve2():
    """O(N log N) solution: binary search the answer length; check(m)
    tests whether some length-m substring occurs twice without overlap.

    Reads N and S from stdin, prints the answer.
    """
    N = int(input())
    S = input()
    size = len(S)
    hash_table = RollingHash(S)
    def check(m):
        # Earliest start index seen per length-m hash; a repeat at
        # distance >= m is a non-overlapping second occurrence.
        d = defaultdict(lambda :10**20)
        for i in range(1, size-m+2):
            h = hash_table.get_hash(i, m)
            d[h] = min(d[h], i)
            if (i-d[h])>=m:
                return True
        return False
    # A repeat of length m needs 2m characters, so m <= size // 2.
    left, right = 0, size//2 + 1
    while right-left>1:
        m = (right+left)//2
        if check(m):
            left = m
        else:
            right = m
    print(left)

if __name__ == "__main__":
    # solve1()
    solve2()
|
11,256 | e141ac58c84295bbbd91a089c9a895ad389ee1fb | # coding = utf-8
'''
@author = super_fazai
@File : demo1.py
@Time : 2017/8/9 17:36
@connect : superonesfazai@gmail.com
'''
import threading
import time
class MyThread(threading.Thread):
    """Worker thread: bumps its counter once, sleeps, then reports."""

    def __init__(self, num, sleepTime):
        # Overriding the constructor: the base Thread must be initialised.
        super().__init__()
        self.num = num
        self.sleepTime = sleepTime

    def run(self):
        self.num += 1
        time.sleep(self.sleepTime)
        print('线程(%s),num=%d' % (self.name, self.num))
if __name__ == '__main__':
    # NOTE(review): `mutex` is created but never used — each thread only
    # mutates its own instance state, so no lock is actually required.
    mutex = threading.Lock()
    # t1 sleeps longer, so t2 typically finishes (and prints) first.
    t1 = MyThread(100,5)
    t1.start()
    t2 = MyThread(200,1)
    t2.start()

'''
测试结果:
线程(Thread-2),num=201
线程(Thread-1),num=101
'''
11,257 | 6e5b3c2aebb562e048e2918e9d2fa2f5ba23fedb | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-03-03 11:59
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration: renames the Buy/Sell tables to
# Buy1/Sell1 by creating the new models and deleting the old ones.
# Avoid hand-editing generated migrations beyond comments.
class Migration(migrations.Migration):

    dependencies = [
        ('testapp', '0006_auto_20190303_1105'),
    ]

    operations = [
        migrations.CreateModel(
            name='Buy1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Medicine', models.CharField(max_length=30)),
                ('Company_Name', models.CharField(max_length=30)),
                ('Number_of_Tablet', models.IntegerField()),
                ('Location', models.CharField(max_length=30)),
                ('Email', models.EmailField(max_length=254)),
                ('Pincode', models.IntegerField()),
                ('Phoneno', models.CharField(max_length=10)),
                ('price', models.IntegerField()),
                ('quantity', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Sell1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Medicine', models.CharField(max_length=30)),
                ('Name', models.CharField(max_length=30)),
                ('Location', models.CharField(max_length=40)),
                ('Phoneno', models.CharField(max_length=10)),
                ('quantity', models.IntegerField()),
                ('Email', models.EmailField(max_length=254)),
                ('Pincode', models.IntegerField()),
                ('Batch_No', models.CharField(max_length=30)),
                ('Total_Amount', models.IntegerField()),
            ],
        ),
        # NOTE: DeleteModel drops the old tables and their data.
        migrations.DeleteModel(
            name='Buy',
        ),
        migrations.DeleteModel(
            name='Sell',
        ),
    ]
|
11,258 | 2dba51a6739827ce25fd0811c22852e3c44c289a | import time
# -- Python 2 script (xrange / print statements) --
# kvad: the squares 1^2 .. 16000^2, indexed by position (candidate values).
kvad = []
for i in xrange(1,16000+1):
    kvad.append(i*i)
# kvad_s: membership set of all squares i^2 for i < 2,000,000, giving O(1)
# "is a perfect square" tests; the temporary list is freed afterwards.
kv = []
for i in xrange(2000000):
    kv.append(i*i)
kvad_s = set(kv)
del kv
def daili_net(x, y, z):
    """True iff all six pairwise sums and differences of x, y, z are
    perfect squares (i.e. members of the precomputed set kvad_s).

    FIX: the function used to fall off the end and return None in the
    negative case; it now returns an explicit bool, consistent with its
    sibling danet().
    """
    return (x+y in kvad_s and x-y in kvad_s and x+z in kvad_s and
            x-z in kvad_s and y+z in kvad_s and y-z in kvad_s)
def danet(y, z):
    """Report whether both y+z and y-z are perfect squares (per kvad_s)."""
    both_square = (y + z) in kvad_s and (y - z) in kvad_s
    return both_square
try:
for i in xrange(16000):
#print i
z = kvad[i]
for j in xrange(i+1,16000):
y = kvad[j]
if danet(y,z):
print "41"
raise StopIteration
for m in xrange(j+1,1600):
x = kvad[m]
if daili_net(x,y,z):
print x,y,z
raise StopIteration
except StopIteration:
print "vishli iz vseh", i, j , k
|
11,259 | 0efedcb7e6a230d23dc14bfd8714e52a54799d03 | def prime(pStart=2,pEnd=20):
a = []
for i in range(pStart,pEnd+1):
for j in range(2,i+1):
if(i%j == 0 and i != j):
break;
elif(i == j):
a.append(i)
for i in range(0,len(a)-1):
print("{} ".format(a[i]),end='')
print("{}".format(a[len(a)-1]))
M = int(input())  # lower bound of the range (inclusive)
N = int(input())  # upper bound of the range (inclusive)
prime(M,N)
11,260 | d84e022b4cf1597618ff98c161eb0476ef5f13ff | #!/usr/bin/env python
# coding: utf-8
# # Background
#
# - 1 The adjacent 3 tabs contain a data dump of search strings used by EXP clients to access relevant content available on Gartner.com for the months of August, September and October in the year 2018. Every row mentions if the EXP client is "Premium" or not, Persona (that was used for data extraction for EXP clients from main database), day on which the search string was used and finally the search string. In total there are 68544 rows of data available across all the months.
# ## Task 1
#
# - 2 Clean the dataset using standard text cleaning steps and process the data to allow for the following analysis.
# - 3 Identify the most popular topics being searched for by EXP clients and plot the top 10 topics by their frequency of occurrence.
#
# - 4 Report on the volume growth of these topics over August, September and October.
# ## Task 2
#
# - 5 Use the cleaned dataset from Step 2 and process your dataset for the following analysis.
# - 6 Use the concept of Named Entity Recognition in your code to identify a list of geographies and organizations being mentioned in the search terms.
#
# - 7 Plot the geographies and organizations by their frequency of occurrence (count 1 mention of a geography, if the same geography is mentioned more than once in the same search string). If you can do it for the mention of "technologies" such as ai, analytics etc. then it will be a plus. Any useful trends observed in these mentions of geographies, organizations and technologies should be plotted and presented.
#
# # Final Output & Next Steps
#
# - 8 "Final output of the exercise should include
#
# *1. 3 Code files- 1 used for data cleaning and 2 used for each of the 2 tasks (with data processing and data analysis). Please comment your code appropriately. You will be evaluated for properly structuring your code and for building checks and balances in your analysis- which should be included in your code as well.*
#
# *2. If some data visualization tool such as Tableau/PowerBI is used for presentation of the plots in the panel round (if selected) then it will be considered a plus for the candidate. PPT presentation is acceptable though. The following visualizations are required- *
#
# **- Please prepare 1-2 slides to explain your data cleaning and processing steps, 1-2 slides to display the results of Task 1 (include the methodology used for completing the task), 1-2 slides to display the result of Task 2 (include the methodology used for completing the task), 1-2 slides on what other analysis is possible on the data set including the expected insights from those (for this you will need to mention the preferred methodology for text analysis). "**
# - 9 You will be given a time limit of 3 Days from the time this test is given, to prepare the output. The candidates should upload the output docs- Dashboard/PPT & their 3 code files in a G-drive link and send them across to the assigned recruiter.
# - 10 If your output gets selected, you will be asked to present your findings & approach to our panel of experts who would cross question you on your analysis.
#
# In[26]:
import numpy as np
import pandas as pd
#for text processing
import re
import string
import nltk
from nltk.corpus import stopwords
from textblob import Word
#calculation of time
from time import time
##pretty print
import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.corpora import Dictionary
# Build LDA model
from gensim.models.ldamulticore import LdaMulticore
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# spacy
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('max_colwidth', -1)
# #### Merge all 3 sheets
# In[2]:
# from pandas import ExcelWriter
# from pandas import ExcelFile
xls = pd.ExcelFile('data.xlsx')
df1 = pd.read_excel(xls, sheet_name='Aug')
df2 = pd.read_excel(xls, sheet_name='Sept')
df3 = pd.read_excel(xls, sheet_name='Oct')
# In[3]:
df = pd.concat([df1,df2,df3] , ignore_index=True)
# ## Inspect Text field
# In[4]:
df.head()
# In[5]:
df.info()
# In[6]:
df.isnull().sum()
# In[7]:
#fetch missing values of a column
df[df["Query Text"].isnull()]
# In[8]:
#drop all the rows which have NaN in Query Text
df = df.dropna(how='any',axis=0)
# In[9]:
df.isnull().sum()
# In[10]:
df.drop_duplicates(subset ="Query Text",
keep = 'last', inplace = True)
# In[11]:
df.info()
# In[12]:
# check the length of documents
document_lengths = np.array(list(map(len, df['Query Text'].str.split(' '))))
print("The average number of words in a document is: {}.".format(np.mean(document_lengths)))
print("The minimum number of words in a document is: {}.".format(min(document_lengths)))
print("The maximum number of words in a document is: {}.".format(max(document_lengths)))
# In[13]:
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 1)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 2)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 3)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 4)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 5)))
# ## Task 1
#
# ### Sub-task 2 : Text pre-processing
# In[14]:
def text_preprocessing(data):
    """Clean the 'Query Text' column in place.

    Each cleaning stage is materialised as its own column
    (processed_text .. processed_text_5) so intermediate results stay
    inspectable; the last stage is also exposed as 'final_text'.
    """
    stop_words = stopwords.words('english')

    # Stage 0: lower-case every token, normalising whitespace.
    data['processed_text'] = data['Query Text'].apply(
        lambda text: ' '.join(token.lower() for token in text.split()))
    # Stage 1: strip punctuation characters.
    data['processed_text_1'] = data['processed_text'].apply(
        lambda text: ''.join(ch for ch in text if ch not in string.punctuation))
    # Stage 2: replace digit runs with a single space.
    data['processed_text_2'] = data['processed_text_1'].apply(
        lambda text: re.sub('[0-9]+', ' ', text))
    # Stage 3: drop English stopwords.
    data['processed_text_3'] = data['processed_text_2'].apply(
        lambda text: ' '.join(tok for tok in text.split() if tok not in stop_words))
    # Stage 4: lemmatise each token with TextBlob.
    data['processed_text_4'] = data['processed_text_3'].apply(
        lambda text: ' '.join(Word(tok).lemmatize() for tok in text.split()))
    # Stage 5: drop single-letter tokens surrounded by whitespace.
    data['processed_text_5'] = data['processed_text_4'].apply(
        lambda text: re.sub(r'\s+[a-zA-Z]\s+', ' ', text))
    data['final_text'] = data['processed_text_5']
# In[15]:
#pre-processing or cleaning data
text_preprocessing(df)
df.head()
# In[16]:
#create tokenized data for LDA
df['final_tokenized'] = list(map(nltk.word_tokenize, df.final_text))
df.head()
# ## LDA training
# In[17]:
# Create Dictionary
id2word = corpora.Dictionary(df['final_tokenized'])
texts = df['final_tokenized']
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1])
# In[18]:
id2word[0]
# In[19]:
# Human readable format of corpus (term-frequency)
[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
# In[20]:
get_ipython().run_cell_magic('time', '', "\nnum_topics = 10\n\nlda_model = LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics=num_topics, \n workers=3, #CPU cores\n random_state=100,\n chunksize=400, #Number of documents to be used in each training chunk.\n passes=40, #Number of passes through the corpus during training.\n alpha='asymmetric',\n per_word_topics=True)")
# In[27]:
# View the topics in LDA model.
# BUGFIX: the original called `pp.pprint`, but only `import pprint` exists
# above (NameError at runtime); use the module directly.
pprint.pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
# #### What is topic coeherence
#
# https://rare-technologies.com/what-is-topic-coherence/
#
# What exactly is this topic coherence pipeline thing? Why is it even important? Moreover, what is the advantage of having this pipeline at all? In this post I will look to answer those questions in an as non-technical language as possible. This is meant for the general reader as much as a technical one so I will try to engage your imaginations more and your maths skills less.
#
# Imagine that you get water from a lot of places. The way you test this water is by providing it to a lot of people and then taking their reviews. If most of the reviews are bad, you say the water is bad and vice-versa. So basically all your evaluations are based on reviews with ratings as bad or good. If someone asks you exactly how good (or bad) the water is, you blend in your personal opinion. But this doesn’t assign a particular number to the quality of water and thus is only a qualitative analysis. Hence it can’t be used to compare two different sources of water in a definitive manner.
#
# Since you are a lazy person and strive to assign a quantity to the quality, you install four different pipes at the end of the water source and design a meter which tells you the exact quality of water by assigning a number to it. While doing this you receive help from a lot of wonderful people around you and therefore you are successful in installing it. Hence now you don’t need to go and gather hundred different people to get their opinion on the quality of water. You can get it straight from the meter and this value is always in accordance with the human opinions.
#
# The water here is the topics from some topic modelling algorithm. Earlier, the topics coming out from these topic modelling algorithms used to be tested on their human interpretability by presenting them to humans and taking their input on them. This was not quantitative but only qualitative. The meter and the pipes combined (yes you guessed it right) is the topic coherence pipeline. The four pipes are:
#
# Segmentation : Where the water is partitioned into several glasses assuming that the quality of water in each glass is different.
# Probability Estimation : Where the quantity of water in each glass is measured.
# Confirmation Measure : Where the quality of water (according to a certain metric) in each glass is measured and a number is assigned to each glass wrt it’s quantity.
# Aggregation : The meter where these quality numbers are combined in a certain way (say arithmetic mean) to come up with one number.
# And there you have your topic coherence pipeline! There are surely much better analogies than this one but I hope you got the gist of it.
# In[28]:
get_ipython().run_cell_magic('time', '', "\n# Compute Perplexity\nprint('\\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.\n\n# Compute Coherence Score\ncoherence_model_lda = CoherenceModel(model=lda_model, texts=df['final_tokenized'], dictionary=id2word, coherence='c_v')\ncoherence_lda = coherence_model_lda.get_coherence()\nprint('\\nCoherence Score: ', coherence_lda)")
# ## Top 10 topics by frequency of occurence
#
#
# In[29]:
get_ipython().run_cell_magic('time', '', '\n# Visualize the topics\n\npyLDAvis.enable_notebook()\nvis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)\nvis')
# #### How to find the optimal number of topics for LDA?
#
# My approach to finding the optimal number of topics is to build many LDA models with different values of number of topics (k) and pick the one that gives the highest coherence value.
#
# Choosing a ‘k’ that marks the end of a rapid growth of topic coherence usually offers meaningful and interpretable topics. Picking an even higher value can sometimes provide more granular sub-topics.
#
# If you see the same keywords being repeated in multiple topics, it’s probably a sign that the ‘k’ is too large.
#
# The compute_coherence_values() (see below) trains multiple LDA models and provides the models and their corresponding coherence scores.
# If the coherence score seems to keep increasing, it may make better sense to pick the model that gave the highest CV before flattening out. This is exactly the case here.
#
# So for further steps I will choose the model with 20 topics itself.
# ## Sub-Task2 Named Entity Recognition
# In[34]:
from IPython.display import Image
Image("img/picture.png")
# In[35]:
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
nlp = en_core_web_sm.load()
# In[36]:
#removing duplicates
final_text = df['final_text'].unique()
print('Number of Query Text: ', len(final_text))
# In[37]:
corpus = list(nlp.pipe(final_text))
# In[38]:
# Looking at number of times each ent appears in the total corpus
# nb. ents all appear as Spacy tokens, hence needing to cast as str
from collections import defaultdict
all_ents = defaultdict(int)
for i, doc in enumerate(corpus):
#print(i,doc)
for ent in doc.ents:
all_ents[str(ent)] += 1
#print(ent)
print('Number of distinct entities: ', len(all_ents))
# In[39]:
# labels = [x.label_ for x in corpus.ents]
# Counter(labels)
ent_label = []
ent_common = []
for i, doc in enumerate(corpus):
for ent in doc.ents:
ent_label.append(ent.label_)
ent_common.append(ent.text)
print("Unique labels for entities : ", Counter(ent_label))
print("Top 3 frequent tokens : ", Counter(ent_common).most_common(3))
# In[40]:
sentences = []
for i, doc in enumerate(corpus):
for ent in doc.sents:
sentences.append(ent)
print(sentences[0])
# In[41]:
# Most popular ents
import operator
sorted_ents = sorted(all_ents.items(), key=operator.itemgetter(1), reverse=True)
sorted_ents[:30]
# ### List of geographies and organizations being mentioned in the search terms.
# In[52]:
for i, doc in enumerate(corpus):
for ent in doc.ents:
if ent.label_ == 'ORG' or ent.label_ == 'GPE':
print(ent.text, ent.start_char, ent.end_char, ent.label_)
|
11,261 | 43ef305674c79110d5d416fa8f99f813bad3ef77 | from pyaudio import PyAudio,paInt16
import numpy as np
class AudioStream(object):
    """Full-duplex 16-bit PCM audio stream backed by PyAudio.

    read_frame() pulls one buffer from the microphone as a numpy int16
    array; write_frame() plays such an array back on the output stream.
    """

    def __init__(self, sample_rate=16000, sample_channel=1, sample_width=2, sample_time=0.025):
        # Bytes per capture window: rate * channels * bytes-per-sample * seconds.
        # NOTE(review): PyAudio's frames_per_buffer expects a *frame* count;
        # this passes the byte count (2x too large for 16-bit mono). Kept as-is
        # to preserve behaviour — confirm intent before changing.
        samplebuffer = int(round(sample_rate * sample_channel * sample_width * sample_time))
        pa = PyAudio()
        self.stream_in = pa.open(format=paInt16, channels=sample_channel, rate=sample_rate, input=True, frames_per_buffer=samplebuffer)
        self.stream_out = pa.open(format=paInt16, channels=sample_channel, rate=sample_rate, output=True)
        # Number of frames requested per read_frame() call.
        self.samplepoints = int(round(sample_rate * sample_time))

    def read_frame(self):
        """Read one buffer from the input stream as a numpy int16 array."""
        audio_string = self.stream_in.read(self.samplepoints)
        # BUGFIX/modernisation: np.fromstring is deprecated; frombuffer is the
        # replacement, and .copy() keeps the returned array writable as before.
        return np.frombuffer(audio_string, dtype=np.short).copy()

    def write_frame(self, audio_array):
        """Write a numpy int16 array to the output stream."""
        # ndarray.tostring is deprecated in favour of tobytes() (same bytes).
        self.stream_out.write(audio_array.tobytes())
|
11,262 | 4fb9163e4a6b597b97c1a7fab2bd358dbb16ba63 | from django.contrib import admin
from .models import TalkProposal, AudienceChoice
class AudienceChoiceModelAdmin(admin.ModelAdmin):
    """Admin list view for AudienceChoice: show label + creation time, sorted by label."""
    model = AudienceChoice
    list_display = ("label", "created")
    ordering = ("label", )
class TalkProposalModelAdmin(admin.ModelAdmin):
    """Admin list view for TalkProposal: all submission fields, newest first."""
    model = TalkProposal
    list_display = ("title", "created", "name", "email", "talk_type", "audience", "description")
    ordering = ("-created", )
# Hook both models into the default admin site.
admin.site.register(AudienceChoice, AudienceChoiceModelAdmin)
admin.site.register(TalkProposal, TalkProposalModelAdmin)
|
11,263 | 5539203f0756b2708cf40a0a50d86ad727797310 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import binascii
import time
import traceback
import serial
import ind903_reader.ind903_packet as ind903Packet
from reader import reader
Ind903Packet = ind903Packet.Ind903Packet;
class DeviceException(Exception):
    # Raised when the serial device cannot be opened/found.
    pass
class ReadException(Exception):
    # Reserved for packet-read failures. NOTE(review): never raised in this module.
    pass
class Ind903Reader(reader.Reader):
    """Driver for an IND903 RFID reader on a serial port.

    Packet layout: [ Head | Length | Address | Cmd | Data[0..N] | Check ].
    Call initialize() before any other method — it opens the serial port.
    """
    def __init__(self, portName, baud=115200, readerAddress=b'\x01', timeout=None):
        """
        :param portName: serial port where the device is mounted (e.g., '/dev/ttyUSB0')
        :param baud: (int) baud rate for communications over the serial port.
        The baud rates are 9600bps、19200bps、38400bps、115200bps. The default baud rate is 115200bps.
        :param readerAddress: (byte) the address of the reader (\x01 by default)
        :param timeout: (float) Read timeout value in seconds. (timeout=None wait forever, timeout=0 non-blocking mode, timeout = x (seconds)
        The structure of the Package [ Head | Length | Address | Cmd | Data[0…N] | Check ]
        """
        # NOTE(review): timeout is stored but never forwarded to serial.Serial
        # in initialize() — confirm whether reads are meant to honour it.
        self.portName = portName
        self.address = readerAddress
        self.baud = baud
        self.timeout = timeout
    def initialize(self):
        # Open the serial port; must run before write()/readCommand().
        self._serial = serial.Serial(self.portName, self.baud)
        if self._serial is None:
            # NOTE(review): serial.Serial raises on failure rather than
            # returning None, so this branch looks unreachable.
            raise DeviceException('No device found, please check the port name (i.e., python -m serial.tools.list_ports)')
    def write(self, data):
        """
        :param data: Data to send through the serial port. This should be of type bytes.
        :return: Number of bytes written.
        :rtype: int
        :raises SerialTimeoutException: In case a write timeout is configured for the port and the time is exceeded.
        """
        return self._serial.write(data)
    def readCommand(self):
        """
        Read the serial, looking for a command. If timeout is set to None, it waits until the command is received.
        :return: a Ind903Packet with the packet that read from the serial.
        :raises SerialException: when applied to a closed port.
        """
        while (True):
            time.sleep(1)  # poll the port once per second
            # At least a package of 4 bytes (minimum)
            # [ Head | Length | Address | Data[0…N] | Check ]
            if (self._serial.inWaiting()>=4):
                # Gets only the first byte of the packet (it should be HEAD)
                packet_header = self._serial.read(1)
                # NOTE(review): this parses the packet only when the first byte
                # is NOT the HEAD marker, contradicting the comment above —
                # the comparison looks inverted (probably should be ==).
                # Confirm against the IND903 protocol before changing.
                if (packet_header != Ind903Packet.PACKET_HEAD):
                    # the next one is the length of the packet
                    packet_length_bytes = self._serial.read(1)
                    packet_length = int.from_bytes(packet_length_bytes, byteorder='big')
                    if (packet_length > 0):
                        # Reassemble head + length + payload and parse it.
                        raw_packet = b"".join([packet_header, packet_length_bytes, self._serial.read(packet_length)])
                        result_packet = Ind903Packet.parsePacket(raw_packet)
                        return (result_packet)
    def doInventory(self, processCallback, antenna=b'\x01'):
        """
        Process of inventory execution. After the reader is initialized, an infinite loop is executed with these tasks:
        (1) the antenna set, (2) Inventory start requested, (3) Wait for several responses (stop when a control package is
        received), (4) Process the response
        :param processCallback(epcID, bibID): callback function to process a EPC found during inventory (bib=None).
        :param antenna: byte with the address of the antenna (\x01 by default)
        """
        # NOTE(review): `antenna` is accepted but never used —
        # generatePacketSetAntenna() is called without it.
        setAntennaPacket = Ind903Packet.generatePacketSetAntenna()
        startRealTimeInventoryPacket = Ind903Packet.generatePacketStartRealTimeInventory()
        self.write(setAntennaPacket.packet)
        print ('> ' + setAntennaPacket.toString())
        receivedPacket = self.readCommand()
        print ('< ' + receivedPacket.toString())
        if (receivedPacket.isCommand(Ind903Packet.CMD_SET_WORKING_ANTENNA)):
            # to check if is a success of failure
            pass
        # Outer loop: restart the real-time inventory indefinitely.
        while (True):
            try:
                self.write(startRealTimeInventoryPacket.packet)
                print ('> ' + startRealTimeInventoryPacket.toString())
                # While a control package (success/error) is not received
                while (True):
                    receivedPacket = self.readCommand()
                    print ('< ' + receivedPacket.toString())
                    if (receivedPacket.isCommand(Ind903Packet.CMD_NAME_REAL_TIME_INVENTORY) and receivedPacket.isEndRealTimeInventory() != b'\x00'):
                        print(' [ end of inventory command found] ')
                        break # jumps out the inventory loop
                    # Reads EPCs
                    epc = receivedPacket.getTagEPCFromInventoryData();
                    # An all-zero EPC is treated as "nothing read".
                    if (int.from_bytes(epc,byteorder='big') == 0):
                        break # jumps out the inventory loop
                    epcString = binascii.hexlify(epc).decode()
                    print (' **** [EPC found: ' + epcString + '] ****')
                    processCallback(epcString, None)
            except Exception as ex:
                # Best-effort loop: log and keep the inventory running.
                traceback.print_exc()
if __name__ == '__main__':
    # BUGFIX: the original passed 1000 positionally (it landed on
    # readerAddress, not timeout), never called initialize() (so _serial was
    # unset), called doInventory() without its required processCallback, and
    # shadowed the imported `reader` module with a local of the same name.
    rfid_reader = Ind903Reader('/dev/ttyUSB0', 115200, timeout=1000)
    rfid_reader.initialize()
    rfid_reader.doInventory(lambda epc, bib: print(epc, bib))
|
11,264 | 2e83203c4b00fb53bcfb75138064a58a0618a8fc | from os.path import dirname, join
from setuptools import setup
from lan_presenter import __version__
with open(join(dirname(__file__), 'README.md')) as readme_file:
long_description = readme_file.read()
setup(
name='lan-presenter',
version=__version__,
description='Presenter remote control over local network connection',
long_description=long_description,
long_description_content_type='text/markdown',
author='Tibor Hári',
author_email='hartib@gmail.com',
url='https://github.com/tiborhari/lan-presenter/',
packages=['lan_presenter'],
package_data={'lan_presenter': ['index.html']},
entry_points={
'console_scripts': [
'lan_presenter=lan_presenter.__main__:main',
],
},
python_requires='>=3.6, <4',
install_requires=[
'aiohttp~=3.8.1',
'keyboard~=0.13.5',
'qrcode~=7.3.1'
],
extras_require={
'dev': [
'flake8==3.7.9',
]
},
)
|
11,265 | da47ec57dcaec311cac15a2b448732442911a824 | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
van = Load(Filename='/SNS/CNCS/IPTS-21088/shared/autoreduce/van_273992.nxs', OutputWorkspace='van')
data_folder = '/SNS/CNCS/IPTS-20360/nexus/'
runs_1 = range(274470,274470+7, 1) #vanadium sample
iteration = 0
for this_run in runs_1:
filename = data_folder+'CNCS_{0}.nxs.h5'.format(this_run)
raw = Load(Filename=filename, OutputWorkspace=str(runs_1[iteration]))
iteration = iteration + 1
#Ei, _FMP, _FMI, T0 = GetEi(raw)
iteration = 0
for this_run in runs_1:
Ei, _FMP, _FMI, T0 = GetEi(str(runs_1[iteration]))
DgsReduction(
OutputWorkspace = str(runs_1[iteration]) + '_dgs',
SampleInputWorkspace = str(runs_1[iteration]),
SampleInputMonitorWorkspace = str(runs_1[iteration]),
EnergyTransferRange = [-0.15*Ei, Ei/200., 0.9*Ei],
SofPhiEIsDistribution = True, #this will keep the output data as a histogram
CorrectKiKf = True,
DetectorVanadiumInputWorkspace = van,
UseProcessedDetVan = True,
IncidentBeamNormalisation='ByCurrent',
)
ConvertToMD(InputWorkspace = str(runs_1[iteration]) + '_dgs', OutputWorkspace = str(runs_1[iteration]) + '_md', QDimensions = '|Q|', dEAnalysisMode = 'Direct')
iteration = iteration + 1
iteration = 0
for this_run in runs_1:
Ei, _FMP, _FMI, T0 = GetEi(str(runs_1[iteration]))
tth_max = 140. * np.pi / 180.
this_qmax = 4. * np.pi * np.sin(tth_max / 2.) / (9 / np.sqrt(Ei))
print(str(runs_1[iteration]), Ei, this_qmax)
BinMD(
InputWorkspace = str(runs_1[iteration]) + '_md',
OutputWorkspace = str(runs_1[iteration]) + '_slice',
AxisAligned = True,
AlignedDim0 = '|Q|, '+str(0.18+0.027*Ei)+', 2.5, 100',
AlignedDim1 = 'DeltaE, '+str(-0.1*Ei)+', '+str(0.9*Ei)+', 50',
)
BinMD(
InputWorkspace = str(runs_1[iteration]) + '_md',
OutputWorkspace = str(runs_1[iteration]) + '_lineV',
AlignedDim0 = '|Q|, '+str(0.18+0.027*Ei)+', 2.5, 1',
AlignedDim1 = 'DeltaE, '+str(-0.1*Ei)+', '+str(0.1*Ei)+', 30',
)
BinMD(
InputWorkspace = str(runs_1[iteration]) + '_md',
OutputWorkspace = str(runs_1[iteration]) + '_lineAll',
AlignedDim0 = '|Q|, '+str(0.18+0.027*Ei)+', {0}, 1'.format(this_qmax),
AlignedDim1 = 'DeltaE, '+str(-0.1*Ei)+', '+str(0.1*Ei)+', 30',
)
iteration = iteration + 1
mtd.importAll()
#make a plot of a slice using the mantid projection
fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
Ei, _FMP, _FMI, T0 = GetEi(_274476)
ax.set_title(str(Ei))
ax.plot(_274476_lineV, label = 'up to Q = 2.5 A-1')
ax.plot(_274476_lineAll, label = 'all Q')
ax.legend()
fig.show()
#make a plot of a slice using the mantid projection
fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
Ei, _FMP, _FMI, T0 = GetEi(_274475)
ax.set_title(str(Ei))
ax.plot(_274475_lineV, label = 'up to Q = 2.5 A-1')
ax.plot(_274475_lineAll, label = 'all Q')
ax.legend()
fig.show()
#make a plot of a slice using the mantid projection
fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
Ei, _FMP, _FMI, T0 = GetEi(_274474)
ax.set_title(str(Ei))
ax.plot(_274474_lineV, label = 'up to Q = 2.5 A-1')
ax.plot(_274474_lineAll, label = 'all Q')
ax.legend()
fig.show()
#make a plot of a slice using the mantid projection
fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
Ei, _FMP, _FMI, T0 = GetEi(_274473)
ax.set_title(str(Ei))
ax.plot(_274473_lineV, label = 'up to Q = 2.5 A-1')
ax.plot(_274473_lineAll, label = 'all Q')
ax.legend()
fig.show()
#make a plot of a slice using the mantid projection
fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
Ei, _FMP, _FMI, T0 = GetEi(_274472)
ax.set_title(str(Ei))
ax.plot(_274472_lineV, label = 'up to Q = 2.5 A-1')
ax.plot(_274472_lineAll, label = 'all Q')
ax.legend()
fig.show()
#plt.close('all')
E_274476_lineV, I_274476_lineV, dI_274476_lineV = mantid.plots.helperfunctions.get_md_data1d(_274476_lineV,mantid.plots.helperfunctions.get_normalization(_274476_lineV)[0])
E_274476_lineAll, I_274476_lineAll, dI_274476_lineAll = mantid.plots.helperfunctions.get_md_data1d(_274476_lineAll,mantid.plots.helperfunctions.get_normalization(_274476_lineAll)[0])
E_274475_lineV, I_274475_lineV, dI_274475_lineV = mantid.plots.helperfunctions.get_md_data1d(_274475_lineV,mantid.plots.helperfunctions.get_normalization(_274475_lineV)[0])
E_274475_lineAll, I_274475_lineAll, dI_274475_lineAll = mantid.plots.helperfunctions.get_md_data1d(_274475_lineAll,mantid.plots.helperfunctions.get_normalization(_274475_lineAll)[0])
E_274474_lineV, I_274474_lineV, dI_274474_lineV = mantid.plots.helperfunctions.get_md_data1d(_274474_lineV,mantid.plots.helperfunctions.get_normalization(_274474_lineV)[0])
E_274474_lineAll, I_274474_lineAll, dI_274474_lineAll = mantid.plots.helperfunctions.get_md_data1d(_274474_lineAll,mantid.plots.helperfunctions.get_normalization(_274474_lineAll)[0])
E_274473_lineV, I_274473_lineV, dI_274473_lineV = mantid.plots.helperfunctions.get_md_data1d(_274473_lineV,mantid.plots.helperfunctions.get_normalization(_274473_lineV)[0])
E_274473_lineAll, I_274473_lineAll, dI_274473_lineAll = mantid.plots.helperfunctions.get_md_data1d(_274473_lineAll,mantid.plots.helperfunctions.get_normalization(_274473_lineAll)[0])
E_274472_lineV, I_274472_lineV, dI_274472_lineV = mantid.plots.helperfunctions.get_md_data1d(_274472_lineV,mantid.plots.helperfunctions.get_normalization(_274472_lineV)[0])
E_274472_lineAll, I_274472_lineAll, dI_274472_lineAll = mantid.plots.helperfunctions.get_md_data1d(_274472_lineAll,mantid.plots.helperfunctions.get_normalization(_274472_lineAll)[0])
E_274471_lineV, I_274471_lineV, dI_274471_lineV = mantid.plots.helperfunctions.get_md_data1d(_274471_lineV,mantid.plots.helperfunctions.get_normalization(_274471_lineV)[0])
E_274471_lineAll, I_274471_lineAll, dI_274471_lineAll = mantid.plots.helperfunctions.get_md_data1d(_274471_lineAll,mantid.plots.helperfunctions.get_normalization(_274471_lineAll)[0])
E_274470_lineV, I_274470_lineV, dI_274470_lineV = mantid.plots.helperfunctions.get_md_data1d(_274470_lineV,mantid.plots.helperfunctions.get_normalization(_274470_lineV)[0])
E_274470_lineAll, I_274470_lineAll, dI_274470_lineAll = mantid.plots.helperfunctions.get_md_data1d(_274470_lineAll,mantid.plots.helperfunctions.get_normalization(_274470_lineAll)[0])
def gaussian(x, mu, sig, scale):
    """Unnormalised Gaussian: scale * exp(-(x - mu)^2 / (2 * sig^2))."""
    exponent = -((x - mu) ** 2) / (2.0 * sig ** 2)
    return scale * np.exp(exponent)
popt_274476_lineV, pcov_274476_lineV = curve_fit(gaussian, E_274476_lineV, I_274476_lineV, p0 = (0.00, 0.025*GetEi(_274476)[0], np.max(I_274476_lineV)) )
popt_274475_lineV, pcov_274475_lineV = curve_fit(gaussian, E_274475_lineV, I_274475_lineV, p0 = (0.00, 0.025*GetEi(_274475)[0], np.max(I_274475_lineV)) )
popt_274474_lineV, pcov_274474_lineV = curve_fit(gaussian, E_274474_lineV, I_274474_lineV, p0 = (0.00, 0.025*GetEi(_274474)[0], np.max(I_274474_lineV)) )
popt_274473_lineV, pcov_274473_lineV = curve_fit(gaussian, E_274473_lineV, I_274473_lineV, p0 = (0.00, 0.025*GetEi(_274473)[0], np.max(I_274473_lineV)) )
popt_274472_lineV, pcov_274472_lineV = curve_fit(gaussian, E_274472_lineV, I_274472_lineV, p0 = (0.00, 0.025*GetEi(_274472)[0], np.max(I_274472_lineV)) )
popt_274471_lineV, pcov_274471_lineV = curve_fit(gaussian, E_274471_lineV, I_274471_lineV, p0 = (0.00, 0.025*GetEi(_274471)[0], np.max(I_274471_lineV)) )
popt_274470_lineV, pcov_274470_lineV = curve_fit(gaussian, E_274470_lineV, I_274470_lineV, p0 = (0.00, 0.025*GetEi(_274470)[0], np.max(I_274470_lineV)) )
popt_274476_lineAll, pcov_274476_lineAll = curve_fit(gaussian, E_274476_lineAll, I_274476_lineAll, p0 = (0.00, 0.025*GetEi(_274476)[0], np.max(I_274476_lineAll)) )
popt_274475_lineAll, pcov_274475_lineAll = curve_fit(gaussian, E_274475_lineAll, I_274475_lineAll, p0 = (0.00, 0.025*GetEi(_274475)[0], np.max(I_274475_lineAll)) )
popt_274474_lineAll, pcov_274474_lineAll = curve_fit(gaussian, E_274474_lineAll, I_274474_lineAll, p0 = (0.00, 0.025*GetEi(_274474)[0], np.max(I_274474_lineAll)) )
popt_274473_lineAll, pcov_274473_lineAll = curve_fit(gaussian, E_274473_lineAll, I_274473_lineAll, p0 = (0.00, 0.025*GetEi(_274473)[0], np.max(I_274473_lineAll)) )
popt_274472_lineAll, pcov_274472_lineAll = curve_fit(gaussian, E_274472_lineAll, I_274472_lineAll, p0 = (0.00, 0.025*GetEi(_274472)[0], np.max(I_274472_lineAll)) )
popt_274471_lineAll, pcov_274471_lineAll = curve_fit(gaussian, E_274471_lineAll, I_274471_lineAll, p0 = (0.00, 0.025*GetEi(_274471)[0], np.max(I_274471_lineAll)) )
popt_274470_lineAll, pcov_274470_lineAll = curve_fit(gaussian, E_274470_lineAll, I_274470_lineAll, p0 = (0.00, 0.025*GetEi(_274470)[0], np.max(I_274470_lineAll)) )
print(GetEi(_274476)[0], popt_274476_lineV[1], popt_274476_lineAll[1])
print(GetEi(_274475)[0], popt_274475_lineV[1], popt_274475_lineAll[1])
print(GetEi(_274474)[0], popt_274474_lineV[1], popt_274474_lineAll[1])
print(GetEi(_274473)[0], popt_274473_lineV[1], popt_274473_lineAll[1])
print(GetEi(_274472)[0], popt_274472_lineV[1], popt_274472_lineAll[1])
print(GetEi(_274471)[0], popt_274471_lineV[1], popt_274471_lineAll[1])
print(GetEi(_274470)[0], popt_274470_lineV[1], popt_274470_lineAll[1])
Ei_list = [GetEi(_274476)[0], GetEi(_274475)[0], GetEi(_274474)[0], GetEi(_274473)[0], GetEi(_274472)[0], GetEi(_274471)[0], GetEi(_274470)[0]]
V_list= [popt_274476_lineV[1], popt_274475_lineV[1], popt_274474_lineV[1], popt_274473_lineV[1], popt_274472_lineV[1], popt_274471_lineV[1], popt_274470_lineV[1]]
All_list = [popt_274476_lineAll[1], popt_274475_lineAll[1], popt_274474_lineAll[1], popt_274473_lineAll[1], popt_274472_lineAll[1], popt_274471_lineAll[1], popt_274470_lineAll[1]]
plt.figure()
plt.plot(Ei_list, V_list)
plt.plot(Ei_list, All_list)
plt.show() |
11,266 | 4db7c3394f65868fdf2e90da33a24d3f12214f86 | class Book:
    def __init__(self,page):
        # page: this book's page count; the operators below combine it.
        self.page=page
    def __sub__(self, other):
        # Book - Book -> new Book with the difference of page counts.
        return Book(self.page-other.page)
    def __mul__(self, other):
        # Book * Book -> new Book with the product of page counts.
        return Book(self.page*other.page)
    def __str__(self):
        # Render as the bare page count.
        return str(self.page)
b=Book(25)
c=Book(10)
d=Book(5)
print(b-c-d)
print(b*c*d)
|
11,267 | df28db9d5077f7058cc6fd0ac76ec99838724b50 | #!/usr/bin/env python3 # noqa: INP001, EXE001
"""Tests for constants module."""
import pytest
import overreact as rx
from overreact import _constants as constants
def test_reference_raw_constants():
    """Ensure raw constants are close to values commonly used by the community.

    Reference values were taken from the ones used by Gaussian 16
    (http://gaussian.com/constants/).
    """
    assert constants.bohr / constants.angstrom == pytest.approx(0.52917721092)
    assert constants.atomic_mass == pytest.approx(1.660538921e-27)
    assert constants.h == pytest.approx(6.62606957e-34)
    # Consistency fix: actual value on the left of == (and approx on the
    # right), like every other assertion in this module.
    assert constants.N_A == pytest.approx(6.02214129e23)
    assert constants.kcal == pytest.approx(4184.0)
    assert constants.hartree == pytest.approx(4.35974434e-18)
    assert constants.c / constants.centi == pytest.approx(2.99792458e10)
    assert constants.k == pytest.approx(1.3806488e-23)
    # Molar volume of an ideal gas at 0 °C and 1 bar.
    assert rx.thermo.molar_volume(
        temperature=273.15,
        pressure=constants.bar,
    ) == pytest.approx(0.022710953)
def test_reference_conversion_factors():
    """Ensure conversion factors are close to values commonly used by the community.

    Reference values were taken from the ones used by Gaussian 16
    (http://gaussian.com/constants/).
    """
    assert constants.eV == pytest.approx(1.602176565e-19)
    # eV -> kcal/mol: the reference value is only quoted to 4 figures,
    # hence the explicit looser relative tolerance of 3e-5.
    assert constants.eV * constants.N_A / constants.kcal == pytest.approx(23.06, 3e-5)
    # Hartree -> kcal/mol and Hartree -> eV.
    assert constants.hartree * constants.N_A / constants.kcal == pytest.approx(627.5095)
    assert constants.hartree / constants.eV == pytest.approx(27.2114)
    # Hartree -> wavenumbers (cm^-1).
    assert constants.hartree * constants.centi / (
        constants.h * constants.c
    ) == pytest.approx(219474.63)
|
11,268 | 3a61a671e61858657ec7f580862f3e60718c476c | # coding=utf-8
# time:2019-06-03
import requests
from pyquery import PyQuery as pq
def user_if_exits():
    """Load previously scraped chapter URLs into the global ``url_list``.

    Reads one URL per line from ``url_list.txt`` so chapters downloaded on
    an earlier run can be skipped.  On the very first run the file does not
    exist yet; that is treated as an empty history instead of crashing with
    FileNotFoundError (the original did not handle this case).
    """
    global url_list
    url_list = []
    try:
        with open(file='url_list.txt', mode='r', encoding='utf-8') as u:
            for line in u:
                url_list.append(line.strip())
    except FileNotFoundError:
        # First run: no history file yet; start with an empty list.
        pass
def init():
    """Fetch the novel's table-of-contents page and collect its chapter links.

    Stores the PyQuery selection of anchor elements in the module-level
    global ``links`` for ``get_want`` to iterate over.  Requires network
    access to www.xbiquge.la.
    """
    global links
    url_start = 'http://www.xbiquge.la/14/14930/'
    response = requests.get(url_start)
    # The site does not reliably declare its charset; let requests guess it
    # from the payload so Chinese text decodes correctly.
    response.encoding = response.apparent_encoding
    doc = pq(response.text)
    links = doc('#list > dl > dd > a')
def get_want():
    """Download every chapter not yet present in ``url_list`` and append its
    text to the novel's output file, recording each URL for resumability.
    """
    for link in links.items():
        # Chapter hrefs are site-relative; rebuild the absolute URL.
        url = 'http://www.xbiquge.la' + link.attr.href
        if url in url_list:
            pass
        else:
            # NOTE: the URL is recorded before the download happens, so a
            # fetch that fails mid-run will be skipped on the next run.
            with open(file='url_list.txt', mode='a+', encoding='utf-8') as u:
                u.write(url)
                u.write('\n')
            response = requests.get(url)
            response.encoding = response.apparent_encoding
            doc = pq(response.text)
            title = doc('#wrapper > div.content_read > div > div.bookname > h1').text()
            content = doc('#content').text()
            with open(file='元尊.txt', mode='a+', encoding='utf-8') as f:
                f.write(title)
                f.write('\n')
                f.write(content)
                f.write('\n')
            print('%s 抓取完成' % title)
def main():
    """Run the full scrape: load history, fetch the TOC, download chapters."""
    user_if_exits()
    init()
    get_want()


main()
|
11,269 | 515decf1d33aff5ef900d4eaa9cb057c0251924d | # Generated by Django 3.1.2 on 2021-04-02 19:46
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django; field definitions must not be edited by hand —
# create a new migration for any schema change instead.
class Migration(migrations.Migration):

    # First migration for this app, so there is nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Comuna',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Persona',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=30)),
                ('rut', models.CharField(max_length=15, unique=True)),
                ('fecha_nacimiento', models.DateField()),
                # PROTECT: a Comuna referenced by any Persona cannot be deleted.
                ('comuna', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.comuna')),
            ],
        ),
    ]
|
11,270 | 333dc09dd095a7c19e4ea286f72abadea2bb7530 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("db")
from datetime import datetime, timedelta
import sqlalchemy
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from sqlalchemy.sql import func
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.pool import StaticPool
from sqlalchemy.ext.declarative import declarative_base
def get_user(session, user_id):
    """Return the User with the given primary key, or None if absent."""
    query = session.query(User).filter(User.id == user_id)
    return query.one_or_none()


def count_users(session):
    """Return the total number of User rows."""
    total = session.query(func.count(User.id)).scalar()
    return total
def get_user_by_telegram_id(session, telegram_id):
    """Return the User with the given Telegram ID, or None if not found.

    Uses ``one_or_none()`` (consistent with ``get_user``) instead of a bare
    ``except:`` around ``one()``: the bare except also silently swallowed
    unrelated failures such as MultipleResultsFound or database errors.
    """
    user = session.query(User).filter(
        User.telegram_id == telegram_id).one_or_none()
    if user is None:
        logger.info("Attempted to fetch user with telegram ID {} not found".format(telegram_id))
    return user
def get_balance(session, user=None, user_id=None):
    """Return the sum of a user's transaction values (0 when there are none).

    Accepts either a ``user`` object or a raw ``user_id``.  The original
    truthiness test (``if not user_id``) would ignore a falsy-but-valid id
    such as 0 and dereference ``user`` instead; compare against None.
    """
    if user_id is None:
        user_id = user.id
    total = session.query(func.sum(Transaction.value)).filter(
        Transaction.user_id == user_id).scalar()
    return total or 0
def add_transaction(session, user, value, payment_request):
    """Record a new Transaction for *user*; returned object is not yet committed."""
    transaction = Transaction(
        user_id=user.id, value=value, payment_request=payment_request)
    session.add(transaction)
    return transaction


def count_transactions(session):
    """Return the total number of Transaction rows."""
    return session.query(func.count(Transaction.id)).scalar()
def add_invoice(session, user, payreq_string):
    """Create and register a new Invoice for *user*; returns the Invoice."""
    new_invoice = Invoice(user_id=user.id, payment_request=payreq_string)
    session.add(new_invoice)
    return new_invoice


def get_invoice(session, payreq_string):
    """Return the User owning the invoice with this payment request, or None."""
    match = session.query(Invoice).filter(
        Invoice.payment_request == payreq_string).first()
    if match is not None:
        return match.user
def set_invoice_context(session, user, payment_request, timeout_seconds):
    """Remember *payment_request* on *user* until the timeout elapses."""
    expiry = datetime.utcnow() + timedelta(seconds=timeout_seconds)
    user.invoice_context = payment_request
    user.invoice_context_expiry = expiry


def get_invoice_context(session, user):
    """Return the stored payment request if set and unexpired, else None."""
    context = user.invoice_context
    if not context:
        return None
    if datetime.utcnow() >= user.invoice_context_expiry:
        return None
    return context


def clear_invoice_context(session, user):
    """Forget any payment request stored on *user*."""
    user.invoice_context = None
    user.invoice_context_expiry = None
Base = declarative_base()
class User(Base):
    """A Telegram user of the bot."""

    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    # Telegram's numeric user ID; one row per Telegram account.
    telegram_id = Column(Integer, unique=True)
    # Payment request currently awaiting confirmation, plus its expiry;
    # managed by set/get/clear_invoice_context above.
    invoice_context = Column(String)
    invoice_context_expiry = Column(DateTime)
    transactions = relationship("Transaction", back_populates="user")
    invoices = relationship("Invoice", back_populates="user")

    def __repr__(self):
        return "<User {}>".format(self.id)


class Transaction(Base):
    """A balance change for a user; the balance is the sum of these values."""

    __tablename__ = 'transactions'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
    # Signed amount; positive for deposits, negative for payments.
    value = Column(Integer)
    payment_request = Column(String)
    user = relationship("User", back_populates="transactions")

    def __repr__(self):
        return "<Transaction {} {}>".format(self.user_id, self.value)


class Invoice(Base):
    """A Lightning payment request issued on behalf of a user."""

    __tablename__ = 'invoices'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
    payment_request = Column(String, unique=True)
    user = relationship("User", back_populates="invoices")

    def __repr__(self):
        return "<Invoice {}...>".format(self.payment_request[:32])
def open_database(path):
    """Open (creating tables if needed) the SQLite DB at *path*.

    Returns a sessionmaker factory bound to the new engine.
    """
    from sqlalchemy import create_engine
    engine = create_engine('sqlite:///' + path, echo=False)
    Base.metadata.create_all(engine)
    return sessionmaker(bind=engine)
|
11,271 | 5f841a8f62bef0c4a3343ba6084a0561540e10a3 | from sklearn.feature_extraction.text import TfidfVectorizer
import jieba
def cut_word(text):
    """Segment Chinese text into space-separated tokens.

    e.g. "我爱北京天安门" -> "我 爱 北京 天安门"

    :param text: raw Chinese string
    :return: whitespace-joined token string
    """
    # jieba.cut yields tokens lazily; join consumes the iterator directly.
    tokens = jieba.cut(text)
    return " ".join(tokens)
def text_chinese_tfidf_demo():
    """Demonstrate TF-IDF feature extraction on Chinese text.

    :return: None
    """
    data = ["一种还是一种今天很残酷,明天更残酷,后天很美好,但绝对大部分是死在明天晚上,所以每个人不要放弃今天。",
            "我们看到的从很远星系来的光是在几百万年之前发出的,这样当我们看到宇宙时,我们是在看它的过去。",
            "如果只用一种方式了解某样事物,你就不会真正了解它。了解事物真正含义的秘密取决于如何将其与我们所了解的事物相联系。"]
    # Convert the raw sentences into whitespace-tokenized form first,
    # since TfidfVectorizer splits on whitespace.
    text_list = []
    for sent in data:
        text_list.append(cut_word(sent))
    print(text_list)
    # 1. Instantiate a transformer, filtering the listed stop words.
    # transfer = CountVectorizer(sparse=False)
    transfer = TfidfVectorizer(stop_words=['一种', '不会', '不要'])
    # 2. Fit and transform into the TF-IDF matrix.
    data = transfer.fit_transform(text_list)
    print("文本特征抽取的结果:\n", data.toarray())
    # NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
    # newer versions require get_feature_names_out() — confirm pinned version.
    print("返回特征名字:\n", transfer.get_feature_names())

    return None


text_chinese_tfidf_demo()
|
11,272 | 4a902ea36b9bc9de05cfad4d5f9eafba6e322b02 | from __future__ import annotations
import logging
import math
from collections import defaultdict, deque
from collections.abc import Iterable
from datetime import timedelta
from typing import TYPE_CHECKING, cast
import tlz as toolz
from tornado.ioloop import IOLoop
from dask.utils import parse_timedelta
from distributed.compatibility import PeriodicCallback
from distributed.metrics import time
if TYPE_CHECKING:
from distributed.scheduler import WorkerState
logger = logging.getLogger(__name__)
class AdaptiveCore:
    """
    The core logic for adaptive deployments, with none of the cluster details

    This class controls our adaptive scaling behavior. It is intended to be
    used as a super-class or mixin. It expects the following state and methods:

    **State**

    plan: set
        A set of workers that we think should exist.
        Here and below worker is just a token, often an address or name string

    requested: set
        A set of workers that the cluster class has successfully requested from
        the resource manager. We expect that resource manager to work to make
        these exist.

    observed: set
        A set of workers that have successfully checked in with the scheduler

    These sets are not necessarily equivalent. Often plan and requested will
    be very similar (requesting is usually fast) but there may be a large delay
    between requested and observed (often resource managers don't give us what
    we want).

    **Functions**

    target : -> int
        Returns the target number of workers that should exist.
        This is often obtained by querying the scheduler

    workers_to_close : int -> Set[worker]
        Given a target number of workers,
        returns a set of workers that we should close when we're scaling down

    scale_up : int -> None
        Scales the cluster up to a target number of workers, presumably
        changing at least ``plan`` and hopefully eventually also ``requested``

    scale_down : Set[worker] -> None
        Closes the provided set of workers

    Parameters
    ----------
    minimum: int
        The minimum number of allowed workers
    maximum: int | inf
        The maximum number of allowed workers
    wait_count: int
        The number of scale-down requests we should receive before actually
        scaling down
    interval: str
        The amount of time, like ``"1s"`` between checks
    """

    minimum: int
    maximum: int | float
    wait_count: int
    interval: int | float
    periodic_callback: PeriodicCallback | None
    plan: set[WorkerState]
    requested: set[WorkerState]
    observed: set[WorkerState]
    # How many consecutive times each worker has been recommended for closing.
    close_counts: defaultdict[WorkerState, int]
    _adapting: bool
    log: deque[tuple[float, dict]]

    def __init__(
        self,
        minimum: int = 0,
        maximum: int | float = math.inf,
        wait_count: int = 3,
        interval: str | int | float | timedelta = "1s",
    ):
        if not isinstance(maximum, int) and not math.isinf(maximum):
            raise TypeError(f"maximum must be int or inf; got {maximum}")

        self.minimum = minimum
        self.maximum = maximum
        self.wait_count = wait_count
        self.interval = parse_timedelta(interval, "seconds")
        self.periodic_callback = None

        def f():
            # Runs later on the IOLoop.  ``stop()`` may already have reset
            # periodic_callback to None, in which case ``.start()`` raises
            # AttributeError, which we deliberately ignore.
            try:
                self.periodic_callback.start()
            except AttributeError:
                pass

        if self.interval:
            import weakref

            # Hold only a weak reference inside the periodic callback so the
            # callback does not keep this object alive (see __del__).
            self_ref = weakref.ref(self)

            async def _adapt():
                core = self_ref()
                if core:
                    await core.adapt()

            # interval is in seconds; PeriodicCallback wants milliseconds.
            self.periodic_callback = PeriodicCallback(_adapt, self.interval * 1000)
            self.loop.add_callback(f)

        # NOTE(review): presumably guards subclasses that expose plan /
        # requested / observed as read-only properties — confirm.
        try:
            self.plan = set()
            self.requested = set()
            self.observed = set()
        except Exception:
            pass

        # internal state
        self.close_counts = defaultdict(int)
        self._adapting = False
        self.log = deque(maxlen=10000)

    def stop(self) -> None:
        """Stop periodic checking; adapt() can still be called manually."""
        logger.info("Adaptive stop")

        if self.periodic_callback:
            self.periodic_callback.stop()
            self.periodic_callback = None

    async def target(self) -> int:
        """The target number of workers that should exist"""
        raise NotImplementedError()

    async def workers_to_close(self, target: int) -> list:
        """
        Give a list of workers to close that brings us down to target workers
        """
        # TODO, improve me with something that thinks about current load
        return list(self.observed)[target:]

    async def safe_target(self) -> int:
        """Used internally, like target, but respects minimum/maximum"""
        n = await self.target()
        if n > self.maximum:
            n = cast(int, self.maximum)

        if n < self.minimum:
            n = self.minimum

        return n

    async def scale_down(self, n: int) -> None:
        raise NotImplementedError()

    async def scale_up(self, workers: Iterable) -> None:
        raise NotImplementedError()

    async def recommendations(self, target: int) -> dict:
        """
        Make scale up/down recommendations based on current state and target
        """
        plan = self.plan
        requested = self.requested
        observed = self.observed

        # At target: reset any pending close votes and do nothing.
        if target == len(plan):
            self.close_counts.clear()
            return {"status": "same"}

        if target > len(plan):
            self.close_counts.clear()
            return {"status": "up", "n": target}

        # target < len(plan)
        # Prefer closing workers that were requested but never checked in.
        not_yet_arrived = requested - observed
        to_close = set()
        if not_yet_arrived:
            to_close.update(toolz.take(len(plan) - target, not_yet_arrived))

        if target < len(plan) - len(to_close):
            L = await self.workers_to_close(target=target)
            to_close.update(L)

        # Only close a worker after it has been voted down wait_count times
        # in a row, to avoid flapping on transient load dips.
        firmly_close = set()
        for w in to_close:
            self.close_counts[w] += 1
            if self.close_counts[w] >= self.wait_count:
                firmly_close.add(w)

        for k in list(self.close_counts):  # clear out unseen keys
            if k in firmly_close or k not in to_close:
                del self.close_counts[k]

        if firmly_close:
            return {"status": "down", "workers": list(firmly_close)}
        else:
            return {"status": "same"}

    async def adapt(self) -> None:
        """
        Check the current state, make recommendations, call scale

        This is the main event of the system
        """
        if self._adapting:  # Semaphore to avoid overlapping adapt calls
            return
        self._adapting = True
        status = None

        try:
            target = await self.safe_target()
            recommendations = await self.recommendations(target)

            if recommendations["status"] != "same":
                self.log.append((time(), dict(recommendations)))

            status = recommendations.pop("status")
            if status == "same":
                return
            if status == "up":
                await self.scale_up(**recommendations)
            if status == "down":
                await self.scale_down(**recommendations)
        except OSError:
            # Errors while scaling up are fatal to the adaptive loop;
            # errors while closing workers are tolerated.
            if status != "down":
                logger.error("Adaptive stopping due to error", exc_info=True)
                self.stop()
            else:
                logger.error(
                    "Error during adaptive downscaling. Ignoring.", exc_info=True
                )
        finally:
            self._adapting = False

    def __del__(self):
        self.stop()

    @property
    def loop(self) -> IOLoop:
        return IOLoop.current()
|
11,273 | cd2b95246fb02225a9a4cf6a8f88d51086b03391 | import re
import os
import io
from datetime import datetime
from typing import Optional, List, Callable, Tuple
from pathlib import Path, PurePosixPath
from contextlib import contextmanager
from paramiko import RSAKey, DSSKey, ECDSAKey, Ed25519Key, \
Transport, ssh_exception, SSHClient, AutoAddPolicy, SSHException, Agent
from ..utils.progress import ProgressInterface
from ..utils.log import create_logger
from ..utils.config import get_config_file
from ..core.error import UserError
from .defs import ENVELOPE_DIR_FMT
logger = create_logger(__name__)
def upload(files: List[str],  # nosec (False Positive: pkey_password_encoding)
           host: str,
           username: str,
           destination_dir: str,
           two_factor_callback: Callable,
           envelope_dir: Optional[str] = None,
           pkey: Optional[str] = None,
           pkey_password: Optional[str] = None,
           pkey_password_encoding: str = "utf_8",
           jumphost: Optional[str] = None,
           progress: Optional[ProgressInterface] = None):
    """Upload *files* via SFTP into ``destination_dir/envelope_dir`` on *host*.

    Each file is first uploaded under a ``.part`` suffix, size-verified
    against the local file, then atomically renamed; a ``done.txt`` marker
    is written last.  Raises UserError on auth failure, incomplete
    transfer, or rename failure.
    """
    if envelope_dir is None:
        envelope_dir = datetime.now().strftime(ENVELOPE_DIR_FMT)
    progress_callback = progress and (lambda x, y: progress.update(x / y))
    remote_dir = PurePosixPath(destination_dir) / envelope_dir
    try:
        with sftp_connection(host=host, username=username, pkey=pkey,
                             jumphost=jumphost,
                             pkey_password=pkey_password,
                             pkey_password_encoding=pkey_password_encoding,
                             two_factor_callback=two_factor_callback) as sftp:
            sftp.mkdir(str(remote_dir))
            for tar in files:
                # BUG FIX: set_label used to be called unconditionally and
                # crashed with AttributeError when no progress object was
                # supplied (progress_callback above was already guarded).
                if progress is not None:
                    progress.set_label(tar)
                remotepath = str(remote_dir / Path(tar).name)
                remotepath_part = remotepath + ".part"
                status = sftp.put(localpath=os.path.realpath(tar),
                                  remotepath=remotepath_part,
                                  callback=progress_callback,
                                  confirm=True)
                # Verify the byte count before the final rename.
                remote_size = status.st_size
                local_size = os.path.getsize(os.path.realpath(tar))
                if local_size != remote_size:
                    raise UserError(
                        f"Incomplete file transfer: '{tar}'\n"
                        f"Remote: {remote_size}\nLocal: {local_size}")
                try:
                    sftp.posix_rename(remotepath_part, remotepath)
                except IOError as e:
                    raise UserError(format(e))
            # Empty marker file signals that the whole envelope is complete.
            with io.BytesIO(b"") as fl:
                sftp.putfo(fl=fl,
                           remotepath=str(remote_dir / "done.txt"),
                           callback=progress_callback,
                           confirm=True)
    except ssh_exception.AuthenticationException as e:
        raise UserError(format(e))
@contextmanager
def sftp_connection(host: str,  # nosec
                    username: str,
                    two_factor_callback: Callable,
                    pkey: Optional[str] = None,
                    pkey_password: Optional[str] = None,
                    pkey_password_encoding: str = "utf_8",
                    jumphost: Optional[str] = None):
    """Context manager yielding an authenticated paramiko SFTP client.

    Optionally tunnels through *jumphost*; the transport (and, if opened,
    the SFTP client) is always closed on exit.
    """
    key = pkey and private_key_from_file(
        str(Path(pkey).expanduser()), pkey_password,
        encoding=pkey_password_encoding)
    if jumphost is not None:
        pw = two_factor_callback()
        sock = proxy_socket(host, jumphost, username, pkey=key, password=pw)
    else:
        sock = host
    trans = Transport(sock)
    trans.connect()
    sftp_client = None
    try:
        auth(trans, username, key, two_factor_callback)
        try:
            sftp_client = trans.open_sftp_client()
            yield sftp_client
        finally:
            # BUG FIX: if open_sftp_client() raised, the old code hit a
            # NameError here (sftp_client never assigned), masking the
            # original error; guard against the unassigned case.
            if sftp_client is not None:
                sftp_client.close()
    finally:
        trans.close()
def auth(trans, username, key,
         two_factor_callback: Callable):
    """Authenticate *trans* as *username*, handling optional 2-factor auth.

    Tries the supplied private key first, then the SSH agent, then falls
    back to keyboard-interactive.  If the first factor leaves a 2FA-capable
    method in the server's remaining allowed types, the second-factor code
    is requested via *two_factor_callback* and sent as a password.
    """
    allowed_types = set()
    if key:
        allowed_types = trans.auth_publickey(username, key)
    else:
        try:
            allowed_types = auth_from_agent(trans, username)
        except SSHException:
            # No usable agent key: give the user time to respond interactively.
            trans.auth_timeout = 120
            trans.auth_interactive(username, auth_handler)
    two_factor = bool(set(allowed_types) & _TWO_FACTOR_TYPES)
    if two_factor:
        f2_code = two_factor_callback()
        trans.auth_password(username, f2_code)
def proxy_socket(host, jumphost, username, **kwargs):
    """Open a direct-tcpip channel to *host* through *jumphost*.

    The returned channel can be used as the socket for a new Transport.
    NOTE(review): AutoAddPolicy silently trusts unknown jumphost keys.
    """
    tunnel = SSHClient()
    tunnel.set_missing_host_key_policy(AutoAddPolicy())
    tunnel.connect(jumphost, username=username, allow_agent=True,
                   **kwargs)
    return tunnel.get_transport().open_channel("direct-tcpip",
                                               parse_host(host),
                                               parse_host(jumphost))
def parse_host(host: str) -> Tuple[str, int]:
    """Split ``"hostname:port"`` into a ``(hostname, port)`` tuple.

    Anything that is not exactly ``name:integer`` (no colon, several
    colons, non-numeric port) falls back to the default SSH port 22 with
    the original string as the hostname.
    """
    try:
        hostname, port_text = host.split(":")
        return hostname, int(port_text)
    except ValueError:
        return host, 22
def auth_handler(_title, _instructions, prompt_list):
    """Keyboard-interactive handler: surface any auth URL, answer blanks.

    If the first prompt contains an http(s) URL (e.g. a web-based 2FA
    flow), log it for the user; every prompt is answered with an empty
    string.
    """
    if prompt_list:
        urls = re.findall(r'(https?://\S+)', prompt_list[0][0])
        if urls:
            logger.info("Authenticate at: %s", urls[0])
    return ['' for _ in prompt_list]


def is_ascii(s):
    """Return True when every character of *s* is 7-bit ASCII."""
    return not any(ord(char) > 127 for char in s)
def private_key_from_file(path, password, encoding="utf_8"):
    """Load a private key from *path*, trying all supported key types.

    Attempts RSA, DSS, ECDSA and Ed25519 in turn; the first class that can
    parse the file wins.  If none can and the password contains non-ASCII
    characters, a hint about password-encoding mismatches is prepended to
    the collected errors before raising UserError.
    """
    errors = []
    pass_bytes = None if password is None else password.encode(encoding)
    for pkey_class in (RSAKey, DSSKey, ECDSAKey, Ed25519Key):
        try:
            return pkey_class.from_private_key_file(path, pass_bytes)
        except (SSHException, ValueError) as e:
            errors.append(e)
    if password is not None and not is_ascii(password):
        # Non-ASCII passwords commonly fail because the key was created
        # with a different byte encoding of the same characters.
        errors = [(
            "Your ssh secret key's password seems to contain "
            "some non-ascii characters.\n"
            "Either change your password ("
            "`ssh-keygen -f <path to your private key> -p`)"
            " or make sure the config option "
            "`ssh_password_encoding` is set to the same "
            "encoding, your key has been created with."
            "Your config file is here:\n" +
            get_config_file() +
            "\nThe encoding is usually `utf_8` on linux "
            "/ mac and `cp437` on windows for keys generated "
            "with ssh-keygen")] + errors
    raise UserError("Could not load private key:\n" +
                    "\n".join(format(e) for e in errors))
def auth_from_agent(transport, username):
    """Try every key offered by the local SSH agent until one authenticates.

    Returns the server's list of remaining allowed auth methods (used by
    ``auth`` to detect a pending second factor).  Raises SSHException when
    no agent key is accepted.
    """
    agent = Agent()
    try:
        for key in agent.get_keys():
            try:
                logger.debug("Trying SSH agent key %s", key.get_fingerprint())
                # for 2-factor auth a successfully auth'd key password
                # will return an allowed 2fac auth method
                return transport.auth_publickey(username, key)
            except SSHException:
                pass
    finally:
        agent.close()
    raise SSHException("Could not load key from ssh agent")
_TWO_FACTOR_TYPES = {"keyboard-interactive", "password"}
|
11,274 | 660a2d39d264b675f288c1395ce27c53647e1831 | from functools import wraps
def debug(func):
    """Decorator that prints the wrapped callable's qualified name per call."""
    @wraps(func)
    def inner(*args, **kwargs):
        print(func.__qualname__)
        result = func(*args, **kwargs)
        return result
    return inner
# Simple binary operations, each traced by the @debug decorator above.
@debug
def add(x, y):
    return x + y


@debug
def subtract(x, y):
    return x - y


@debug
def multiply(x, y):
    return x * y


@debug
def divide(x, y):
    # True division: returns a float, raises ZeroDivisionError for y == 0.
    return x / y


@debug
def power(x, y):
    return x ** y
# Exercise each traced operation once.
print(add(10, 90))
print(subtract(10, 90))
print(multiply(10, 90))
print(divide(10, 90))
print(power(10, 90))
# help() writes directly to stdout and returns None; wrapping it in
# print() (as the original did) emitted a stray "None" line after the text.
help(debug)
|
11,275 | 905a1dc56df7fb6789646dd8aaf784127b96d15e | #_training_code
import pandas as pd
import numpy as np
from tensorflow.contrib.layers import flatten
from keras.layers.pooling import MaxPooling2D
from keras.models import Sequential, Model, load_model
from keras.callbacks import EarlyStopping, Callback
from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU,GlobalAveragePooling2D, regularizers
from keras.layers.convolutional import Convolution2D, Cropping2D, Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import adam
from sklearn.utils import shuffle
from keras.utils import np_utils
import time, cv2, glob
global inputShape,size
def kerasModel4():
    """Build a small CNN for 2-class grayscale image classification.

    NOTE(review): relies on the module-level globals sizeW/sizeH for the
    input size; the 2-unit softmax output pairs with the
    categorical_crossentropy loss used at compile time below.
    """
    model = Sequential()
    # Aggressive 4x4 stride in the first conv keeps the model tiny.
    model.add(Conv2D(16, (8, 8), strides=(4, 4), padding='valid', input_shape=(sizeW, sizeH, 1)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (5, 5), padding="same"))
    model.add(Activation('relu'))
    # Global pooling replaces Flatten, so the dense head stays small.
    model.add(GlobalAveragePooling2D())
    # model.add(Dropout(.2))
    # model.add(Activation('relu'))
    # model.add(Dense(1024))
    # model.add(Dropout(.5))
    model.add(Dense(512))
    model.add(Dropout(.1))
    model.add(Activation('relu'))
    # model.add(Dense(256))
    # model.add(Dropout(.5))
    # model.add(Activation('relu'))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    return model
# All images are resized to this width x height before training.
sizeW = 80
sizeH = 110

## load Training data : female  (label 0 — see y_train1 below)
potholeTrainImages = glob.glob("C:/Users/anant/Documents/GitHub/gender-classification/dataset/Training/female/*.jpg")
# potholeTrainImages.extend(glob.glob("E:/Major 7sem/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.jpeg"))
# potholeTrainImages.extend(glob.glob("E:/Major 7sem/pothole-and-plain-rode-images/My Dataset/train/Pothole/*.png"))
# cv2.imread(..., 0) loads grayscale, matching the 1-channel model input.
train1 = [cv2.imread(img, 0) for img in potholeTrainImages]
for i in range(0, len(train1)):
    train1[i] = cv2.resize(train1[i], (sizeW, sizeH))
temp1 = np.asarray(train1)

# ## load Training data : male  (label 1)
nonPotholeTrainImages = glob.glob("C:/Users/anant/Documents/GitHub/gender-classification/dataset/Training/male/*.jpg")
train2 = [cv2.imread(img, 0) for img in nonPotholeTrainImages]
for i in range(0, len(train2)):
    train2[i] = cv2.resize(train2[i], (sizeW, sizeH))
temp2 = np.asarray(train2)

## load Testing data : females
potholeTestImages = glob.glob("C:/Users/anant/Documents/GitHub/gender-classification/dataset/Validation/female/*.jpg")
test1 = [cv2.imread(img, 0) for img in potholeTestImages]
for i in range(0, len(test1)):
    test1[i] = cv2.resize(test1[i], (sizeW, sizeH))
temp3 = np.asarray(test1)

## load Testing data : male
nonPotholeTestImages = glob.glob("C:/Users/anant/Documents/GitHub/gender-classification/dataset/Validation/male/*.jpg")
test2 = [cv2.imread(img, 0) for img in nonPotholeTestImages]
for i in range(0, len(test2)):
    test2[i] = cv2.resize(test2[i], (sizeW, sizeH))
temp4 = np.asarray(test2)

# Concatenate female-then-male so labels below line up by position.
X_train = []
X_train.extend(temp1)
X_train.extend(temp2)
X_train = np.asarray(X_train)

X_test = []
X_test.extend(temp3)
X_test.extend(temp4)
X_test = np.asarray(X_test)

# Labels: 0 = female (first partition), 1 = male (second partition).
y_train1 = np.zeros([temp1.shape[0]], dtype=int)
y_train2 = np.ones([temp2.shape[0]], dtype=int)
y_test1 = np.zeros([temp3.shape[0]], dtype=int)
y_test2 = np.ones([temp4.shape[0]], dtype=int)
print(y_train1[0])
print(y_train2[0])
print(y_test1[0])
print(y_test2[0])

y_train = []
y_train.extend(y_train1)
y_train.extend(y_train2)
y_train = np.asarray(y_train)

y_test = []
y_test.extend(y_test1)
y_test.extend(y_test2)
y_test = np.asarray(y_test)

# Shuffle images together with their labels.
X_train, y_train = shuffle(X_train, y_train)
X_test, y_test = shuffle(X_test, y_test)

# Add the trailing channel axis and one-hot encode the labels.
X_train = X_train.reshape(X_train.shape[0], sizeW, sizeH, 1)
X_test = X_test.reshape(X_test.shape[0], sizeW, sizeH, 1)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print("train shape X", X_train.shape)
print("train shape y", y_train.shape)

inputShape = (sizeW, sizeH, 1)
model = kerasModel4()
# Scale pixel values into [0, 1].
X_train = X_train / 255
X_test = X_test / 255
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_train, y_train, epochs=15, validation_split=0.1)

print("")
metricsTrain = model.evaluate(X_train, y_train)
print("Training Accuracy: ", metricsTrain[1] * 100, "%")

print("")
metricsTest = model.evaluate(X_test, y_test)
print("Testing Accuracy: ", metricsTest[1] * 100, "%")

# print("Saving model weights and configuration file")
model.save('model.h5')
print("Saved model to disk") |
11,276 | 84ea489cdf4e438082226f2b97aaf4fdfaeeabcb | class Heap:
def __init__(self):
self.storage = []
def insert(self, value):
index = len(self.storage)
self.storage.append(value)
self._bubble_up(index)
def delete(self):
deleted = self.storage.pop(0)
if self.storage:
temp = self.storage.pop()
self.storage.insert(0, temp)
self._sift_down(0)
return deleted
def get_max(self):
return self.storage[0]
def get_size(self):
return len(self.storage)
def _bubble_up(self, index):
while index > 0:
parent = (index - 1) // 2
if self.storage[index] > self.storage[parent]:
self.storage[index], self.storage[parent] = self.storage[parent], self.storage[index]
index = parent
else:
break
def _sift_down(self, index):
n = self.get_size()
while index < n - 1:
left = 2*index + 1
right = 2*index + 2
if left < n and right < n:
if self.storage[left] >= self.storage[right]:
child = left
else:
child = right
elif left < n:
child = left
else:
break
if self.storage[child] > self.storage[index]:
self.storage[child], self.storage[index] = self.storage[index], self.storage[child]
index = child
else:
break
|
11,277 | 3eea637fb55e6cd2269eb3e57ae4de059d9bc86c | import itertools
from utils import load_embedding, Results, Metrics, save_model, load_model, load_directional_supervision, load_element_pairs, load_pair_features
from model import PointNet, PairNet, TripletNet, PairNetWithPairFeatures, TripletNetWithPairFeatures
from trainer import train_triplet_epoch
from datasets import Triplets, DirectionalTriplets
from options import read_options
import numpy as np
import torch
import random
from evaluator import evaluate_synonym_ranking_prediction
from losses import TripletLoss
from tensorboardX import SummaryWriter
from test_semantic_classes import obtain_semantic_classes
from predictor import pair_prediction, pair_prediction_with_pair_feature
if __name__ == '__main__':
    # Prediction entry point: restore a trained Pair/Triplet network and
    # score candidate term pairs, writing "term1<TAB>term2<TAB>score" lines.
    args = read_options()

    # Add TensorBoard Writer
    writer = SummaryWriter(log_dir=None, comment=args.comment)

    # Initialize random seed (all libraries, for reproducibility)
    random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)
    if args.device_id != -1:
        torch.cuda.manual_seed_all(args.random_seed)
        torch.backends.cudnn.deterministic = True
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
    else:
        torch.set_default_tensor_type(torch.FloatTensor)
    torch.set_printoptions(precision=9)
    torch.set_num_threads(1)

    # Load command line options
    options = vars(args)
    writer.add_text('Text', 'Hyper-parameters: {}'.format(options), 0)

    # Load supervision pairs and convert to dict
    f_supervision = options["supervision_file"]
    train_hyper2hypo, train_hypo2hyper = load_directional_supervision(f_supervision)

    # Load embedding files and word <-> index map
    f_embed = options["embed_file"]
    embedding, index2word, word2index, vocab_size, embed_dim = load_embedding(f_embed)
    print("=== Finish loading embedding ===")
    options["embedding"] = embedding
    options["index2word"] = index2word
    options["word2index"] = word2index
    options["vocabSize"] = vocab_size
    options["embedSize"] = embed_dim

    # Load pair features (precomputed per-edge features keyed by term pair)
    if options["use_pair_feature"]:
        print("!!! Using pair features")
        f_pair_feature_key = options["pair_feature_prefix"] + "edge.keys.tsv"
        f_pair_feature_value = options["pair_feature_prefix"] + "edge.values.npy"
        pair_features = load_pair_features(f_pair_feature_key, f_pair_feature_value)

    # Construct testing set
    f_pred = options["pred_pairs_file_in"]
    print("!!! Loading term pairs for prediction from: {}".format(f_pred))
    pred_pairs = load_element_pairs(f_pred, with_label=False)
    print("Number of term pairs for prediction: {}".format(len(pred_pairs)))

    # Construct model skeleton; architecture must match the checkpoint below.
    cuda = options["device_id"] != -1
    if options["use_pair_feature"]:
        point_net = PointNet(options)
        pair_net = PairNetWithPairFeatures(options, point_net)
        model = TripletNetWithPairFeatures(pair_net)
    else:
        point_net = PointNet(options)
        pair_net = PairNet(options, point_net)
        model = TripletNet(pair_net)
    if cuda:
        model.cuda()

    # Load pre-trained model weights
    model_path = options["snapshot"]
    model.load_state_dict(torch.load(model_path))
    print(model)

    # Conduct pair prediction and dump results to file
    if options["use_pair_feature"]:
        prediction_scores = pair_prediction_with_pair_feature(model.pair_net, pred_pairs, pair_features, cuda, options,
                                                              batch_size=10000)
    else:
        prediction_scores = pair_prediction(model.pair_net, pred_pairs, cuda, options, batch_size=10000)
    f_res = options["pred_pairs_file_out"]
    print("!!! Saving prediction results to: {}".format(f_res))
    with open(f_res, "w") as fout:
        for term_pair, score in zip(pred_pairs, prediction_scores):
            # NOTE(review): score is negated on output — presumably the model
            # emits a distance where smaller means more related; confirm
            # against predictor.py before relying on the sign.
            fout.write("{}\t{}\t{}\n".format(term_pair[0], term_pair[1], -1.0 * score))
|
11,278 | 643808c52ee34983b54abc4ee0fde4658a65ec73 | credit = ' '
# Running total of all weighted scores entered at the end of the script.
q = 0
# Grade-letter point values: s=10, a=9, b=8, c=7, d=6, e=5; 'f' is a fail.
g = 10
h = 9
i = 8
j = 7
k = 6
l = 5
m = 'fail'
def jac(o):
    """Return weighted grade points (credit weight 3) for a theory subject.

    Grade letters map to points: s=10, a=9, b=8, c=7, d=6, e=5; 'f' is a
    fail.  The original ignored its argument and read the module global
    ``sub`` instead — that only worked because every caller passed that
    same global; using the parameter keeps the behavior while making the
    function self-contained.  Unknown grades print 'no answer' and return
    None, as before.
    """
    points = {'s': 10, 'a': 9, 'b': 8, 'c': 7, 'd': 6, 'e': 5}
    if o == 'f':
        return 'fail'
    if o in points:
        return points[o] * 3
    print('no answer')
def lab(p):
    """Return weighted grade points (credit weight 2) for a lab.

    Same grade mapping as ``jac`` (s=10 ... e=5, 'f' fails); the original
    read the module global ``lab1`` and ignored its parameter, which only
    worked because callers always passed that same global.  Unknown grades
    print 'no answer' and return None, as before.
    """
    points = {'s': 10, 'a': 9, 'b': 8, 'c': 7, 'd': 6, 'e': 5}
    if p == 'f':
        return 'fail'
    if p in points:
        return points[p] * 2
    print('no answer')
def maths(p):
    """Return weighted grade points (credit weight 4) for Maths.

    Same grade mapping as ``jac`` (s=10 ... e=5, 'f' fails); the original
    read the module global ``maths1`` and ignored its parameter.  Unknown
    grades print 'no answer' and return None, as before.
    """
    points = {'s': 10, 'a': 9, 'b': 8, 'c': 7, 'd': 6, 'e': 5}
    if p == 'f':
        return 'fail'
    if p in points:
        return points[p] * 4
    print('no answer')
def sem(p):
    """Return weighted grade points (credit weight 1) for the seminar.

    Same grade mapping as ``jac`` (s=10 ... e=5, 'f' fails); the original
    read the module global ``sem1`` and ignored its parameter.  Unknown
    grades print 'no answer' and return None, as before.
    """
    points = {'s': 10, 'a': 9, 'b': 8, 'c': 7, 'd': 6, 'e': 5}
    if p == 'f':
        return 'fail'
    if p in points:
        return points[p] * 1
    print('no answer')
# Interactive GPA calculator: collect grades per subject/lab, then have the
# user re-enter the weighted scores to total them.
ask1 = int(input('Enter how subjects and labs do you have :'))
ask = int(input('How many subjects do you have in subjects except "Maths" : '))
for sub in range(0, ask):
    # NOTE: the loop variable is overwritten with the grade letter on purpose;
    # jac() computes 3 credits worth of points from it.
    sub = (input('Enter how much you have scored :'))
    print('--------------------------------------')
    print('You scored in this subject is :', jac(sub))
    print('--------------------------------------')
maths1 = (input('How much you scored in math : '))
print('----------------------------------------')
print('You scored in this subject is : ', maths(maths1))
print('---------------------------------------')
print('\n')
the = int(input('How many labs do you have except seminar : '))
for lab1 in range(0, the):
    lab1 = input('Enter how much you have scored :')
    print('---------------------------------------')
    print('You scored in this subject is : ', lab(lab1))
    print('---------------------------------------')
sem1 = (input('How much you scored in Technical seminar : '))
print('----------------------------------------')
print('You scored in this subject is : ', sem(sem1))
print('---------------------------------------')
print('\n')
print('ADD ALL THE SCORES ')
for s in range(0, ask1):
    s = int(input('Enter a number : '))
    q += s
# NOTE(review): 26 is a hard-coded total credit count — presumably specific
# to this curriculum; confirm before reusing.
credit = q / 26
print('-----------------------')
print('Your GPA is : {credit:1.2f}'.format(credit=credit))
print('-----------------------')
|
11,279 | 1db9d3b5d0bac9bb8a2d0068dcb2e92ec48513e4 | import asyncio
import aiohttp
import mock
import pytest
from bravado_asyncio.definitions import AsyncioResponse
from bravado_asyncio.response_adapter import AioHTTPResponseAdapter
from bravado_asyncio.response_adapter import AsyncioHTTPResponseAdapter
# Parametrized over both adapter flavours so every test runs against each.
# NOTE(review): ``mock_loop`` is not defined in this file — presumably a
# conftest.py fixture; confirm.
@pytest.fixture(params=(AioHTTPResponseAdapter, AsyncioHTTPResponseAdapter))
def response_adapter(request, mock_loop):
    return request.param(mock_loop)


@pytest.fixture
def mock_incoming_response():
    # Spec'd against aiohttp.ClientResponse so unknown attributes raise.
    return mock.Mock(name="incoming response", spec=aiohttp.ClientResponse)


@pytest.fixture
def asyncio_response(mock_incoming_response):
    # Wraps the mocked response with the remaining-timeout bookkeeping.
    return AsyncioResponse(response=mock_incoming_response, remaining_timeout=5)
@pytest.fixture
def mock_run_coroutine_threadsafe():
    """Patch asyncio.run_coroutine_threadsafe as imported by the response adapter."""
    with mock.patch(
        "bravado_asyncio.response_adapter.asyncio.run_coroutine_threadsafe"
    ) as _mock:
        yield _mock
@pytest.fixture
def mock_wait_for_result():
    """Sentinel object that the patched asyncio.wait_for resolves to."""
    return mock.Mock(name="mock_wait_for result")
@pytest.fixture
def mock_wait_for(mock_wait_for_result):
    """Patch asyncio.wait_for; awaiting its return value yields mock_wait_for_result."""
    # NOTE(review): asyncio.coroutine was removed in Python 3.11 -- this fixture
    # needs a plain "async def" replacement to run on modern interpreters.
    with mock.patch("bravado_asyncio.response_adapter.asyncio.wait_for") as _mock:
        _mock.return_value = asyncio.coroutine(lambda: mock_wait_for_result)()
        yield _mock
def test_initialization(response_adapter, mock_loop, asyncio_response):
    """Calling the adapter binds response/loop/timeout and returns the adapter itself."""
    called_response_adapter = response_adapter(asyncio_response)
    # __call__ is expected to return self (fluent initialisation).
    assert called_response_adapter is response_adapter
    assert response_adapter._loop is mock_loop
    assert response_adapter._delegate is asyncio_response.response
    assert response_adapter._remaining_timeout is asyncio_response.remaining_timeout
def test_properties(response_adapter, asyncio_response, mock_incoming_response):
    """status_code/reason/headers pass straight through to the delegate response."""
    response_adapter(asyncio_response)
    assert response_adapter.status_code is mock_incoming_response.status
    assert response_adapter.reason is mock_incoming_response.reason
    assert response_adapter.headers is mock_incoming_response.headers
def test_thread_methods(
    mock_run_coroutine_threadsafe, asyncio_response, mock_incoming_response, mock_loop
):
    """AioHTTPResponseAdapter runs text/read/json through run_coroutine_threadsafe

    and blocks on .result(remaining_timeout) for each of the three accessors.
    """
    response_adapter = AioHTTPResponseAdapter(mock_loop)(asyncio_response)
    # .text property
    assert (
        response_adapter.text
        is mock_run_coroutine_threadsafe.return_value.result.return_value
    )
    mock_run_coroutine_threadsafe.assert_called_once_with(
        mock_incoming_response.text.return_value, mock_loop
    )
    mock_run_coroutine_threadsafe.return_value.result.assert_called_once_with(
        asyncio_response.remaining_timeout
    )
    # .raw_bytes property
    assert (
        response_adapter.raw_bytes
        is mock_run_coroutine_threadsafe.return_value.result.return_value
    )
    mock_run_coroutine_threadsafe.assert_called_with(
        mock_incoming_response.read.return_value, mock_loop
    )
    mock_run_coroutine_threadsafe.return_value.result.assert_called_with(
        asyncio_response.remaining_timeout
    )
    # .json() method
    assert (
        response_adapter.json()
        is mock_run_coroutine_threadsafe.return_value.result.return_value
    )
    mock_run_coroutine_threadsafe.assert_called_with(
        mock_incoming_response.json.return_value, mock_loop
    )
    mock_run_coroutine_threadsafe.return_value.result.assert_called_with(
        asyncio_response.remaining_timeout
    )
    # One dispatch per accessor: text, raw_bytes, json.
    assert mock_run_coroutine_threadsafe.call_count == 3
    assert mock_run_coroutine_threadsafe.return_value.result.call_count == 3
def test_asyncio_text(
    mock_wait_for,
    mock_wait_for_result,
    asyncio_response,
    mock_incoming_response,
    event_loop,
):
    """AsyncioHTTPResponseAdapter.text awaits wait_for with the remaining timeout."""
    response_adapter = AsyncioHTTPResponseAdapter(event_loop)(asyncio_response)
    result = event_loop.run_until_complete(response_adapter.text)
    assert result is mock_wait_for_result
    mock_wait_for.assert_called_once_with(
        mock_incoming_response.text.return_value,
        timeout=asyncio_response.remaining_timeout,
        loop=event_loop,
    )
def test_asyncio_raw_bytes(
    mock_wait_for,
    mock_wait_for_result,
    asyncio_response,
    mock_incoming_response,
    event_loop,
):
    """AsyncioHTTPResponseAdapter.raw_bytes awaits wait_for on the delegate's read()."""
    response_adapter = AsyncioHTTPResponseAdapter(event_loop)(asyncio_response)
    result = event_loop.run_until_complete(response_adapter.raw_bytes)
    assert result is mock_wait_for_result
    mock_wait_for.assert_called_once_with(
        mock_incoming_response.read.return_value,
        timeout=asyncio_response.remaining_timeout,
        loop=event_loop,
    )
def test_asyncio_json(
    mock_wait_for,
    mock_wait_for_result,
    asyncio_response,
    mock_incoming_response,
    event_loop,
):
    """AsyncioHTTPResponseAdapter.json() awaits wait_for on the delegate's json()."""
    response_adapter = AsyncioHTTPResponseAdapter(event_loop)(asyncio_response)
    result = event_loop.run_until_complete(response_adapter.json())
    assert result is mock_wait_for_result
    mock_wait_for.assert_called_once_with(
        mock_incoming_response.json.return_value,
        timeout=asyncio_response.remaining_timeout,
        loop=event_loop,
    )
|
11,280 | f3fd97abe38d17c525b4d59967ab73ccb1944e91 | """
Client is resilient to enum and oneOf deserialization errors
"""
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v1.api.synthetics_api import SyntheticsApi
# List all Synthetics tests using default configuration (credentials are read
# from the environment by the datadog_api_client Configuration object).
configuration = Configuration()
with ApiClient(configuration) as api_client:
    api_instance = SyntheticsApi(api_client)
    response = api_instance.list_tests()
    print(response)
|
11,281 | d53397e386c4f16dfd9ef2474be7178bbdcd2be8 | # coding:utf-8
# This file is part of Alkemiems.
#
# Alkemiems is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
__author__ = 'Guanjie Wang'
__version__ = 1.0
__maintainer__ = 'Guanjie Wang'
__email__ = "gjwang@buaa.edu.cn"
__date__ = '2021/06/10 16:39:37'
from ztml.tools import get_random_groupby_index, norepeat_randint
from ztml.data.feature_normalize_data import get_normalize_data
from ztml.rdata.clean_csv_data import get_clean_data, read_data
from ztml.rdata.rename_cloumn import get_rename_column_data
from copy import deepcopy
def read_rename_clean_datafile():
    """Load temp_clean_data.csv, normalize it and write normalized_data.csv.

    Relies on the project's read_data/get_normalize_data helpers; both files
    are read/written in the current working directory.
    """
    tmp_file = 'temp_clean_data.csv'
    clean_train_data = read_data(tmp_file)
    # os.remove(tmp_file)
    # Get the normalized data and drop the correlated columns.
    normalized_data, _ = get_normalize_data(clean_train_data)
    normalized_data.to_csv('normalized_data.csv', index=False)
def __rm_point_columns(data, index=None):
    """Return *data* (a DataFrame) without the columns named in *index*.

    :param data: source DataFrame
    :param index: list of column names to drop; defaults to ["Index"]
    :raises ValueError: if a name in *index* is not a column of *data*
    """
    if index is None:
        index = ["Index"]
    # Index.get_values() was removed in pandas 1.0; tolist() is the
    # long-supported equivalent and works on every pandas version.
    ll = data.columns.tolist()
    for i in index:
        ll.remove(i)
    return data[ll]
def run_compounds_split(fn, head_dir, is_nop_to_01=True):
    """
    Split the data set into train/test groups by compound and write them to CSV.

    :param fn: name of the raw data file
    :param head_dir: output directory (created if missing)
    :param is_nop_to_01: whether doping type is treated as classification:
                         convert nop to 0/1 (0: greater than 0, 1: less than 0)
    :return: None
    """
    # Rename every column to the project's short names.
    import os
    if not os.path.isdir(head_dir):
        os.mkdir(head_dir)
    origin_data = read_data(fn)
    rename_data = get_rename_column_data(origin_data)
    # train_data, test_data = get_train_test_index(rename_data, column_index=['N_atom_unit'], ratio=0.575, to_data=True)
    # Add the 12 temperature parameters and the Kpa/N/ZT values for every
    # temperature, expanding the 70 rows to 840 rows.
    # is_nop_to_01
    clean_train_data = get_clean_data(rename_data, is_nop_to_01=is_nop_to_01)
    tmp_file = os.path.join(head_dir, 'temp_clean_data.csv')
    clean_train_data.to_csv(tmp_file, index=False)
    # Round-trip through CSV so downstream dtypes match a plain file load.
    clean_train_data = read_data(tmp_file)
    # os.remove(tmp_file)
    # Get the normalized data and drop the correlated columns.
    # normalized_data, _ = get_normalize_data(clean_train_data, gogal_column=['Index',
    #     'NA', 'NB', 'NC', 'NT', 'VA', 'VB', 'VC',
    #     'RA', 'RB', 'RC', 'ZA', 'ZB', 'ZC', 'rA',
    #     'rB', 'rC', 'rCA', 'rCB', 'rCC', 'PA',
    #     'PB', 'PC', 'PAv', 'PSd', 'a_b', 'c',
    #     'MAv', 'LAC', 'LBC', 'LAv',
    #     'Temperature'])
    normalized_data, _ = get_normalize_data(clean_train_data)
    __rm_point_columns(normalized_data).to_csv(os.path.join(head_dir, 'normalized_data.csv'), index=False)
    # normalized_data = read_data('normalized_data.csv')
    # Split by compound into 30 training and 40 validation compounds.
    # These are the actual Index values from the Excel sheet.
    # valid_index = [1, 2, 3, 5, 11, 14, 20, 22, 27, 29, 30, 32, 35, 37, 38, 41, 43, 44, 45, 49,
    #                50, 51, 52, 54, 56, 58, 59, 64, 65, 67]
    check_index = [12, 18, 21, 26, 34, 36, 39, 53, 60, 69]
    train_index = [4, 6, 7, 8, 9, 10, 13, 15, 16, 17, 19, 23, 24, 25, 27, 28, 33, 40, 42, 46, 47, 48,
                   55, 57, 61, 62, 63, 66, 68, 70]
    use2train_data, use2valid_data = get_random_groupby_index(normalized_data, column_index=['Index'],
                                                             ratio=3/7, to_data=True, train_index=train_index)
    print(use2train_data.shape, use2valid_data.shape)
    # Further split the training set into train and test subsets.
    train_train_data, train_test_data = get_random_groupby_index(use2train_data, column_index=['Index'],
                                                                 ratio=0.7, to_data=True)
    print(train_train_data.shape, train_test_data.shape)
    valid_10data, valid_30data = get_random_groupby_index(use2valid_data, column_index=['Index'],
                                                          ratio=0.25, to_data=True, train_index=check_index)
    print(valid_10data.shape, valid_30data.shape)
    print("Final features: %d" % (train_train_data.shape[1] - 3))
    # Write the output data sets.
    # __rm_point_columns(normalized_data).to_csv(os.path.join(head_dir, 'normalized_data.csv'), index=False)
    __rm_point_columns(train_train_data).to_csv(os.path.join(head_dir, 'train_30_train.csv'), index=False)
    __rm_point_columns(train_test_data).to_csv(os.path.join(head_dir, 'train_30_test.csv'), index=False)
    __rm_point_columns(valid_10data).to_csv(os.path.join(head_dir, '10_for_check.csv'), index=False)
    __rm_point_columns(valid_30data).to_csv(os.path.join(head_dir, '30_for_predict.csv'), index=False)
if __name__ == '__main__':
    # now_head_dir = "2_rmcoref_data"  # keeps numeric nop and zt values
    # now_head_dir = "all_data"  # nop converted to 0/1, no correlation-based feature pruning
    now_head_dir = "all_rmcoref_data"  # nop converted to 0/1; features with correlation > 0.9 removed
    file_name = r'simple_dataset.csv'
    now_is_nop_to_01 = True
    run_compounds_split(file_name, head_dir=now_head_dir, is_nop_to_01=now_is_nop_to_01)
|
11,282 | 06cfd0a3efb236ab9ca8e7f42d715e8118d11a7a | import pandas as pd
import numpy as np
from mining_tools import my_tools
from testimbalance import abalone_input
from imbalance_tools import handle_imbalance
from create_dendogram import my_dendogram
#Check the class abalone_input which is used to take input from the data-set manually
# Experiment driver: load the abalone and HTRU_2 (pulsar) data sets, rebalance
# their classes (oversample/undersample and SMOTE), and run the my_tools
# clustering/report pipeline on the raw and the rebalanced pulsar data.
ab=abalone_input()
x=ab.encoded_Data()
x=pd.DataFrame(data=x[0:,0:])
y=x.iloc[:,-1].values
# np.float was a deprecated alias of the builtin float and was removed in
# NumPy 1.24; the builtin is equivalent on every NumPy version.
y=y.astype(float)
#Dealing with class imbalance
#Balanced sampling technique as described in the project report
hi=handle_imbalance()
abalone_balanced=hi.simple_oversample_undersample(x.values.astype(float),y)
abalone_smote=hi.modified_smote(x.values.astype(float),y,2)
# Last column is the target, the rest are features.
y_abalone=abalone_balanced[:,-1]
x_abalone=abalone_balanced[:,:-1]
x_abalone=pd.DataFrame(data=x_abalone[0:,0:])
#print('balanced',x_abalone)
y_smote=abalone_smote[:,-1]
x_smote=abalone_smote[:,:-1]
x_smote=pd.DataFrame(data=x_smote[0:,0:])
#input from pulser dataset
data=pd.read_csv('HTRU_2.csv')
y_htru=data.iloc[:,-1].values.astype(float)
x_htru=data.iloc[:,:-1]
#This class takes feature vector & target as input along with the String that describes it & number of clusters
#At first the the dendrogram program should be ran to determine how many clusters the user wants to opt for.
ob=my_tools(x_htru,y_htru,'Pulser imbalanced Raw',2)
ob.full_throttle()#This method calls all the sub modules of my_tools program & therefore please be patient it'll take around 10 mins
#to finish execution & export autogenerated report with performance metrics.
#Balanced sampling technique as described in the project report
balanced_data=hi.simple_oversample_undersample(data.values.astype(float),y_htru)
#Balanced sampling with SMOTE
balanced_data_smote=hi.modified_smote(data.values.astype(float),y_htru,2)
y_balanced=balanced_data[:,-1]
x_balanced=balanced_data[:,:-1]
x_balanced=pd.DataFrame(data=x_balanced[0:,0:])
y_balanced_smote=balanced_data_smote[:,-1]
x_balanced_smote=balanced_data_smote[:,:-1]
x_balanced_smote=pd.DataFrame(data=x_balanced_smote[0:,0:])
ob=my_tools(x_balanced,y_balanced,'Pulser balanced sampling',2)
ob.full_throttle() #This method calls all the sub modules of my_tools program & therefore please be patient it'll take around 10 mins
#to finish execution & export autogenerated report with performance metrics.
ob=my_tools(x_balanced_smote,y_balanced_smote,'Pulser SMOTE',2)
ob.full_throttle()
|
11,283 | e17bde4f0dd144bf38ad9f87678ec7fe18d2e29c | import pymysql.cursors
import time
# Connect to the database
# Open the MySQL connection; DictCursor makes rows come back as dicts.
# NOTE(review): credentials are hard-coded -- move them to environment
# variables or a config file before sharing/deploying this script.
connection = pymysql.connect(host='localhost',
                             user='root',
                             password='1234',
                             db='LJ',
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor)
print ("connect successful!!")
def curent_album_id(connection):
    """Fetch and print the list_album_id row with the largest id.

    The cursor is a DictCursor, so the printed row is a dict (or None when
    the table is empty).
    """
    query = 'SELECT * FROM list_album_id WHERE id=(select max(id) from list_album_id)'
    with connection.cursor() as cursor:
        cursor.execute(query)
        latest_row = cursor.fetchone()
        print(latest_row)
    # return latest_row['album_id']
curent_album_id(connection)
connection.close()
import time
from load_post_vk_class import vk_api, clean
# Source image and VK identifiers used by the (mostly commented-out) upload code.
src = 'https://1957anti.ru/media/k2/items/cache/ed665975b819d9e4bff8f3321152810d_XL.jpg'
album_id = 260959360
group_id = 165089751
# NOTE(review): a live VK access token is committed in source -- revoke it and
# load secrets from the environment instead.
token = '9a3e3c787c27cdcffb50046fb31b70c4dbb6e1b78dacb8a91d7e1a6e28d6041731d6918fb84822f54483d'
vk_user = vk_api.VkApi(token ,api_version='5.92')
vk_user.auth()
clean(group_id,vk_user)
# # with connection.cursor() as cursor:
# # sql = "create table list_group_id( group_id integer(11) not null)"
# # cursor.execute(sql)
# # connection.commit()
# # connection.close()
# # load_img_PIL(vk_user,group_id,src,album_id)
# with connection.cursor() as cursor:
# sql = "INSERT INTO list_album_id (album_id) VALUES (%s)"
# album_id =261028865
# cursor.execute(sql, (album_id,))
# connection.commit()
# connection.close()
# def getting_links_to_creare_db(connection,vk_user,group_id):
# list_saves=0
# try:
# with connection.cursor() as cursor:
# sql = "INSERT INTO URL_PUBLIC_POSTS (URL, DATE_PUBLIC) VALUES (%s, %s)"
# for i in range(0,400,100):
# zapros = vk_user.method('wall.get',{'owner_id':-group_id,'offset':i,'count':100})
# print(len(zapros))
# for zap in zapros[1:]:
# date = time.strftime('%Y-%m-%d',time.localtime(int(zap['date'])))
# link = zap['text'].split('<br>')[1]
# cursor.execute(sql, (link, date))
# list_saves += 1
# connection.commit()
# finally:
# connection.close()
# return list_saves
# def load_list_sav(connection, date=int(time.time())):
# try:
# with connection.cursor() as cursor:
# date = time.strftime('%Y-%m-%d',time.localtime(date))
# sql = "SELECT URL FROM URL_PUBLIC_POSTS WHERE DATE_PUBLIC > '{}' ".format(date)
# cursor.execute(sql)
# # help(cursor)
# result = cursor.fetchmany(103)
# # result = cursor.fetchone()
# # print(result)
# finally:
# connection.close()
# return [i['URL'] for i in result]
# # print(getting_links_to_creare_db(connection,vk_user,group_id))
# # load_list_sav(connection, 1549988746)
# # from datetime import datetime, date, timedelta
# # import time
# # # help(time)
# # now_time = int(time.mktime(datetime.now().timetuple()))
# # next_day = int(time.mktime((date.today()+timedelta(days=1)).timetuple()))
# # for i in range(now_time+15, next_day ,(next_day-now_time)//2):
# # print(time.strftime('%H %M',time.localtime(i)))
import vk_api_my as vk_api
import pytube
import os
# help(os.remove)
# import pymedia
from io import BytesIO
# print(dir(pytube.YouTube))
# help(pytube.YouTube)
# data = bytearray()
from datetime import datetime, date, timedelta
import time
# Timestamps for today's start, "now" and tomorrow's start (epoch seconds).
now_day =time.mktime(date.today().timetuple())
now_time = int(time.mktime(datetime.now().timetuple()))
next_day = int(time.mktime((date.today()+timedelta(days=1)).timetuple()))
print(now_time, next_day, now_day)
# NOTE(review): os._exit(1) terminates the interpreter here, so everything
# below (the YouTube download + VK upload flow) is currently dead code.
os._exit(1)
link = "https://www.youtube.com/watch?v=L1W0XvU_8M4"
yt = pytube.YouTube(link)
streams = yt.streams.first()
link_for_save = "/home/maksim/Загрузки/{}.{}".format(yt.title,streams.subtype)
# print(streams)
# print(dir(streams),streams.subtype,yt.title,sep='\n')
# # os.mkdir(data)
streams.download("/home/maksim/Загрузки/")
vk_user = vk_api.VkApi()
# Ask VK for an upload URL, then POST the downloaded video file to it.
upload_url=vk_user.method('video.save',{'name':yt.title})['upload_url']
print(upload_url)
with open(link_for_save, 'rb') as video:
    ur = vk_api.requests.post(upload_url, files = {'video_file':video}).json()
# print(ur)
os.remove(link_for_save)
link_video = "https://vk.com/video{}_{}".format(ur['owner_id'],ur['video_id'])
# # ur = vk_api.requests.post(upload_url, files = {'file1': foto}).json()
print(link_video)
11,284 | 0ee2ff418a7a27635bdbf90b9f3c5650263f7135 | #########################################################################
#-*- coding:utf-8 -*-
# File Name: find_max_crossing_sub_array.py
# Author: buptxiaomiao
# mail: buptxiaomiao@outlook.com
# Created Time: 2016年03月25日 星期五 20时13分35秒
#########################################################################
#!/usr/bin/env python2.7
# Sample input for the __main__ demo below.
q = [-1,5,6,5,2,3,-5,-6,2,8]
def find_max_crossing_sub_array(low, mid, high, *arr):
    """Return (left_index, right_index, best_sum) for the maximum subarray
    of arr[low..high] that crosses the midpoint (CLRS divide-and-conquer helper).

    low/mid/high are inclusive indices into *arr*; the result always spans
    the boundary between arr[..mid] and arr[mid+1..].
    """
    # Best suffix of arr[low..mid], scanning leftwards from mid.
    # float('-inf') replaces the original magic sentinels (-10000 / -1000),
    # which silently broke on arrays with large negative values.
    left_sum = float('-inf')
    temp_sum = 0
    left_index = low
    for i in reversed(range(low, mid + 1)):
        temp_sum += arr[i]
        if temp_sum > left_sum:
            left_sum = temp_sum
            left_index = i
    # Best prefix of arr[mid+1..high], scanning rightwards.
    right_sum = float('-inf')
    temp_sum = 0
    right_index = high
    for i in range(mid + 1, high + 1):
        temp_sum += arr[i]
        if temp_sum > right_sum:
            right_sum = temp_sum
            right_index = i
    return (left_index, right_index, left_sum + right_sum)
if __name__ =='__main__':
    # Python 2 print statement (file is tagged python2.7); the function above
    # is version-neutral.
    print find_max_crossing_sub_array(0,(len(q)-1)//2,len(q)-1,*q)
|
11,285 | 73660ee6ef6b00414c9b58104533b8cb37db331f | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 22 15:09:19 2017
@author: admin
"""
import re
#######
def searching(text):
    """Print every 192.x.x.x IPv4 address found in *text* (a list of lines).

    For each matching line prints its index, the span of the first match and
    the list of all matches on that line.
    """
    # Compile once, outside the loop. The dots are now escaped -- in the
    # original pattern '.' matched ANY character, so strings like
    # '192x168x0x1' were wrongly reported -- and each octet must have at
    # least one digit ({1,3} instead of {0,3}).
    pattern = re.compile(r'192\.\d{1,3}\.\d{1,3}\.\d{1,3}')
    for i, line in enumerate(text):
        result = pattern.findall(line)
        searchForResult = pattern.search(line)
        if searchForResult is not None:
            print('Строка ',i,' Позиция ',searchForResult.span(),' : ',result)
#######
def workWithFile(pathToFile):
    """Read the file at *pathToFile* and report all 192.x.x.x addresses in it.

    :raises FileNotFoundError/OSError: when the file cannot be opened
    (handled by the caller's retry loop below).
    """
    # 'with' guarantees the handle is closed even on error -- the original
    # opened the file and never closed it.
    with open(pathToFile) as rfile:
        fileString = rfile.readlines()
    searching(fileString)
#######
# Keep prompting for a path until a file is successfully processed.
finish_it = False
while finish_it !=True:
    try:
        _pathToFile = input('Введите путь к файлу (D:\Учеба\Python\git-lab1\Python-lab2\File for 4.txt)>> \n')
        workWithFile(_pathToFile)
        finish_it = True
    except(FileNotFoundError,OSError):
        # "File not found" -- retry with a new path.
        print('Файл не найден!')
11,286 | 2fc74a273869b594ceaead48ded835bbaa0cc4d0 | import sqlite3
import os
import sys
def main(args):
    """Create/populate moncafe.db from the input file named in args[1].

    Each input line is 'TYPE, field1, field2, ...' where TYPE is one of:
    E employee, S supplier, C coffee stand, P product. Tables are created
    only when the database file does not exist yet.
    """
    inputfile = args[1]
    databaseexisted = os.path.isfile('moncafe.db')
    dbcon = sqlite3.connect('moncafe.db')
    with dbcon:  # commits on success, rolls back on error
        cursor = dbcon.cursor()
        if not databaseexisted:
            # Employees table.
            # Fix: the referenced table is 'Coffee_stands' (the original said
            # 'Coffee_stand', which does not exist).
            cursor.execute("CREATE TABLE Employees(id INTEGER PRIMARY KEY,"
                           " name TEXT NOT NULL,"
                           " salary REAL NOT NULL,"
                           "coffee_stand INTEGER REFERENCES Coffee_stands(id))")
            # Suppliers table.
            cursor.execute("CREATE TABLE Suppliers(id INTEGER PRIMARY KEY,"
                           "name TEXT NOT NULL,"
                           "contact_information TEXT)")
            # Products table.
            cursor.execute("CREATE TABLE Products(id INTEGER PRIMARY KEY,"
                           "description TEXT NOT NULL,"
                           "price REAL NOT NULL,"
                           "quantity INTEGER NOT NULL)")
            # Coffee_stands table.
            cursor.execute("CREATE TABLE Coffee_stands(id INTEGER PRIMARY KEY,"
                           "location TEXT NOT NULL,"
                           "number_of_employees INTEGER)")
            # Activities table.
            # Fixes: duplicated 'INTEGER INTEGER' type, and the referenced
            # table is 'Products' (the original said 'Product').
            cursor.execute("CREATE TABLE Activities(product_id INTEGER REFERENCES Products(id),"
                           "quantity INTEGER NOT NULL,"
                           "activator_id INTEGER NOT NULL,"
                           "date DATE NOT NULL)")
        with open(inputfile) as infile:
            for line in infile:
                currentline = [field.strip() for field in line.split(',')]
                if currentline[0] == 'E':
                    cursor.execute("INSERT INTO Employees VALUES(?,?,?,?)",
                                   (currentline[1], currentline[2], currentline[3], currentline[4]))
                elif currentline[0] == 'S':
                    cursor.execute("INSERT INTO Suppliers VALUES(?,?,?)",
                                   (currentline[1], currentline[2], currentline[3]))
                elif currentline[0] == 'C':
                    cursor.execute("INSERT INTO Coffee_stands VALUES(?,?,?)",
                                   (currentline[1], currentline[2], currentline[3]))
                elif currentline[0] == 'P':
                    # NOTE(review): quantity is deliberately initialised to 0
                    # rather than read from the line -- presumably stock
                    # starts empty; confirm against the input format spec.
                    cursor.execute("INSERT INTO Products VALUES(?,?,?,?)",
                                   (currentline[1], currentline[2], currentline[3], 0))
    dbcon.close()
if __name__ == '__main__':
    # argv[0] is the script name; main() uses argv[1] as the input file path.
    main(sys.argv)
11,287 | e3caf7bc637bc1d5a6b4902b97a2d2ef3cc03ed2 | import numpy as np
from .Gateway import PacketRecord
class PropagationModel:
# log distance path loss model (or log normal shadowing)
def __init__(self, gamma=2.32, d0=1000.0, std=0.5, Lpld0=128.95, GL=0):
self.gamma = gamma
self.d0 = d0
self.std = std
if self.std < 0:
self.std = 0
self.Lpld0 = Lpld0
self.GL = GL
def tp_to_rss(self, indoor: bool, tp_dBm: int, d: float):
bpl = 0 # building path loss
if d == 0:
d = 1
if indoor:
bpl = np.random.choice([17, 27, 21, 30]) # according Rep. ITU-R P.2346-0
Lpl = 10 * self.gamma * np.log10(d / self.d0) + np.random.normal(self.Lpld0, self.std) + bpl
if Lpl < 0:
Lpl = 0
return tp_dBm - self.GL - Lpl
class SNRModel:
def __init__(self):
self.noise = -2 # mean_mean_values
self.std_noise = 1 # mean_std_values
self.noise_floor = -174 + 10 * np.log10(125e3) + np.random.normal(self.noise, self.std_noise)
def rss_to_snr(self, rss: float):
return rss - self.noise_floor
class AirInterface:
    """Simulated radio link: fans a transmitted packet out to every gateway,
    computing per-gateway RSS/SNR via the propagation and SNR models."""
    def __init__(self, sim_env, gateways, server, prop_model=None, snr_model=None):
        # sim_env is a simpy-style environment (has .event()/.process()) -- TODO confirm
        self.prop_model = prop_model
        self.snr_model = snr_model
        self.server = server
        self.sim_env = sim_env
        self.gateways = gateways
        # Fall back to the default models when none are injected.
        if self.prop_model is None:
            self.prop_model = PropagationModel()
        if self.snr_model is None:
            self.snr_model = SNRModel()
    def transmit(self, p):
        """Deliver packet p to the server and to every gateway (one dispatch event each)."""
        dispatch = [self.sim_env.event() for i in range(len(self.gateways))]
        self.sim_env.process(self.server.receive_from_gateway(p, dispatch))
        for i in range(len(self.gateways)):
            gateway = self.gateways[i]
            dist = p.node.location.distance(gateway.location)
            # indoor is hard-coded False for node->gateway links.
            rss = self.prop_model.tp_to_rss(False, p.para.tp, dist)
            snr = self.snr_model.rss_to_snr(rss)
            record = PacketRecord(p, gateway, rss, snr, dispatch[i])
            self.sim_env.process(gateway.listen(record))
    def reset(self, sim_env):
        """Attach a fresh simulation environment (models and gateways are kept)."""
        self.sim_env = sim_env
11,288 | 858e6c3d3021f159ebdec2743390cffb03f74ebb | #!/usr/bin/env python3
import sys
#Define a function to calculate the insurance
#Define a function to calculate the insurance
def calc_insurance(salary):
    """Return the monthly social-insurance + housing-fund contribution.

    Rates: pension 8% + medical 2% + unemployment 0.5% + housing fund 6%.
    """
    return salary*(0.08+0.02+0.005+0.06)
#Define a function to calculate the tax
def calc_tax(salary):
    """Return the PRC individual income tax for a monthly gross *salary*.

    Taxable pay = salary - insurance - 3500 (the 2011 monthly allowance);
    tax = pay * rate - quick_deduction per the official 2011 bracket table.
    """
    pay = salary - calc_insurance(salary) - 3500
    if pay <= 0:
        return 0
    # (upper bound of bracket, tax rate, quick deduction)
    brackets = (
        (1500, 0.03, 0),
        (4500, 0.1, 105),
        (9000, 0.2, 555),    # bug fix: original used 55; the official table says 555
        (35000, 0.25, 1005),
        (55000, 0.3, 2755),
        (80000, 0.35, 5505),
    )
    for upper, rate, deduction in brackets:
        if pay <= upper:
            return pay * rate - deduction
    return pay * 0.45 - 13505
# Read the salary amounts from the command line.
# Each argument is 'work_id:salary'; print 'work_id:net_pay' per worker.
try:
    argvs = sys.argv[1:]
    for arg in argvs:
        items = arg.split(':')
        work_id = int(items[0])
        salary = int(items[1])
        # Compute the individual income tax and net pay.
        money = salary - calc_tax(salary) - calc_insurance(salary)
        print("%d:%.2f " % (work_id, money))
except ValueError:
    # Non-integer id/salary (or a missing ':' part) lands here.
    print("Parameter Error")
|
11,289 | 888e31a734c58dd1e432daae810884d099b27c3f |
from Gerrit_Extractor.Gerrit_Connection_Setup import Connection_Setup
from Gerrit_Extractor.Gerrit_Queries import Gerrit_Queries
from Utilities.Log_Progessbar import show_progress
class ReviewExtractor:
    """Extracts projects, changes, revisions, reviewers and inline comments
    from a Gerrit server via its REST API.

    NOTE(review): the bare 'except:' clauses throughout this class swallow
    every error (including KeyboardInterrupt); narrowing them to the REST
    client's exception types would make failures diagnosable.
    """
    def __init__(self, url, username,password):
        """Open a REST connection to the Gerrit server at *url*."""
        self.url = url
        self.username = username
        self.password = password
        self.con=Connection_Setup(url=self.url,username=self.username,password=self.password)
        self.rest=self.con.setConnection()
        print('Gerrit Connection Established Successfully.')
        self.gQ=Gerrit_Queries()
    def getProjectList(self):
        """Return (project dict keyed by name, list of project names)."""
        print("Project List Extracting...")
        projectDict = self.rest.get(self.gQ.projectListQuery())
        projectNameList = list(projectDict.keys())
        return projectDict,projectNameList
    def changesPerProject(self, projectName):
        """Return the list of changes for a single project."""
        changes = self.rest.get(self.gQ.changeByProjectQuery(projectName))
        return changes
    # print(change)
    def getAllChangeList(self,projectDict):
        """Return all changes of all projects, each annotated with its project detail."""
        changeList=[]
        for i, project in enumerate(list(projectDict.keys()), 1):
            show_progress("Change List Extraction from Project List", i, len(projectDict.keys()))
            changes = self.changesPerProject(project)
            if (len(changes) != 0):
                # print("i: " + str(i))
                for change in changes:
                    if (len(change) != 0):
                        try:
                            # print(projectDict[project])
                            change['projectDetail'] = projectDict[project]
                        except:
                            continue
                        else:
                            changeList.append(change)
        return changeList
    def getChangeDetailList(self,changeList):
        """Fetch the detail record for every change; assigns sequential request_id."""
        changeDetailList=[]
        for k,change in enumerate(changeList,1):
            show_progress("Change Detail Extraction", k, len(changeList))
            try:
                changeDetail = self.rest.get(self.gQ.changeDetailQuery(change['id']))
            except:
                continue
            else:
                changeDetail['projectDetail'] = change['projectDetail']
                changeDetailList.append(changeDetail)
        for i, change in enumerate(changeDetailList, 1):
            change['request_id'] = i
        return changeDetailList
    def getTopic(self,changeDetailList):
        """Attach each change's topic (None when the lookup fails); returns the list."""
        for i,change in enumerate(changeDetailList,1):
            show_progress("Topic Extraction", i, len(changeDetailList))
            try:
                topic = self.rest.get(self.gQ.topicQuery(change['id']))
            except:
                print("Exception: Topic Extraction Failed!")
                topic = None
            finally:
                change['topic'] = topic
        return changeDetailList
    def getRevisions(self,changeDetailList):
        """Attach current_revision/revisions to every change; returns the list.

        NOTE(review): if self.rest.get raises, patchCount was never assigned,
        so both the except body and the finally block raise NameError here --
        initialise patchCount = {} before the try to make the fallback work.
        Also: the key-presence checks only run on FAILURE; a successful
        response missing the keys still raises KeyError in the finally block.
        """
        for i,change in enumerate(changeDetailList,1):
            show_progress("Revisions Extraction from Change List", i, len(changeDetailList))
            try:
                patchCount = self.rest.get(self.gQ.revisionsQuery(change['id']))
            except:
                print("Exception: Required Parameters Extraction Failed!")
                if 'current_revision' not in patchCount.keys():
                    patchCount['current_revision'] = None
                if 'revisions' not in patchCount.keys():
                    patchCount['revisions'] = None
            finally:
                change['current_revision'] = patchCount['current_revision']
                change['revisions'] = patchCount['revisions']
        return changeDetailList
    def getPatchDetail(self,changeList):
        """Return one revisions record per change, tagged with its request_id.

        NOTE(review): same unbound-variable hazard as getRevisions -- 'detail'
        is referenced in the except/finally blocks even when the REST call
        itself raised before assigning it.
        """
        patchDetail=[]
        for i,change in enumerate(changeList,1):
            show_progress("Required Revisions Extraction from Change List", i, len(changeList))
            try:
                detail = self.rest.get(self.gQ.reqParamsQuery(change['id']))
            except:
                print("Exception: Required Parameters for Patch Detail Extraction Failed!")
                if 'current_revision' not in detail.keys():
                    detail['current_revision'] = None
                if 'revisions' not in detail.keys():
                    detail['revisions'] = None
            finally:
                detail['request_id'] = change['request_id']
                patchDetail.append(detail)
        return patchDetail
    def getReviewerList(self,changeList):
        """Return one reviewer list per change, each reviewer tagged with change ids."""
        reviewerList = []
        for i,change in enumerate(changeList,1):
            show_progress("Reviewers Info Extraction from Change List", i, len(changeList))
            try:
                reviewers = self.rest.get(self.gQ.reviewersQuery(change['id']))
            except:
                print("Exception: Reviewers Info Extraction Failed!")
            else:
                if (len(reviewers) != 0):
                    for reviewer in reviewers:
                        reviewer['change_id'] = change['change_id']
                        reviewer['request_id'] = change['request_id']
                    reviewerList.append(reviewers)
        return reviewerList
    def getInlineComment(self,changeList):
        """Collect every inline comment of every patchset of every change.

        Returns (flat list of comment dicts, per-revision comment counts).
        Each optional Gerrit field is fetched defensively and defaults to None
        when absent from the REST payload.
        """
        #cnt = 0
        inlineCommentList=[]
        commentCount={}
        for k,cIter in enumerate(changeList,1):
            show_progress("Inline Comment Extraction from Change List", k, len(changeList))
            try:
                change = self.rest.get(self.gQ.revisionsQuery(cIter['id']))
                #print(change)
                for r in change['revisions'].keys():
                    try:
                        patchset = change['revisions'][r]['_number']
                        commentCount[str(r)] = 0
                        try:
                            commentFile = self.rest.get(self.gQ.inlineCommentQuery(cIter['id'],patchset))
                            for file in commentFile.keys():
                                commentList = commentFile[file]
                                for i,comment in enumerate(commentList,1):
                                    #cnt += 1
                                    #show_progress("Inline Comment No:" str(cnt))
                                    commentCount[str(r)] += 1
                                    inlineComment = {}
                                    try:
                                        inlineComment['request_id'] = cIter['request_id']
                                    except:
                                        inlineComment['request_id'] = None
                                    try:
                                        inlineComment['comment_id'] = comment['id']
                                    except:
                                        inlineComment['comment_id'] = None
                                    try:
                                        inlineComment['in_reply_to'] = comment['in_reply_to']
                                    except:
                                        inlineComment['in_reply_to'] = None
                                    try:
                                        inlineComment['patchset_id'] = change['revisions'][r]['_number']
                                    except:
                                        inlineComment['patchset_id'] = None
                                    try:
                                        inlineComment['file_name'] = file
                                    except:
                                        inlineComment['file_name'] = None
                                    try:
                                        inlineComment['line_number'] = comment['line']
                                    except:
                                        inlineComment['line_number'] = None
                                    try:
                                        inlineComment['author_id'] = comment['author']['_account_id']
                                    except:
                                        inlineComment['author_id'] = None
                                    try:
                                        inlineComment['written_on'] = comment['updated']
                                    except:
                                        inlineComment['written_on'] = None
                                    inlineComment['status'] = None
                                    try:
                                        inlineComment['side'] = comment['side']
                                    except:
                                        inlineComment['side'] = None
                                    try:
                                        inlineComment['message'] = comment['message']
                                    except:
                                        inlineComment['message'] = None
                                    try:
                                        inlineComment['start_line'] = comment['range']['start_line']
                                    except:
                                        inlineComment['start_line'] = None
                                    try:
                                        inlineComment['end_line'] = comment['range']['end_line']
                                    except:
                                        inlineComment['end_line'] = None
                                    try:
                                        inlineComment['start_character'] = comment['range']['start_character']
                                    except:
                                        inlineComment['start_character'] = None
                                    try:
                                        inlineComment['end_character'] = comment['range']['end_character']
                                    except:
                                        inlineComment['end_character'] = None
                                    inlineComment['sentiment_score'] = None
                                    inlineCommentList.append(inlineComment)
                        except:
                            print("Exception: " + "Commented file Extraction Failed!")
                            continue
                    except:
                        print("Exception: " + "patchset key not found!")
                        continue
            except:
                print("Exception: " + "revisions key not found!")
                continue
        return inlineCommentList,commentCount
|
11,290 | 43892bf0bf3759ed3cedcc5283678efe76d6612a |
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from scipy import stats
#reading in an image
# Load the sample road image (numpy array of shape (H, W, 3)), print its
# stats, display it and save a copy as test-after.png.
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
plt.imsave("test-after.png", image)
#plt.savefig('test-after.png')
#mpimg.imsave('test_images/lines-', line_image)
|
11,291 | 0e6efc5a7a1c7aff6ef18406606165a7cfa387ff | # Generated by Django 3.1.3 on 2020-12-13 17:45
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the IrbApproval model and link Session to it via a nullable FK."""
    dependencies = [
        ('django_mri', '0013_auto_20201125_1858'),
    ]
    operations = [
        migrations.CreateModel(
            name='IrbApproval',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('institution', models.CharField(blank=True, max_length=128, null=True)),
                ('number', models.CharField(max_length=32)),
                ('document', models.FileField(blank=True, max_length=1000, null=True, upload_to='mri/irb/')),
            ],
        ),
        migrations.AddField(
            model_name='session',
            name='irb',
            # SET_NULL keeps sessions alive when their IRB approval is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_mri.irbapproval'),
        ),
    ]
|
11,292 | 0b9a4c81ce53465026912915e49d759833f0e91f | #!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2021, Intel Corporation
#
#
# Figure.py -- generate figure-related products (EXPERIMENTAL)
#
import matplotlib.pyplot as plt
import os.path
from textwrap import wrap
from .common import *
from .flat import *
from .Benchmark import *
class Figure:
_figure_kwargs = {'figsize': [6.4, 4.8], 'dpi': 200, \
'tight_layout': {'pad': 6}}
def _series_file(self, result_dir):
return os.path.join(result_dir, self.file + '.json')
def __init__(self, f, result_dir = ""):
self.output = f['output']
self.output['done'] = self.output.get('done', False)
# copies for convenience
self.title = self.output['title']
self.file = self.output['file']
self.x = self.output['x']
self.y = self.output['y']
self.key = self.output['key']
# find the latest series
if not self.output['done']:
self.series = f['series']
else:
self.series = json_from_file(self._series_file(result_dir)) \
[self.key]['series']
def cache(self):
"""Cache the current state of execution"""
return {'output': self.output, 'series': self.series}
def is_done(self):
return self.output['done']
@staticmethod
def get_figure_desc(figure):
"""Getter for accessing the core descriptor of a figure"""
return figure['output']
@staticmethod
def get_oneseries_desc(oneseries):
"""Getter for accessing the core descriptor of a series"""
return oneseries
@staticmethod
def oneseries_derivatives(oneseries):
"""Generate all derived variables of a series"""
output = {}
if 'rw' in oneseries.keys():
output['rw_order'] = 'rand' if oneseries['rw'] else 'seq'
return output
@classmethod
def flatten(cls, figures):
"""Flatten the figures list"""
figures = make_flat(figures, cls.get_figure_desc)
figures = process_fstrings(figures, cls.get_figure_desc)
output = []
for f in figures:
# flatten series
common = f.get('series_common', {})
f['series'] = make_flat(f['series'], cls.get_oneseries_desc, common)
f['series'] = process_fstrings(f['series'], cls.get_oneseries_desc,
cls.oneseries_derivatives)
output.append(cls(f))
return output
def prepare_series(self, result_dir):
"""
Extract all series from the respective benchmark files and append them
to the series file.
"""
output = {}
output['title'] = self.title
output['x'] = self.x
output['y'] = self.y
output['series'] = []
for series in self.series:
idfile = os.path.join(result_dir, 'benchmark_' + str(series['id']) +
'.json')
rows = json_from_file(idfile)
# it is assumed each row has the same names of columns
keys = rows[0].keys()
# skip the series if it does not have required keys
if self.x not in keys or self.y not in keys:
continue
points = [[row[self.x], row[self.y]] for row in rows]
output['series'].append({'label': series['label'], 'points': points})
# save the series to a file
series_path = self._series_file(result_dir)
if os.path.exists(series_path):
figures = json_from_file(series_path)
else:
figures = {}
figures[self.key] = output
with open(series_path, 'w') as file:
json.dump(figures, file, indent=4)
# mark as done
self.output['done'] = True
def _points_to_xy(self, points):
xs = [p[0] for p in points]
ys = [p[1] for p in points]
return xs, ys
def _label(self, column):
"""Translate the name of a column to a label with a unit"""
# XXX
return column
def png_path(self, result_dir):
output = self.file + '_' + self.key + '.png'
return os.path.join(result_dir, output)
    def to_png(self, result_dir):
        """Render this figure to <result_dir>/<file>_<key>.png via matplotlib."""
        # set output file size, padding and title
        # NOTE(review): Figure._figure_kwargs is a class attribute declared
        # outside this view -- presumably figsize/dpi defaults; confirm.
        fig = plt.figure(**Figure._figure_kwargs)
        suptitle = "\n".join(wrap(self.title, 60))
        fig.suptitle(suptitle, fontsize='medium', y=0.90)
        # get a subplot
        ax = plt.subplot(1, 1, 1)
        # XXX bw_avg [threads=24, iodepth=2, block size=4096B]
        ax.title.set_text('')
        xticks = []
        for oneseries in self.series:
            # draw series one-by-one
            xs, ys = self._points_to_xy(oneseries['points'])
            ax.plot(xs, ys, marker='.', label=oneseries['label'])
            # collect all existing x values
            xticks.extend(xs)
        # make values unique (set) and sort them
        xticks = sorted(list(set(xticks)))
        # XXX linear / log
        ax.set_xscale('linear')
        ax.set_xticks(xticks)
        plt.setp(ax.get_xticklabels(), rotation=45, ha='right')
        ax.set_xlabel(self._label(self.x))
        ax.set_ylabel(self._label(self.y))
        ax.set_ylim(bottom=0)
        ax.legend(fontsize=6)
        ax.grid(True)
        plt.savefig(self.png_path(result_dir))
def html_data_table(self):
"""
Create an HTML snippet string with a table containing the Figure data.
"""
return "XXX"
def to_html(self, result_dir):
"""Combine a Figure's png and data table into a single HTML snippet"""
png_path = self.png_path(result_dir)
data_table = self.html_data_table()
return "XXX figure html"
|
11,293 | 9d99b6fc20d439b421fa1886e17e6290d0cbdfca | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
class for the C code generation
author: Rinse Wester
"""
from csdfgraph import *
class CCodeGen(object):
    """C code generation backend for CSDF graphs (not implemented yet)."""
    def __init__(self, arg):
        # NOTE(review): 'arg' is stored but never read by this class --
        # confirm what callers pass here.
        super(CCodeGen, self).__init__()
        self.arg = arg
    def generateCode(graph, targetdir):
        # NOTE(review): 'self' is missing from the signature -- when invoked
        # as instance.generateCode(td) the instance binds to 'graph'. Confirm
        # the intended signature before changing it; callers may rely on the
        # current (odd) binding.
        raise NotImplementedError(
            'C codegen not implemented yet (target :' + targetdir + ')')
|
11,294 | 17027998e242efc40db22e73fd843eb44df92cf8 | import os
import re
# Collect every reStructuredText and Markdown file under the current tree.
result = []
for root, dirs, files in os.walk('.'):
    for filename in files:
        extension = filename.split('.')[-1]
        if extension in ('rst', 'md'):
            result.append(os.path.join(root, filename))
def extractImages(filename):
    """Print and return every 'readme' image URL found in *filename*.

    Scans each line for http(s) URLs ending in png/jpg/jpeg/gif and reports
    the ones whose URL text contains 'readme'.  Returns the collected list
    (previously returned None; callers ignoring the result are unaffected).
    """
    pattern = re.compile('http(.*?)(png|jpg|jpeg|gif)', re.IGNORECASE)
    matches = []
    # 'with' guarantees the handle is closed; iterating the file streams
    # lines instead of loading the whole file via readlines().
    with open(filename, 'r') as handle:
        for line in handle:
            # finditer instead of search: a line may hold several image
            # URLs, and search() silently reported only the first one.
            for match in pattern.finditer(line):
                url = match.group()
                if 'readme' in url:
                    print(url)
                    matches.append(url)
    return matches
# Scan every collected documentation file for readme image links.
for doc_path in result:
    extractImages(doc_path)
11,295 | 7954eab0dae80c0e332af58d12862e188890e079 | from apis.v1.models.blacklist import Blacklist
from apis.v1.models.business import BusinessModel
from apis.v1.models.review import ReviewModel
from apis.v1.models.user import User
class Database:
    """Tiny in-memory 'database': one list of row dicts per registered model.

    ``database`` maps tablename -> list of row dicts.  ``max_ids`` tracks the
    highest id handed out per table so deletions do not lead to duplicate ids.
    NOTE: ``database`` is a class-level dict shared by every instance
    (behavior kept from the original design); ``max_ids`` is re-bound per
    instance by __init__.
    """
    database = dict()
    max_ids = dict()
    def __init__(self):
        self.max_ids = dict()
    def commit(self, obj):
        """Assign an id to *obj* and append its __dict__ to its table."""
        self.obj = obj
        self.assign_id(obj)
        self.database[self.obj.tablename].append(self.obj.__dict__)
    def assign_id(self, obj):
        """Give *obj* the next free integer id for its table."""
        max_id = self.max_ids[obj.tablename]
        table_length = len(self.database[obj.tablename])
        # After deletions max_id can exceed the table length; keep counting
        # from max_id in that case so ids are never reused.
        if max_id > table_length:
            obj.id = max_id + 1
        else:
            obj.id = table_length + 1
        self.max_ids[obj.tablename] += 1
    def get_all(self, clas):
        """Return the full list of rows stored for model class *clas*."""
        return self.database[clas.tablename]
    def get(self, clas, num):
        """Return the row of *clas* with id == *num*, or None if absent."""
        for item in self.database[clas.tablename]:
            if item['id'] == num:
                return item
        return None
    def delete(self, clas, dic):
        """Remove the row whose id matches dic['id'], if present."""
        table = self.database[clas.tablename]
        # enumerate + return avoids mutating the list while iterating it,
        # which the previous implementation did.
        for index, item in enumerate(table):
            if item['id'] == dic['id']:
                del table[index]
                return
    def update(self, clas, obj):
        """Replace the stored row that has obj['id'] with *obj*."""
        table = self.database[clas.tablename]
        for index, item in enumerate(table):
            if item['id'] == obj['id']:
                table[index] = obj
    def drop_all(self):
        """Remove every table and every row."""
        self.database.clear()
    def create_all(self, *args):
        """Register model classes: one empty table and id counter per class."""
        for clas in args:
            self.table_key = clas.tablename
            self.database[self.table_key] = list()
            self.max_ids[clas.tablename] = int()
    def filter_by(self, obj, param, val):
        """Return the first row of obj's table where row[param] == val, else None."""
        # .get() tolerates rows that lack *param* instead of raising KeyError.
        for item in self.database[obj.tablename]:
            if item.get(param) == val:
                return item
        return None
# Module-level singleton database, pre-populated with one empty table per
# registered model class.
db = Database()
db.create_all(Blacklist, BusinessModel, ReviewModel, User)
|
11,296 | d6c085174b2ab142e81ccdb1dcce3dd82212deaa | # def CountPair(L,R):
# x=(R-L+1)
# print(x//2)
# if __name__=='__main__':
# L,R=1,10
# CountPair(L,R)
import math
#lambda c:[i for i in range(c)if math.gcd(c,i)<2]
# n=int(input())
# l=set()
# for i in range(2,n+1):
# for j in range(i+1,n+1):
# a=set()
# if math.gcd(i,j)<2:
# a.add(i)
# a.add(j)
# l.add(a)
# print(l)
# def coprime(A,B):
# return len([(x,y) for x in range(1,A+1) for y in range(x+1,B+1) if math.gcd(x,y) == 1])
# print(coprime(1,5))
# from collections import deque
# def coprimes():
# tree = deque([[2, 1], [3, 1]])
# while True:
# m, n = tree.popleft()
# yield m, n
# tree.append([2 * m - n, m])
# tree.append([2 * m + n, m])
# tree.append([m + 2 * n, n])
# print(tree)
# coprimes()
# Print, on one line, every coprime pair (a, b) with 2 <= a < b <= 100.
# lcm(a, b) == a * b holds exactly when gcd(a, b) == 1, so the stdlib
# math.gcd (math is imported above) replaces the former third-party
# gmpy2.lcm dependency with identical output.
from itertools import combinations
for a, b in combinations(range(2, 101), 2):
    if math.gcd(a, b) == 1:
        print('({}, {}), '.format(a, b), end=' ')
11,297 | 260e92376920b19bf41af7b8c68289ad41798450 | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 20 23:09:48 2019
@author: My Pc
"""
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
def knn(dataset, comments, views, languages):
    """Train a 10-NN feedback classifier on talk statistics and print a
    prediction for one new talk.

    dataset -- pandas DataFrame with 'ratings', 'comments', 'views' and
               'languages' columns; mutated in place ('ratings' is parsed
               and an 'r_feedback' label column is added)
    comments, views, languages -- metrics of the talk to classify
    """
    import ast  # stdlib; used for safe literal parsing below
    # Parse the stringified 'ratings' column.  ast.literal_eval only accepts
    # Python literals, unlike the previous eval() which would execute
    # arbitrary code embedded in the data file.
    dataset['ratings'] = dataset['ratings'].apply(lambda x: ast.literal_eval(str(x)))
    # Label each talk: negative feedback (0) only when every engagement
    # metric is low; positive (1) otherwise.  Vectorized instead of the
    # former index loop.
    low_engagement = (
        (dataset['comments'] < 100)
        & (dataset['views'] < 500000)
        & (dataset['languages'] < 30)
    )
    dataset['r_feedback'] = (~low_engagement).astype(int)
    X = dataset[['comments', 'views', 'languages']]
    Y = dataset['r_feedback']
    # Split data into training and testing sets (50/50, fixed seed).
    X_train, X_test, y_train, y_test = train_test_split(X, Y, random_state=0, test_size=0.5)
    # Instantiate the model with 10 neighbors and brute-force search.
    classifier = KNeighborsClassifier(algorithm='brute', n_neighbors=10)
    # Fit the model on the training data.
    classifier.fit(X_train, y_train)
    # See how the model performs on the test data.
    print("Accuracy score of the algorithm is:", classifier.score(X_test, y_test) * 100)
    # Prediction for the single input talk.
    prediction = classifier.predict([[comments, views, languages]])
    if prediction[0] == 1:
        print("Talk will receive positive feedback")
    else:
        print("Talk will receive negative feedback")
|
11,298 | f55d887128fda6f2ad3180fe79c3c54d9648d3e4 | # generated from rosidl_generator_py/resource/_idl.py.em
# with input from turtlesim:action/RotateAbsolute.idl
# generated code does not contain a copyright notice
# Import statements for member types
import rosidl_parser.definition # noqa: E402, I100
class Metaclass_RotateAbsolute_Goal(type):
    """Metaclass of message 'RotateAbsolute_Goal'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None
    __constants = {
    }
    @classmethod
    def __import_type_support__(cls):
        # Lazily bind the C type-support callables; import failure is only
        # logged at debug level, not raised.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute_Goal')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__rotate_absolute__goal
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__rotate_absolute__goal
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__rotate_absolute__goal
            cls._TYPE_SUPPORT = module.type_support_msg__action__rotate_absolute__goal
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__rotate_absolute__goal
    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class RotateAbsolute_Goal(metaclass=Metaclass_RotateAbsolute_Goal):
    """Message class 'RotateAbsolute_Goal'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    __slots__ = [
        '_theta',
    ]
    _fields_and_field_types = {
        'theta': 'float',
    }
    SLOT_TYPES = (
        rosidl_parser.definition.BasicType('float'),  # noqa: E501
    )
    def __init__(self, **kwargs):
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        self.theta = kwargs.get('theta', float())
    def __repr__(self):
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else.  If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        if self.theta != other.theta:
            return False
        return True
    @classmethod
    def get_fields_and_field_types(cls):
        from copy import copy
        return copy(cls._fields_and_field_types)
    @property
    def theta(self):
        """Message field 'theta'."""
        return self._theta
    @theta.setter
    def theta(self, value):
        # Type validation only runs under __debug__ (stripped with -O).
        if __debug__:
            assert \
                isinstance(value, float), \
                "The 'theta' field must be of type 'float'"
        self._theta = value
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_RotateAbsolute_Result(type):
    """Metaclass of message 'RotateAbsolute_Result'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None
    __constants = {
    }
    @classmethod
    def __import_type_support__(cls):
        # Lazily bind the C type-support callables; import failure is only
        # logged at debug level, not raised.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute_Result')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__rotate_absolute__result
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__rotate_absolute__result
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__rotate_absolute__result
            cls._TYPE_SUPPORT = module.type_support_msg__action__rotate_absolute__result
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__rotate_absolute__result
    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class RotateAbsolute_Result(metaclass=Metaclass_RotateAbsolute_Result):
    """Message class 'RotateAbsolute_Result'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    __slots__ = [
        '_delta',
    ]
    _fields_and_field_types = {
        'delta': 'float',
    }
    SLOT_TYPES = (
        rosidl_parser.definition.BasicType('float'),  # noqa: E501
    )
    def __init__(self, **kwargs):
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        self.delta = kwargs.get('delta', float())
    def __repr__(self):
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else.  If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        if self.delta != other.delta:
            return False
        return True
    @classmethod
    def get_fields_and_field_types(cls):
        from copy import copy
        return copy(cls._fields_and_field_types)
    @property
    def delta(self):
        """Message field 'delta'."""
        return self._delta
    @delta.setter
    def delta(self, value):
        # Type validation only runs under __debug__ (stripped with -O).
        if __debug__:
            assert \
                isinstance(value, float), \
                "The 'delta' field must be of type 'float'"
        self._delta = value
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_RotateAbsolute_Feedback(type):
    """Metaclass of message 'RotateAbsolute_Feedback'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None
    __constants = {
    }
    @classmethod
    def __import_type_support__(cls):
        # Lazily bind the C type-support callables; import failure is only
        # logged at debug level, not raised.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute_Feedback')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__rotate_absolute__feedback
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__rotate_absolute__feedback
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__rotate_absolute__feedback
            cls._TYPE_SUPPORT = module.type_support_msg__action__rotate_absolute__feedback
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__rotate_absolute__feedback
    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class RotateAbsolute_Feedback(metaclass=Metaclass_RotateAbsolute_Feedback):
    """Message class 'RotateAbsolute_Feedback'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    __slots__ = [
        '_remaining',
    ]
    _fields_and_field_types = {
        'remaining': 'float',
    }
    SLOT_TYPES = (
        rosidl_parser.definition.BasicType('float'),  # noqa: E501
    )
    def __init__(self, **kwargs):
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        self.remaining = kwargs.get('remaining', float())
    def __repr__(self):
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else.  If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        if self.remaining != other.remaining:
            return False
        return True
    @classmethod
    def get_fields_and_field_types(cls):
        from copy import copy
        return copy(cls._fields_and_field_types)
    @property
    def remaining(self):
        """Message field 'remaining'."""
        return self._remaining
    @remaining.setter
    def remaining(self, value):
        # Type validation only runs under __debug__ (stripped with -O).
        if __debug__:
            assert \
                isinstance(value, float), \
                "The 'remaining' field must be of type 'float'"
        self._remaining = value
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_RotateAbsolute_SendGoal_Request(type):
    """Metaclass of message 'RotateAbsolute_SendGoal_Request'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None
    __constants = {
    }
    @classmethod
    def __import_type_support__(cls):
        # Lazily bind the C type-support callables; import failure is only
        # logged at debug level, not raised.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute_SendGoal_Request')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__rotate_absolute__send_goal__request
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__rotate_absolute__send_goal__request
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__rotate_absolute__send_goal__request
            cls._TYPE_SUPPORT = module.type_support_msg__action__rotate_absolute__send_goal__request
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__rotate_absolute__send_goal__request
            # Recursively import type support for the nested field types.
            from turtlesim.action import RotateAbsolute
            if RotateAbsolute.Goal.__class__._TYPE_SUPPORT is None:
                RotateAbsolute.Goal.__class__.__import_type_support__()
            from unique_identifier_msgs.msg import UUID
            if UUID.__class__._TYPE_SUPPORT is None:
                UUID.__class__.__import_type_support__()
    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class RotateAbsolute_SendGoal_Request(metaclass=Metaclass_RotateAbsolute_SendGoal_Request):
    """Message class 'RotateAbsolute_SendGoal_Request'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    __slots__ = [
        '_goal_id',
        '_goal',
    ]
    _fields_and_field_types = {
        'goal_id': 'unique_identifier_msgs/UUID',
        'goal': 'turtlesim/RotateAbsolute_Goal',
    }
    SLOT_TYPES = (
        rosidl_parser.definition.NamespacedType(['unique_identifier_msgs', 'msg'], 'UUID'),  # noqa: E501
        rosidl_parser.definition.NamespacedType(['turtlesim', 'action'], 'RotateAbsolute_Goal'),  # noqa: E501
    )
    def __init__(self, **kwargs):
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        from unique_identifier_msgs.msg import UUID
        self.goal_id = kwargs.get('goal_id', UUID())
        from turtlesim.action._rotate_absolute import RotateAbsolute_Goal
        self.goal = kwargs.get('goal', RotateAbsolute_Goal())
    def __repr__(self):
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else.  If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        if self.goal_id != other.goal_id:
            return False
        if self.goal != other.goal:
            return False
        return True
    @classmethod
    def get_fields_and_field_types(cls):
        from copy import copy
        return copy(cls._fields_and_field_types)
    @property
    def goal_id(self):
        """Message field 'goal_id'."""
        return self._goal_id
    @goal_id.setter
    def goal_id(self, value):
        # Type validation only runs under __debug__ (stripped with -O).
        if __debug__:
            from unique_identifier_msgs.msg import UUID
            assert \
                isinstance(value, UUID), \
                "The 'goal_id' field must be a sub message of type 'UUID'"
        self._goal_id = value
    @property
    def goal(self):
        """Message field 'goal'."""
        return self._goal
    @goal.setter
    def goal(self, value):
        # Type validation only runs under __debug__ (stripped with -O).
        if __debug__:
            from turtlesim.action._rotate_absolute import RotateAbsolute_Goal
            assert \
                isinstance(value, RotateAbsolute_Goal), \
                "The 'goal' field must be a sub message of type 'RotateAbsolute_Goal'"
        self._goal = value
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_RotateAbsolute_SendGoal_Response(type):
    """Metaclass of message 'RotateAbsolute_SendGoal_Response'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None
    __constants = {
    }
    @classmethod
    def __import_type_support__(cls):
        # Lazily bind the C type-support callables; import failure is only
        # logged at debug level, not raised.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute_SendGoal_Response')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__rotate_absolute__send_goal__response
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__rotate_absolute__send_goal__response
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__rotate_absolute__send_goal__response
            cls._TYPE_SUPPORT = module.type_support_msg__action__rotate_absolute__send_goal__response
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__rotate_absolute__send_goal__response
            # Recursively import type support for the nested field type.
            from builtin_interfaces.msg import Time
            if Time.__class__._TYPE_SUPPORT is None:
                Time.__class__.__import_type_support__()
    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class RotateAbsolute_SendGoal_Response(metaclass=Metaclass_RotateAbsolute_SendGoal_Response):
    """Message class 'RotateAbsolute_SendGoal_Response'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    __slots__ = [
        '_accepted',
        '_stamp',
    ]
    _fields_and_field_types = {
        'accepted': 'boolean',
        'stamp': 'builtin_interfaces/Time',
    }
    SLOT_TYPES = (
        rosidl_parser.definition.BasicType('boolean'),  # noqa: E501
        rosidl_parser.definition.NamespacedType(['builtin_interfaces', 'msg'], 'Time'),  # noqa: E501
    )
    def __init__(self, **kwargs):
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        self.accepted = kwargs.get('accepted', bool())
        from builtin_interfaces.msg import Time
        self.stamp = kwargs.get('stamp', Time())
    def __repr__(self):
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else.  If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        if self.accepted != other.accepted:
            return False
        if self.stamp != other.stamp:
            return False
        return True
    @classmethod
    def get_fields_and_field_types(cls):
        from copy import copy
        return copy(cls._fields_and_field_types)
    @property
    def accepted(self):
        """Message field 'accepted'."""
        return self._accepted
    @accepted.setter
    def accepted(self, value):
        # Type validation only runs under __debug__ (stripped with -O).
        if __debug__:
            assert \
                isinstance(value, bool), \
                "The 'accepted' field must be of type 'bool'"
        self._accepted = value
    @property
    def stamp(self):
        """Message field 'stamp'."""
        return self._stamp
    @stamp.setter
    def stamp(self, value):
        # Type validation only runs under __debug__ (stripped with -O).
        if __debug__:
            from builtin_interfaces.msg import Time
            assert \
                isinstance(value, Time), \
                "The 'stamp' field must be a sub message of type 'Time'"
        self._stamp = value
class Metaclass_RotateAbsolute_SendGoal(type):
    """Metaclass of service 'RotateAbsolute_SendGoal'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    _TYPE_SUPPORT = None
    @classmethod
    def __import_type_support__(cls):
        # Lazily bind the C type-support callable; import failure is only
        # logged at debug level, not raised.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute_SendGoal')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._TYPE_SUPPORT = module.type_support_srv__action__rotate_absolute__send_goal
            # Ensure request/response message type support is imported too.
            from turtlesim.action import _rotate_absolute
            if _rotate_absolute.Metaclass_RotateAbsolute_SendGoal_Request._TYPE_SUPPORT is None:
                _rotate_absolute.Metaclass_RotateAbsolute_SendGoal_Request.__import_type_support__()
            if _rotate_absolute.Metaclass_RotateAbsolute_SendGoal_Response._TYPE_SUPPORT is None:
                _rotate_absolute.Metaclass_RotateAbsolute_SendGoal_Response.__import_type_support__()
class RotateAbsolute_SendGoal(metaclass=Metaclass_RotateAbsolute_SendGoal):
    """Service class 'RotateAbsolute_SendGoal' (namespace for Request/Response)."""
    from turtlesim.action._rotate_absolute import RotateAbsolute_SendGoal_Request as Request
    from turtlesim.action._rotate_absolute import RotateAbsolute_SendGoal_Response as Response
    def __init__(self):
        raise NotImplementedError('Service classes can not be instantiated')
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_RotateAbsolute_GetResult_Request(type):
    """Metaclass of message 'RotateAbsolute_GetResult_Request'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None
    __constants = {
    }
    @classmethod
    def __import_type_support__(cls):
        # Lazily bind the C type-support callables; import failure is only
        # logged at debug level, not raised.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute_GetResult_Request')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__rotate_absolute__get_result__request
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__rotate_absolute__get_result__request
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__rotate_absolute__get_result__request
            cls._TYPE_SUPPORT = module.type_support_msg__action__rotate_absolute__get_result__request
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__rotate_absolute__get_result__request
            # Recursively import type support for the nested field type.
            from unique_identifier_msgs.msg import UUID
            if UUID.__class__._TYPE_SUPPORT is None:
                UUID.__class__.__import_type_support__()
    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class RotateAbsolute_GetResult_Request(metaclass=Metaclass_RotateAbsolute_GetResult_Request):
    """Message class 'RotateAbsolute_GetResult_Request'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    __slots__ = [
        '_goal_id',
    ]
    _fields_and_field_types = {
        'goal_id': 'unique_identifier_msgs/UUID',
    }
    SLOT_TYPES = (
        rosidl_parser.definition.NamespacedType(['unique_identifier_msgs', 'msg'], 'UUID'),  # noqa: E501
    )
    def __init__(self, **kwargs):
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        from unique_identifier_msgs.msg import UUID
        self.goal_id = kwargs.get('goal_id', UUID())
    def __repr__(self):
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else.  If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        if self.goal_id != other.goal_id:
            return False
        return True
    @classmethod
    def get_fields_and_field_types(cls):
        from copy import copy
        return copy(cls._fields_and_field_types)
    @property
    def goal_id(self):
        """Message field 'goal_id'."""
        return self._goal_id
    @goal_id.setter
    def goal_id(self, value):
        # Type validation only runs under __debug__ (stripped with -O).
        if __debug__:
            from unique_identifier_msgs.msg import UUID
            assert \
                isinstance(value, UUID), \
                "The 'goal_id' field must be a sub message of type 'UUID'"
        self._goal_id = value
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_RotateAbsolute_GetResult_Response(type):
    """Metaclass of message 'RotateAbsolute_GetResult_Response'."""
    # NOTE: generated by rosidl_generator_py -- regenerate from the .action
    # definition instead of editing this file by hand.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None
    __constants = {
    }
    @classmethod
    def __import_type_support__(cls):
        # Lazily bind the C type-support callables; import failure is only
        # logged at debug level, not raised.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute_GetResult_Response')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__rotate_absolute__get_result__response
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__rotate_absolute__get_result__response
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__rotate_absolute__get_result__response
            cls._TYPE_SUPPORT = module.type_support_msg__action__rotate_absolute__get_result__response
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__rotate_absolute__get_result__response
            # Recursively import type support for the nested field type.
            from turtlesim.action import RotateAbsolute
            if RotateAbsolute.Result.__class__._TYPE_SUPPORT is None:
                RotateAbsolute.Result.__class__.__import_type_support__()
    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class RotateAbsolute_GetResult_Response(metaclass=Metaclass_RotateAbsolute_GetResult_Response):
    """Message class 'RotateAbsolute_GetResult_Response'."""
    # Slots keep instances small and restrict attributes to the declared fields.
    __slots__ = [
        '_status',
        '_result',
    ]
    # Field name -> ROS interface type name, as declared in the .action file.
    _fields_and_field_types = {
        'status': 'int8',
        'result': 'turtlesim/RotateAbsolute_Result',
    }
    # Parsed rosidl type descriptors, parallel to __slots__.
    SLOT_TYPES = (
        rosidl_parser.definition.BasicType('int8'), # noqa: E501
        rosidl_parser.definition.NamespacedType(['turtlesim', 'action'], 'RotateAbsolute_Result'), # noqa: E501
    )
    def __init__(self, **kwargs):
        # Only declared field names may be passed; anything else is a typo.
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        self.status = kwargs.get('status', int())
        # Local import avoids a circular import at module load time.
        from turtlesim.action._rotate_absolute import RotateAbsolute_Result
        self.result = kwargs.get('result', RotateAbsolute_Result())
    def __repr__(self):
        # Build '<package>.<Class>(field=value, ...)' from the module path.
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else. If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    # repr of array.array looks like "array('X', [...])";
                    # keep only the bracketed payload.
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))
    def __eq__(self, other):
        # Field-by-field comparison; type must match exactly.
        if not isinstance(other, self.__class__):
            return False
        if self.status != other.status:
            return False
        if self.result != other.result:
            return False
        return True
    @classmethod
    def get_fields_and_field_types(cls):
        """Return a shallow copy of the field-name -> ROS-type-name mapping."""
        from copy import copy
        return copy(cls._fields_and_field_types)
    @property
    def status(self):
        """Message field 'status' (int8 action-goal status code)."""
        return self._status
    @status.setter
    def status(self, value):
        # Range/type checks run only in debug mode (`python -O` strips them).
        if __debug__:
            assert \
                isinstance(value, int), \
                "The 'status' field must be of type 'int'"
            assert value >= -128 and value < 128, \
                "The 'status' field must be an integer in [-128, 127]"
        self._status = value
    @property
    def result(self):
        """Message field 'result' (turtlesim/RotateAbsolute_Result sub-message)."""
        return self._result
    @result.setter
    def result(self, value):
        if __debug__:
            from turtlesim.action._rotate_absolute import RotateAbsolute_Result
            assert \
                isinstance(value, RotateAbsolute_Result), \
                "The 'result' field must be a sub message of type 'RotateAbsolute_Result'"
        self._result = value
class Metaclass_RotateAbsolute_GetResult(type):
    """Metaclass of service 'RotateAbsolute_GetResult'."""
    # Cached C type-support handle; populated lazily below.
    _TYPE_SUPPORT = None
    @classmethod
    def __import_type_support__(cls):
        # Bind the generated C type-support entry point for this service.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            # Log and bail out; a later call may retry the import.
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute_GetResult')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._TYPE_SUPPORT = module.type_support_srv__action__rotate_absolute__get_result
            # A service needs type support for both its request and its
            # response message types.
            from turtlesim.action import _rotate_absolute
            if _rotate_absolute.Metaclass_RotateAbsolute_GetResult_Request._TYPE_SUPPORT is None:
                _rotate_absolute.Metaclass_RotateAbsolute_GetResult_Request.__import_type_support__()
            if _rotate_absolute.Metaclass_RotateAbsolute_GetResult_Response._TYPE_SUPPORT is None:
                _rotate_absolute.Metaclass_RotateAbsolute_GetResult_Response.__import_type_support__()
class RotateAbsolute_GetResult(metaclass=Metaclass_RotateAbsolute_GetResult):
    """Service class 'RotateAbsolute_GetResult': namespace for Request/Response."""
    # Request/response message types defined in the action definition.
    from turtlesim.action._rotate_absolute import RotateAbsolute_GetResult_Request as Request
    from turtlesim.action._rotate_absolute import RotateAbsolute_GetResult_Response as Response
    def __init__(self):
        # Service classes are namespaces only; they are never instantiated.
        raise NotImplementedError('Service classes can not be instantiated')
# Import statements for member types
# already imported above
# import rosidl_parser.definition
class Metaclass_RotateAbsolute_FeedbackMessage(type):
    """Metaclass of message 'RotateAbsolute_FeedbackMessage'."""
    # Cached C-extension entry points; populated lazily by
    # __import_type_support__() the first time type support is needed.
    _CREATE_ROS_MESSAGE = None
    _CONVERT_FROM_PY = None
    _CONVERT_TO_PY = None
    _DESTROY_ROS_MESSAGE = None
    _TYPE_SUPPORT = None
    # No message constants are defined for this type.
    __constants = {
    }
    @classmethod
    def __import_type_support__(cls):
        # Bind the generated C type-support functions onto this metaclass.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            # Type support is optional at import time: log the failure and
            # leave the hooks unset so a later call can retry.
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute_FeedbackMessage')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__action__rotate_absolute__feedback_message
            cls._CONVERT_FROM_PY = module.convert_from_py_msg__action__rotate_absolute__feedback_message
            cls._CONVERT_TO_PY = module.convert_to_py_msg__action__rotate_absolute__feedback_message
            cls._TYPE_SUPPORT = module.type_support_msg__action__rotate_absolute__feedback_message
            cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__action__rotate_absolute__feedback_message
            # The feedback message embeds the user-defined Feedback type and
            # a UUID goal id; import their type support as well.
            from turtlesim.action import RotateAbsolute
            if RotateAbsolute.Feedback.__class__._TYPE_SUPPORT is None:
                RotateAbsolute.Feedback.__class__.__import_type_support__()
            from unique_identifier_msgs.msg import UUID
            if UUID.__class__._TYPE_SUPPORT is None:
                UUID.__class__.__import_type_support__()
    @classmethod
    def __prepare__(cls, name, bases, **kwargs):
        # list constant names here so that they appear in the help text of
        # the message class under "Data and other attributes defined here:"
        # as well as populate each message instance
        return {
        }
class RotateAbsolute_FeedbackMessage(metaclass=Metaclass_RotateAbsolute_FeedbackMessage):
    """Message class 'RotateAbsolute_FeedbackMessage'."""
    # Slots keep instances small and restrict attributes to the declared fields.
    __slots__ = [
        '_goal_id',
        '_feedback',
    ]
    # Field name -> ROS interface type name, as declared in the .action file.
    _fields_and_field_types = {
        'goal_id': 'unique_identifier_msgs/UUID',
        'feedback': 'turtlesim/RotateAbsolute_Feedback',
    }
    # Parsed rosidl type descriptors, parallel to __slots__.
    SLOT_TYPES = (
        rosidl_parser.definition.NamespacedType(['unique_identifier_msgs', 'msg'], 'UUID'), # noqa: E501
        rosidl_parser.definition.NamespacedType(['turtlesim', 'action'], 'RotateAbsolute_Feedback'), # noqa: E501
    )
    def __init__(self, **kwargs):
        # Only declared field names may be passed; anything else is a typo.
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %s' % \
            ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
        # Local imports avoid circular imports at module load time.
        from unique_identifier_msgs.msg import UUID
        self.goal_id = kwargs.get('goal_id', UUID())
        from turtlesim.action._rotate_absolute import RotateAbsolute_Feedback
        self.feedback = kwargs.get('feedback', RotateAbsolute_Feedback())
    def __repr__(self):
        # Build '<package>.<Class>(field=value, ...)' from the module path.
        typename = self.__class__.__module__.split('.')
        typename.pop()
        typename.append(self.__class__.__name__)
        args = []
        for s, t in zip(self.__slots__, self.SLOT_TYPES):
            field = getattr(self, s)
            fieldstr = repr(field)
            # We use Python array type for fields that can be directly stored
            # in them, and "normal" sequences for everything else. If it is
            # a type that we store in an array, strip off the 'array' portion.
            if (
                isinstance(t, rosidl_parser.definition.AbstractSequence) and
                isinstance(t.value_type, rosidl_parser.definition.BasicType) and
                t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
            ):
                if len(field) == 0:
                    fieldstr = '[]'
                else:
                    # repr of array.array looks like "array('X', [...])";
                    # keep only the bracketed payload.
                    assert fieldstr.startswith('array(')
                    prefix = "array('X', "
                    suffix = ')'
                    fieldstr = fieldstr[len(prefix):-len(suffix)]
            args.append(s[1:] + '=' + fieldstr)
        return '%s(%s)' % ('.'.join(typename), ', '.join(args))
    def __eq__(self, other):
        # Field-by-field comparison; type must match exactly.
        if not isinstance(other, self.__class__):
            return False
        if self.goal_id != other.goal_id:
            return False
        if self.feedback != other.feedback:
            return False
        return True
    @classmethod
    def get_fields_and_field_types(cls):
        """Return a shallow copy of the field-name -> ROS-type-name mapping."""
        from copy import copy
        return copy(cls._fields_and_field_types)
    @property
    def goal_id(self):
        """Message field 'goal_id' (unique_identifier_msgs/UUID sub-message)."""
        return self._goal_id
    @goal_id.setter
    def goal_id(self, value):
        # Type validation runs only in debug mode (`python -O` strips it).
        if __debug__:
            from unique_identifier_msgs.msg import UUID
            assert \
                isinstance(value, UUID), \
                "The 'goal_id' field must be a sub message of type 'UUID'"
        self._goal_id = value
    @property
    def feedback(self):
        """Message field 'feedback' (turtlesim/RotateAbsolute_Feedback sub-message)."""
        return self._feedback
    @feedback.setter
    def feedback(self, value):
        if __debug__:
            from turtlesim.action._rotate_absolute import RotateAbsolute_Feedback
            assert \
                isinstance(value, RotateAbsolute_Feedback), \
                "The 'feedback' field must be a sub message of type 'RotateAbsolute_Feedback'"
        self._feedback = value
class Metaclass_RotateAbsolute(type):
    """Metaclass of action 'RotateAbsolute'."""
    # Cached C type-support handle for the whole action; populated lazily.
    _TYPE_SUPPORT = None
    @classmethod
    def __import_type_support__(cls):
        # Bind the generated C type-support entry point for the action.
        try:
            from rosidl_generator_py import import_type_support
            module = import_type_support('turtlesim')
        except ImportError:
            # Log and bail out; a later call may retry the import.
            import logging
            import traceback
            logger = logging.getLogger(
                'turtlesim.action.RotateAbsolute')
            logger.debug(
                'Failed to import needed modules for type support:\n' +
                traceback.format_exc())
        else:
            cls._TYPE_SUPPORT = module.type_support_action__action__rotate_absolute
            # An action relies on the generic status/cancel interfaces from
            # action_msgs ...
            from action_msgs.msg import _goal_status_array
            if _goal_status_array.Metaclass_GoalStatusArray._TYPE_SUPPORT is None:
                _goal_status_array.Metaclass_GoalStatusArray.__import_type_support__()
            from action_msgs.srv import _cancel_goal
            if _cancel_goal.Metaclass_CancelGoal._TYPE_SUPPORT is None:
                _cancel_goal.Metaclass_CancelGoal.__import_type_support__()
            # ... plus its own send_goal/get_result services and its
            # feedback message.
            from turtlesim.action import _rotate_absolute
            if _rotate_absolute.Metaclass_RotateAbsolute_SendGoal._TYPE_SUPPORT is None:
                _rotate_absolute.Metaclass_RotateAbsolute_SendGoal.__import_type_support__()
            if _rotate_absolute.Metaclass_RotateAbsolute_GetResult._TYPE_SUPPORT is None:
                _rotate_absolute.Metaclass_RotateAbsolute_GetResult.__import_type_support__()
            if _rotate_absolute.Metaclass_RotateAbsolute_FeedbackMessage._TYPE_SUPPORT is None:
                _rotate_absolute.Metaclass_RotateAbsolute_FeedbackMessage.__import_type_support__()
class RotateAbsolute(metaclass=Metaclass_RotateAbsolute):
    """Action class 'RotateAbsolute': namespace for goal/result/feedback types."""
    # The goal message defined in the action definition.
    from turtlesim.action._rotate_absolute import RotateAbsolute_Goal as Goal
    # The result message defined in the action definition.
    from turtlesim.action._rotate_absolute import RotateAbsolute_Result as Result
    # The feedback message defined in the action definition.
    from turtlesim.action._rotate_absolute import RotateAbsolute_Feedback as Feedback
    class Impl:
        """Wrapped service/message types used internally by action clients and servers."""
        # The send_goal service using a wrapped version of the goal message as a request.
        from turtlesim.action._rotate_absolute import RotateAbsolute_SendGoal as SendGoalService
        # The get_result service using a wrapped version of the result message as a response.
        from turtlesim.action._rotate_absolute import RotateAbsolute_GetResult as GetResultService
        # The feedback message with generic fields which wraps the feedback message.
        from turtlesim.action._rotate_absolute import RotateAbsolute_FeedbackMessage as FeedbackMessage
        # The generic service to cancel a goal.
        from action_msgs.srv._cancel_goal import CancelGoal as CancelGoalService
        # The generic message for get the status of a goal.
        from action_msgs.msg._goal_status_array import GoalStatusArray as GoalStatusMessage
    def __init__(self):
        # Action classes are namespaces only; they are never instantiated.
        raise NotImplementedError('Action classes can not be instantiated')
import unittest

import mock
import sublime
import ctypes

# Stub out the Sublime Text API before importing the plugin under test:
# executepscommand reads sublime.packagesPath() at import time, so it must
# already return a known location ("XXX") here.
# (Fix: this block was garbled with dataset-table residue fused onto the
# `import unittest` line; restored to valid Python.)
sublime.packagesPath = mock.Mock()
sublime.packagesPath.return_value = "XXX"

import sublimeplugin
import executepscommand
class SimpleTestCase(unittest.TestCase):
    """Tests for the executepscommand module-level helper functions."""

    def test_regionsToPoShArray(self):
        # Regions become a comma-separated PoSh array literal; single quotes
        # inside a region are escaped by doubling them.
        view = mock.Mock()
        rgs = ["'one'", "'two'", "three"]
        view.substr = mock.Mock()
        view.substr.side_effect = lambda x: x
        expected = "'''one''','''two''','three'"
        actual = executepscommand.regionsToPoShArray(view, rgs)
        self.assertEqual(expected, actual)

    def test_getThisPackageNameDebug(self):
        # FIX: this test previously reused the name of the non-debug test
        # below, so Python silently shadowed it and it never ran.
        executepscommand.DEBUG = True
        expected = "XXXPowershellUtils"
        actual = executepscommand.getThisPackageName()
        self.assertEqual(expected, actual)

    def test_getThisPackageNameNonDebug(self):
        executepscommand.DEBUG = False
        expected = "PowershellUtils"
        actual = executepscommand.getThisPackageName()
        self.assertEqual(expected, actual)

    def test_getPathToPoShScript(self):
        expected = r"XXX\PowershellUtils\psbuff.ps1"
        actual = executepscommand.getPathToPoShScript()
        self.assertEqual(expected, actual)

    def test_getPathToPoShHistoryDB(self):
        expected = r"XXX\PowershellUtils\pshist.txt"
        actual = executepscommand.getPathToPoShHistoryDB()
        self.assertEqual(expected, actual)

    def test_getPathToOutputSink(self):
        expected = r"XXX\PowershellUtils\out.xml"
        actual = executepscommand.getPathToOutputSink()
        self.assertEqual(expected, actual)

    def test_buildPoShCmdLine(self):
        expected = ["powershell",
                    "-noprofile",
                    "-nologo",
                    "-noninteractive",
                    # PoSh 2.0 lets you specify an ExecutionPolicy
                    # from the cmdline, but 1.0 doesn't.
                    "-executionpolicy", "remotesigned",
                    "-file", executepscommand.getPathToPoShScript(), ]
        actual = executepscommand.buildPoShCmdLine()
        self.assertEqual(expected, actual)
class TestCase_Helpers(unittest.TestCase):
    """Tests for code-page helpers (Windows-only: uses ctypes.windll)."""

    def test_getOEMCP(self):
        # getOEMCP() must report the same OEM code page as the Win32 API,
        # formatted as a string.
        expected = str(ctypes.windll.kernel32.GetOEMCP())
        actual = executepscommand.getOEMCP()
        self.assertEqual(expected, actual)
class TestCase_HistoryFunctionality(unittest.TestCase):
    """Tests for the PoSh command-history bookkeeping."""

    def setUp(self):
        self.command = executepscommand.RunExternalPSCommandCommand()

    def test_NewCommandIsAppendedToHistory(self):
        self.command._addToPSHistory("1")
        self.assertEqual(["1"], self.command.PSHistory)

    def test_ExistingCommandIsDiscarded(self):
        # Adding the same command twice must not create a duplicate entry.
        self.command._addToPSHistory("1")
        self.command._addToPSHistory("1")
        self.assertEqual(len(self.command.PSHistory), 1)

    def test_HistoryIsPoppedIfUpperLimitIsExceeded(self):
        historyMaxCount = executepscommand.RunExternalPSCommandCommand.PoSh_HISTORY_MAX_LENGTH
        newCommands = [str(x) for x in range(historyMaxCount)]
        # FIX: the original used map() for its side effect, which is lazy on
        # Python 3 and therefore never populated the history at all.
        for cmd in newCommands:
            self.command._addToPSHistory(cmd)
        lastCommand = self.command.PSHistory[0]
        # Exceeding the limit must evict the oldest entry.
        self.command._addToPSHistory("NEW_COMMAND")
        self.assertNotEqual(lastCommand, self.command.PSHistory[0])
# Run the test suite when executed directly.
# (Fix: stray dataset-table residue after unittest.main() made the line
# invalid Python; removed.)
if __name__ == "__main__":
    unittest.main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.