index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
10,400 | 8db3a0e8a4d36176e9d8593226b6a883d688d03e | import requests
from multiprocessing import Pool
import math
import logging
import os
import pickle
import clip
PROCESS_NUM = 20
RETRY_TIME = 5
MAGIC_CLIENT_ID = "jzkbprff40iqj646a697cyrvl0zt2m6"
def init_directory_structure():
    """Create the local data/{chats,clips,videos} cache directories.

    Uses makedirs(exist_ok=True): creates parents as needed and avoids the
    check-then-create race of the original exists()/mkdir() pairs.
    """
    for path in ('data/chats', 'data/clips', 'data/videos'):
        os.makedirs(path, exist_ok=True)
def twitch_get_video_info(video_id):
    """Fetch metadata for one video from the Twitch Helix API.

    Retries up to RETRY_TIME times on request failure, then re-raises the
    last error. Returns the first record of the 'data' payload.
    """
    cnt = 0
    while True:
        try:
            cnt += 1
            req = requests.get("https://api.twitch.tv/helix/videos",
                               headers={"client-id": MAGIC_CLIENT_ID},
                               params={"id": video_id})
            break
        except Exception:  # was bare except: also trapped KeyboardInterrupt/SystemExit
            if cnt > RETRY_TIME:
                raise
    return req.json()['data'][0]
def duration_str_to_seconds(d_str):
    """Convert a Twitch duration string such as "1h2m3s" into total seconds.

    Any of the h/m/s components may be absent ("45s", "2m", "3h" all work).
    """
    seconds = 0
    for suffix, factor in (('h', 3600), ('m', 60), ('s', 1)):
        head, sep, tail = d_str.partition(suffix)
        if sep:  # suffix present: consume its numeric prefix
            seconds += int(head) * factor
            d_str = tail
    return seconds
def twitch_crawl_chat_in_interval(args):
    """Crawl chat comments for one interval of a video.

    :param args: (video_id, start, end) tuple in seconds; a single tuple so
        the function can be used with multiprocessing.Pool.map.
    :return: list of comment dicts with content_offset_seconds in [start, end).
    """
    video_id, start, end = args
    url = "https://api.twitch.tv/v5/videos/%s/comments" % video_id
    cnt = 0
    while True:
        try:
            cnt += 1
            req = requests.get(url, headers={"client-id": MAGIC_CLIENT_ID},
                               params={"content_offset_seconds": start})
            break
        except Exception:  # was bare except: also trapped KeyboardInterrupt/SystemExit
            if cnt > RETRY_TIME:
                raise
    comments = []
    while True:
        if req.status_code != 200:
            logging.error(req.text)
            # was a bare `raise` with no active exception, which itself fails
            # with "No active exception to re-raise"; raise explicitly instead.
            raise RuntimeError("Twitch API request failed: %s" % req.text)
        json_data = req.json()
        if len(json_data['comments']) == 0:
            break
        comments += json_data['comments']
        # Stop once past the interval end or there are no more pages.
        if comments[-1]['content_offset_seconds'] >= end or not '_next' in json_data:
            break
        cursor_str = json_data['_next']
        cnt = 0
        while True:
            try:
                cnt += 1
                req = requests.get(url, headers={"client-id": MAGIC_CLIENT_ID},
                                   params={"cursor": cursor_str})
                break
            except Exception:  # was bare except
                if cnt > RETRY_TIME:
                    raise
    # Trim comments that fall outside [start, end): first from the tail,
    # then from the head.
    if len(comments) > 0:
        n = 0
        while n < len(comments) and comments[-n-1]['content_offset_seconds'] >= end:
            n += 1
        if n > 0:
            comments = comments[:-n]
        n = 0
        while n < len(comments) and comments[n]['content_offset_seconds'] < start:
            n += 1
        comments = comments[n:]
    return comments
def twitch_crawl_chat(video_id):
    """Download the full chat log of a video using PROCESS_NUM workers.

    The video duration is split into equal slices; each worker crawls one
    slice and the slices are concatenated in chronological order.
    """
    video_info = twitch_get_video_info(video_id)
    video_length = duration_str_to_seconds(video_info['duration'])
    step = math.ceil(video_length / PROCESS_NUM)
    pool = Pool(PROCESS_NUM)
    try:
        chats = pool.map(twitch_crawl_chat_in_interval,
                         [(video_id, s, s + step) for s in range(0, video_length, step)])
    finally:
        # Always release the worker processes, even if map() raises.
        pool.close()
        pool.join()
    # Flatten the per-slice lists; sum(chats, []) was quadratic in slice count.
    chats = [comment for interval in chats for comment in interval]
    return chats
def twitch_get_user_profile(login_name):
    """Look up a Twitch user's Helix profile record by login name.

    Retries up to RETRY_TIME times on request failure before re-raising.
    """
    cnt = 0
    while True:
        try:
            cnt += 1
            req = requests.get("https://api.twitch.tv/helix/users/",
                               headers={"client-id": MAGIC_CLIENT_ID},
                               params={"login": login_name})
            break
        except Exception:  # was bare except: also trapped KeyboardInterrupt/SystemExit
            if cnt > RETRY_TIME:
                raise
    return req.json()['data'][0]
def twitch_get_user_clips(user_id):
    """Return every clip of a broadcaster, following Helix cursor pagination."""
    params = {"broadcaster_id": user_id, "first": 100}
    clips = []
    while True:
        cnt = 0
        while True:
            try:
                cnt += 1
                req = requests.get("https://api.twitch.tv/helix/clips/",
                                   headers={"client-id": MAGIC_CLIENT_ID},
                                   params=params)
                break
            except Exception:  # was bare except
                if cnt > RETRY_TIME:
                    raise
        json_data = req.json()
        clips += json_data['data']
        if 'cursor' in json_data["pagination"]:
            # More pages: continue from the returned cursor.
            params['after'] = json_data["pagination"]['cursor']
        else:
            break
    return clips
def twitch_get_game_info(game_id_list):
    """Fetch Helix game records for a list of game ids.

    Retries up to RETRY_TIME times on request failure before re-raising.
    """
    cnt = 0
    while True:
        try:
            cnt += 1
            req = requests.get("https://api.twitch.tv/helix/games/",
                               headers={"client-id": MAGIC_CLIENT_ID},
                               params={"id": game_id_list})
            break
        except Exception:  # was bare except: also trapped KeyboardInterrupt/SystemExit
            if cnt > RETRY_TIME:
                raise
    return req.json()['data']
def load_chats(video_id):
    """Return the chat messages of a video, crawling them on a cache miss.

    Results are pickled under data/chats/<video_id>.pickle.
    """
    cache_path = 'data/chats/%s.pickle' % video_id
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as fin:
            return pickle.load(fin)
    logging.warning("Chats of %s don't exist, downloading..." % video_id)
    chats = twitch_crawl_chat(video_id)
    with open(cache_path, 'wb') as fout:
        pickle.dump(chats, fout)
    return chats
def load_clips(user_id):
    """Return the top clips of a user, downloading them on a cache miss.

    Results are pickled under data/clips/<user_id>.pickle.
    """
    cache_path = 'data/clips/%s.pickle' % user_id
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as fin:
            return pickle.load(fin)
    logging.warning("Clips of %s don't exist, downloading..." % user_id)
    clips = clip.Clip.get_top(user_id)
    with open(cache_path, 'wb') as fout:
        pickle.dump(clips, fout)
    return clips
def load_video_infos(user_id):
    """Return all video infos for a user (login name), downloading on miss.

    The login name is resolved to the numeric Helix user id, which names
    the cache file under data/videos/.
    """
    username = user_id
    user_profile = twitch_get_user_profile(user_id)
    user_id = user_profile['id']
    video_infos_path = "data/videos/%s.pickle" % user_id
    if not os.path.exists(video_infos_path):
        logging.warning("Video infos of %s don't exist, downloading..." % username)
        params = {"user_id": user_id, "first": 100}
        video_infos = []
        while True:
            cnt = 0
            while True:
                try:
                    cnt += 1
                    req = requests.get("https://api.twitch.tv/helix/videos",
                                       headers={"client-id": MAGIC_CLIENT_ID},
                                       params=params)
                    break
                except Exception:  # was bare except: also trapped KeyboardInterrupt
                    if cnt > RETRY_TIME:
                        raise
            json_data = req.json()
            if len(json_data['data']) == 0:
                break
            video_infos += json_data['data']
            if 'cursor' in json_data["pagination"]:
                # More pages: continue from the returned cursor.
                params['after'] = json_data["pagination"]['cursor']
            else:
                break
        with open(video_infos_path, 'wb') as fout:
            pickle.dump(video_infos, fout)
    else:
        with open(video_infos_path, 'rb') as fin:
            video_infos = pickle.load(fin)
    return video_infos
|
10,401 | bedd53d964519e5424447a9af0449abf31ab5eee | from typing import List, Tuple, Union, Dict, Any
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from manual_ml.base import BaseModel
from manual_ml.helpers.metrics import accuracy
class Tree(BaseModel):
    """
    Binary classification tree.

    Fits a binary decision tree on a feature matrix by recursively choosing
    the Gini-optimal split; the fitted tree is stored as nested node dicts.
    """
    def __init__(self,
                 min_data: int = 2,
                 max_depth: int = 3,
                 dynamic_bias: bool = True,
                 bias: float = 0.5):
        # Hyper-parameters kept in a single dict (BaseModel convention).
        self.params = {'max_depth': max_depth,
                       'min_data': min_data,
                       'dynamic_bias': dynamic_bias,
                       'bias': bias}
        self.feature_names = []
        # Root node of the fitted tree (nested dicts); None until fit() runs.
        self.tree: Dict[str, Any] = None

    def _print(self,
               nodes: Dict[str, Any] = None,  # a node dict, not List[str]
               root: bool = True):
        """Recursively pretty-print the tree, starting from the root node."""
        if nodes is None:
            nodes = []
        if root:
            nodes = self.tree
        Tree.print_node(nodes)
        # 'class' == -1 marks an internal node; recurse into both children.
        if nodes['class'] == -1:
            self._print(nodes=nodes['l_node'],
                        root=False)
            self._print(nodes=nodes['r_node'],
                        root=False)

    @staticmethod
    def print_node(node):
        """Print one node's metadata, indented two spaces per depth level."""
        bs = ' ' * node['depth'] * 2
        bs2 = '-' * 2
        print(bs+'|'+bs2+'*' * 50)
        print(bs+'|'+bs+node['node_str'])
        print(bs+'|'+bs+'Depth:', node['depth'])
        print(bs+'|'+bs+'n samples:', node['n'])
        if not node['terminal']:
            # Internal node: show the chosen split and its quality.
            print(bs+'|'+bs+'Name: '+node['name'])
            print(bs+'|'+bs+'Split Value:', node['split_val'])
            print(bs+'|'+bs+'Gini coeff:', node['gini'])
            print(bs+'|'+bs+'Class prop requirement:', node['bias'],
                  '('+node['biasMode']+')')
            print(bs+'|'+bs+'Prop L:', node['prop_l'])
            print(bs+'|'+bs+'Prop R:', node['prop_r'])
        else:
            print(bs+'|'+bs+'Leaf')
            print(bs+'|'+bs+node['note'])

    def fit(self, x, y, debug=False):
        """
        Convert data to matrix if dataframe
        Recursively create nodes using tree.buildNodes()
        """
        # Set feature names
        self.set_names(x)
        # Convert to mats if not
        x = self.strip_df(x)
        y = self.strip_df(y)
        self.tree = Tree.build_nodes(x, y,
                                     max_depth=self.params['max_depth'],
                                     min_data=self.params['min_data'],
                                     dynamic_bias=self.params['dynamic_bias'],
                                     debug=debug, names=self.feature_names)
        return self

    @staticmethod
    def build_nodes(x, y, names: List[str],
                    max_depth: int = 2,
                    min_data: int = 2,
                    dynamic_bias: bool = False,
                    depth: int = 0,
                    nom_class: int = -777,
                    bias: float = 0.5,
                    d_str: str = 'Root',
                    debug: bool = False):
        """
        Recursively all branches of nodes. Each branch continues adding nodes until a terminal condition is met.
        :param x: Features.
        :param y: Labels.
        :param names: Feature column names.
        :param max_depth: Max branch depth. Default=2.
        :param min_data: Min number of observations left to build another node. Default=2.
        :param dynamic_bias: When True, bias is recomputed from this node's class proportion.
        :param depth: Current depth.
        :param nom_class: Fallback class label used when a side of a split is empty.
        :param bias: Class-proportion threshold used by high_class().
        :param d_str: String name for node.
        :param debug: When True, print each node as it is built.
        :return: Node dict (leaf or internal, with 'l_node'/'r_node' children).
        """
        if dynamic_bias:
            bias = Tree.prop(y)
            ds = 'dynamic'
        else:
            if bias == '':
                ds = 'highest'
            else:
                ds = 'static'
        # Add terminal checks here
        # If a terminal node, return a node (dict) containing just the class label
        # This label is set by highest represented label in subset
        if depth > max_depth:
            # Too deep: Terminal
            cla = Tree.high_class(y, bias)
            node = {'class': cla,
                    'depth': depth,
                    'note': 'Max depth reached, class is: ' + str(cla),
                    'terminal': True,
                    'n': len(x),
                    'node_str': d_str}
        elif x.shape[0] < min_data:
            if x.shape[0] == 0:
                # Too few data points: Terminal
                cla = nom_class
            else:
                cla = Tree.high_class(y, bias)
            node = {'class': cla,
                    'depth': depth,
                    'note': f'Too few data points, class is: {cla}',
                    'terminal': True,
                    'n': len(x),
                    'node_str': d_str}
            # In this case, y will be empty
            # So use nominal class that will be the opposite of the other side node
        elif x.shape[1] < 1:
            # Too few features: Terminal
            cla = Tree.high_class(y, bias)
            node = {'class': cla,
                    'depth': depth,
                    'note': f'No features remaining, class is: {cla}',
                    'terminal': True,
                    'n': len(x),
                    'node_str': d_str}
        elif len(np.unique(y)) == 1:
            # Only one class: Terminal
            cla = Tree.high_class(y, bias)
            node = {'class': cla,
                    'depth': depth,
                    'note': f'One class at depth, class is: {cla}',
                    'terminal': True,
                    'n': len(x),
                    'node_str': d_str}
        else:
            # Not terminal. Build next node.
            # First find best split to run
            col_idx, best_x, gini = Tree.get_best_split_all(x, y)
            # Split into left and right subsets
            l_idx = (x[:, col_idx] < best_x).squeeze()
            r_idx = (x[:, col_idx] >= best_x).squeeze()
            nom_class_l = -999
            nom_class_r = -999
            # If one side is empty, its child gets the opposite of the other
            # side's majority class as a nominal label.
            if np.sum(l_idx) == 0:
                nom_class_l = np.int8(not Tree.high_class(y[r_idx], bias))
            if np.sum(r_idx) == 0:
                nom_class_r = np.int8(not Tree.high_class(y[l_idx], bias))
            # Build next node, leaving out used feature and data not in this split
            l_node = Tree.build_nodes(x[l_idx][:, ~col_idx], y[l_idx],
                                      max_depth=max_depth,
                                      min_data=min_data,
                                      depth=depth + 1,
                                      nom_class=nom_class_l,
                                      dynamic_bias=dynamic_bias,
                                      bias=bias,
                                      d_str=d_str + '->L',
                                      names=[n for ni, n in enumerate(names) if ni != np.argmax(col_idx)])
            r_node = Tree.build_nodes(x[r_idx][:, ~col_idx], y[r_idx],
                                      max_depth=max_depth,
                                      min_data=min_data,
                                      depth=depth + 1,
                                      nom_class=nom_class_r,
                                      dynamic_bias=dynamic_bias,
                                      bias=bias,
                                      d_str=d_str + '->R',
                                      names=[n for ni, n in enumerate(names) if ni != np.argmax(col_idx)])
            # Return a full node containing meta/debug data
            # As this isn't a leaf/terminal node, set class to -1
            node = {'name': names[np.argmax(col_idx)],
                    'n': len(x),
                    'n_l': np.sum(l_idx),
                    'n_r': np.sum(r_idx),
                    'l_idx': l_idx,
                    'r_idx': r_idx,
                    'split_val': best_x.squeeze(),
                    'gini': gini.squeeze(),
                    'depth': depth,
                    'l_node': l_node,
                    'r_node': r_node,
                    'class': -1,
                    'prop_l': Tree.prop(y[l_idx]),
                    'prop_r': Tree.prop(y[r_idx]),
                    'biasMode': ds,
                    'bias': bias,
                    'node_str': d_str,
                    'terminal': False}
        if debug:
            Tree.print_node(node)
        return node

    def predict(self, x) -> np.ndarray:
        """Predict from tree."""
        # Row-wise tree walk over the DataFrame.
        y_pred = x.apply(Tree._predict,
                         args=(self.tree,),
                         axis=1)
        return y_pred

    @staticmethod
    def _predict(x, mod) -> np.ndarray:
        """Walk the node dicts for one row and return its class label."""
        # If this is a leaf node, return class
        if mod['class'] > -1:
            return mod['class']
        # If this isn't a leaf node, check X against split value
        # and follow tree
        if x.loc[mod['name']] < mod['split_val']:
            # If less than split val, go left
            y_pred = Tree._predict(x, mod['l_node'])
        else:
            # If greater than split val, go right
            y_pred = Tree._predict(x, mod['r_node'])
        return y_pred

    @staticmethod
    def gi(groups: Union[List[int], np.array],
           classes: Union[List[int], np.array]) -> float:
        """Calculate Gini."""
        groups = np.array(groups)
        classes = np.array(classes)
        # For each group
        sum_p = 0.0
        for g in np.unique(groups):
            # print('G:',g)
            g_idx = groups == g
            # Calculate and sum class proportions
            p = 0.0
            # For each class
            for c in np.unique(classes):
                # print('C:',c)
                c_idx = classes[g_idx] == c
                # Get proportions and square
                # And sum across classes
                p += (np.sum(c_idx) / np.sum(g_idx)) ** 2
                # print('P:',P)
            # Weight by size of group
            # And sum across groups
            sum_p += (1 - p) * sum(g_idx) / len(g_idx)
        return sum_p

    @staticmethod
    def split(x, y, split_val) -> float:
        """Gini impurity of splitting column x at split_val."""
        groups = np.int8(x < split_val)
        return Tree.gi(groups, y)

    @staticmethod
    def get_best_split_all(x, y) -> Tuple[int, float, float]:
        """
        This function calculates all splits on all columns
        Returns the column index with best split and the values to use
        """
        m = x.shape[1]
        col_best_gin = np.ones(shape=m)
        col_best_val = np.ones(shape=m)
        for c in range(m):
            best = 1
            best_x = 0
            # Try each observed value of the column as a candidate threshold.
            for i in np.unique(x[:, c]):
                gini = Tree.split(x[:, c], y, i)
                if gini < best:
                    best = gini
                    best_x = i
            col_best_gin[c] = best
            col_best_val[c] = best_x
        # Select best feature to split on
        col_idx = np.argmin(col_best_gin)
        # Convert to bool index
        col_idx = np.array(range(x.shape[1])) == col_idx
        return col_idx, col_best_val[col_idx], col_best_gin[col_idx]

    @staticmethod
    def prop(y: np.ndarray) -> Union[int, float]:
        """Proportion of positive (1) labels in y; 0 for an all-zero/empty y."""
        if np.sum(y) > 0:
            return y.sum() / y.shape[0]
        else:
            return 0

    @staticmethod
    def high_class(y,
                   bias: Union[str, float] = '') -> int:
        """Majority class of y; with a numeric bias, 1 iff prop(y) > bias."""
        if bias == '':
            # Just return highest class
            return np.argmax(y.value_counts())
        else:
            # Return logical of class prop>bias
            if len(y) > 0:
                return np.int8(Tree.prop(y) > bias)
            else:
                return 0
if __name__ == '__main__':
    # Small 2-feature toy dataset for a smoke test of the tree.
    data = pd.DataFrame({'x1': [2.77, 1.73, 3.68, 3.96, 2.99, 7.50, 9.00, 7.44,
                                10.12, 6.64],
                         'x2': [1.78, 1.17, 2.81, 2.62, 2.21, 3.16, 3.34, 0.48,
                                3.23, 3.32],
                         'y': [1, 0, 0, 0, 0, 1, 1, 1, 0, 0]})
    y = data.y
    x = data[['x1', 'x2']]
    # Fit, print the tree structure, and score training accuracy.
    mod = Tree(max_depth=2,
               min_data=2,
               dynamic_bias=False)
    mod.fit(x, y)
    mod._print()
    y_pred = mod.predict(x)
    accuracy(y, y_pred)
    # Scatter plots: true labels, then predicted labels.
    plt.scatter(data.x1[data.y == 0], data.x2[data.y == 0])
    plt.scatter(data.x1[data.y == 1], data.x2[data.y == 1])
    plt.show()
    plt.scatter(data.x1[y_pred == 0], data.x2[y_pred == 0])
    plt.scatter(data.x1[y_pred == 1], data.x2[y_pred == 1])
    plt.show()
|
10,402 | 4c294684d97014057fe4953779e166e64ea8a98b | from mutagen.mp3 import MP3
from mutagen import MutagenError
import os
from stat import *
# Ask the user for the directory to scan and normalize it to an absolute path.
path = input("path to directory with mp3: ")
path = os.path.abspath(path)
def iterate_in_folder(path):
    """Recursively sum MP3 durations under *path*.

    Prints each file and its length as it is scanned.
    :return: (total_seconds, file_count) tuple.
    """
    ln = 0
    num = 0
    for item in os.listdir(path):
        full = os.path.join(path, item)
        if S_ISDIR(os.stat(full)[ST_MODE]):
            # Recurse into subdirectories and accumulate their totals.
            ln_temp, num_temp = iterate_in_folder(full)
            ln += ln_temp
            num += num_temp
        elif item.lower().endswith(".mp3"):
            # was endswith("mp3"): that also matched names like "foomp3";
            # lower() additionally accepts ".MP3".
            audio = MP3(full)
            print(str(item) + " " + str(audio.info.length))
            num += 1
            ln += audio.info.length
    return ln, num
total_time_s, num_of_mp3 = iterate_in_folder(path)
# Break the total seconds down into hours / minutes / seconds.
total_hours = int(total_time_s/3600)
# Fractional hours remaining after the whole hours ("zbytek" = remainder).
zbytek_minut_int = total_time_s/3600 - int(total_time_s/3600)
minuty = int(zbytek_minut_int*60)
# Fractional minutes remaining, converted to seconds.
sec_int = zbytek_minut_int*60 - int(zbytek_minut_int*60)
sec = sec_int*60
# NOTE(review): minutes/seconds are not zero-padded and seconds print as a
# float; message is Czech ("ve ... mp3 souborech" = "in ... mp3 files").
print(f"Total time: {total_hours}:{minuty}:{sec} ve {num_of_mp3} mp3 souborech")
10,403 | 16e315113a61fdab87ee67f4ffca38101e9d09d6 | # Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kmip.core import enums
from kmip.core.enums import KeyFormatType
from kmip.core.enums import ObjectType
from kmip.core.enums import Operation
from kmip.core.enums import ResultStatus
from kmip.core.factories.attributes import AttributeFactory
from kmip.core.objects import TemplateAttribute
from kmip.demos import utils
from kmip.services.kmip_client import KMIPProxy
import logging
import sys
if __name__ == '__main__':
    logger = utils.build_console_logger(logging.INFO)
    # Build and parse CLI arguments for the REGISTER demo.
    parser = utils.build_cli_parser(Operation.REGISTER)
    opts, args = parser.parse_args(sys.argv[1:])
    username = opts.username
    password = opts.password
    config = opts.config
    object_type = opts.type
    format_type = opts.format
    # Exit early if the arguments are not specified
    object_type = getattr(ObjectType, object_type, None)
    if object_type is None:
        logger.error("Invalid object type specified; exiting early from demo")
        sys.exit()
    key_format_type = getattr(KeyFormatType, format_type, None)
    if key_format_type is None:
        logger.error(
            "Invalid key format type specified; exiting early from demo")
        # BUG FIX: previously fell through and kept running with
        # key_format_type=None; exit like the object-type branch above.
        sys.exit()
    attribute_factory = AttributeFactory()
    # Create the template attribute for the secret and then build the secret
    usage_mask = utils.build_cryptographic_usage_mask(logger, object_type)
    attributes = [usage_mask]
    if opts.operation_policy_name is not None:
        opn = attribute_factory.create_attribute(
            enums.AttributeType.OPERATION_POLICY_NAME,
            opts.operation_policy_name
        )
        attributes.append(opn)
    template_attribute = TemplateAttribute(attributes=attributes)
    secret = utils.build_object(logger, object_type, key_format_type)
    # Build the client, connect to the server, register the secret, and
    # disconnect from the server
    client = KMIPProxy(config=config, config_file=opts.config_file)
    client.open()
    result = client.register(object_type, template_attribute, secret)
    client.close()
    # Display operation results
    logger.info('register() result status: {0}'.format(
        result.result_status.value))
    if result.result_status.value == ResultStatus.SUCCESS:
        logger.info('registered UUID: {0}'.format(result.uuid))
        logger.info('registered template attribute: {0}'.
                    format(result.template_attribute))
    else:
        logger.info('register() result reason: {0}'.format(
            result.result_reason.value))
        logger.info('register() result message: {0}'.format(
            result.result_message.value))
|
10,404 | 30126431bc014950669f552b430c6626c6e32c9a | from django import template
from ..models import Post
from ..forms import SearchForm
from django.db.models import Count
register = template.Library()
@register.simple_tag
def total_posts():
    """Return the number of published posts."""
    published_count = Post.published.count()
    return published_count
# Inclusion tag rendering the latest posts (default number of posts: 5).
@register.inclusion_tag('blog/post/latest_posts.html')
def show_latest_posts(count=5):
    """Provide the *count* most recently published posts to the template."""
    recent = Post.published.order_by('-publish')[:count]
    return {'latest_posts': recent}
# Tag returning the posts with the most comments.
@register.simple_tag
def get_most_commented_posts(count=5):
    """Return the *count* posts with the highest comment totals."""
    annotated = Post.published.annotate(total_comments=Count('comments'))
    return annotated.order_by('-total_comments')[:count]
# Inclusion tag rendering the search form.
@register.inclusion_tag('search_form.html')
def show_search_form():
    """Provide an unbound SearchForm instance to the template."""
    return {'search_form': SearchForm()}
|
10,405 | b88b46b5789d08bdad07f7ee14d570f02cecaa37 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pip install console-menu
from consolemenu import *
from consolemenu.items import *
from converter import *
def cm_hexadecimal2decimal():
    """Menu action: prompt for a hex string and print its decimal value."""
    # Fixed prompt typo: "Ox" -> "0x" (consistent with the other prompts).
    Screen().println(str(hexadecimal2decimal(Screen().input('Enter a hexadecimal without 0x : ').input_string)))
    Screen().input('Press [Enter] to continue')
def cm_decimal2hexadecimal():
    """Menu action: prompt for a decimal integer and print it in hexadecimal."""
    Screen().println(decimal2hexadecimal(int(Screen().input('Enter a decimal : ').input_string)))
    Screen().input('Press [Enter] to continue')
def cm_hexadecimal_little_endian2decimal():
    """Menu action: prompt for a little-endian hex string and print its decimal value."""
    Screen().println(str(hexadecimal_little_endian2decimal(Screen().input('Enter a hexadecimal little-endian without 0x : ').input_string)))
    Screen().input('Press [Enter] to continue')
def cm_varInt2decimal():
    """Menu action: prompt for a Bitcoin varInt (hex) and print its decimal value."""
    Screen().println(str(varInt2decimal(Screen().input('Enter a varInt without 0x : ').input_string)))
    Screen().input('Press [Enter] to continue')
def cm_bits2target():
    """Menu action: prompt for a block-header 'bits' field and print the target."""
    Screen().println(str(bits2target(Screen().input('Enter a bits without 0x : ').input_string)))
    Screen().input('Press [Enter] to continue')
def cm_target2difficulty():
    """Menu action: prompt for a decimal target and print the mining difficulty."""
    Screen().println(str(target2difficulty(int(Screen().input('Enter a target in decimal : ').input_string))))
    Screen().input('Press [Enter] to continue')
def cm_decode_transaction():
    """Menu action: prompt for a raw hex transaction and decode it (prints itself)."""
    decode_transaction(Screen().input('Enter a raw bitcoin hexadecimal transaction : ').input_string)
    Screen().input('Press [Enter] to continue')
def cm_decode_block():
    """Menu action: prompt for a block hash and decode the block (prints itself)."""
    decode_block(Screen().input('Enter a block hash : ').input_string)
    Screen().input('Press [Enter] to continue')
def cm_navigate():
    """Menu action: prompt for a hexadecimal value and print the navigation result."""
    Screen().println(str(navigate(Screen().input('Enter a hexadecimal : ').input_string)))
    Screen().input('Press [Enter] to continue')
def menu():
    """Build the console menu hierarchy and display it (blocks until exit)."""
    menu = ConsoleMenu("Challenge 1", "Bitcoin analysis tool")
    # Submenu 1: number-base and Bitcoin field converters.
    submenu1 = ConsoleMenu("Converter")
    submenu1_item = SubmenuItem("Converter", submenu1, menu)
    submenu1.append_item(FunctionItem("Hexadécimal -> décimal", cm_hexadecimal2decimal))
    submenu1.append_item(FunctionItem("Décimal -> hexadécimal", cm_decimal2hexadecimal))
    submenu1.append_item(FunctionItem("Hexadécimal little endian -> hexadécimal", cm_hexadecimal_little_endian2decimal))
    submenu1.append_item(FunctionItem("varInt -> décimal", cm_varInt2decimal))
    submenu1.append_item(FunctionItem("Champ Bits -> Cible correspondante", cm_bits2target))
    submenu1.append_item(FunctionItem("Cible -> Difficulté", cm_target2difficulty))
    # Submenu 2: raw transaction / block decoders.
    submenu2 = ConsoleMenu("Decode Raw Bitcoin Hexadecimal Transaction or Block")
    submenu2_item = SubmenuItem("Decode Raw Bitcoin Hexadecimal Transaction or Block", submenu2, menu)
    submenu2.append_item(FunctionItem("Decode Raw Bitcoin Hexadecimal Transaction", cm_decode_transaction))
    submenu2.append_item(FunctionItem("Decode Raw Bitcoin Hexadecimal Block", cm_decode_block))
    # Submenu 3 is built but its menu entry is commented out below.
    submenu3 = ConsoleMenu("Navigate the bitcoin blockchain")
    submenu3_item = SubmenuItem("Navigate the bitcoin blockchain", submenu3, menu)
    submenu3.append_item(FunctionItem("Navigate the bitcoin blockchain", cm_navigate))
    menu.append_item(submenu1_item)
    menu.append_item(submenu2_item)
    #menu.append_item(submenu3_item)
    menu.show()
|
10,406 | 139d129ce1220cf0b78416b12f813e15f313139d | '''
Created on 19 apr 2017
@author: Conny
'''
from FunctionalLayer.PDS import PDS
class LIGHT(PDS):
    """Light-sensor device: defaults dev_type to "LightSensor" and delegates all setup to PDS."""
    def __init__(self, dev_id="+", dev_type="LightSensor", dev_location="+", shadow=True, starter=None, remote=True):
        # Pass everything straight through to the PDS base class.
        super(LIGHT, self).__init__(dev_id, dev_type, dev_location, shadow, starter, remote)
10,407 | f20c1057d85210fed30923dbf467e6f8f442b79b | import os
from setuptools import setup, find_packages
from setuptools.command.install import install
class CustomInstall(install):
    """setuptools install command that post-processes the ~/.lw data directory."""
    def run(self):
        # Run the standard install steps first.
        install.run(self)
        for filepath in self.get_outputs():
            if os.path.expanduser('~/.lw') in filepath:
                # NOTE(review): 0o777 makes the directory writable by every
                # user on the machine -- confirm this permissive mode is intended.
                os.chmod(os.path.dirname(filepath), 0o777)
setup(
    name='ListingsWatch',
    packages=find_packages(),
    version='1.0',
    description='Command line application watching listings for keywords',
    author='Tommy Lundgren',
    author_email='tomolia86@yahoo.se',
    install_requires=['beautifulsoup4'],
    # Expose `lw` as a console command mapped to lw/__main__.py:main.
    entry_points = {
        'console_scripts': ['lw = lw.__main__:main']
    },
    # Ship the default config file into the user's home directory.
    data_files=[
        (os.path.expanduser('~/.lw'), ['lw.conf'])
    ],
    # Custom command that chmods the ~/.lw directory after install.
    cmdclass={'install': CustomInstall}
)
|
10,408 | 48ab70febf4c0ab898ae58a96e6a95cb4566c21b | #!/bin/python3
# copyright Lauri K. Friberg 2021
"""System module."""
import sys
def is_even(number):
    """Return True when *number* is divisible by two."""
    return number % 2 == 0
def is_odd(number):
    """Return True when *number* is not divisible by two."""
    return number % 2 == 1
print ("©2021 Lauri K. Friberg. Weird algorithm. Look at https://cses.fi/problemset/task/1068/")
print ("View the source code, too: cat weird_algorithm.py")
weird_algorithm_number=int(input("Give an integer number (1 =< number =< 1000000): "))
if weird_algorithm_number<1:
print ("Error: Give an integer equal to or bigger than 1,")
print ("and equal to or smaller than 1000000.")
sys.exit()
elif weird_algorithm_number>1000000:
print ("Error: Give an integer equal to or smaller than")
print ("1000000, and bigger than or equal to 1.")
sys.exit()
print (weird_algorithm_number, end=" ", flush=True)
while weird_algorithm_number!=1:
if is_even(weird_algorithm_number):
weird_algorithm_number=int(weird_algorithm_number/2)
print (weird_algorithm_number, end=" ", flush=True)
elif is_odd(weird_algorithm_number):
weird_algorithm_number=int(weird_algorithm_number*3+1)
print (weird_algorithm_number, end=" ", flush=True)
else:
print ("Error!")
sys.exit()
print ("")
|
# Read one line of whitespace-separated integers from stdin.
numList = list(map(int, input().split()))
first = True
# Track the smallest odd value seen so far.
for i in numList:
    if i % 2 != 0:
        if first:
            first = False
            minOdd = i
        else:
            if i < minOdd:
                minOdd = i
# NOTE(review): if the input contains no odd number, minOdd is never bound
# and this print raises NameError -- confirm inputs always include an odd value.
print(minOdd)
|
10,410 | 65833e4e9f17b32fd00f27bc62d38ff06a16b5e7 | import os
import re
import sys
import numpy
import netCDF4
import rasterio
from glob import glob
from datetime import datetime
from collections import namedtuple
from osgeo import osr
TileInfo = namedtuple('TileInfo', ['filename', 'datetime'])
def parse_filename(filename):
    """Split a WOFS water-extent tile filename into its metadata fields.

    Returns a dict with keys vehicle, instrument, type, longitude,
    latitude, and date. Raises AttributeError when the name does not match.
    """
    pattern = (
        r"(?P<vehicle>LS[578])"
        r"_(?P<instrument>OLI_TIRS|OLI|TIRS|TM|ETM)"
        r"_(?P<type>WATER)"
        r"_(?P<longitude>[0-9]{3})"
        r"_(?P<latitude>-[0-9]{3})"
        r"_(?P<date>.*)"
        r"\.tif$"
    )
    match = re.match(pattern, filename)
    return match.groupdict()
def make_tileinfo(filename):
    """Build a TileInfo carrying the tile path and its acquisition datetime."""
    fields = parse_filename(os.path.basename(filename))
    # Only the first 19 characters form the timestamp; anything after is ignored.
    stamp = datetime.strptime(fields['date'][:19], '%Y-%m-%dT%H-%M-%S')
    return TileInfo(filename, datetime=stamp)
###############################
#issue with timechecksize=100? def create_netcdf(filename, tiles, zlib_flag=True, timechunksize0=100):
def create_netcdf(filename, tiles, zlib_flag=True, timechunksize0=100):
    """Stack the given GeoTIFF tiles into one compressed CF-1.6 NetCDF file.

    :param filename: path of the NetCDF file to create.
    :param tiles: chronologically sorted list of TileInfo records.
    :param zlib_flag: NOTE(review): accepted but unused -- zlib=True is
        hard-coded on the data variable below; confirm intent.
    :param timechunksize0: upper bound on the time-dimension chunk size.
    """
    timechunksize = min(timechunksize0, len(tiles))
    # open the first datatset to pull out spatial information
    first = rasterio.open(tiles[0].filename)
    crs = osr.SpatialReference(first.crs_wkt.encode('utf8'))
    affine = first.affine
    width, height = first.width, first.height
    with netCDF4.Dataset(filename, 'w') as nco:
        nco.date_created = datetime.today().isoformat()
        nco.Conventions = 'CF-1.6'
        # crs variable
        crs_var = nco.createVariable('crs', 'i4')
        crs_var.long_name = crs.GetAttrValue('GEOGCS')
        crs_var.grid_mapping_name = 'latitude_longitude'
        crs_var.longitude_of_prime_meridian = 0.0
        crs_var.spatial_ref = crs.ExportToWkt()
        crs_var.semi_major_axis = crs.GetSemiMajor()
        crs_var.semi_minor_axis = crs.GetSemiMinor()
        crs_var.inverse_flattening = crs.GetInvFlattening()
        crs_var.GeoTransform = affine.to_gdal()
        # latitude coordinate (cell centres, hence the + affine.e / 2 offset)
        nco.createDimension('latitude', height)
        lat_coord = nco.createVariable('latitude', 'float64', ['latitude'])
        lat_coord.standard_name = 'latitude'
        lat_coord.long_name = 'latitude'
        lat_coord.axis = 'Y'
        lat_coord.units = 'degrees_north'
        lat_coord[:] = numpy.arange(height) * affine.e + affine.f + affine.e / 2
        # longitude coordinate (cell centres)
        nco.createDimension('longitude', width)
        lon_coord = nco.createVariable('longitude', 'float64', ['longitude'])
        lon_coord.standard_name = 'longitude'
        lon_coord.long_name = 'longitude'
        lon_coord.axis = 'X'
        lon_coord.units = 'degrees_east'
        lon_coord[:] = numpy.arange(width) * affine.a + affine.c + affine.a / 2
        # time coordinate: unix timestamps of each tile
        nco.createDimension('time', len(tiles))
        time_coord = nco.createVariable('time', 'double', ['time'])
        time_coord.standard_name = 'time'
        time_coord.long_name = 'Time, unix time-stamp'
        time_coord.axis = 'T'
        time_coord.calendar = 'standard'
        time_coord.units = 'seconds since 1970-01-01 00:00:00'
        time_coord[:] = [(tile.datetime-datetime(1970, 1, 1, 0, 0, 0)).total_seconds() for tile in tiles]
        # wofs data variable
        data_var = nco.createVariable('Data',
                                      #'uint8',
                                      'int8',
                                      ['latitude', 'longitude', 'time'],
                                      chunksizes=[100, 100, timechunksize],
                                      zlib=True,
                                      complevel=1)
        data_var.grid_mapping = 'crs'
        #data_var.valid_range = [0, 255]
        #data_var.flag_masks = [1, 2, 4, 8, 16, 32, 64, 128] #cause gdalinfo seg fault
        data_var.flag_meanings = "water128 cloud64 cloud_shadow32 high_slope16 terrain_shadow8 over_sea4 no_contiguity2 nodata1 dry0"
        #tmp = numpy.empty(dtype='uint8', shape=(height, width, timechunksize))
        tmp = numpy.empty(dtype='int8', shape=(height, width, timechunksize))
        # Copy tiles into the file `timechunksize` time-slices at a time.
        for start_idx in range(0, len(tiles), timechunksize):
            #read `timechunksize` worth of data into a temporary array
            end_idx = min(start_idx+timechunksize, len(tiles))
            for idx in range(start_idx, end_idx):
                with rasterio.open(tiles[idx].filename) as tile_data:
                    tmp[:,:,idx-start_idx] = tile_data.read(1)
            #write the data into necdffile
            data_var[:,:,start_idx:end_idx] = tmp[:,:,0:end_idx-start_idx]
            sys.stdout.write("\r%d out of %d done\r" % (end_idx, len(tiles)))
            sys.stdout.flush()
def create_netcdf_from_dir(extents_dir, out_ncfile=None):
    """Stack every *.tif in *extents_dir* into one NetCDF file.

    When *out_ncfile* is None a name like
    LS_WATER_<cell>_<first-dt>_<last-dt>.nc is generated inside *extents_dir*.
    """
    zlib_flagv = True
    #zlib_flagv = False
    tiles = [make_tileinfo(filename) for filename in glob(os.path.join(extents_dir, '*.tif'))]
    tiles.sort(key=lambda t: t.datetime)
    path2ncfile = out_ncfile
    if out_ncfile is None:
        #makeup a nc file name like LS_WATER_149_-036_1987-05-22T23-08-20_2014-03-28T23-47-03.nc
        cellid = os.path.basename(os.path.normpath(extents_dir)) #assumed like 149_-036
        begindt = tiles[0].datetime.isoformat().replace(':','-')
        enddt = tiles[-1].datetime.isoformat().replace(':','-')
        ncfile_name = "LS_WATER_%s_%s_%s.nc"%(cellid,begindt,enddt)
        path2ncfile = os.path.join(extents_dir,ncfile_name)
    create_netcdf(path2ncfile, tiles, zlib_flagv)
def verify_netcdf(extents_dir, out_ncfile):
    """verify the stacked nc file's pixel values against the tiff files
    """
    # NOTE(review): this function uses Python 2 print statements while the
    # rest of the module is 2/3-compatible -- it only runs under Python 2.
    netcdf_old=out_ncfile #'/g/data/fk4/wofs/water_f7q/extents/149_-036/LS_WATER_149_-036_1987-05-22T23-08-20.154_2014-03-28T23-47-03.171.nc'
    tiles = [make_tileinfo(filename) for filename in glob(os.path.join(extents_dir, '*.tif'))]
    tiles.sort(key=lambda t: t.datetime)
    with netCDF4.Dataset(netcdf_old) as nco:
        # Compare each time slice of the NetCDF against the matching GeoTIFF.
        for i in range(0,len(tiles)):
            print nco['time'][i]
            print tiles[i]
            with rasterio.open(tiles[i].filename) as tile_data:
                print "Any difference? "
                print numpy.sum(nco['Data'][:,:,i])
                print numpy.sum(tile_data.read(1))
                print type(nco['Data'][:,:,i]), type(tile_data.read(1))
                print nco['Data'][:,:,i].shape, tile_data.read(1).shape
                # Elementwise difference should sum to zero if identical.
                print numpy.sum(nco['Data'][:,:,i] - tile_data.read(1)[:,:])
                #print tile_data.read(1)[0:100,0:100]
                #print (nco['Data'][:,:,i] == tile_data.read(1)).all()
###################################################################
# Usage python thiscript.py /g/data/u46/fxz547/wofs/extents/149_-036 /g/data/u46/fxz547/wofs/extents/149_-036/stacked.nc
# Usage: python thiscript.py <extents_dir> [<out_ncfile>]
if __name__ == "__main__":
    #extents_dir = '/g/data/u46/wofs/extents/149_-036'
    #extents_dir = '/g/data/u46/fxz547/wofs/extents/149_-036'
    extents_dir = sys.argv[1]
    #optional out_ncfile =sys.argv[2]
    create_netcdf_from_dir(extents_dir)
    #create_netcdf_from_dir(extents_dir, out_ncfile)
    #verify_netcdf(extents_dir, out_ncfile)
|
10,411 | 0da06cefb231e9f2d4a910310966eb6cac752136 | # 修改串列中的元素
# Modifying an element in a list (comments translated from Chinese).
motorcycle = ["honda", "yamaha", "suzuki"]
print(motorcycle)
motorcycle[0] = 'ducati'
print(motorcycle)
# Appending an element to the end of a list
motorcycle.append('honda')
print(motorcycle)
# Adding elements to an empty list
motorcycle = []
motorcycle.append('honda')
motorcycle.append('yamaha')
motorcycle.append('suzuki')
print(motorcycle)
# Inserting an element into a list
motorcycles = ['honda', 'yamaha', 'suzuki']
motorcycles.insert(0, 'ducati')
print(motorcycles)
# Removing an element with the del statement
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
del motorcycles[0]
print(motorcycles)
# # After deleting with del, the element can no longer be accessed
# Removing with the pop() method (returns the removed element)
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
popped_motorcycle = motorcycles.pop()
print(motorcycles)
print(popped_motorcycle)
last_owned = motorcycles.pop()
print("The last motorcycle I owned was a " + last_owned.title() + ".")
# Popping an item from any position in the list
first_owned = motorcycles.pop(0)
print("The first motorcycle I owned was a " + first_owned.title() + ".")
# Removing an item by value
motorcycles = ['honda', 'yamaha', 'suzuki', 'ducati']
print(motorcycles)
motorcycles.remove('ducati')
print(motorcycles)
# Using a variable with remove()
too_expensive = 'yamaha'
motorcycles.remove(too_expensive)
print(motorcycles)
print("\nA " + too_expensive.title() + " is too expensive for me.")
|
10,412 | b187884e63b485c5cb62abdc787adef4bb73b20b | import numpy as np
class Clue:
    """One crossword clue: board position, number, direction, and answer buffer."""

    def __init__(self, x_pos, y_pos, num, _direction, _length):
        self.x_position = x_pos
        self.y_position = y_pos
        self.number = num
        self.direction = _direction
        self.length = _length
        self.clue = ""
        # Answer is a fixed-length array of single characters, pre-filled with 'a'.
        self.answer = np.ndarray(self.length, dtype='<U1')
        self.answer[:] = 'a'
        self.index = -1

    @staticmethod
    def arr_to_string(arr):
        """Join an array of single characters into one string."""
        return "".join(str(a) for a in arr)

    def __str__(self):
        # BUG FIX: was `clue.arr_to_string(...)` -- `clue` is undefined, so
        # str() raised NameError; call through the class instead (and
        # arr_to_string is now a staticmethod so both call styles work).
        # NOTE(review): the literal below produces malformed JSON (missing
        # quote before "d", trailing brace/quote placement); kept as-is to
        # preserve the existing output format -- confirm intent.
        return f'{{d":"{self.direction}", "n":{self.number}, "x":{self.x_position}, "y":{self.y_position}, "a":"{Clue.arr_to_string(self.answer)}", "c":"{self.clue} }}"'
class Board:
    """Square crossword grid: clue-start markers plus letter cells."""

    def __init__(self, sz):
        self.size = sz
        # Nonzero where a numbered clue starts, 0.0 elsewhere.
        self.start_board = np.zeros((sz, sz))
        # CONSISTENCY FIX: use '<U1' (as Clue.answer does) instead of the
        # odd big-endian '>U1'; for 1-character unicode cells the stored
        # values are identical, but the dtypes now match across the module.
        self.letter_board = np.zeros((sz, sz), dtype='<U1')
class Crossword:
    """A generated puzzle: a Board plus the list of its Clue objects."""

    def __init__(self, sz):
        self.size = sz
        self.board = Board(sz)
        self.clues = []

    def __str__(self):
        header = ('{\n"title": "Randomly Generated Crossword",\n'
                  '"by": "#1 POOSD Group",\n')
        return header + f'"clues":{self.clues}\n}}'
|
10,413 | 65a7b71fc54fe0f6a06b13053517929daa5054fc | import cv2 as cv
# Print the dimensions of one image as (height, width, channels).
image_path = "C:/Users/xwen2/Desktop/109.JPG"
image = cv.imread(image_path)  # NOTE(review): returns None if the path is wrong — no check here
image_size = image.shape
print(image_size)
10,414 | f615ef8645204075d4157c1eb1ba471a550ba165 | def divisors(num):
    # Sum of the proper divisors of num (1 is included, num itself is not).
    # NOTE(review): `sum` shadows the builtin, and trial division up to
    # num/2 could be cut to sqrt(num) — left unchanged here.
    sum = 1;
    for i in range(2,int(num/2)+1):
        if(num%i==0):
            sum = sum +i;
    return sum;
def is_abundant(num):
    """Return True when the proper-divisor sum of num exceeds num."""
    return divisors(num) > num
def do_loop(list, abundants, num, MAX):
    """Mark every index i+num (< MAX) where abundants[i] == 1.

    Mutates the passed-in list in place and returns the same object,
    matching the original calling convention. (The parameter name `list`
    shadows the builtin but is kept for interface compatibility.)
    """
    marked = list  # alias: same object as the argument, mutated in place
    for offset in range(num + 1):
        if abundants[offset] == 1 and offset + num < MAX:
            marked[offset + num] = 1
    return marked
import timeit
start = timeit.default_timer()
# Project Euler 23: sum all positive integers that cannot be written as the
# sum of two abundant numbers.
# NOTE(review): the known analytic bound is 28123; 28200 is a safe over-estimate.
MAX = 28200;
zeroes_list = [0]*MAX;       # 1 == "expressible as a sum of two abundants"
abundant_zeroes = [0]*MAX;   # 1 == "i is abundant"
for i in range(1,MAX):
    if(abundant_zeroes[i]==0):
        if(is_abundant(i)):
            abundant_zeroes[i] = 1;
            # mark i + (every abundant <= i) as expressible
            zeroes_list = do_loop(zeroes_list,abundant_zeroes,i,MAX);
sum = 0;
for i in range(MAX):
    if zeroes_list[i] ==0:
        sum = sum + i;
print(sum)
stop = timeit.default_timer()
print('Time: ', stop - start)
10,415 | dfcc47eb3b83816855612bd0ee99c0171d47ef8d | import pdfquery
import xml.etree.ElementTree as et
from django.conf import settings
import os
import pandas as pd
from .models import Image
import json
from wand.image import Image as wi
import cv2
from .face_api import face_detect, face_verify
def processPdf(filename):
    """Process an uploaded PDF form end-to-end.

    Steps: extract AcroForm field values via pdfquery/XML, map them to
    human-readable labels via xml_def.csv, score them against master.csv,
    render the first PDF page to JPEG, crop a photo-sized contour from it,
    and run face detection/verification against a reference image.

    Returns a dict with accuracy/match info when both faces are detected,
    otherwise the (possibly empty) raw verification result.
    """
    pdfPath = settings.UPLOAD_DIR + '/' + filename
    pdfName, pdfExtension = os.path.splitext(filename)
    pdf = pdfquery.PDFQuery(pdfPath) # Fs_1.pdf is the name of the PDF
    pdf.load()
    # Dump the parsed PDF structure to XML, then read form annotations back.
    pdf.tree.write(settings.PROCESS_DIR + '/' + pdfName + '.xml', pretty_print=True, encoding='UTF-8')
    tree = et.parse(settings.PROCESS_DIR + '/' + pdfName + '.xml')
    root = tree.getroot()
    keys = []
    values = []
    # Each <Annot> holds a form field: T = field name, V = field value.
    for movie in root.iter('Annot'):
        keys.append(movie.get("T"))
        values.append(movie.get("V"))
    dict_form = dict(zip(keys, values))
    # Map raw PDF tags to readable definitions via xml_def.csv.
    df = pd.read_csv(settings.PROCESS_DIR + '/' + 'xml_def.csv')
    for i in range(df.shape[0]):
        df.loc[i, 'values'] = dict_form[df.loc[i, 'Tag']]
    df1 = df[['Def','values']]
    dict_form1 = dict(zip(df1['Def'], df1['values']))
    # Persist extracted values; image filled in later after cropping.
    rowImage = Image(fileName = filename, values = json.dumps(dict_form1), image = '', flag = 'G')
    rowImage.save()
    # Compare each extracted value against the expected master.csv value.
    df2 = pd.read_csv(settings.PROCESS_DIR + '/' + 'master.csv')
    for i in range(df2.shape[0]):
        a = df.loc[df['Def'] == df2.loc[i, 'Def']].index[0]
        df2.loc[a, 'file'] = df.loc[a, 'values']
    for z in range(df2.shape[0]):
        if df2.loc[z, 'values'] == df2.loc[z, 'file']:
            df2.loc[z, 'score'] = 1
        else:
            df2.loc[z, 'score'] = 0
    total_acc = df2['score'].sum() / df2.shape[0]
    print(df2)
    json_df2 = df2.to_json(orient='records')
    # Render every PDF page to JPEG via wand/ImageMagick.
    pdf = wi(filename = pdfPath, resolution = 300)
    pdfImage = pdf.convert('jpeg')
    i = 1
    for img in pdfImage.sequence:
        page = wi(image = img)
        page.save(filename = settings.PROCESS_DIR + '/' + pdfName + '_' + str(i) + '.jpg')
        i += 1
    # Find photo-sized rectangular contours on page 1.
    image = cv2.imread(settings.PROCESS_DIR + '/' + pdfName + '_' + "1.jpg")
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edged = cv2.Canny(image, 10, 250)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(edged, kernel, iterations = 5)
    _, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV)
    # NOTE(review): 3-value findContours return is OpenCV 3.x; OpenCV 4.x
    # returns 2 values — confirm the pinned OpenCV version.
    (_, cnts, _) = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    idx = 5
    for c in cnts:
        x, y, w, h = cv2.boundingRect(c)
        # Keep only contours in the 200-500 px range (presumably the ID photo).
        if w > 200 and h > 200 and w < 500 and h < 500:
            idx += 1
            new_img = image[y:y+h, x:x+w]
            cv2.imwrite(settings.PROCESS_DIR + '/' + pdfName + '_' + str(idx) + '.jpg', new_img)
    # The last matching crop wins; record it on the DB row.
    rowImage.image = pdfName + '_' + str(idx) + '.jpg'
    rowImage.save()
    # Verify the cropped face against the reference photo.
    face1 = face_detect(settings.PROCESS_DIR + '/' + 'master.jpg')
    face2 = face_detect(settings.PROCESS_DIR + '/' + rowImage.image)
    res = {}
    if face1 != {} and face2 != {}:
        res = face_verify(face1[0]['faceId'], face2[0]['faceId'])
    if res != {}:
        ret = {}
        ret['accuracy'] = total_acc
        ret['image'] = rowImage.image
        ret['match'] = res['confidence']
        ret['ismatch'] = res['isIdentical']
        ret['score'] = json_df2
        return ret
    return res
|
10,416 | 5d6fc5369a6c9514e8d386d37e0c02281a46da7f | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 24 21:59:06 2016
@author: Eirik
"""
import numpy as np
import matplotlib.pyplot as plt
import pylab as p
import mpl_toolkits.mplot3d.axes3d as p3
e = 1.6e-19  # elementary charge [C]
m_p = 1.672621637e-27 #Reference Pearson -- proton mass [kg]
r_D = 50.0E-03# radius of the dee [m]
D = 90.0E-06 #valleygap between the dees [m]
c = 3.0E08 #speed of light [m/s]
start = 0
stop = 300.0e-09  # total simulated time [s]
dt = 100.0e-15    # integration step [s]
# NOTE(review): modern numpy requires an integer `num` for linspace;
# (stop-start)/dt is a float here — works only on old numpy versions.
t = np.linspace(start,stop,(stop-start)/dt)
t_test = np.linspace(start,100000,10000000)
n = len(t)
E_o = (25.0E03/90.0e-6) #V/m -- peak field: 25 kV across the 90 um gap
B = np.array([0.0,0.0,2.0])  # uniform magnetic field along z [T]
w = (np.abs(e)/m_p)*np.linalg.norm(B)  # cyclotron angular frequency [rad/s]
f = w/(2*np.pi)
def oppgave3():
A = np.zeros((n,3))
V = np.zeros((n,3))
R = np.zeros((n,3))
E = np.zeros((n,3))
tid = np.zeros(n)
V[0] = np.array([0.0 , 0.0 , 0.0])
R[0] = np.array([0.0 , 0.0 , 0.0])
tid[0] = 0
#omega
f = w/(2*np.pi) #cyclotron frekvens
for i in xrange(n-1):
if R[i,0] > -D/2 and R[i,0]< D/2:
E = np.array([E_o*np.cos(w*tid[i]) , 0, 0])
else:
E = np.zeros(3)
if not np.linalg.norm(R[i,:]) > R_D:
A = (e/m_p)*(E + np.cross(V[i],B))
else:
A = 0
V[i+1] = V[i] + A*dt
R[i+1] = R[i] +V[i+1]*dt
tid[i+1] = tid[i] + dt
dropout = np.linalg.norm(R[-1,:])
print "Escape velocity is: %g m/s" %(np.linalg.norm(V[-1,:]))
print "Percentage achieved of speed of light: %g" %((np.linalg.norm(V[-1,:])/c) *100)
return R,V,tid, dropout,E,A
"""
def sammenligning():
R_D =(4.5 - 2.1) #extract minus inject
Breal = np.array([0.0 , 0.0 , 8*1.1]) #8 magnets with 1.1 T each
harmonic_num = 6
freq = 50.0e06 #Hz
omega = freq*2*np.pi
rel_gam = (1.0/npsqrt(1-(vel/c)**2))
rel_m = rel_gam*m_p
rel_w =rel_gam*omega
rel_r = vel/rel_w
for i in xrange(np.len(t_test)-1):
if R2[i,0] > -D/2 and R[i,0]< D/2:
E = np.array([E_o*np.cos(w*tid[i]) , 0, 0])
else:
E = np.zeros(3)
if not np.linalg.norm(R[i,:]) > r_D:
A = (e/m_p)*(E + np.cross(V[i],B))
else:
A = 0
"""
if __name__ == "__main__":
    # Run the simulation, then plot trajectory, position and velocity traces.
    R,V,tid,dropout,E,A = oppgave3()
    # Figure 1: particle trajectory in the xy-plane.
    fig1 = plt.figure("Oppgave 3", figsize=(9,9))
    ax1 = fig1.add_subplot(1,1,1)
    #ax1.scatter(R[:,0], R[:,1], color = 'blue')
    ax1.set_xlabel("X Pos[m]"), ax1.set_ylabel("Y Pos[m]")
    p.title("Oppg 3 - $\delta t = 100fS$")
    p.plot(R[:,0], R[:,1])
    p.show()
    # Figure 2: position components vs time.
    fig2 = plt.figure("Oppgave 3", figsize=(7,7))
    plt.plot(tid,R[:,0],label="$x(t)$")
    plt.plot(tid,R[:,1],label="$y(t)$")
    plt.plot(tid,R[:,2],label="$z(t)$")
    plt.title("Oppg3 - $\delta t = 100fS$")
    plt.xlabel("Tid $nS$")
    plt.ylabel("Posisjon [m]")
    plt.legend()
    plt.show()
    # Figure 3: velocity components vs time.
    fig3 = plt.figure("Oppgave 3", figsize=(7,7))
    plt.plot(tid, V[:,0], "y", label="$V_{x}(t)$")
    plt.plot(tid, V[:,1], "r", label = "$V_{y}(t)$")
    plt.plot(tid, V[:,2], "b", label = "$V_{z}$(t)")
    plt.title("Oppg 3 - $\delta t = 100fS$")
    plt.xlabel("Tid $nS$")
    plt.ylabel("V, $m/S$")
    plt.legend()
    plt.show()
|
10,417 | 832caa8c3e815782b98143a08c3f8226f8db8536 | dicionario = {"um": 1, "dois": 2, "tres": 3, "quatro": 4, "cinco": 5, "seis": 6,
              "sete": 7, "oito": 8, "nove": 9, "dez": 10, "onze": 11,
              "doze": 12, "treze": 13, "catorze": 14, "quatorze": 14,
              "quinze": 15, "dezesseis": 16, "dezessete": 17, "dezoito": 18,
              "dezenove": 19, "vinte": 20, "trinta": 30, "quarenta": 40,
              "cinquenta": 50, "sessenta": 60, "setenta": 70, "oitenta":80,
              "noventa": 90, "cem":100, "cento":100, "duzentos": 200,
              "trezentos": 300, "quatrocentos": 400, "quinhentos": 500,
              "seissentos": 600, "setecentos": 700, "oitocentos": 800,
              "novecentos": 900, "mil": 1000}
# NOTE(review): "seissentos" is a misspelling of "seiscentos" (600). Consider
# adding the correct spelling as an extra key, as already done for
# "catorze"/"quatorze"; inputs spelled correctly currently raise KeyError.
# Filler words ignored while parsing an amount.
palavras_proibidas = ("e", "centavos", "centavo")
def numero_por_extenso(numero_como_string):
    """Parse a money amount written out in Portuguese words.

    "real" is normalised to "reais"; the words before the currency word are
    whole reais and the words after it are centavos. When no currency word
    appears, the whole phrase is treated as centavos.
    """
    texto = numero_como_string.replace("real", "reais")
    if "reais" not in texto:
        return numero_inteiro_por_extenso(texto) / 100.0
    partes = texto.split("reais")
    total = numero_inteiro_por_extenso(partes[0])
    if len(partes) > 1:
        total += numero_inteiro_por_extenso(partes[1]) / 100.0
    return total
def numero_inteiro_por_extenso(numero_como_string):
    """Sum the integer value of a number written out in Portuguese words.

    Filler words (palavras_proibidas) are skipped; "mil" multiplies the
    accumulated value (or stands for 1000 when nothing precedes it).
    """
    total = 0
    for palavra in numero_como_string.split():
        if palavra in palavras_proibidas:
            continue
        if palavra == "mil":
            total = 1000 if total == 0 else total * 1000
        else:
            total += dicionario[palavra]
    return total
|
10,418 | c737a456cc6c0c35418221bc39b84721fafe20df | from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import detail_route, list_route
from django.contrib.auth.models import User
from rest_framework import serializers
from jose import jwt
from rest_framework.views import APIView
import helios_auth.models
import sys, json, bcrypt, datetime, json
from auth_utils import *
from api_utils import *
from .serializers import UserSerializer
auth = sys.modules['helios_auth.models']
class AuthSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing only the login credential fields of auth User."""
    class Meta:
        model = User
        fields = ('username', 'password')
class LoginViewSet(viewsets.ModelViewSet):
    """POST {username, password} -> {token, user} login endpoint.

    Verifies the password against the helios_auth user record with bcrypt
    and issues a JWT valid for 50 days. (Python 2 module: note the
    `print >>` statement syntax.)
    """
    queryset = User.objects.none()  # no model rows exposed; only create() is used
    serializer_class = AuthSerializer
    def password_check(self, user, password):
        # bcrypt.hashpw(candidate, stored_hash) reuses the salt embedded in
        # the stored hash, so equality means the candidate password matches.
        return (user and user.info['password'] == bcrypt.hashpw(password.encode('utf8'), user.info['password'].encode('utf8')))
    def create(self, request):
        """Authenticate the posted credentials and return a signed JWT."""
        try:
            print >>sys.stderr, request.body
            login = json.loads(request.body)
            username = login['username'].strip()
            password = login['password'].strip()
            user = auth.User.get_by_type_and_id('password', username)
            user_Serializer = UserSerializer(instance=user, context={'request': request})
            if self.password_check(user, password):
                expiry = datetime.date.today() + datetime.timedelta(days=50)
                # NOTE(review): hard-coded signing key 'seKre8' — should come
                # from settings/secrets, not source code.
                token = jwt.encode({'username': username, 'expiry':str(expiry)}, 'seKre8', algorithm='HS256')
                return Response({'status': '201', 'token':token, 'user': user_Serializer.data})
            raise ValueError('Bad Username or Password')
        except ValueError as err:
            return Response({'status': '400', 'message':str(err)}, status=status.HTTP_400_BAD_REQUEST)
class IsAuthViewsSet(APIView):
    """GET endpoint reporting whether the Authorization header is a valid session."""
    def get(self, request):
        try:
            # check_auth / response / raise_exception / get_error come from
            # the star-imported auth_utils / api_utils helpers.
            user = check_auth(request.META.get('HTTP_AUTHORIZATION'))
            if (user):
                return response(201, {'status': '201', 'hasLogged': True, 'username': user})
            else:
                raise_exception(401,'User not logged in to the system.')
        except Exception as err:
            return get_error(err)
|
10,419 | b6d7654052b94d3282fb19872e575b7c104ceb7f | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libsentry.client import SentryClient
from libsentry.conf import HOSTNAME, PORT
import logging
LOG = logging.getLogger(__name__)
class SentryException(Exception):
  """Raised when a Sentry RPC response carries a non-zero status.

  Wraps the failed response object and exposes its status message.
  """
  def __init__(self, e):
    self.message = e.status.message
    super(SentryException, self).__init__(e)

  def __str__(self):
    return self.message
def get_api(user):
  """Build a SentryApi talking to the configured host/port as user.username."""
  return SentryApi(SentryClient(HOSTNAME.get(), PORT.get(), user.username))
class SentryApi(object):
  """Convenience wrapper around a SentryClient.

  Every call forwards to the underlying thrift client and converts a
  non-zero response status into a SentryException, so callers only ever
  see successful responses. (Python 2 module: `iteritems` is intentional.)
  """

  def __init__(self, client):
    self.client = client

  def _check(self, response):
    # All Sentry thrift responses carry a status; value == 0 means success.
    # Extracted helper: the original repeated this check in every method.
    if response.status.value == 0:
      return response
    raise SentryException(response)

  def create_sentry_role(self, roleName):
    return self._check(self.client.create_sentry_role(roleName))

  def drop_sentry_role(self, roleName):
    return self._check(self.client.drop_sentry_role(roleName))

  def alter_sentry_role_grant_privilege(self, roleName, tSentryPrivilege):
    return self._check(
        self.client.alter_sentry_role_grant_privilege(roleName, tSentryPrivilege))

  def alter_sentry_role_revoke_privilege(self, roleName, tSentryPrivilege):
    return self._check(
        self.client.alter_sentry_role_revoke_privilege(roleName, tSentryPrivilege))

  def alter_sentry_role_add_groups(self, roleName, groups):
    return self._check(self.client.alter_sentry_role_add_groups(roleName, groups))

  def alter_sentry_role_delete_groups(self, roleName, groups):
    return self._check(self.client.alter_sentry_role_delete_groups(roleName, groups))

  def list_sentry_roles_by_group(self, groupName=None):
    """Return [{'name': ..., 'groups': [...]}] for roles granted to groupName."""
    response = self._check(self.client.list_sentry_roles_by_group(groupName))
    return [{'name': role.roleName,
             'groups': [group.groupName for group in role.groups]}
            for role in response.roles]

  def list_sentry_privileges_by_role(self, roleName, authorizableHierarchy=None):
    """Return the role's privileges as plain dicts."""
    response = self._check(
        self.client.list_sentry_privileges_by_role(roleName, authorizableHierarchy))
    return [self._massage_priviledge(privilege) for privilege in response.privileges]

  def list_sentry_privileges_for_provider(self, groups, roleSet=None,
                                          authorizableHierarchy=None):
    return self._check(
        self.client.list_sentry_privileges_for_provider(
            groups, roleSet, authorizableHierarchy))

  def list_sentry_privileges_by_authorizable(self, authorizableSet, groups=None,
                                             roleSet=None):
    """Return [(authorizable_dict, {role: [privilege_dict, ...]}), ...].

    BUG FIX: the status is now checked *before* iterating the response, so a
    failed RPC raises SentryException instead of touching an error payload.
    """
    response = self._check(
        self.client.list_sentry_privileges_by_authorizable(
            authorizableSet, groups, roleSet))
    _privileges = []
    for authorizable, roles in response.privilegesMapByAuth.iteritems():
      _roles = {}
      for role, privileges in roles.privilegeMap.iteritems():
        _roles[role] = [self._massage_priviledge(privilege)
                        for privilege in privileges]
      _privileges.append((self._massage_authorizable(authorizable), _roles))
    return _privileges

  def drop_sentry_privileges(self, authorizableHierarchy):
    return self._check(self.client.drop_sentry_privilege(authorizableHierarchy))

  def rename_sentry_privileges(self, oldAuthorizable, newAuthorizable):
    return self._check(
        self.client.rename_sentry_privilege(oldAuthorizable, newAuthorizable))

  def _massage_priviledge(self, privilege):
    # (sic) historical spelling kept so any external callers/subclasses
    # referencing the private helper keep working.
    return {
      'scope': privilege.privilegeScope,
      'server': privilege.serverName,
      'database': privilege.dbName,
      'table': privilege.tableName,
      'URI': privilege.URI,
      'action': 'ALL' if privilege.action == '*' else privilege.action.upper(),
      'timestamp': privilege.createTime,
      'grantOption': privilege.grantOption == 1,
    }

  def _massage_authorizable(self, authorizable):
    return {
      'server': authorizable.server,
      'database': authorizable.db,
      'table': authorizable.table,
      'URI': authorizable.uri,
    }
|
10,420 | 225d65394029d91972a9c65d82180a8c48b6657e | from Factories.FirebaseFactory.FirebaseClient import FirebaseClient
class WalletFirebaseRepository(FirebaseClient):
    """Firebase repository bound to the "wallets" collection."""
    def __init__(self):
        super().__init__()
        # Collection name used by the inherited client helpers
        # (NOTE(review): presumed — confirm against FirebaseClient's API).
        self.collection = "wallets"
|
10,421 | 90371d77c7c43381281c301fdac05bd3412a7eac | from tkinter import *
root = Tk()  # main application window

def f():
    # Force the window to 20x20 pixels and log a marker.
    root.geometry("20x20")
    print("A")

# NOTE(review): indentation in the original is ambiguous — if this line
# belongs inside f(), the callback re-schedules itself every 50 ms instead
# of running once. Confirm against the original file.
root.after(50, f)
root.mainloop()
10,422 | e4c7195fc2eb653bcb52843761209b0141e6b259 | import os, unittest, argparse, json, client
import xml.etree.ElementTree as ET
class TestPerson(unittest.TestCase):
    """End-to-end tests of client.result() output in JSON and XML.

    Each test checks that the first person is John and that the
    'nationality' field has been stripped from the response.
    """
    # Inline CSV used by the *_direct tests (one blank nationality on row 2).
    test_data = ('first_name,surname,age,nationality,favourite_color,interest\n'
                 'John,Keynes,29,British,red,cricket\n'
                 'Sarah,Robinson,54,,blue,badminton\n')
    def test_json_file(self):
        """
        Test if the JSON response returned is valid - Input through file (sample_data.csv)
        """
        #response = os.system("python3 client.py -f filename.csv")
        response = client.result(False, 'json', 'unittest',file = 'test_file.csv')
        response = json.loads(response)
        first_name = response['person'][0]['first_name']
        self.assertEqual(first_name,'John','Should print John')
        length = len(response['person'])
        for count in range(0,length):
            self.assertNotIn('nationality',response['person'][count], 'Nationality should not be present')
    def test_xml_file(self):
        """
        Test if the XML response returned is valid - Input through file (sample_data.csv)
        """
        response = client.result(False, 'xml', 'unittest', file = 'test_file.csv')
        root = ET.fromstring(response)
        # first person -> first field -> text, per the client's XML layout
        first_name = root[0][0][0].text
        self.assertEqual(first_name,'John', 'Should print John')
        nationality = '<nationality>' in response
        self.assertFalse(nationality, 'Nationality should not be present')
    def test_json_direct(self):
        """
        Test if the JSON response returned is valid - Input directly
        """
        response = client.result(True, 'json', 'unittest', test_data = self.test_data)
        response = json.loads(response)
        first_name = response['person'][0]['first_name']
        self.assertEqual(first_name,'John','Should print John')
        length = len(response['person'])
        for count in range(0,length):
            self.assertNotIn('nationality',response['person'][count], 'Nationality should not be present')
    def test_xml_direct(self):
        """
        Test if the XML response returned is valid - Input directly
        """
        response = client.result(True, 'xml', 'unittest', test_data = self.test_data)
        root = ET.fromstring(response)
        first_name = root[0][0][0].text
        self.assertEqual(first_name,'John', 'Should print John')
        nationality = '<nationality>' in response
        self.assertFalse(nationality, 'Nationality should not be present')
if __name__ == "__main__":
    unittest.main()
|
10,423 | 0091a099e73fd55adfc7899bbdb86d7ae6171854 | import cv2
# Morphological kernel size used to dilate the thresholded image; also feeds
# the per-column white-pixel threshold in get_vertical_lines_2.
rect_width = 10
rect_height = 5
def get_vertical_lines_2(image, line_count):
    """Scan a binary image column by column for near-solid vertical lines.

    A column qualifies when its count of white (255) pixels exceeds
    rows - int(rect_height * line_count * 1.6); returns the qualifying
    column indices.
    """
    rows = image.shape[0]
    cols = image.shape[1]
    threshold = rows - int(rect_height * line_count * 1.6)  # loop-invariant
    found = []
    for x in range(cols):
        white = 0
        for y in range(rows):
            if image[y, x] == 255:
                white += 1
        if white > threshold:
            found.append(x)
    print("一共找到如下竖线" + str(found))
    return found
def get_continuous_lines(vertical_list):
    """Group sorted x-coordinates into runs of consecutive integers.

    Only runs longer than 5 are kept. The index bookkeeping (the extra
    -1/+1 adjustments) mirrors the original scanning scheme exactly.
    """
    runs = []
    pos = 0
    total = len(vertical_list)
    while pos < total:
        current = []
        for candidate in range(vertical_list[pos], vertical_list[-1]):
            if candidate not in vertical_list:
                pos -= 1
                break
            current.append(candidate)
            pos += 1
        pos += 1
        if len(current) > 5:
            runs.append(current)
    print("一共找到{}条连续线".format(str(len(runs))))
    print(runs)
    return runs
# Detect vertical table separators in test3.png and draw them onto the image.
img = cv2.imread('../test3.png')
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# ret, binary = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Otsu picks the binarisation threshold automatically; INV makes ink white.
ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (rect_width, rect_height))
dst = cv2.dilate(binary, kernel)
# NOTE(review): 2-value findContours return implies OpenCV 4.x (or 2.x).
contours, hierarchy = cv2.findContours(dst, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# epsilon = cv2.arcLength(contours[1], True)
# approx = cv2.approxPolyDP(contours[1], epsilon, True)
# Blank out the detected content blocks, then re-threshold to isolate gaps.
cv2.drawContours(img, contours, -1, (0, 0, 0), 2)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
vertical_liens = get_vertical_lines_2(binary, 4)
res_list = get_continuous_lines(vertical_liens)
for i in res_list:
    # Draw one line through the middle of each continuous white band.
    num = int((i[0] + i[-1])/2)
    # NOTE(review): img.shape[1] is the width; for a full-height vertical
    # line the y endpoint should arguably be img.shape[0] — confirm intent.
    cv2.line(img, (num, 0), (num, img.shape[1]), (0, 0, 0), 1)
cv2.imshow("img", img)
cv2.waitKey(0)
10,424 | a2a0c4db2cce39bc3426f3bd6d39957dcb8df655 | from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import User, Group
from rest_framework.viewsets import ReadOnlyModelViewSet
from .serializers import UserSerializer, GroupSerializer
class UserViewSet(ReadOnlyModelViewSet):
    """Read-only (list/retrieve) API endpoints for Django auth Users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class GroupViewSet(ReadOnlyModelViewSet):
    """Read-only (list/retrieve) API endpoints for Django auth Groups."""
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
|
10,425 | b27879a5677a2108f02ea41fecededc280c04162 | from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
from random import randrange
# Quantum rotation automaton demo: three qubits each rotate by a fixed angle
# per input symbol; after a stream whose length is a multiple of 31 all
# qubits return to |0>, so the '000' frequency peaks at those lengths.
# the angles of rotations
theta1 = 3*2*pi/31
theta2 = 7*2*pi/31
theta3 = 11*2*pi/31
# we read streams of length from 1 to 30
for i in range(1,31):
    # quantum circuit with three qubits and three bits
    qreg = QuantumRegister(3)
    creg = ClassicalRegister(3)
    mycircuit = QuantumCircuit(qreg,creg)
    # the stream of length i
    for j in range(i):
        # apply rotations for each symbol
        mycircuit.ry(2*theta1,qreg[0])
        mycircuit.ry(2*theta2,qreg[1])
        mycircuit.ry(2*theta3,qreg[2])
    # we measure after reading the whole stream
    mycircuit.measure(qreg,creg)
    # execute the circuit N times
    N = 1000
    job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=N)
    counts = job.result().get_counts(mycircuit)
    print(counts)
    if '000' in counts.keys():
        c = counts['000']
    else:
        c = 0
    print('000 is observed',c,'times out of',N)
    percentange = round(c/N*100,1)
    print("the ratio of 000 is ",percentange,"%")
    print()
|
10,426 | f032da298207c0e38945f673148cd92dc6d50c5e | from numpy import *
def predict_ratings(US, SV, m, u, v, tt, num_test):
    """Predict ratings for the first num_test (user, movie) pairs in tt.

    Each prediction is dot(US[user_row], SV[:, movie_col]) plus the movie
    bias m[0, movie_col], rounded to 4 decimal places via string formatting
    (kept to match the original's rounding behaviour exactly).
    """
    predictions = []
    for idx in range(num_test):
        row = u[tt[idx, 0]]      # user id -> row index in US
        col = v[tt[idx, 1]]      # movie id -> column index in SV
        score = dot(US[row, :], SV[:, col]) + m[0, col]
        predictions.append(float("{0:.4f}".format(score)))
    return predictions
10,427 | fbb94abedcccc82c76cbbd6fc1c98414cd1f7050 | # Python
from __future__ import annotations
# Internal
from server import server
|
10,428 | 8efd15dc91886dc5418852bad00e4455298e3044 | ''' split.py
Split Data into train (80%) and validate (20%)
Run from root of project as
python scripts/split.py
'''
import os
import glob
import shutil
def mkdir(dirname):
    """Create dirname if it does not already exist (no parents; idempotent)."""
    if os.path.isdir(dirname):
        return
    os.mkdir(dirname)
VALIDATE_DIR = "data/validate"
mkdir(VALIDATE_DIR)
TRAIN_DIR = "data/train"
mkdir(TRAIN_DIR)
# Raw Kaggle-style filenames: data/raw/dog.<index>.jpg / cat.<index>.jpg
# (NOTE(review): the split-on-"." below presumes exactly that pattern).
DOG_FILENAMES = glob.glob("data/raw/dog*")
CAT_FILENAMES = glob.glob("data/raw/cat*")
# N_DOG_TEST = int(len(DOG_FILENAMES) * .2)
# N_CAT_TEST = int(len(CAT_FILENAMES) * .2)
# Move 20% to validate directory
# Indices 0-999 -> train; 1000-1399 -> validate; the rest are ignored.
DOG_TRAIN = "data/train/dogs"
mkdir(DOG_TRAIN)
DOG_VALIDATE = "data/validate/dogs"
mkdir(DOG_VALIDATE)
for f in DOG_FILENAMES:
    _, index, _ = f.split(".")
    if 0 <= int(index) < 1000:
        shutil.copy(f, DOG_TRAIN)
    elif 1000 <= int(index) < 1400:
        shutil.copy(f, DOG_VALIDATE)
CAT_TRAIN = "data/train/cats"
mkdir(CAT_TRAIN)
CAT_VALIDATE = "data/validate/cats"
mkdir(CAT_VALIDATE)
for f in CAT_FILENAMES:
    _, index, _ = f.split(".")
    if 0 <= int(index) < 1000:
        shutil.copy(f, CAT_TRAIN)
    elif 1000 <= int(index) < 1400:
        shutil.copy(f, CAT_VALIDATE)
# Python 2 print statements (file targets Python 2).
print "{0}: {1} files, {2}: {3} files".format(DOG_TRAIN, len(os.listdir(DOG_TRAIN)),
                                              DOG_VALIDATE, len(os.listdir(DOG_VALIDATE)))
print "{0}: {1} files, {2}: {3} files".format(CAT_TRAIN, len(os.listdir(CAT_TRAIN)),
                                              CAT_VALIDATE, len(os.listdir(CAT_VALIDATE)))
|
10,429 | 5baae0826b0f6f57ce43b17ec73dd30e15dbc46a | #!/usr/bin/python2
from stadium import ui
import pytest
import mock
class TestListDialogController:
    """Tests that ListDialogController routes cancel/enter to its callbacks."""
    def test_cancel_calls_callback(self):
        # Cancel should call cancel_func
        a = []
        def cancel_func():
            a.append('cancelled')
        def select_func():
            a.append('selected')
        sut = ui.ListDialogController(None, cancel_func, select_func)
        sut.cancel()
        assert len(a) == 1
        assert a[0] == 'cancelled'
    def test_enter_calls_callback(self):
        # enter() should call select function with value
        a = []
        def cancel_func():
            a.append('cancelled')
        def select_func(value):
            a.append(value)
        sut = ui.ListDialogController(None, cancel_func, select_func)
        sut.enter("Option 1")
        assert len(a) == 1
        assert a[0] == "Option 1"
class TestLabeledMeterController:
    """Tests that LabeledMeterController clamps its value to [left, right]."""
    def setup_method(self):
        # Meter over [0, 100], starting at 50, moving 10 per step.
        self.left_bound = 0
        self.right_bound = 100
        self.initial = 50
        self.shift_amount = 10
        self.widget = mock.Mock()
        self.widget._set_completion = mock.Mock()
        self.sut = ui.LabeledMeterController(
            self.widget,
            self.left_bound, self.right_bound,
            self.initial, self.shift_amount,
        )
    def test_decrement_full(self):
        self.sut.decrement(2)
        self.widget._set_completion.assert_called_once_with(self.initial - self.shift_amount * 2)
    def test_decrement_stopped(self):
        # Decrement shouldn't go past left_bound
        self.sut.decrement(20)
        self.widget._set_completion.assert_called_once_with(self.left_bound)
    def test_increment_full(self):
        self.sut.increment(2)
        self.widget._set_completion.assert_called_once_with(self.initial + self.shift_amount * 2)
    def test_increment_stopped(self):
        # Increment shouldnt' go past right_bound
        self.sut.increment(20)
        self.widget._set_completion.assert_called_once_with(self.right_bound)
    def test_set_completion(self):
        self.sut.set_completion(10)
        self.widget._set_completion.assert_called_once_with(10)
    def test_set_completion_below_lower(self):
        # Set_completion shouldn't go below left_bound
        self.sut.set_completion(
            self.left_bound - 100
        )
        self.widget._set_completion.assert_called_once_with( self.left_bound )
    def test_set_completion_above_upper(self):
        # Set_completion shouldn't go above right_bound
        self.sut.set_completion(
            self.right_bound + 100
        )
        self.widget._set_completion.assert_called_once_with( self.right_bound )
|
10,430 | 8fb696d63c786d144c157d5678b55b80f171d98d | #!/usr/bin/python2
import pygame.joystick
class Joystick:
def __init__(self):
print "Creating Joystick"
self.errors = []
self.init()
self.set_buttons(0, 1, 2, 3,8)
def init(self):
print "Initialising joysticks"
if(pygame.joystick.get_init()):
print "Joystick module active - restarting"
pygame.joystick.quit()
pygame.joystick.init()
if(pygame.joystick.get_count()==0):
print "No Joysticks found."
self.errors.append("No Joystick Found")
return
self.active_joystick = pygame.joystick.Joystick(0)
print "Found joystick: " + self.active_joystick.get_name()
self.active_joystick.init()
self.get_joy_stats()
def set_buttons(self,select_button,back_button,left_button,right_button,quit_button):
self.select_button=select_button
self.back_button=back_button
self.left_button=left_button
self.right_button=right_button
self.quit_button=quit_button
def get_input(self):
pygame.event.pump()
return(
self.active_joystick.get_button(self.select_button),
self.active_joystick.get_button(self.back_button),
self.active_joystick.get_button(self.left_button),
self.active_joystick.get_button(self.right_button),
self.active_joystick.get_button(self.quit_button),
self.active_joystick.get_axis(0),
self.active_joystick.get_axis(1))
def get_joy_stats(self):
if (self.active_joystick.get_numaxes>=2):
self.use_axes=True
else:
self.use_axes=False
if (self.active_joystick.get_numbuttons()>=4):
self.use_buttons=True
else:
self.use_buttons=False
if (self.active_joystick.get_numhats()>=1):
self.use_hat=True
else:
self.use_hat=False
if not self.use_axes and not self.use_hat or not self.use_buttons:
print "Unable to use this joystick"
self.errors.append("Unable to use joystick, not enough buttons/axes")
return 2;
else:
return 0
|
10,431 | 19000b8c3d8e3f5bebaa32c98cf694332d2a0f12 | import os
import sys
# Add the parent directory to sys.path so the sibling `user` module imports.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from user import User
user = User('test123','홍길동')
# Print the initial password.
# NOTE(review): 'get_passowrd' looks misspelled — confirm it matches the
# actual method name on user.User before "fixing" the call site.
print(user.get_passowrd())
# get full name
if user.get_full_name() == '홍길동':
    print('pass => get full name : ', user.get_full_name())
else:
    raise ValueError()
# Print the last (family) name.
if user.get_last_name() == '홍':
    print('pass => get last name : ', user.get_last_name())
else:
    raise ValueError()
# Print the first (given) name.
if user.get_first_name() == '길동':
    print('pass => get first name : ', user.get_first_name())
else:
    raise ValueError()
10,432 | 38b701c0715ecd70c326c13f97505879c8a2c2c6 | # Generated by Django 2.0.10 on 2019-04-30 11:24
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Alertas table.

    Auto-generated by Django — edit with care; only comments added here.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Alertas',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('idAlert', models.IntegerField()),
                ('titulo', models.TextField()),
                ('descrip', models.TextField()),
                # Single-character state flag (max_length on TextField is
                # validation-only, not a DB constraint).
                ('estado', models.TextField(max_length=1)),
                ('falta', models.DateTimeField(default=django.utils.timezone.now)),
                ('ffin', models.DateTimeField(default=django.utils.timezone.now)),
                ('comment', models.TextField()),
            ],
        ),
    ]
|
10,433 | 5e7273c8f9f5ba54c3f9469612df271c40918a2c | import glob
import os
import numpy as np
from chainer import dataset
from chainer.dataset import download
from chainercv.datasets.cityscapes.cityscapes_utils import cityscapes_labels
from chainercv.utils import read_image
class CityscapesSemanticSegmentationDataset(dataset.DatasetMixin):
"""Semantic segmentation dataset for `Cityscapes dataset`_.
.. _`Cityscapes dataset`: https://www.cityscapes-dataset.com
.. note::
Please manually downalod the data because it is not allowed to
re-distribute Cityscapes dataset.
Args:
data_dir (string): Path to the dataset directory. The directory should
contain at least two directories, :obj:`leftImg8bit` and either
:obj:`gtFine` or :obj:`gtCoarse`. If :obj:`None` is given, it uses
:obj:`$CHAINER_DATSET_ROOT/pfnet/chainercv/cityscapes` by default.
label_resolution ({'fine', 'coarse'}): The resolution of the labels. It
should be either :obj:`fine` or :obj:`coarse`.
split ({'train', 'val'}): Select from dataset splits used in
Cityscapes dataset.
ignore_labels (bool): If True, the labels marked :obj:`ignoreInEval`
defined in the original
`cityscapesScripts<https://github.com/mcordts/cityscapesScripts>_`
will be replaced with :obj:`-1` in the :meth:`get_example` method.
The default value is :obj:`True`.
"""
def __init__(self, data_dir=None, label_resolution=None, split='train',
ignore_labels=True):
if data_dir is None:
data_dir = download.get_dataset_directory(
'pfnet/chainercv/cityscapes')
if label_resolution not in ['fine', 'coarse']:
raise ValueError('\'label_resolution\' argment should be eighter '
'\'fine\' or \'coarse\'.')
img_dir = os.path.join(data_dir, os.path.join('leftImg8bit', split))
resol = 'gtFine' if label_resolution == 'fine' else 'gtCoarse'
label_dir = os.path.join(data_dir, resol)
if not os.path.exists(img_dir) or not os.path.exists(label_dir):
raise ValueError(
'Cityscapes dataset does not exist at the expected location.'
'Please download it from https://www.cityscapes-dataset.com/.'
'Then place directory leftImg8bit at {} and {} at {}.'.format(
os.path.join(data_dir, 'leftImg8bit'), resol, label_dir))
self.ignore_labels = ignore_labels
self.label_paths = list()
self.img_paths = list()
city_dnames = list()
for dname in glob.glob(os.path.join(label_dir, '*')):
if split in dname:
for city_dname in glob.glob(os.path.join(dname, '*')):
for label_path in glob.glob(
os.path.join(city_dname, '*_labelIds.png')):
self.label_paths.append(label_path)
city_dnames.append(os.path.basename(city_dname))
for city_dname, label_path in zip(city_dnames, self.label_paths):
label_path = os.path.basename(label_path)
img_path = label_path.replace(
'{}_labelIds'.format(resol), 'leftImg8bit')
img_path = os.path.join(img_dir, city_dname, img_path)
self.img_paths.append(img_path)
def __len__(self):
return len(self.img_paths)
def get_example(self, i):
    """Returns the i-th example.

    Returns a color image and a label image. The color image is in CHW
    format and the label image is in HW format.

    Args:
        i (int): The index of the example.

    Returns:
        tuple of a color image and a label whose shapes are (3, H, W) and
        (H, W) respectively. H and W are height and width of the image.
        The dtype of the color image is :obj:`numpy.float32` and
        the dtype of the label image is :obj:`numpy.int32`.
    """
    img = read_image(self.img_paths[i])
    raw_label = read_image(
        self.label_paths[i], dtype=np.int32, color=False)[0]
    if not self.ignore_labels:
        return img, raw_label
    # Start from an all -1 ("ignore") map, then copy in the train ID of
    # every class that participates in evaluation.
    mapped = np.full(raw_label.shape, -1, dtype=np.int32)
    for entry in cityscapes_labels:
        if not entry.ignoreInEval:
            mapped[raw_label == entry.id] = entry.trainId
    return img, mapped
|
10,434 | b5d218b2cd0f1222144e02367aa6cd4700044eea | """Module containing base class for lookup database tables.
LookupDBObject defines the base class for lookup tables and defines
relevant methods. LookupDBObject inherits from DBObjectUnsharded and
extends the functionality for getting, creating, updating and deleting
the lookup relationship.
"""
from vtdb import db_object_unsharded
class LookupDBObject(db_object_unsharded.DBObjectUnsharded):
    """An implementation of a lookup class stored in an unsharded db."""

    @classmethod
    def get(class_, cursor, entity_id_column, entity_id):
        """Fetch every row whose entity_id_column matches, as plain dicts."""
        matches = class_.select_by_columns(
            cursor, [(entity_id_column, entity_id), ])
        return [match.__dict__ for match in matches]

    @classmethod
    def create(class_, cursor, **bind_vars):
        """Insert a new lookup row from the given column bindings."""
        return class_.insert(cursor, **bind_vars)

    @classmethod
    def update(class_, cursor, sharding_key_column_name, sharding_key,
               entity_id_column, new_entity_id):
        """Repoint the lookup row for sharding_key at a new entity id."""
        return class_.update_columns(
            cursor,
            [(sharding_key_column_name, sharding_key), ],
            [(entity_id_column, new_entity_id), ])

    @classmethod
    def delete(class_, cursor, sharding_key_column_name, sharding_key):
        """Remove the lookup row for sharding_key."""
        return class_.delete_by_columns(
            cursor, [(sharding_key_column_name, sharding_key), ])
|
10,435 | 8b4887e22726f0cf571ebea0174fcef42681a3ce | '''
Abril 17
Autor: Vitoya
'''
# 3x3 board keyed like a numeric keypad ('7' top-left ... '3' bottom-right);
# a space means the square is free.
theBoard = {'7': ' ', '8': ' ', '9': ' ',
            '4': ' ', '5': ' ', '6': ' ',
            '1': ' ', '2': ' ', '3': ' '}
# Snapshot of the keys, used to reset the board between games.
# Idiom fix: list(theBoard) replaces the manual append loop.
boardKeys = list(theBoard)
def printBoard(board):
    """Render the 3x3 board to stdout, top row (7 8 9) down to bottom (1 2 3)."""
    rows = (('7', '8', '9'), ('4', '5', '6'), ('1', '2', '3'))
    for index, row in enumerate(rows):
        print(board[row[0]] + '|' + board[row[1]] + '|' + board[row[2]])
        if index < 2:
            print('------')
#printBoard(theBoard)
def game():
    """Run one interactive round of tic-tac-toe on the shared module board.

    Players X and O alternate; input is the keypad-style square number.
    On a win or a draw the round ends and the player may restart, which
    clears the board and recurses into a fresh round.
    """
    # All eight winning lines: rows, diagonals, columns.
    win_lines = (('7', '8', '9'), ('4', '5', '6'), ('1', '2', '3'),
                 ('7', '5', '3'), ('1', '5', '9'),
                 ('7', '4', '1'), ('8', '5', '2'), ('9', '6', '3'))
    turn = 'X'
    count = 0
    while count < 9:
        printBoard(theBoard)
        print("Es el turno de la "+turn+" especifica el lugar donde quieres jugar")
        move = input()
        if move not in theBoard:
            print("Por favor escribe un numero del 1 al 9")
            continue
        if theBoard[move] != ' ':
            print("Lo siento este lugar ya se encuentra ocupado, intenta con otro...")
            continue
        theBoard[move] = turn
        count += 1
        # Deduplicated: the original repeated this block once per line.
        if count >= 5 and any(
                theBoard[a] == theBoard[b] == theBoard[c] != ' '
                for a, b, c in win_lines):
            printBoard(theBoard)
            print("El juego ha terminado")
            print("El jugador "+turn+", gana el juego")
            break
        if count == 9:
            # Bug fix: the draw case previously fell through and kept
            # prompting for moves on a full board.
            print("El juego ha terminado")
            print("Es un empate!!")
            break
        turn = 'O' if turn == 'X' else 'X'
    restart = input("Quieres volver a jugar? (Y/N)")
    if restart == 'y' or restart == 'Y':
        for key in boardKeys:
            theBoard[key] = ' '
        game()
# Start an interactive game when executed as a script.
if __name__=="__main__":
    game()
10,436 | 35be3e3d4596a80cf26a3c1e47da41d85a9efc86 | import requests
import json
import csv
from os.path import dirname
def sendMessage(jsonId):
    """Post an earthquake-report embed (with the rendered map image) to every
    Discord webhook listed in data/webhookURL.csv.

    Args:
        jsonId (str): ID of the JMA earthquake JSON under tmp/json/ and of
            the rendered image directory under img/old/.
    """
    baseURLDir = dirname(__file__)
    with open(baseURLDir + '/tmp/json/' + jsonId + '.json', 'r') as json_file:
        json_load = json.load(json_file)
    # Bug fix: the webhook list was read from the same CSV twice in a row;
    # read it once, and close the handle via the context manager.
    with open(baseURLDir + '/data/webhookURL.csv') as f:
        webhookUrlList = [row for row in csv.reader(f)]
    # SECURITY(review): a hard-coded webhook URL (embedding its secret token)
    # was removed here; it was dead code anyway because the loop below
    # always overwrote it. Rotate that token if it was ever live.
    embed = {
        "author": {
            "name": "地震情報 byBotName",
        },
        "title": "地震情報",
        "url": "https://www.jma.go.jp/bosai/map.html?contents=earthquake_map",
        "description": json_load["Head"]["Headline"]["Text"] + " [気象庁 地震MAP](https://www.jma.go.jp/bosai/map.html?contents=earthquake_map)",
        "color": 15258703,
        # Bug fix: the dict literal contained two "image" keys; only this
        # (attachment-based) one survived, so the dead duplicate is gone.
        "image": {
            "url": "attachment://all.png"
        },
        "fields": [
            {
                "name": "震源地",
                "value": json_load["Body"]["Earthquake"]["Hypocenter"]["Area"]["Name"]
            },
            {
                "name": "マグニチュード",
                "value": "M " + json_load["Body"]["Earthquake"]["Magnitude"],
                "inline": True
            },
            {
                "name": "最大震度",
                "value": json_load["Body"]["Intensity"]["Observation"]["MaxInt"],
                "inline": True
            },
            {
                "name": "地震ID",
                "value": jsonId,
                "inline": True
            }
        ],
        "footer": {
            "text": "made by saladbowl",
            "icon_url": "https://pbs.twimg.com/profile_images/1284044313312329728/TAJzweRl_400x400.jpg"
        }
    }
    # Bug fix: serialize exactly once, outside the loop. The old code called
    # json.dumps on payload['payload_json'] inside the loop, so the second
    # and later webhooks received a double-encoded (JSON-quoted string) body.
    payload = {
        "payload_json": json.dumps({"username": "Webhook", "embeds": [embed]},
                                   ensure_ascii=False)
    }
    # Attach the pre-rendered intensity map as multipart file data.
    with open(baseURLDir + '/img/old/' + jsonId + "/all.png", 'rb') as f:
        map_png = f.read()
    files_qiita = {
        "earthquakeMap": ("all.png", map_png),
    }
    for webhookUrl in webhookUrlList:
        res = requests.post(webhookUrl[0], data=payload, files=files_qiita)
        print(res.status_code)
        print(json.dumps(json.loads(res.content), indent=4, ensure_ascii=False))
10,437 | e1b547b86f286e57f948dc3f7ec5b068ab63b75e | from flask import Flask
from flask import render_template, request, url_for, jsonify
import pickle
import numpy as np
import pandas
app = Flask(__name__,template_folder="templates")
# Model and label encoder are unpickled once at import time; the paths are
# relative to the process working directory, not this file.
model = pickle.load(open("rf_model.pkl","rb"))
label = pickle.load(open("label_encoder.pkl","rb"))
# Feature column order expected by the model, and the subset of columns
# that must be label-encoded before prediction.
columns = ["age","workclass","fnlgwt","education","education_num","marital_status","occupation",
           "relationship","race","sex","capital_gain","capital_loss","hours_per_week","native_country"]
encode_columns = ["occupation","workclass","education","marital_status","relationship","race","sex","native_country"]
@app.route("/")
def index():
    """Serve the census input form (GET; form submission is handled by predict)."""
    return render_template("adult.html")
def encode_data(column: str, data):
    """encodes the column using label encoder"""
    # NOTE(review): fit_transform RE-FITS the shared encoder on the single
    # submitted row, so every category maps to 0 regardless of the training
    # encoding; label.transform with the pickled fit is probably what was
    # intended — confirm before changing.
    return label.fit_transform(data[column])
@app.route("/",methods=["POST"])
def predict():
    """Handle the submitted census form and render the income prediction.

    Reads the 14 model features from the POSTed form, label-encodes the
    categorical columns, and renders adult.html with the prediction text.
    """
    if request.method == "POST":
        req = request.form
        # Numeric fields fall back to 0 when absent so int() cannot fail.
        age = int(req.get("age", default=0))
        workclass = req.get("workclass", default=0)
        # Bug fix: this previously read the misspelled field "fblgwt", so the
        # submitted value was always dropped. TODO(review): confirm the input
        # is named "fnlgwt" in templates/adult.html.
        fnlgwt = int(req.get("fnlgwt", default=0))
        education = req.get("education", default=0)
        education_num = int(req.get("education_num", default=0))
        marital_status = req.get("marital_status", default=0)
        occupation = req.get("occupation", default=0)
        relationship = req.get("relationship", default=0)
        race = req.get("race", default=0)
        sex = req.get("sex", default=0)
        capital_gain = int(req.get("capital_gain", default=0))
        capital_loss = int(req.get("capital_loss", default=0))
        hours_per_week = int(req.get("hours_per_week", default=0))
        native_country = req.get("native_country", default=0)
        # Single-row feature matrix in the exact training column order.
        array = np.array([age, workclass, fnlgwt, education, education_num,
                          marital_status, occupation, relationship, race, sex,
                          capital_gain, capital_loss, hours_per_week,
                          native_country]).reshape(1, 14)
        # Consistency fix: reuse the module-level `columns` list instead of
        # repeating the literal.
        data = pandas.DataFrame(data=array, columns=columns)
        # Label-encode every categorical column.
        for column in encode_columns:
            data[column] = encode_data(column, data)
        value = model.predict(data)
        if value == [1]:
            text = "This worker will earn over $50k dollars at the end of the year"
        else:
            text = "This worker will earn less than $50k dollars at the end of the year"
        return render_template("adult.html", text=text)
# app.route("/results",methods=["POST"])
# def results():
# #this gets the result from the user and return the json representation
# data = request.get_json(force=True)
# frame = pandas.DataFrame(data.values(),columns=columns)
# frame["occupation"] = label.fit_transform(frame["occupation"])
# frame["workclass"] = label.fit_transform(frame["workclass"])
# frame["education"] = label.fit_transform(frame["education"])
# frame["marital_status"] = label.fit_transform(frame["marital_status"])
# frame["relationship"] = label.fit_transform(frame["relationship"])
# frame["race"] = label.fit_transform(frame["race"])
# frame["sex"] = label.fit_transform(frame["sex"])
# frame["native_country"] = label.fit_transform(frame["native_country"])
# value = model.predict(frame)
# #this is for determining the position
# if value == [1]:
# text = "This person will earn over $50k dollars"
# else:
# text="This person will earn less than $50k dollars"
# return jsonify(text)
if __name__ == "__main__":
    # Bug fix: this guard previously contained a second, slightly divergent
    # copy of the entire application (imports, pickle loads, routes) plus its
    # own nested __main__ check, so running the module rebuilt a brand-new
    # Flask app and re-registered every route on it. Starting the app defined
    # above is all that is needed here.
    app.run(debug=True)
10,438 | 9049371a4c88edf184c6f83ad164bb4e1f50c0d4 | from pyBuilder import *
class MyGUIBuilder(BuildCMakeTarget):
    """Build pipeline (extract/configure/build/install) for the MyGUI library
    as a static Ogre-backed target on Windows/MSVC."""

    def __init__(self):
        self.initPaths()

    def extract(self):
        """Unpack the MyGUI sources plus the Ogre dependency bundle (freetype)."""
        res = self.unzip('files/mygui-*.zip')
        # unpack dependencies, needed for freetype
        res |= self.unzip('files/OgreDependencies_MSVC_*.zip', self.path+'/mygui-*')
        return res

    def configure(self):
        """Generate the per-arch CMake build tree (static, demos/tools off)."""
        folder = self.getFirstFolder()
        self.mkd(folder+'/build_'+self.arch)
        return self.execute(r"""
cd %(path)s\mygui*
@call:checkReturnValue
cd build_%(arch)s
@call:checkReturnValue
cmake .. -G %(generator)s ^
-DMYGUI_STANDALONE_BUILD=ON^
-DOGRE_LIB_DIR="%(depsdir_cmake)s/libs/%(arch)s/ogre/%(conf)s/" ^
-DOGRE_INCLUDE_DIR="%(depsdir_cmake)s/includes/%(arch)s/ogre/" ^
-DOgre_LIBRARIES=OgreMain%(debug_d)s ^
-DMYGUI_BUILD_DEMOS=OFF ^
-DMYGUI_BUILD_PLUGINS=OFF ^
-DMYGUI_BUILD_TOOLS=OFF ^
-DMYGUI_RENDERSYSTEM=3 ^
-DMYGUI_DEPENDENCIES_DIR=Dependencies ^
-DMYGUI_STATIC=ON
@call:checkReturnValue
""")

    def build(self):
        """Run msbuild on the generated solution."""
        return self.execute(r"""
cd %(path)s\mygui*
@call:checkReturnValue
cd build_%(arch)s
@call:checkReturnValue
msbuild %(target)s.sln /t:rebuild /p:Configuration=%(configuration)s /p:Platform=%(platform)s /verbosity:%(vsverbosity)s /nologo /maxcpucount:%(maxcpu)d
@call:checkReturnValue
""")

    def install(self):
        """Copy headers, libraries and binaries into the install layout."""
        folder = self.getFirstFolder()
        res = self.installIncludes(folder+'/MyGUIEngine/include/*.h')
        res |= self.installIncludes(folder+'/Platforms/Ogre/OgrePlatform/include/*.h')
        res |= self.installBinaries(folder+'/build_%(arch)s/bin/%(conf)s/*.exe')
        res |= self.installLibs(folder+'/build_%(arch)s/lib/%(conf)s/*.lib')
        res |= self.installBinaries(folder+'/build_%(arch)s/lib/%(conf)s/*.pdb', False) #optional
        res |= self.installLibs(folder+'/Dependencies/lib/%(conf)s/freetype.lib')
        res |= self.installBinaries(folder+'/Dependencies/lib/%(conf)s/*.pdb', False) #optional
        # Bug fix: the accumulated result was discarded before (hard-coded
        # "return 0"), so install failures were reported as success.
        return res
10,439 | 0b847c67efc34cd2e4673f611bf337dd62fabe1f | # coding: utf-8
# @Time : 2020/7/10 11:00
# @Author : Liu rucai
# @Software : PyCharm
import random
import pandas as pd
import numpy as np
import os
import datetime
import sys
# Shared search state: isGO flips to False once a matching subset is found,
# and `list` (which shadows the builtin) holds that subset's values.
isGO = True
list = []


def sum_list(bool_list, n, now_sum):
    """Depth-first search for a subset of the global sum_l summing to sum_num.

    Args:
        bool_list: per-index inclusion flags for the current branch
        n: index of the element currently being decided
        now_sum: sum of the values selected so far

    Relies on module globals: sum_l (candidate values), sum_num (target),
    isGO and list (result channel). Matched values are zeroed in sum_l so
    they cannot be reused by a later search.
    """
    global isGO
    global list
    if isGO == False:
        return list
    if n >= len(sum_l):
        return
    if (now_sum + sum_l[n] == sum_num and sum_l[n] != 0):  # adding this value hits the target exactly
        bool_list[n] = True  # mark this value as part of the solution
        list1 = []
        list2 = []
        for i, j in enumerate(bool_list):
            if j:
                # print(sum_l[i], end=' ')  # print every selected value
                # print(i, end=' ')
                if sum_l[i] != 0:
                    list1.append(sum_l[i])
                    list2.append(i)
                    sum_l[i] = 0  # consume the value so later windows skip it
        print('数值:', list1)
        print('序列:', list2)
        list = list1
        isGO = False
        # exit()  # would abort the whole program
        # print()
    bool_list[n] = True  # branch 1: include this element
    sum_list(bool_list, n + 1, now_sum + sum_l[n])
    bool_list[n] = False  # branch 2: skip it
    sum_list(bool_list, n + 1, now_sum)
    return list
if __name__ == '__main__':
    print('请将data.csv文件置于同一文件夹下')
    path = './data.csv'
    sum_l1 = pd.read_csv(path)
    sum_input = input('Input the number of sum:')
    # Bug fix: the bound was len(sum_l1 - 24), which subtracts 24 from every
    # cell and leaves the length unchanged; the intent is to slide a window
    # over the series, stopping short of the end.
    for i in range(len(sum_l1) - 24):
        # NOTE(review): the window is 23 elements ([i:i+23]); confirm whether
        # a 24-element window ([i:i+24]) was intended.
        sum_l = sum_l1['data'][i:i+23].tolist()
        start = datetime.datetime.now()
        sum_num = float(sum_input)
        bool_list = [False for _ in sum_l]
        # Bug fix: re-arm the search for each window; sum_list leaves the
        # global isGO flag False after a hit, which made every later window
        # a no-op.
        isGO = True
        sum_list(bool_list, 0, 0)
        print('done')
        end = datetime.datetime.now()
        print(end - start)
|
10,440 | adf7ab792f7539dfe6ff90ed2d5d18b6ddd7398c |
def wrap_around(string, offset):
    """Rotate *string* left by *offset* positions, wrapping around.

    Negative offsets rotate right; any magnitude is reduced modulo the
    string length.

    Args:
        string (str): Text to rotate.
        offset (int): Number of positions to shift.

    Returns:
        str: The rotated string. The empty string is returned unchanged
        (the old code raised ZeroDivisionError from ``offset % 0``).
    """
    if not string:
        return string
    offset = offset % len(string)
    return string[offset:] + string[:offset]
10,441 | cfe897ed9651a3bc8eae8c0c739d353e0106f461 | """import smtplib
import email.utils
#from smtplib import SMTP
massge = "this is just letter from python"
import smtplib
try:
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
except:
print ("Something went wrong...")
mail.starttls()
mail.login('boooooo.2018@gmail.com','adgjmpw12345')
mail.sendmail ('boooooo.2018@gmail.com','mohammad.sawas2016@gmail.com',massge)
import smtplib
#import email.utils
gmail_user = "boooooo.2018@gmail.com"
gmail_password = "adgjmpw12345"
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(gmail_user, gmail_password)
except:
print ("Something went wrong...")"""
import smtplib

# Connect to Gmail's submission port and upgrade the socket to TLS before
# authenticating.
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
# SECURITY(review): credentials are hard-coded in source; move them to an
# environment variable or config file and rotate this password.
server.login("boooooo.2018@gmail.com", "adgjmpw12345")
msg = "this is just my first python massge !"
server.sendmail("boooooo.2018@gmail.com", "mohammad.sawas2016@gmail.com", msg)
server.quit()
10,442 | 6bece2b738cab259221b05dd4df1ad247f2d6d83 | #!/usr/bin/env python
__author__ = "Alberto Riva, ICBR Bioinformatics Core"
__contact__ = "ariva@ufl.edu"
__copyright__ = "(c) 2019, University of Florida Foundation"
__license__ = "MIT"
__date__ = "Mar 19 2019"
__version__ = "1.0"
import sys
import gzip
import os.path
import subprocess as sp
# Global
SHELL_VERBOSE = False
# Utilities
def missingOrStale(target, reference=None):
    """Return True if file `target' is missing, or is older than `reference'."""
    if not os.path.isfile(target):
        return True
    if not reference:
        # No reference given: an existing target is never stale.
        return False
    return os.path.getmtime(target) < os.path.getmtime(reference)
def shell(commandline, verbose=None):
    """Execute the specified command in a subshell and return its output.

    Args:
        commandline (str): Command passed to the system shell.
        verbose (bool | None): If True, print the command being executed to
            standard error. Defaults to the module-level SHELL_VERBOSE flag;
            the old signature captured SHELL_VERBOSE at import time, so
            toggling the flag at runtime had no effect.

    Returns:
        bytes: The command's standard output.
    """
    if verbose is None:
        verbose = SHELL_VERBOSE
    if verbose:
        sys.stderr.write("[Executing: " + commandline + "]\n")
    return sp.check_output(commandline, shell=True)
def linkify(url, name, target="_blank"):
    """Return an HTML anchor for `url`; a None `name` falls back to the basename."""
    label = os.path.split(url)[1] if name is None else name
    return "<A target='{}' href='{}'>{}</A>".format(target, url, label)
def get_iterator(dict):
    """Return a key/value iterator over `dict`, portable across Python 2 and 3.

    The parameter keeps its historical (builtin-shadowing) name so keyword
    callers are unaffected.
    """
    # Bug fix: PYTHON_VERSION is not defined anywhere in this module, so the
    # old comparison raised NameError; derive the major version from sys.
    if sys.version_info[0] == 2:
        return dict.iteritems()
    return dict.items()
def genOpen(filename, mode):
    """Generalized open() function - works on both regular files and .gz files."""
    # Pick the opener from the extension; everything else is a plain file.
    opener = gzip.open if os.path.splitext(filename)[1] == ".gz" else open
    return opener(filename, mode)
def decodeUnits(x):
    """Split a numeric string with an optional unit suffix into (digits, factor).

    Args:
        x (str): e.g. "5G", "3m", "42".

    Returns:
        tuple(str, int): the string without its suffix and the multiplier it
        implies (G/g -> 10**9, M/m -> 10**6, otherwise 1).
    """
    # Consistency fix: accept lowercase 'g' too, matching the 'M'/'m' branch.
    if x.endswith("G") or x.endswith("g"):
        return (x[:-1], 1000000000)
    if x.endswith("M") or x.endswith("m"):
        return (x[:-1], 1000000)
    return (x, 1)
def printWithUnits(b, digits=2):
    """Format byte count `b` as a short human-readable string (K/M/G/T/P)."""
    if b < 1024:
        return str(b)
    fmt = "{{:.{}f}} ".format(digits)
    value = b
    # Walk up the binary prefixes until the value fits under 1024.
    for suffix in ("K", "M", "G", "T"):
        value = value / 1024.0
        if value < 1024:
            return fmt.format(value) + suffix
    return fmt.format(value / 1024.0) + "P"
def parseFraction(f):
    """Parse a fraction (a string of the form N/D) returning a float.
    Returns None if f is not in the form N/D, or if D is 0."""
    sep = f.find("/")
    if sep < 1:
        # No slash, or nothing before it.
        return None
    try:
        num = int(f[:sep])
        den = int(f[sep + 1:])
    except ValueError:
        return None
    return 1.0 * num / den if den else None
class Output():
    """A class that returns a stream to an open file, or sys.stdout if the
    filename is None or '-'.

    Bug fix: the old implementation returned None for destination=None and
    tried to genOpen(sys.stdout) for '-'; both stdout spellings now yield
    sys.stdout, and only streams this class opened are closed on exit.
    """
    destination = None  # target filename, or None meaning standard output
    out = None          # stream handed out by __enter__

    def __init__(self, destination):
        # Normalize '-' to None so both spellings mean "standard output".
        if destination == '-':
            destination = None
        self.destination = destination

    def __enter__(self):
        if self.destination:
            self.out = genOpen(self.destination, "w")
        else:
            self.out = sys.stdout
        return self.out

    def __exit__(self, type, value, traceback):
        # Never close sys.stdout.
        if self.destination:
            self.out.close()
class ShellScript():
    """Context manager yielding a write stream to a new bash script; on exit
    the file is closed and made executable (best effort)."""
    filename = ""
    out = None

    def __init__(self, filename):
        self.filename = filename

    def __enter__(self):
        stream = open(self.filename, "w")
        stream.write("#!/bin/bash\n\n")
        self.out = stream
        return stream

    def __exit__(self, type, value, traceback):
        self.out.close()
        try:
            os.chmod(self.filename, 0o770)  # rwxrwx---
        except:
            pass
# Simulate case / typecase
def case(datum, choices):
    """Dispatch on `datum` itself: call choices[datum](datum) when present, else None."""
    if datum not in choices:
        return None
    return choices[datum](datum)
def typecase(datum, choices):
    """Dispatch on the type *name* of `datum`; returns None when unhandled."""
    key = type(datum).__name__
    if key not in choices:
        return None
    return choices[key](datum)
|
10,443 | e3aa7c9485f02828bd969a5dc9bdd72b8e47f050 | # This is the weather module.
import keys
from urllib.request import urlopen
from twilio.rest import Client
import json
def is_valid(number, digits):
    """Return True when `number` is a string of exactly `digits` decimal digits.

    Bug fix: the function previously had no success return, so it yielded
    None (falsy) for valid input and every caller's `if not is_valid(...)`
    guard rejected valid values too.
    """
    if len(number) != digits or not number.isdigit():
        return False
    return True
def get_degree(zip_code):
    """Return the current temperature for a US ZIP code as a rounded string
    (imperial units), or False for a malformed ZIP.

    NOTE(review): this guard depends on is_valid returning a truthy value
    for valid input — verify is_valid actually returns True on success.
    """
    if not is_valid(zip_code, 5):
        return False
    # OpenWeatherMap current-weather endpoint; the API key comes from the
    # local keys module.
    url = "http://api.openweathermap.org/data/2.5/weather?zip=" + zip_code \
        + ",us&units=imperial&appid=" + keys.weather_api
    raw_json = urlopen(url).read()
    data = json.loads(raw_json)
    temp = data['main']['temp']
    return str(round(temp))
def send_text(phone_number, temp=None):
    """Send an SMS reporting the temperature via Twilio.

    Args:
        phone_number (str): 10-digit US phone number (no country code).
        temp (float | None): Temperature to report. Bug fix: the old body
            referenced undefined names (`temp`, `phone`) and always raised
            NameError; the temperature is now an explicit parameter and the
            destination uses phone_number.

    Returns:
        The Twilio message object, or False for an invalid number.
    """
    if not is_valid(phone_number, 10):
        return False
    client = Client(keys.account_sid, keys.auth_token)
    sms_body = "It is " + str(round(temp)) + " degrees outside."
    message = client.messages.create(
        body=sms_body,
        from_=keys.from_number,
        to='+1' + phone_number
    )
    return message
|
10,444 | fb3c3cdab3e7e304e685afa3d226ace324d59bc5 | # Generated by Django 2.1.5 on 2019-01-09 00:52
from django.db import migrations, models
# Auto-generated Django migration for django_db_logger: adjusts StatusLog
# admin metadata and the created-timestamp field labelling.
class Migration(migrations.Migration):

    # Applies on top of the app's initial schema.
    dependencies = [
        ('django_db_logger', '0001_initial'),
    ]

    operations = [
        # Order log entries newest-first and give the model readable names.
        migrations.AlterModelOptions(
            name='statuslog',
            options={'ordering': ('-create_datetime',), 'verbose_name': 'Logging', 'verbose_name_plural': 'Logging'},
        ),
        # Re-declare the auto-set creation timestamp with its display name.
        migrations.AlterField(
            model_name='statuslog',
            name='create_datetime',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Created at'),
        ),
    ]
|
10,445 | 3bb5e10459e633a57993be7fb33377c48bdf769b | from flask import Flask, render_template
from game_of_life import GameOfLife
app = Flask(__name__)
# NOTE(review): this module-level handle is never assigned or read again —
# the view functions construct GameOfLife directly; confirm before removing.
game_of_life = None
@app.route('/')
def index():
    """Landing page: seed a 20x20 world, then render the static shell.

    NOTE(review): the GameOfLife(20, 20) instance is not stored — presumably
    the class keeps shared/singleton state that live() picks up; confirm,
    otherwise this call has no effect.
    """
    GameOfLife(20, 20)
    return render_template('index.html')
@app.route('/live')
def live():
    """Advance the world one generation (after the first view) and render it.

    NOTE(review): GameOfLife() is called here with no dimensions — presumably
    it returns the shared world seeded by index(), and counter/bump track the
    generation number; confirm against game_of_life.py.
    """
    life = GameOfLife()
    if life.counter > 0:
        life.form_new_generation()
    life.bump()
    generation = life.world
    return render_template('live.html', generation=generation, life=life)
# Bind to all interfaces so the dev server is reachable from outside the host/container.
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5000)
|
10,446 | 8072c709f042a0be7a05b727bd0f32fae852eb78 | from gensim.models import Word2Vec
import gensim.downloader as api
# 'text8' is a Wikipedia text sample fetched via gensim's dataset downloader.
corpus = api.load('text8')
# Train word2vec with library defaults and persist only the keyed vectors.
model = Word2Vec(corpus)
model.wv.save("text8vectors.wordvectors")
|
10,447 | 205ac6e49769dabdd0ab09cd58606005f040e43e | # to read in an integer and double it
# python script.py
user_input = int(input('Type in an integer ')) # converts user input to int
double = user_input * 2 # multiply user input by 2
# print('User input = {}'.format(double))
print('You entered {}: Result doubled = {}'.format(user_input, double))
|
10,448 | 9810e045fefda259ba17ba3db90a162bfd92d553 | #!/usr/bin/python
"""
This is the code to accompany the Lesson 1 (Naive Bayes) mini-project.
Use a Naive Bayes Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from tools.email_preprocess import preprocess
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
# Gaussian Naive Bayes baseline for the email-author features.
classifier = GaussianNB()
t0 = time()
classifier.fit(features_train, labels_train)
# get training time
training_time = time() - t0
t1 = time()
labels_predict = classifier.predict(features_test)
# get prediction time
prediction_time = time() - t1
accuracy = accuracy_score(labels_test, labels_predict)
# NOTE(review): Python 2 print statements below — this file targets Python 2.
print 'training_time: ', round(training_time, 3), 's'
print 'prediction_time: ', round(prediction_time, 3), 's'
print 'accuracy: ', accuracy
#########################################################
|
10,449 | 2de35309d010027f3fdcedc5f0b42493a6ce6809 | import os
import logging
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import defaultdict
from time import time
class DataLoader:
    """
    Reads the object-tag, user-object and label CSV tables used by the
    recommendation pipeline.
    """

    def __init__(self, id_prefix='', header_file=None):
        """
        Construct a DataLoader instance

        Keyword args:
            id_prefix (str): Prefix prepended to user and object IDs
                (currently 'so' or 'gh')
            header_file: Unused; kept for interface compatibility.
        """
        self.id_prefix = id_prefix

    def _with_prefix(self, raw_id):
        # All IDs are namespaced per platform, e.g. 'so_123'.
        return '{}_{}'.format(self.id_prefix, raw_id)

    def load_object_tag(self, path_file):
        """
        Read a two-column CSV of (object ID, ';'-separated tag list).

        Args:
            path_file (str): Data location path

        Returns:
            pd.DataFrame: columns 'object_id' (prefixed str) and
            'object_tags' (list of stripped tag strings)
        """
        if '.csv' not in path_file:
            raise FileNotFoundError('Only CSV format is supported currently')
        started = time()
        frame = pd.read_csv(path_file, sep=',', header=None)
        if frame.shape[1] != 2:
            raise RuntimeError('Object tag data should only consist of object ID and its tags (separated by ;)')
        frame.columns = ['object_id', 'object_tags']
        frame['object_id'] = frame['object_id'].map(self._with_prefix)
        frame['object_tags'] = frame['object_tags'].map(
            lambda raw: [tag.strip() for tag in raw.split(';')])
        logging.info('Loading object tag data with {} rows from {} takes {} secs'.format(
            frame.shape[0], path_file, time() - started))
        return frame

    def load_user_object(self, path_file):
        """
        Read a two-column CSV of (user ID, object ID).

        Args:
            path_file (str): Data location path

        Returns:
            pd.DataFrame: columns 'user_id' and 'object_id', both prefixed
        """
        if '.csv' not in path_file:
            raise FileNotFoundError('Only CSV format is supported currently')
        started = time()
        frame = pd.read_csv(path_file, sep=',', header=None)
        if frame.shape[1] != 2:
            raise RuntimeError('User object data should only consist of user ID and object ID')
        frame.columns = ['user_id', 'object_id']
        frame['user_id'] = frame['user_id'].map(self._with_prefix)
        frame['object_id'] = frame['object_id'].map(self._with_prefix)
        logging.info('Loading user object data with {} rows from {} takes {} secs'.format(
            frame.shape[0], path_file, time() - started))
        return frame

    @staticmethod
    def load_label(path_file):
        """
        Read label data (format not yet finalised).

        Args:
            path_file (str): Data location path

        Returns:
            pd.DataFrame: Label dataframe (currently always empty)
        """
        if '.csv' not in path_file:
            raise FileNotFoundError('Only CSV format is supported currently')
        started = time()
        frame = pd.DataFrame()
        with open(path_file, 'r') as f:
            # TODO: Implement the logic once the format is finalised
            pass
        logging.info('Loading label data with {} rows from {} takes {} secs'.format(
            frame.shape[0], path_file, time() - started))
        return frame
class DataProcessor:
@staticmethod
def aggregate_user_tags(user_obj_df, obj_tag_df):
"""
Aggregate user tags from user-object and object-tag data
Args:
user_obj_df (pd.DataFrame): User object dataframe
obj_tag_df (pd.DataFrame): Object tag dataframe
Returns:
pd.DataFrame: Computed user tag dictionary
"""
t0 = time()
user_obj_dict = dict(zip(user_obj_df['user_id'], user_obj_df['object_id']))
obj_tag_dict = dict(zip(obj_tag_df['object_id'], obj_tag_df['object_tags']))
user_tag_dict = defaultdict(list)
for user_id, object_id in user_obj_dict.items():
user_tag_dict[user_id].extend(obj_tag_dict.get(object_id, []))
df = pd.DataFrame([{'user_id': id, 'user_tags': tags} for id, tags in user_tag_dict.items()])
logging.info('Aggregating tags for {} users takes {} secs'.format(df.shape[0], time() - t0))
return df
@staticmethod
def compute_user_tag_features(user_tag_df, vectorizer=TfidfVectorizer()):
"""
Computes user features by merging user tags from multiple platforms
Args:
user_tag_df (pd.DataFrame): List of user tag dictionaries
Keyword args:
vectorizer: Feature vectorizer (Default: TfidfVectorizer)
Returns:
List: List of user ids
sp.csr_matrix: SciPy sparse matrix representation of features with shape of (n_users, n_features)
"""
t0 = time()
user_ids = user_tag_df['user_id'].tolist()
user_tag_features = vectorizer.fit_transform(user_tag_df['user_tags'].map(lambda x: ' '.join(x)))
logging.info('Computing user features with shape takes {} secs'.format(user_tag_features.shape, time() - t0))
return user_ids, user_tag_features
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Resolve <repo>/data/SO_GH relative to this file (three levels up).
    root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    data_path = os.path.join(root_path, 'data', 'SO_GH')

    # --- StackOverflow: users -> questions -> question tags -> features ---
    so_user_question_file = os.path.join(data_path, 'user_question.csv.gz')
    so_question_tag_file = os.path.join(data_path, 'question_tag.csv.gz')
    so_loader = DataLoader(id_prefix='so')
    so_user_question_df = so_loader.load_user_object(so_user_question_file)
    so_question_tag_df = so_loader.load_object_tag(so_question_tag_file)
    so_user_tag_df = DataProcessor.aggregate_user_tags(so_user_question_df, so_question_tag_df)
    so_user_ids, so_user_tag_features = DataProcessor.compute_user_tag_features(so_user_tag_df)

    # --- GitHub: users -> repositories -> repo tags -> features ---
    gh_user_repo_file = os.path.join(data_path, 'user_repository.csv.gz')
    gh_repo_tag_file = os.path.join(data_path, 'repository_tag.csv.gz')
    gh_loader = DataLoader(id_prefix='gh')
    gh_user_repo_df = gh_loader.load_user_object(gh_user_repo_file)
    gh_repo_tag_df = gh_loader.load_object_tag(gh_repo_tag_file)
    gh_user_tag_df = DataProcessor.aggregate_user_tags(gh_user_repo_df, gh_repo_tag_df)
    gh_user_ids, gh_user_tag_features = DataProcessor.compute_user_tag_features(gh_user_tag_df)

    logging.info('StackOverflow has {} users and each user has {} features'.format(len(so_user_ids),
                                                                                   so_user_tag_features.shape[1]))
    logging.info('GitHub has {} users and each user has {} features'.format(len(gh_user_ids),
                                                                            gh_user_tag_features.shape[1]))
|
10,450 | 18bbe7d9961aeac2db08d8115f575cabd756b559 | """
Follow up for "Unique Paths":
Now consider if some obstacles are added to the grids. How many unique paths would there be?
An obstacle and empty space is marked as 1 and 0 respectively in the grid.
For example,
There is one obstacle in the middle of a 3x3 grid as illustrated below.
[
[0,0,0],
[0,1,0],
[0,0,0]
]
The total number of unique paths is 2.
Note: m and n will be at most 100.
"""
class Solution(object):
    """
    Idea:
    - DP (bottom up) over an (m+1) x (n+1) table
    - path[m - 1][n] = 1 seeds the single way to "arrive" at the goal cell
    - path[r][c] = path[r + 1][c] + path[r][c + 1] unless (r, c) is an obstacle
    """

    def uniquePathsWithObstacles(self, obstacleGrid):
        """
        :type obstacleGrid: List[List[int]]
        :rtype: int

        Count monotone right/down paths from the top-left to the bottom-right
        cell, where grid cells equal to 1 are impassable obstacles.
        """
        m = len(obstacleGrid)
        n = len(obstacleGrid[0])
        # Compatibility fix: xrange is Python-2-only; range iterates
        # identically here and keeps the file runnable on Python 3.
        path = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        path[m - 1][n] = 1  # virtual cell feeding the goal square
        for row in reversed(range(m)):
            for col in reversed(range(n)):
                if obstacleGrid[row][col] == 0:
                    path[row][col] = path[row + 1][col] + path[row][col + 1]
                else:
                    path[row][col] = 0  # obstacles contribute no paths
        return path[0][0]
|
10,451 | dd70fe2c48b9f094ecea75f1426ac453fba7e3cf | # Generated by Django 3.1.2 on 2020-10-09 08:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Normalize several Product text fields to max_length=200."""

    dependencies = [
        ('product', '0002_product_product_id'),
    ]

    # One AlterField per (field name, field class) pair; the order matches
    # the originally generated migration.
    operations = [
        migrations.AlterField(
            model_name='product',
            name=field_name,
            field=field_class(max_length=200),
        )
        for field_name, field_class in (
            ('detail', models.CharField),
            ('longdesc', models.TextField),
            ('namecat', models.CharField),
            ('partnumber', models.CharField),
            ('productname', models.CharField),
        )
    ]
|
10,452 | 77e53110f7585128f08dfe2ba76176035530af57 | # -*-coding:utf8-*-
################################################################################
#
#
#
################################################################################
"""
模块用法说明: 登录的引导页
Authors: turinblueice
Date: 2016/7/26
"""
from base import base_frame_view
from util import log
from gui_widgets.basic_widgets import window
from gui_widgets.basic_widgets import image_button
from gui_widgets.basic_widgets import image
from gui_widgets.basic_widgets import static_text
from gui_widgets.basic_widgets import text_field
from gui_widgets.basic_widgets import button
from gui_widgets.basic_widgets import action_sheet
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from UIAWindows import windows
from selenium.common.exceptions import TimeoutException
import time
class LoginGuideCameraWindow(base_frame_view.BaseFrameView):
    """
    Summary:
        Login guide page (camera guide) shown after install.
    Attributes:
        parent: the parent framework view of this page
    """
    # Window identifier used by the framework to match this page.
    name = '.login.Window.GuideCameraWindow'

    def __init__(self, parent):
        super(LoginGuideCameraWindow, self).__init__(parent)

    @property
    def camera_guide_login(self):
        """
        Summary:
            Camera image on the login guide page.
        :return: ImageView wrapper for the guide camera element
        """
        id_ = 'com.jiuyan.infashion:id/iv_guide_camera'
        return image.ImageView(self.parent, id=id_)

    @property
    def skip_guide_login_button(self):
        """
        Summary:
            "Skip" control on the login guide page; appears on first install.
        :return: TextView wrapper for the skip control
        """
        id_ = 'com.jiuyan.infashion:id/tv_guide_skip'
        return static_text.TextView(self.parent, id=id_)

    @property
    def skip_dialogue_button(self):
        """
        Summary:
            "Skip" button of the pop-up confirmation dialog.
        :return: TextView wrapper for the dialog's cancel/skip button
        """
        id_ = 'com.jiuyan.infashion:id/dialog_guide_camera_cancel'
        return static_text.TextView(self.parent, id=id_)

    @property
    def take_a_photo_dialogue_button(self):
        """
        Summary:
            "Take a photo" button of the pop-up confirmation dialog.
        :return: TextView wrapper for the dialog's confirm button
        """
        id_ = 'com.jiuyan.infashion:id/dialog_guide_camera_confirm'
        return static_text.TextView(self.parent, id=id_)

    # *********************** Actions *********************************
    def tap_skip_button(self, skip=True):
        """
        Summary:
            Skip the guide page and handle the confirmation dialog.
        Args:
            skip: True  -> tap the dialog's skip button (expect main page);
                  False -> tap the dialog's "take a photo" button
                           (expect the photo gallery page).
        Returns:
            True when the expected follow-up window appears, else False.
        """
        log.logger.info("点击跳过引导页")
        self.skip_guide_login_button.tap()
        log.logger.info("点击完毕")
        try:
            # Wait up to 10s for the confirmation dialog content to appear.
            WebDriverWait(self.base_parent, 10).until(
                EC.presence_of_element_located(
                    (MobileBy.ID, 'com.jiuyan.infashion:id/dialog_guide_camera_content'))
            )
            log.logger.info("弹出了提示框")
            if skip:
                log.logger.info("点击提示框的跳过按钮")
                self.skip_dialogue_button.tap()
                log.logger.info("点击完毕")
                if self.wait_window(windows.WindowNames.IN_MAIN, 10):
                    log.logger.info("成功进入In主页")
                    return True
                log.logger.error("进入In主页失败")
                return False
            else:
                log.logger.info("点击拍一张按钮")
                self.take_a_photo_dialogue_button.tap()
                log.logger.info("点击完毕")
                if self.wait_window(windows.WindowNames.PHOTO_STORY_GALLERY, 10):
                    log.logger.info("成功进入图片选择页")
                    return True
                log.logger.error("进入图片选择页失败")
                return False
        except TimeoutException:
            # Dialog never appeared within the timeout.
            log.logger.error("没有出现提示框")
            return False
|
10,453 | fd98dc891bdee68eb0d26638d91493da43a2d6f5 | #!/usr/bin/python
# Version 0.05
#
# Copyright (C) 2007 Adam Wolk "Mulander" <netprobe@gmail.com>
# Slightly updated by Mikael Berthe
#
# To use this script, set the "events_command" option to the path of
# the script (see the mcabberrc.example file for an example)
#
# This script is provided under the terms of the GNU General Public License,
# see the file COPYING in the root mcabber source directory.
#
import sys

# External command run when a message arrives; set to "" to disable sound.
CMD_MSG_IN="/usr/bin/play /home/stas/.mcabber/pin_dropping.wav"
# When True, trim the sender's JID to the part before '@'.
SHORT_NICK=False

# mcabber invokes this hook as: script EVENT ARG1 ARG2 [FILENAME]
if len(sys.argv) == 5:
    event,arg1,arg2,filename = sys.argv[1:5]
else:
    event,arg1,arg2 = sys.argv[1:4]
    filename = None

if event == 'MSG' and arg1 == 'IN':
    import pynotify,os,locale
    encoding = (locale.getdefaultlocale())[1]
    msg = 'sent you a message.'
    if SHORT_NICK and '@' in arg2:
        arg2 = arg2[0:arg2.index('@')]
    if filename is not None:
        # Bug fix: the handle from the Python-2-only file() builtin was
        # never closed; open() in a with-block works on Python 2 and 3
        # and releases the descriptor deterministically.
        with open(filename) as f:
            msg = f.read()
    pynotify.init('mcnotify')
    msgbox = pynotify.Notification(unicode(arg2, encoding),unicode(msg, encoding))
    msgbox.set_timeout(3000)
    msgbox.set_urgency(pynotify.URGENCY_LOW)
    msgbox.show()
    if CMD_MSG_IN:
        os.system(CMD_MSG_IN + '> /dev/null 2>&1')
    if filename is not None and os.path.exists(filename):
        os.remove(filename)
    pynotify.uninit()

# vim:set noet sts=8 sw=8:
|
10,454 | f54bc661b9400206bafe571051f4fe4721e27cf2 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import ngraph.op_graph.hetr_grpc.hetr_pb2 as ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2
class HetrStub(object):
    """Client-side stub for the Hetr gRPC service (generated code).

    Each attribute is a unary-unary callable bound to one RPC of the
    service; invoke it with the request message to receive the reply.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One unary-unary callable per service method; serializer /
        # deserializer pairs come from the generated hetr_pb2 module.
        self.BuildTransformer = channel.unary_unary(
            '/Hetr/BuildTransformer',
            request_serializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.BuildRequest.SerializeToString,
            response_deserializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.BuildReply.FromString,
        )
        self.Computation = channel.unary_unary(
            '/Hetr/Computation',
            request_serializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.ComputationRequest.SerializeToString,
            response_deserializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.ComputationReply.FromString,
        )
        self.FeedInput = channel.unary_unary(
            '/Hetr/FeedInput',
            request_serializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.FeedInputRequest.SerializeToString,
            response_deserializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.FeedInputReply.FromString,
        )
        self.GetResults = channel.unary_unary(
            '/Hetr/GetResults',
            request_serializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.GetResultsRequest.SerializeToString,
            response_deserializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.GetResultsReply.FromString,
        )
class HetrServicer(object):
    """Server-side skeleton for the Hetr gRPC service (generated code).

    Subclass and override each handler; these defaults report
    UNIMPLEMENTED to the client and raise locally.
    """

    def BuildTransformer(self, request, context):
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Computation(self, request, context):
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def FeedInput(self, request, context):
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetResults(self, request, context):
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_HetrServicer_to_server(servicer, server):
    """Register *servicer*'s handlers with *server* under service 'Hetr'.

    Args:
        servicer: a HetrServicer (or subclass) instance.
        server: a grpc.Server to attach the generic handler to.
    """
    # Map each RPC name to a unary-unary handler with the matching
    # protobuf (de)serializers from the generated hetr_pb2 module.
    rpc_method_handlers = {
        'BuildTransformer': grpc.unary_unary_rpc_method_handler(
            servicer.BuildTransformer,
            request_deserializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.BuildRequest.FromString,
            response_serializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.BuildReply.SerializeToString,
        ),
        'Computation': grpc.unary_unary_rpc_method_handler(
            servicer.Computation,
            request_deserializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.ComputationRequest.FromString,
            response_serializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.ComputationReply.SerializeToString,
        ),
        'FeedInput': grpc.unary_unary_rpc_method_handler(
            servicer.FeedInput,
            request_deserializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.FeedInputRequest.FromString,
            response_serializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.FeedInputReply.SerializeToString,
        ),
        'GetResults': grpc.unary_unary_rpc_method_handler(
            servicer.GetResults,
            request_deserializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.GetResultsRequest.FromString,
            response_serializer=ngraph_dot_op__graph_dot_hetr__grpc_dot_hetr__pb2.GetResultsReply.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'Hetr', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
# Demonstrate two ways to reverse a list: reversed() and slice notation.
listdata = list(range(5))
ret1 = reversed(listdata)
print('원본 리스트 ', end='')
print(listdata)
print('역순 리스트 ', end='')
print(list(ret1))
# Slicing with a step of -1 produces a reversed copy.
ret2 = listdata[::-1]
print('슬라이싱 이용 ', end='')
print(ret2)
|
10,456 | 35f07233857de4103826fe132654c3814cf65d02 | from Qt import * |
10,457 | 6b3934c1a1e7db09a524005485ced8b1a218dc0d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
OpenNebula Driver for Linstor
Copyright 2019 LINBIT USA LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import os
from one import util, consts
from one.datastore import Datastore
from one.extender import deploy, get_device_path
from linstor import SizeCalc
# Positional arguments supplied by the calling driver:
SIZE = sys.argv[1]    # volume size; used below as "<SIZE>Mib", so MiB
FORMAT = sys.argv[2]  # "raw" or an mkfs filesystem type
DST = sys.argv[3]     # destination spec; host/path split via util.arg_host/arg_path, trailing ".<id>" is the disk id
VM_ID = sys.argv[4]   # OpenNebula VM id
DS_ID = sys.argv[5]   # OpenNebula datastore id
def main():
    """Create (and optionally format) a volatile disk for a VM.

    tm/mkimage entry point: deploys a LINSTOR resource of SIZE MiB in the
    datastore's resource group, activates it on the destination host,
    runs mkfs there via SSH when a non-raw FORMAT is requested, and links
    the device into the VM directory.
    """
    util.log_info("Entering tm/mkimage on {h} size {s}.".format(h=DST, s=SIZE))
    # The trailing ".<id>" component of DST is the disk id.
    disk_id = DST.split(".")[-1].strip()
    dst_host = util.arg_host(DST).strip()
    dst_path = util.arg_path(DST).strip()
    dst_dir = os.path.dirname(dst_path).strip()
    datastore = Datastore(util.show_ds(DS_ID))
    # Deterministic resource name per VM/disk pair.
    res_name = consts.VOLATILE_PREFIX + "-vm{vm_id}-disk{disk_id}".format(vm_id=VM_ID, disk_id=disk_id)
    resource = deploy(
        linstor_controllers=datastore.linstor_controllers,
        resource_name=res_name,
        vlm_size_str='{s}Mib'.format(s=SIZE),
        resource_group=datastore.linstor_resource_group
    )
    # Make the new resource available on the destination host.
    resource.activate(dst_host)
    device_path = get_device_path(resource)
    if FORMAT and FORMAT != "raw":
        # Format the device on the destination host over SSH; the remote
        # shell's mkfs_command helper builds the actual mkfs invocation.
        mkfs_command = 'set -e ; export PATH=/usr/sbin:/sbin:$PATH ; $(mkfs_command "{0}" "{1}" "{2}")'.format(
            device_path,
            FORMAT,
            SizeCalc.convert(resource.volumes[0].size, SizeCalc.UNIT_B, SizeCalc.UNIT_MiB))
        rc = util.ssh_exec_and_log(
            host=dst_host,
            cmd=mkfs_command,
            error_msg="Error mkimage {}, on {}".format(device_path, dst_host))
        if rc != 0:
            # Propagate the remote mkfs failure as the script's exit code.
            sys.exit(rc)
    # link drbd device into the VM directory so the hypervisor can use it
    util.link_file(dst_host, dst_dir, dst_path, device_path, res_name)
    util.log_info("Leaving tm/mkimage successfully.")
if __name__ == "__main__":
    # util.run_main presumably wraps main() with the driver's standard
    # error handling -- see the one.util module.
    util.run_main(main)
|
10,458 | 7413ffdb53524a72e59fae9138e1b143a9a2e046 | import os
import curses
import random
class CaroBoard(object):
    """Board state and win detection for a caro (five-in-a-row) game."""

    # The four line axes through a cell: horizontal, vertical and the two
    # diagonals.  CheckBoard scans each axis in both directions.
    _AXES = ((1, 0), (0, 1), (1, 1), (1, -1))

    def __init__(self, width, height):
        self.width = width
        self.height = height
        # board[first][second]; a cell holds ' ' when empty, else a marker.
        self.board = [[' ' for x in range(self.width)] for y in range(self.height)]
        self._turnCount = 0

    def ConfigBoard(self):
        # Placeholder for future board configuration.
        pass

    def UpdateBoard(self, Pos_X, Pos_Y, Marker):
        """Place Marker at (Pos_X, Pos_Y); return False if occupied."""
        if self.board[Pos_X][Pos_Y] != ' ':
            return False
        self.board[Pos_X][Pos_Y] = Marker
        self._turnCount += 1
        return True

    def CheckBoard(self, Pos_X, Pos_Y, PlayerMarker):
        """Return True when the stone at (Pos_X, Pos_Y) completes a run of
        five or more PlayerMarker stones along any line axis.

        Bug fix: the original scanned each of the eight directions as an
        independent one-sided ray, so a winning line that STRADDLES the
        new stone (e.g. two stones on each side of it) was never detected.
        It also applied the width bound to the first index and the height
        bound to the second, opposite to how the board is constructed;
        the limits now come from the board itself.
        """
        first_dim = len(self.board)
        second_dim = len(self.board[0]) if self.board else 0
        for dx, dy in self._AXES:
            combo = 1  # counts the stone at (Pos_X, Pos_Y) itself
            # Walk outward from the placed stone in both directions and
            # sum the contiguous matching stones.
            for sign in (1, -1):
                x, y = Pos_X + dx * sign, Pos_Y + dy * sign
                while (0 <= x < first_dim and 0 <= y < second_dim
                       and self.board[x][y] == PlayerMarker):
                    combo += 1
                    x += dx * sign
                    y += dy * sign
            if combo >= 5:
                return True
        return False

    def CheckMarkPos(self, Pos_X, Pos_Y):
        """Return True when the cell is still empty."""
        return self.board[Pos_X][Pos_Y] == ' '

    def CheckFull(self):
        """Return True when every cell has been played."""
        return self._turnCount == (self.width * self.height)

    def GetParameters(self):
        """Return the board dimensions as a dict."""
        return {'Width': self.width, 'Height': self.height}
|
10,459 | 6ef8f6ab961db2cd66cdf90f7cd254bce23e9434 | """
pyeventhub - a CLI that sends messages to an Azure Event Hub.
"""
import asyncio
import random
import time
import json
from itertools import count
from datetime import datetime, timedelta
from argparse import ArgumentParser
from azure.eventhub.aio import EventHubProducerClient
from azure.eventhub import EventData
def _create_parser():
"""
Creates the argument parser.
Returns:
The argument parser.
"""
parser = ArgumentParser(description="A CLI that sends messages to an Azure event hub.")
parser.add_argument("--connection-string", type=str, required=True,
help="The Azure event hub connection string")
parser.add_argument("--name", type=str, required=True,
help="The Azure event hub name")
parser.add_argument("--interval", type=int, required=False,
help="The number of seconds to wait between sends. Defaults to 10 seconds.")
parser.add_argument("--what-if", type=bool, required=False,
help="Run the program without sending messages to the Event Hub. "
"The app will log what would have been sent to the Event Hub.")
return parser
def _create_event_data(index):
"""
Creates event data that is sent to the event hub.
Args:
The data's index which is used as the "name" property of the event hub data.
Returns:
A dictionary containing the event hub data.
"""
time_stamp = str(datetime.utcnow() + timedelta(seconds=index))
name = str(index)
metric = random.randint(0, 1000)
return {"timeStamp": time_stamp, "name": name, "metric": metric, "source": "pyeventhub"}
async def _send_message(producer, event_data):
    """
    Sends a single event to the event hub as a one-element batch.

    Args:
        producer: The EventHubProducerClient.
        event_data: A dictionary containing the event data to send.
    """
    payload = _serialize_event_data_as_json(event_data)
    batch = await producer.create_batch()
    batch.add(EventData(payload))
    await producer.send_batch(batch)
def _serialize_event_data_as_json(event_data):
"""
Serializes event data to a JSON string.
Args:
event_data: The event data dictionary to serialize.
Returns:
The event data as a JSON string.
"""
return json.dumps(event_data)
def _print_send_status(event_data):
"""
Prints a status after a message has been sent.
Args:
event_data: A dictionary containing the event data that was sent.
"""
message_count = (int(event_data["name"]) - 1000) + 1
if message_count % 5 == 0:
print(f"Sent {message_count} messages.", end="\r")
async def _run(params):
    """
    Runs the application's send loop until interrupted.

    Args:
        params: A dictionary containing the following:
            connection_string: The event hub namespace connection string.
            name: The event hub name.
            interval: The number of seconds to wait between message sends.
            what_if: When true does not send the message to the event hub.
                Instead it prints what would have been sent.
    """
    producer = EventHubProducerClient.from_connection_string(
        params["connection_string"],
        eventhub_name=params["name"])
    async with producer:
        # Message names start at 1000; _print_send_status relies on this.
        for index in count(1000):
            event_data = _create_event_data(index)
            if params["what_if"]:
                print(event_data)
            else:
                await _send_message(
                    producer,
                    event_data)
                _print_send_status(event_data)
            # Bug fix: time.sleep() blocked the event loop for the whole
            # interval, starving the producer's background I/O tasks;
            # asyncio.sleep() yields control back to the loop instead.
            await asyncio.sleep(params["interval"])
def _main():
    """
    The entry point: parse CLI arguments, apply defaults, run the loop.
    """
    args = _create_parser().parse_args()
    params = {
        "connection_string": args.connection_string,
        "name": args.name,
        # Defaults: 10-second interval, what-if disabled.
        "interval": 10 if args.interval is None else args.interval,
        "what_if": False if args.what_if is None else args.what_if,
    }
    loop = asyncio.get_event_loop()
    loop.run_until_complete(_run(params))
if __name__ == "__main__":
    # Script entry point.
    _main()
|
10,460 | 65b4f90ee9b19c1a6406ab99f370a01aa9a8b79c | from itertools import product
def primes(n):
    """Return a list of all primes strictly less than n (odd sieve).

    Bug fix: the original returned [2] even for n <= 2, violating the
    "primes < n" contract.  range() and // replace the Python-2-only
    xrange() and true division (identical values, and now Py3-safe).
    """
    if n <= 2:
        return []
    sieve = [True] * n
    for i in range(3, int(n ** 0.5) + 1, 2):
        if sieve[i]:
            # Knock out odd multiples of i starting at i*i.
            sieve[i * i::2 * i] = [False] * ((n - i * i - 1) // (2 * i) + 1)
    return [2] + [i for i in range(3, n, 2) if sieve[i]]
# Precomputed primes below 1,000,000, used by check() for trial division.
# NOTE: sieving ~10^6 numbers runs once, at import time.
P = primes(1000000)
def num(n, base):
    """Evaluate the digit sequence *n* as an integer in *base*
    (Horner's rule)."""
    value = 0
    for digit in n:
        value = value * base + digit
    return value
def check(n, base):
    """Return a small prime divisor of num(n, base), or 1 if none found.

    Trial-divides by the precomputed primes in P, stopping once p*p
    reaches the value (so only "small" divisors are reported).
    """
    value = num(n, base)
    for p in P:
        if p * p >= value:
            break
        if value % p == 0:
            return p
    return 1
def solve(t):
    """Solve and print one test case (Python 2 syntax).

    Reads N and J from stdin, then prints J binary strings of length N
    (first and last digit fixed to 1) each followed by one small divisor
    per base 2..10 -- apparently the GCJ "Coin Jam" output format.
    """
    print "Case #%d:"%(t+1)
    N, J = map(int, raw_input().strip().split())
    fine = []
    # Enumerate the N-2 middle bits; the outer bits are fixed to 1.
    for p in product(range(2), repeat= N-2):
        if len(fine)>= J: break
        pp = (1,)+p + (1,)
        divs = []
        for b in xrange(2,11):
            d = check(pp, b)
            if d==1:
                # check() found no small divisor in this base; reject.
                break
            divs.append((d))
        if len(divs)==9:
            # A divisor was found for every one of the 9 bases (2..10).
            fine.append((pp, divs))
    for r in fine[:J]:
        print ''.join(map(str,r[0])), ' '.join(map(str,r[1]))
    pass
def main():
    """Read the number of test cases T and solve each one.

    Python 2: input() eval()s the line (parsing T as an int) and xrange
    is the lazy range.
    """
    T = input()
    for i in xrange(T):
        solve(i)
if __name__=="__main__":
    # Script entry point.
    main()
10,461 | 3bcf4f4a4cfcb83fc088f60c119bd4f6c0320d48 |
# -*- coding: utf-8 -*-
import rivescript
import interaction
import re
import diccs
import pickle
import os
def give_card(text, card_options):
    """Build a Messenger button-template message with one postback button
    per entry in card_options, preserving their order."""
    buttons = [
        {
            "type": "postback",
            "title": title,
            "payload": "DEVELOPER_DEFINED_PAYLOAD",
        }
        for title in card_options
    ]
    return {
        "attachment": {
            "type": "template",
            "payload": {
                "template_type": "button",
                "text": text,
                "buttons": buttons,
            },
        },
    }
def give_text(text):
    """Wrap plain text in the minimal messenger message format."""
    return {'text': text}
def give_final_offer(semantic):  # TONYX: fetch the real quote data from the server here
    """Build the closing offer card from the collected semantic slots.

    Text-only for now; the buttons let the user confirm or start over.
    """
    offer_text = "Se ofrece seguro con características: " + str(semantic)
    return give_card(offer_text, ["confirmo", "nueva consulta"])
def open_list(path):
    """Load and return the pickled object stored at *path*.

    Bug fix: the original passed open(path) to pickle.load without ever
    closing the handle; the with-block releases it deterministically.
    """
    with open(path, "rb") as handle:
        return pickle.load(handle)
def save_list(path, semantic):
    """Pickle *semantic* to *path*; returns None (pickle.dump's result).

    Bug fix: the original never closed the handle it opened for writing,
    so the data could stay unflushed; the with-block closes it.
    """
    with open(path, "wb") as handle:
        return pickle.dump(semantic, handle)
def get_response(message):
    """Run one turn of the car-insurance chatbot.

    Extracts brand/model/year/gas slots from the bot substitution of
    *message*, persists them in string_cache/semantic.pkl, and returns the
    next messenger message: a prompt card for the first missing slot, or
    the final offer once all slots are filled.
    """
    message = message.lower().replace("?", "")
    message = message.lower()  # NOTE(review): redundant second lower()
    bot_ans = interaction.get_bot_subs(message, "chepix")
    # semantic is [brand, model, year, gasConverted]; '' means "unknown"
    path = os.path.realpath("./string_cache/semantic.pkl")
    semantic = open_list(path)
    print(semantic)
    # The bot marks detected slots as "md <model>" and "mr <brand>".
    model = re.search(r'(?<=md )\w+', bot_ans)
    brand = re.search(r'(?<=mr )\w+', bot_ans)
    if brand is not None:
        semantic[0] = brand.group(0)
        if model is None:
            semantic[1] = ''  # a new brand invalidates any stored model
    if model is not None:
        semantic[1] = model.group(0)
        semantic[0] = diccs.modelos[semantic[1]][0]
        print(diccs.modelos[semantic[1]][0])
    # Hardcoded year extraction: 4-digit years 1971..2017, or 2-digit
    # years mapped 71-99 -> 19xx and 00-17 -> 20xx.
    for word in bot_ans.split(" "):
        if word.isdigit():
            if len(word) == 4:
                if 1970 < int(word) < 2018:
                    semantic[2] = word
                else:
                    return give_text("El año debe ser mayor a 1970 y menor a 2017")
            if len(word) == 2:
                # Bug fix: these were two independent ifs with an else on
                # the second, so a valid 71-99 year fell into the error
                # branch right after being accepted; elif chains them.
                # (Also fixed the "mayot" typo in the error message.)
                if 70 < int(word) <= 99:
                    semantic[2] = '19' + word
                elif 0 <= int(word) < 18:
                    semantic[2] = '20' + word
                else:
                    return give_text("El año debe ser mayor a 1970 y menor a 2017")
    if ("convertido" in message) and ("gas" in message):
        semantic[3] = "no" not in message
    save_list(path, semantic)
    # Prompt for the first missing slot, in order: brand, model, year, gas.
    if semantic[0] == '':
        return give_card("Para comprar un seguro, puede empezar seleccionando entre una de éstas marcas", ["ford", "nissan", "chevrolet"])
    if semantic[1] == '':
        return give_card("Ahora podría decirme sobre algun modelo que le interece, entre los que tengo están", diccs.marcas[semantic[0]][0:3])
    if semantic[2] == '':
        # Bug fix: the original options list was ['2016''2014','2013'];
        # the missing comma concatenated the first two into '20162014'.
        return give_card("¿Podría proporcionarme el año de su vehículo? o ¿está dentro de los siguiéntes casos?", ['2016', '2014', '2013'])
    if semantic[3] == '':
        return give_card("Perfecto, solo falta saber si su coche fue convertido a gas", ["mi coche fue convertido a gas", "no fue convertido a gas"])
    return give_final_offer(semantic)
if __name__ == '__main__':
    # Manual smoke test (Python 2): read one message from stdin and print
    # the bot's reply dict.
    msg=raw_input("Dame un texto: ")
    print get_response(msg)
|
10,462 | ea552e4771c4fec4b9992a96bf0996b2f76b46cc | """
Entradas
compra-->int-->c
salidas
Descuento-->flot-->d
"""
c = float(input("digite compra"))
# Black box: apply a flat 15% discount to the purchase amount.
d = c * 0.15
total = c - d
# Output
print("el total a pagar es de :" + str(total))
|
10,463 | 6237ba43195b7b69706e5d46b0627423177c12e3 | from django.contrib import admin
from .models import *
# Register your models here.
# Register every application model with the Django admin using default
# ModelAdmin options; order matches the original registrations.
for _model in (
    Localite,
    User,
    Groupe,
    Membre,
    Alerte,
    SuiviAlerteGroupe,
    SuiviAlertePerso,
    SuiviAlerteLocalite,
    Article,
    Personne,
    Coordonnees,
    PieceJointe,
    Signal,
    Agence,
    AgenceLink,
    SuiviAlerteAgence,
    Bloccage,
):
    admin.site.register(_model)
10,464 | 2e039c917d6c8267ad71fd88085122aa7759cd79 | import os
import requests
import re
def main():
    """Download a file named by the Content-Disposition header and save it
    to the current directory, streaming in 1 MiB chunks.

    Bug fixes: the non-200 branch printed an error but FELL THROUGH and
    kept processing the response; and re.findall(...)[0] raised IndexError
    when the header carried no filename (the len(filename)==0 check after
    it was unreachable).
    """
    print('start download test')
    with requests.get('http://tedisfree.github.io/abcdef', stream=True) as r:
        if r.status_code != 200:
            print('failed to download file (code=%d)' % r.status_code)
            return
        if 'Content-Disposition' not in r.headers:
            print('cannot find content headers from response')
            return
        matches = re.findall('filename=(.+)', r.headers['Content-Disposition'])
        if not matches:
            print('cannot find file name from response')
            return
        filename = matches[0].replace('"', '')
        print('dest name = ' + filename)
        with open(filename, 'wb') as f:
            # Stream to disk without holding the whole body in memory.
            for chunk in r.iter_content(chunk_size=1024 * 1024):
                if chunk:
                    f.write(chunk)
    print('download complete. file name=' + filename)
if __name__=='__main__':
    # Script entry point.
    main()
|
10,465 | f5f21e75a61dab08f6efb88b0e6d47b39617378d | import logging
from urllib.request import urlopen, Request, HTTPError, URLError
import json
logger = logging.getLogger()
class CustomResourceResponse:
    """Builds and delivers the reply for a CloudFormation custom resource.

    CloudFormation supplies a pre-signed URL in event['ResponseURL']; the
    resource must PUT a JSON document there reporting SUCCESS or FAILED.
    """

    def __init__(self, event):
        """
        Args:
            event: the Lambda event for the custom-resource request; read
                for StackId, RequestId, LogicalResourceId and ResponseURL.
        """
        self.event = event
        # Skeleton response; CloudFormation correlates it to the request
        # through these ids.  Status is overwritten to FAILED by error().
        self.response = {
            "StackId": event["StackId"],
            "RequestId": event["RequestId"],
            "LogicalResourceId": event["LogicalResourceId"],
            "Status": 'SUCCESS',
        }

    def _send_response(self, resp_object):
        """PUT *resp_object* as JSON to the event's ResponseURL; network
        failures are logged, not raised."""
        req_data = json.dumps(resp_object).encode('utf-8')
        req = Request(
            self.event['ResponseURL'],
            data=req_data,
            headers={'Content-Length': len(req_data),'Content-Type': ''}
        )
        # urllib would POST when data is set; the callback must be a PUT.
        req.get_method = lambda: 'PUT'
        print(f"Responding with\n{json.dumps(resp_object)}")
        try:
            urlopen(req)
            logger.debug("Response to CFN API succeeded, nothing to do here")
        except HTTPError as e:
            logger.error("Callback to CFN API failed with status %d" % e.code)
            logger.error("Response: %s" % e.reason)
        except URLError as e:
            logger.error("Failed to reach the server - %s" % e.reason)

    def success(self, physical_id=None):
        """
        Sends success signal back to CloudFormation with given physical_id for CREATE and UPDATE requests
        """
        response = self.response
        # PhysicalResourceId preference: explicit argument, then the id
        # CloudFormation sent with the event, then the logical id.
        if physical_id is not None:
            response["PhysicalResourceId"] = physical_id
        elif self.event.get("PhysicalResourceId", None):
            response["PhysicalResourceId"] = self.event["PhysicalResourceId"]
        else:
            response["PhysicalResourceId"] = self.event["LogicalResourceId"]
        logger.debug(f"Received {self.event['RequestType']} request with event: {self.event}")
        logger.info(f"Responding to {self.event['RequestType']} request with: {response}")
        self._send_response(response)

    def error(self, message):
        """
        Sends error signal back to CloudFormation via S3 signed url
        """
        # NOTE(review): unlike success(), this ignores the event's
        # PhysicalResourceId and falls back to the logical id -- confirm
        # that is intended for UPDATE/DELETE failures.
        if 'PhysicalResourceId' not in self.response:
            self.response['PhysicalResourceId'] = self.response['LogicalResourceId']
        self.response['Status'] = 'FAILED'
        self.response['Reason'] = message
        self._send_response(self.response)
|
10,466 | 5a3e8d74198a054ca3a259ec5826bdb7b40f8672 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QTableWidgetItem
import Estate
import login
class Ui_Dialog(object):
def setupUi(self, Dialog):
    """Build the pyuic5-generated Qt layout for *Dialog*, then run the
    hand-written application setup appended below it: login/role check,
    table header initialization, signal wiring and Estate backend auth.

    NOTE(review): this module header says it is generated output whose
    changes "will be lost" — the hand-written section below should live in
    a subclass or separate module so it survives UI regeneration.
    """
    # ---- generated layout code (from main.ui via pyuic5) — do not hand-edit ----
    Dialog.setObjectName("Dialog")
    Dialog.resize(1003, 945)
    self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
    self.verticalLayout.setObjectName("verticalLayout")
    self.tabWidget = QtWidgets.QTabWidget(Dialog)
    self.tabWidget.setObjectName("tabWidget")
    # Tab 1: "my property" table + refresh button
    self.tab = QtWidgets.QWidget()
    self.tab.setObjectName("tab")
    self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tab)
    self.verticalLayout_3.setObjectName("verticalLayout_3")
    self.verticalLayout_2 = QtWidgets.QVBoxLayout()
    self.verticalLayout_2.setObjectName("verticalLayout_2")
    self.label = QtWidgets.QLabel(self.tab)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label.setFont(font)
    self.label.setObjectName("label")
    self.verticalLayout_2.addWidget(self.label)
    self.My_sobs_table = QtWidgets.QTableWidget(self.tab)
    self.My_sobs_table.setObjectName("My_sobs_table")
    self.My_sobs_table.setColumnCount(0)
    self.My_sobs_table.setRowCount(0)
    self.My_sobs_table.horizontalHeader().setStretchLastSection(False)
    self.verticalLayout_2.addWidget(self.My_sobs_table)
    self.verticalLayout_3.addLayout(self.verticalLayout_2)
    self.update_push_button_1 = QtWidgets.QPushButton(self.tab)
    self.update_push_button_1.setObjectName("update_push_button_1")
    self.verticalLayout_3.addWidget(self.update_push_button_1)
    self.tabWidget.addTab(self.tab, "")
    # Tab 2: presents (gifting) tables and controls
    self.tab_2 = QtWidgets.QWidget()
    self.tab_2.setObjectName("tab_2")
    self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.tab_2)
    self.verticalLayout_6.setObjectName("verticalLayout_6")
    self.verticalLayout_4 = QtWidgets.QVBoxLayout()
    self.verticalLayout_4.setObjectName("verticalLayout_4")
    self.label_2 = QtWidgets.QLabel(self.tab_2)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_2.setFont(font)
    self.label_2.setObjectName("label_2")
    self.verticalLayout_4.addWidget(self.label_2)
    self.Ptesents_table_1 = QtWidgets.QTableWidget(self.tab_2)
    self.Ptesents_table_1.setObjectName("Ptesents_table_1")
    self.Ptesents_table_1.setColumnCount(0)
    self.Ptesents_table_1.setRowCount(0)
    self.verticalLayout_4.addWidget(self.Ptesents_table_1)
    self.gridLayout = QtWidgets.QGridLayout()
    self.gridLayout.setObjectName("gridLayout")
    spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout.addItem(spacerItem, 1, 0, 1, 1)
    self.Presen_push_button = QtWidgets.QPushButton(self.tab_2)
    self.Presen_push_button.setObjectName("Presen_push_button")
    self.gridLayout.addWidget(self.Presen_push_button, 1, 3, 1, 1)
    self.lineEdit_7 = QtWidgets.QLineEdit(self.tab_2)
    self.lineEdit_7.setObjectName("lineEdit_7")
    self.gridLayout.addWidget(self.lineEdit_7, 1, 1, 1, 1)
    self.lineEdit_6 = QtWidgets.QLineEdit(self.tab_2)
    self.lineEdit_6.setObjectName("lineEdit_6")
    self.gridLayout.addWidget(self.lineEdit_6, 1, 2, 1, 1)
    self.label_21 = QtWidgets.QLabel(self.tab_2)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_21.setFont(font)
    self.label_21.setObjectName("label_21")
    self.gridLayout.addWidget(self.label_21, 0, 1, 1, 1)
    self.label_22 = QtWidgets.QLabel(self.tab_2)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_22.setFont(font)
    self.label_22.setObjectName("label_22")
    self.gridLayout.addWidget(self.label_22, 0, 2, 1, 1)
    self.verticalLayout_4.addLayout(self.gridLayout)
    self.verticalLayout_6.addLayout(self.verticalLayout_4)
    self.verticalLayout_5 = QtWidgets.QVBoxLayout()
    self.verticalLayout_5.setObjectName("verticalLayout_5")
    self.label_3 = QtWidgets.QLabel(self.tab_2)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_3.setFont(font)
    self.label_3.setObjectName("label_3")
    self.verticalLayout_5.addWidget(self.label_3)
    self.tableWidget = QtWidgets.QTableWidget(self.tab_2)
    self.tableWidget.setObjectName("tableWidget")
    self.tableWidget.setColumnCount(0)
    self.tableWidget.setRowCount(0)
    self.verticalLayout_5.addWidget(self.tableWidget)
    self.gridLayout_2 = QtWidgets.QGridLayout()
    self.gridLayout_2.setObjectName("gridLayout_2")
    spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_2.addItem(spacerItem1, 0, 0, 1, 1)
    self.chansel_present_push_button = QtWidgets.QPushButton(self.tab_2)
    self.chansel_present_push_button.setObjectName("chansel_present_push_button")
    self.gridLayout_2.addWidget(self.chansel_present_push_button, 0, 1, 1, 1)
    self.verticalLayout_5.addLayout(self.gridLayout_2)
    self.label_23 = QtWidgets.QLabel(self.tab_2)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_23.setFont(font)
    self.label_23.setObjectName("label_23")
    self.verticalLayout_5.addWidget(self.label_23)
    self.tableWidget_14 = QtWidgets.QTableWidget(self.tab_2)
    self.tableWidget_14.setObjectName("tableWidget_14")
    self.tableWidget_14.setColumnCount(0)
    self.tableWidget_14.setRowCount(0)
    self.verticalLayout_5.addWidget(self.tableWidget_14)
    self.verticalLayout_6.addLayout(self.verticalLayout_5)
    self.gridLayout_3 = QtWidgets.QGridLayout()
    self.gridLayout_3.setObjectName("gridLayout_3")
    self.pushButton = QtWidgets.QPushButton(self.tab_2)
    self.pushButton.setObjectName("pushButton")
    self.gridLayout_3.addWidget(self.pushButton, 0, 2, 1, 1)
    spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_3.addItem(spacerItem2, 0, 0, 1, 1)
    self.pushButton_20 = QtWidgets.QPushButton(self.tab_2)
    self.pushButton_20.setObjectName("pushButton_20")
    self.gridLayout_3.addWidget(self.pushButton_20, 0, 1, 1, 1)
    self.verticalLayout_6.addLayout(self.gridLayout_3)
    self.tabWidget.addTab(self.tab_2, "")
    # Tab 3: buying (offers) tables and controls
    self.tab_3 = QtWidgets.QWidget()
    self.tab_3.setObjectName("tab_3")
    self.verticalLayout_12 = QtWidgets.QVBoxLayout(self.tab_3)
    self.verticalLayout_12.setObjectName("verticalLayout_12")
    self.verticalLayout_7 = QtWidgets.QVBoxLayout()
    self.verticalLayout_7.setObjectName("verticalLayout_7")
    self.label_4 = QtWidgets.QLabel(self.tab_3)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_4.setFont(font)
    self.label_4.setObjectName("label_4")
    self.verticalLayout_7.addWidget(self.label_4)
    self.tableWidget_2 = QtWidgets.QTableWidget(self.tab_3)
    self.tableWidget_2.setObjectName("tableWidget_2")
    self.tableWidget_2.setColumnCount(0)
    self.tableWidget_2.setRowCount(0)
    self.verticalLayout_7.addWidget(self.tableWidget_2)
    self.gridLayout_4 = QtWidgets.QGridLayout()
    self.gridLayout_4.setObjectName("gridLayout_4")
    self.verticalLayout_9 = QtWidgets.QVBoxLayout()
    self.verticalLayout_9.setObjectName("verticalLayout_9")
    self.verticalLayout_8 = QtWidgets.QVBoxLayout()
    self.verticalLayout_8.setObjectName("verticalLayout_8")
    self.label_5 = QtWidgets.QLabel(self.tab_3)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_5.setFont(font)
    self.label_5.setObjectName("label_5")
    self.verticalLayout_8.addWidget(self.label_5)
    self.lineEdit = QtWidgets.QLineEdit(self.tab_3)
    self.lineEdit.setObjectName("lineEdit")
    self.verticalLayout_8.addWidget(self.lineEdit)
    self.verticalLayout_9.addLayout(self.verticalLayout_8)
    self.pushButton_2 = QtWidgets.QPushButton(self.tab_3)
    self.pushButton_2.setObjectName("pushButton_2")
    self.verticalLayout_9.addWidget(self.pushButton_2)
    self.gridLayout_4.addLayout(self.verticalLayout_9, 0, 4, 1, 1)
    spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_4.addItem(spacerItem3, 0, 0, 1, 1)
    self.verticalLayout_30 = QtWidgets.QVBoxLayout()
    self.verticalLayout_30.setObjectName("verticalLayout_30")
    self.gridLayout_4.addLayout(self.verticalLayout_30, 0, 2, 1, 1)
    spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_4.addItem(spacerItem4, 0, 1, 1, 1)
    self.verticalLayout_7.addLayout(self.gridLayout_4)
    self.verticalLayout_12.addLayout(self.verticalLayout_7)
    self.verticalLayout_10 = QtWidgets.QVBoxLayout()
    self.verticalLayout_10.setObjectName("verticalLayout_10")
    self.label_6 = QtWidgets.QLabel(self.tab_3)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_6.setFont(font)
    self.label_6.setObjectName("label_6")
    self.verticalLayout_10.addWidget(self.label_6)
    self.tableWidget_3 = QtWidgets.QTableWidget(self.tab_3)
    self.tableWidget_3.setObjectName("tableWidget_3")
    self.tableWidget_3.setColumnCount(0)
    self.tableWidget_3.setRowCount(0)
    self.verticalLayout_10.addWidget(self.tableWidget_3)
    self.gridLayout_5 = QtWidgets.QGridLayout()
    self.gridLayout_5.setObjectName("gridLayout_5")
    self.pushButton_3 = QtWidgets.QPushButton(self.tab_3)
    self.pushButton_3.setObjectName("pushButton_3")
    self.gridLayout_5.addWidget(self.pushButton_3, 0, 1, 1, 1)
    spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_5.addItem(spacerItem5, 0, 0, 1, 1)
    self.verticalLayout_10.addLayout(self.gridLayout_5)
    self.verticalLayout_12.addLayout(self.verticalLayout_10)
    self.gridLayout_7 = QtWidgets.QGridLayout()
    self.gridLayout_7.setObjectName("gridLayout_7")
    spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_7.addItem(spacerItem6, 0, 0, 1, 1)
    self.pushButton_5 = QtWidgets.QPushButton(self.tab_3)
    self.pushButton_5.setObjectName("pushButton_5")
    self.gridLayout_7.addWidget(self.pushButton_5, 0, 1, 1, 1)
    self.verticalLayout_12.addLayout(self.gridLayout_7)
    self.tabWidget.addTab(self.tab_3, "")
    # Tab 4: selling tables and controls
    self.tab_4 = QtWidgets.QWidget()
    self.tab_4.setObjectName("tab_4")
    self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.tab_4)
    self.verticalLayout_11.setObjectName("verticalLayout_11")
    self.gridLayout_9 = QtWidgets.QGridLayout()
    self.gridLayout_9.setObjectName("gridLayout_9")
    self.verticalLayout_13 = QtWidgets.QVBoxLayout()
    self.verticalLayout_13.setObjectName("verticalLayout_13")
    self.label_8 = QtWidgets.QLabel(self.tab_4)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_8.setFont(font)
    self.label_8.setObjectName("label_8")
    self.verticalLayout_13.addWidget(self.label_8)
    self.tableWidget_5 = QtWidgets.QTableWidget(self.tab_4)
    self.tableWidget_5.setObjectName("tableWidget_5")
    self.tableWidget_5.setColumnCount(0)
    self.tableWidget_5.setRowCount(0)
    self.verticalLayout_13.addWidget(self.tableWidget_5)
    self.gridLayout_9.addLayout(self.verticalLayout_13, 0, 0, 1, 1)
    self.gridLayout_8 = QtWidgets.QGridLayout()
    self.gridLayout_8.setObjectName("gridLayout_8")
    self.verticalLayout_14 = QtWidgets.QVBoxLayout()
    self.verticalLayout_14.setObjectName("verticalLayout_14")
    self.label_10 = QtWidgets.QLabel(self.tab_4)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_10.setFont(font)
    self.label_10.setObjectName("label_10")
    self.verticalLayout_14.addWidget(self.label_10)
    self.lineEdit_3 = QtWidgets.QLineEdit(self.tab_4)
    self.lineEdit_3.setObjectName("lineEdit_3")
    self.verticalLayout_14.addWidget(self.lineEdit_3)
    self.gridLayout_8.addLayout(self.verticalLayout_14, 0, 1, 1, 1)
    self.verticalLayout_15 = QtWidgets.QVBoxLayout()
    self.verticalLayout_15.setObjectName("verticalLayout_15")
    self.label_9 = QtWidgets.QLabel(self.tab_4)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_9.setFont(font)
    self.label_9.setObjectName("label_9")
    self.verticalLayout_15.addWidget(self.label_9)
    self.lineEdit_2 = QtWidgets.QLineEdit(self.tab_4)
    self.lineEdit_2.setObjectName("lineEdit_2")
    self.verticalLayout_15.addWidget(self.lineEdit_2)
    self.gridLayout_8.addLayout(self.verticalLayout_15, 0, 0, 1, 1)
    self.pushButton_6 = QtWidgets.QPushButton(self.tab_4)
    self.pushButton_6.setObjectName("pushButton_6")
    self.gridLayout_8.addWidget(self.pushButton_6, 0, 2, 1, 1)
    self.gridLayout_9.addLayout(self.gridLayout_8, 1, 0, 1, 1)
    self.verticalLayout_11.addLayout(self.gridLayout_9)
    self.verticalLayout_16 = QtWidgets.QVBoxLayout()
    self.verticalLayout_16.setObjectName("verticalLayout_16")
    self.label_11 = QtWidgets.QLabel(self.tab_4)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_11.setFont(font)
    self.label_11.setObjectName("label_11")
    self.verticalLayout_16.addWidget(self.label_11)
    self.tableWidget_6 = QtWidgets.QTableWidget(self.tab_4)
    self.tableWidget_6.setObjectName("tableWidget_6")
    self.tableWidget_6.setColumnCount(0)
    self.tableWidget_6.setRowCount(0)
    self.verticalLayout_16.addWidget(self.tableWidget_6)
    self.gridLayout_10 = QtWidgets.QGridLayout()
    self.gridLayout_10.setObjectName("gridLayout_10")
    self.pushButton_7 = QtWidgets.QPushButton(self.tab_4)
    self.pushButton_7.setObjectName("pushButton_7")
    self.gridLayout_10.addWidget(self.pushButton_7, 0, 1, 1, 1)
    spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_10.addItem(spacerItem7, 0, 0, 1, 1)
    self.verticalLayout_16.addLayout(self.gridLayout_10)
    self.verticalLayout_11.addLayout(self.verticalLayout_16)
    self.verticalLayout_17 = QtWidgets.QVBoxLayout()
    self.verticalLayout_17.setObjectName("verticalLayout_17")
    self.label_12 = QtWidgets.QLabel(self.tab_4)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_12.setFont(font)
    self.label_12.setObjectName("label_12")
    self.verticalLayout_17.addWidget(self.label_12)
    self.tableWidget_7 = QtWidgets.QTableWidget(self.tab_4)
    self.tableWidget_7.setObjectName("tableWidget_7")
    self.tableWidget_7.setColumnCount(0)
    self.tableWidget_7.setRowCount(0)
    self.verticalLayout_17.addWidget(self.tableWidget_7)
    self.gridLayout_6 = QtWidgets.QGridLayout()
    self.gridLayout_6.setObjectName("gridLayout_6")
    self.pushButton_8 = QtWidgets.QPushButton(self.tab_4)
    self.pushButton_8.setObjectName("pushButton_8")
    self.gridLayout_6.addWidget(self.pushButton_8, 0, 1, 1, 1)
    spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_6.addItem(spacerItem8, 0, 0, 1, 1)
    self.verticalLayout_17.addLayout(self.gridLayout_6)
    self.verticalLayout_11.addLayout(self.verticalLayout_17)
    self.gridLayout_11 = QtWidgets.QGridLayout()
    self.gridLayout_11.setObjectName("gridLayout_11")
    self.pushButton_9 = QtWidgets.QPushButton(self.tab_4)
    self.pushButton_9.setObjectName("pushButton_9")
    self.gridLayout_11.addWidget(self.pushButton_9, 0, 1, 1, 1)
    spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_11.addItem(spacerItem9, 0, 0, 1, 1)
    self.verticalLayout_11.addLayout(self.gridLayout_11)
    self.tabWidget.addTab(self.tab_4, "")
    # Tab 5: pledge (collateral) tables and controls
    self.tab_5 = QtWidgets.QWidget()
    self.tab_5.setObjectName("tab_5")
    self.verticalLayout_24 = QtWidgets.QVBoxLayout(self.tab_5)
    self.verticalLayout_24.setObjectName("verticalLayout_24")
    self.verticalLayout_18 = QtWidgets.QVBoxLayout()
    self.verticalLayout_18.setObjectName("verticalLayout_18")
    self.label_7 = QtWidgets.QLabel(self.tab_5)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_7.setFont(font)
    self.label_7.setObjectName("label_7")
    self.verticalLayout_18.addWidget(self.label_7)
    self.tableWidget_4 = QtWidgets.QTableWidget(self.tab_5)
    self.tableWidget_4.setObjectName("tableWidget_4")
    self.tableWidget_4.setColumnCount(0)
    self.tableWidget_4.setRowCount(0)
    self.verticalLayout_18.addWidget(self.tableWidget_4)
    self.gridLayout_12 = QtWidgets.QGridLayout()
    self.gridLayout_12.setObjectName("gridLayout_12")
    self.verticalLayout_20 = QtWidgets.QVBoxLayout()
    self.verticalLayout_20.setObjectName("verticalLayout_20")
    self.label_14 = QtWidgets.QLabel(self.tab_5)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_14.setFont(font)
    self.label_14.setObjectName("label_14")
    self.verticalLayout_20.addWidget(self.label_14)
    self.lineEdit_5 = QtWidgets.QLineEdit(self.tab_5)
    self.lineEdit_5.setObjectName("lineEdit_5")
    self.verticalLayout_20.addWidget(self.lineEdit_5)
    self.gridLayout_12.addLayout(self.verticalLayout_20, 0, 1, 1, 1)
    self.verticalLayout_19 = QtWidgets.QVBoxLayout()
    self.verticalLayout_19.setObjectName("verticalLayout_19")
    self.label_13 = QtWidgets.QLabel(self.tab_5)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_13.setFont(font)
    self.label_13.setObjectName("label_13")
    self.verticalLayout_19.addWidget(self.label_13)
    self.lineEdit_4 = QtWidgets.QLineEdit(self.tab_5)
    self.lineEdit_4.setObjectName("lineEdit_4")
    self.verticalLayout_19.addWidget(self.lineEdit_4)
    self.gridLayout_12.addLayout(self.verticalLayout_19, 0, 0, 1, 1)
    self.pushButton_4 = QtWidgets.QPushButton(self.tab_5)
    self.pushButton_4.setObjectName("pushButton_4")
    self.gridLayout_12.addWidget(self.pushButton_4, 0, 2, 1, 1)
    self.verticalLayout_18.addLayout(self.gridLayout_12)
    self.verticalLayout_24.addLayout(self.verticalLayout_18)
    self.verticalLayout_21 = QtWidgets.QVBoxLayout()
    self.verticalLayout_21.setObjectName("verticalLayout_21")
    self.label_15 = QtWidgets.QLabel(self.tab_5)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_15.setFont(font)
    self.label_15.setObjectName("label_15")
    self.verticalLayout_21.addWidget(self.label_15)
    self.tableWidget_8 = QtWidgets.QTableWidget(self.tab_5)
    self.tableWidget_8.setObjectName("tableWidget_8")
    self.tableWidget_8.setColumnCount(0)
    self.tableWidget_8.setRowCount(0)
    self.verticalLayout_21.addWidget(self.tableWidget_8)
    self.gridLayout_13 = QtWidgets.QGridLayout()
    self.gridLayout_13.setObjectName("gridLayout_13")
    self.pushButton_10 = QtWidgets.QPushButton(self.tab_5)
    self.pushButton_10.setObjectName("pushButton_10")
    self.gridLayout_13.addWidget(self.pushButton_10, 0, 1, 1, 1)
    spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_13.addItem(spacerItem10, 0, 0, 1, 1)
    self.verticalLayout_21.addLayout(self.gridLayout_13)
    self.verticalLayout_24.addLayout(self.verticalLayout_21)
    self.verticalLayout_22 = QtWidgets.QVBoxLayout()
    self.verticalLayout_22.setObjectName("verticalLayout_22")
    self.label_16 = QtWidgets.QLabel(self.tab_5)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_16.setFont(font)
    self.label_16.setObjectName("label_16")
    self.verticalLayout_22.addWidget(self.label_16)
    self.tableWidget_9 = QtWidgets.QTableWidget(self.tab_5)
    self.tableWidget_9.setObjectName("tableWidget_9")
    self.tableWidget_9.setColumnCount(0)
    self.tableWidget_9.setRowCount(0)
    self.verticalLayout_22.addWidget(self.tableWidget_9)
    self.gridLayout_14 = QtWidgets.QGridLayout()
    self.gridLayout_14.setObjectName("gridLayout_14")
    self.pushButton_12 = QtWidgets.QPushButton(self.tab_5)
    self.pushButton_12.setObjectName("pushButton_12")
    self.gridLayout_14.addWidget(self.pushButton_12, 0, 2, 1, 1)
    self.pushButton_11 = QtWidgets.QPushButton(self.tab_5)
    self.pushButton_11.setObjectName("pushButton_11")
    self.gridLayout_14.addWidget(self.pushButton_11, 0, 1, 1, 1)
    spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_14.addItem(spacerItem11, 0, 0, 1, 1)
    self.verticalLayout_22.addLayout(self.gridLayout_14)
    self.verticalLayout_24.addLayout(self.verticalLayout_22)
    self.verticalLayout_23 = QtWidgets.QVBoxLayout()
    self.verticalLayout_23.setObjectName("verticalLayout_23")
    self.label_17 = QtWidgets.QLabel(self.tab_5)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_17.setFont(font)
    self.label_17.setObjectName("label_17")
    self.verticalLayout_23.addWidget(self.label_17)
    self.tableWidget_10 = QtWidgets.QTableWidget(self.tab_5)
    self.tableWidget_10.setObjectName("tableWidget_10")
    self.tableWidget_10.setColumnCount(0)
    self.tableWidget_10.setRowCount(0)
    self.verticalLayout_23.addWidget(self.tableWidget_10)
    self.gridLayout_15 = QtWidgets.QGridLayout()
    self.gridLayout_15.setObjectName("gridLayout_15")
    spacerItem12 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_15.addItem(spacerItem12, 0, 0, 1, 1)
    self.pushButton_14 = QtWidgets.QPushButton(self.tab_5)
    self.pushButton_14.setObjectName("pushButton_14")
    self.gridLayout_15.addWidget(self.pushButton_14, 0, 1, 1, 1)
    self.verticalLayout_23.addLayout(self.gridLayout_15)
    self.verticalLayout_24.addLayout(self.verticalLayout_23)
    self.gridLayout_16 = QtWidgets.QGridLayout()
    self.gridLayout_16.setObjectName("gridLayout_16")
    self.pushButton_13 = QtWidgets.QPushButton(self.tab_5)
    self.pushButton_13.setObjectName("pushButton_13")
    self.gridLayout_16.addWidget(self.pushButton_13, 0, 1, 1, 1)
    spacerItem13 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_16.addItem(spacerItem13, 0, 0, 1, 1)
    self.verticalLayout_24.addLayout(self.gridLayout_16)
    self.tabWidget.addTab(self.tab_5, "")
    # Tab 6: incoming/outgoing pledge offers
    self.tab_6 = QtWidgets.QWidget()
    self.tab_6.setObjectName("tab_6")
    self.verticalLayout_28 = QtWidgets.QVBoxLayout(self.tab_6)
    self.verticalLayout_28.setObjectName("verticalLayout_28")
    self.verticalLayout_25 = QtWidgets.QVBoxLayout()
    self.verticalLayout_25.setObjectName("verticalLayout_25")
    self.label_18 = QtWidgets.QLabel(self.tab_6)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_18.setFont(font)
    self.label_18.setObjectName("label_18")
    self.verticalLayout_25.addWidget(self.label_18)
    self.tableWidget_11 = QtWidgets.QTableWidget(self.tab_6)
    self.tableWidget_11.setObjectName("tableWidget_11")
    self.tableWidget_11.setColumnCount(0)
    self.tableWidget_11.setRowCount(0)
    self.verticalLayout_25.addWidget(self.tableWidget_11)
    self.gridLayout_17 = QtWidgets.QGridLayout()
    self.gridLayout_17.setObjectName("gridLayout_17")
    self.pushButton_15 = QtWidgets.QPushButton(self.tab_6)
    self.pushButton_15.setObjectName("pushButton_15")
    self.gridLayout_17.addWidget(self.pushButton_15, 0, 1, 1, 1)
    spacerItem14 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_17.addItem(spacerItem14, 0, 0, 1, 1)
    self.verticalLayout_25.addLayout(self.gridLayout_17)
    self.verticalLayout_28.addLayout(self.verticalLayout_25)
    self.verticalLayout_26 = QtWidgets.QVBoxLayout()
    self.verticalLayout_26.setObjectName("verticalLayout_26")
    self.label_19 = QtWidgets.QLabel(self.tab_6)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_19.setFont(font)
    self.label_19.setObjectName("label_19")
    self.verticalLayout_26.addWidget(self.label_19)
    self.tableWidget_12 = QtWidgets.QTableWidget(self.tab_6)
    self.tableWidget_12.setObjectName("tableWidget_12")
    self.tableWidget_12.setColumnCount(0)
    self.tableWidget_12.setRowCount(0)
    self.verticalLayout_26.addWidget(self.tableWidget_12)
    self.gridLayout_18 = QtWidgets.QGridLayout()
    self.gridLayout_18.setObjectName("gridLayout_18")
    self.pushButton_16 = QtWidgets.QPushButton(self.tab_6)
    self.pushButton_16.setObjectName("pushButton_16")
    self.gridLayout_18.addWidget(self.pushButton_16, 0, 1, 1, 1)
    spacerItem15 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_18.addItem(spacerItem15, 0, 0, 1, 1)
    self.verticalLayout_26.addLayout(self.gridLayout_18)
    self.verticalLayout_28.addLayout(self.verticalLayout_26)
    self.verticalLayout_27 = QtWidgets.QVBoxLayout()
    self.verticalLayout_27.setObjectName("verticalLayout_27")
    self.label_20 = QtWidgets.QLabel(self.tab_6)
    font = QtGui.QFont()
    font.setPointSize(10)
    self.label_20.setFont(font)
    self.label_20.setObjectName("label_20")
    self.verticalLayout_27.addWidget(self.label_20)
    self.tableWidget_13 = QtWidgets.QTableWidget(self.tab_6)
    self.tableWidget_13.setObjectName("tableWidget_13")
    self.tableWidget_13.setColumnCount(0)
    self.tableWidget_13.setRowCount(0)
    self.verticalLayout_27.addWidget(self.tableWidget_13)
    self.gridLayout_19 = QtWidgets.QGridLayout()
    self.gridLayout_19.setObjectName("gridLayout_19")
    self.pushButton_17 = QtWidgets.QPushButton(self.tab_6)
    self.pushButton_17.setObjectName("pushButton_17")
    self.gridLayout_19.addWidget(self.pushButton_17, 0, 2, 1, 1)
    spacerItem16 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_19.addItem(spacerItem16, 0, 0, 1, 1)
    self.pushButton_18 = QtWidgets.QPushButton(self.tab_6)
    self.pushButton_18.setObjectName("pushButton_18")
    self.gridLayout_19.addWidget(self.pushButton_18, 0, 1, 1, 1)
    self.verticalLayout_27.addLayout(self.gridLayout_19)
    self.verticalLayout_28.addLayout(self.verticalLayout_27)
    self.gridLayout_20 = QtWidgets.QGridLayout()
    self.gridLayout_20.setObjectName("gridLayout_20")
    self.pushButton_19 = QtWidgets.QPushButton(self.tab_6)
    self.pushButton_19.setObjectName("pushButton_19")
    self.gridLayout_20.addWidget(self.pushButton_19, 0, 1, 1, 1)
    spacerItem17 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.gridLayout_20.addItem(spacerItem17, 0, 0, 1, 1)
    self.verticalLayout_28.addLayout(self.gridLayout_20)
    self.tabWidget.addTab(self.tab_6, "")
    # Tab 7: admin-only estate creation form
    self.tab_7 = QtWidgets.QWidget()
    self.tab_7.setObjectName("tab_7")
    self.widget = QtWidgets.QWidget(self.tab_7)
    self.widget.setGeometry(QtCore.QRect(380, 330, 137, 213))
    self.widget.setObjectName("widget")
    self.verticalLayout_37 = QtWidgets.QVBoxLayout(self.widget)
    self.verticalLayout_37.setContentsMargins(0, 0, 0, 0)
    self.verticalLayout_37.setObjectName("verticalLayout_37")
    self.verticalLayout_36 = QtWidgets.QVBoxLayout()
    self.verticalLayout_36.setObjectName("verticalLayout_36")
    self.label_24 = QtWidgets.QLabel(self.widget)
    self.label_24.setObjectName("label_24")
    self.verticalLayout_36.addWidget(self.label_24)
    self.lineEdit_8 = QtWidgets.QLineEdit(self.widget)
    self.lineEdit_8.setObjectName("lineEdit_8")
    self.verticalLayout_36.addWidget(self.lineEdit_8)
    self.verticalLayout_37.addLayout(self.verticalLayout_36)
    self.verticalLayout_33 = QtWidgets.QVBoxLayout()
    self.verticalLayout_33.setObjectName("verticalLayout_33")
    self.label_25 = QtWidgets.QLabel(self.widget)
    self.label_25.setObjectName("label_25")
    self.verticalLayout_33.addWidget(self.label_25)
    self.lineEdit_9 = QtWidgets.QLineEdit(self.widget)
    self.lineEdit_9.setObjectName("lineEdit_9")
    self.verticalLayout_33.addWidget(self.lineEdit_9)
    self.verticalLayout_37.addLayout(self.verticalLayout_33)
    self.verticalLayout_34 = QtWidgets.QVBoxLayout()
    self.verticalLayout_34.setObjectName("verticalLayout_34")
    self.label_26 = QtWidgets.QLabel(self.widget)
    self.label_26.setObjectName("label_26")
    self.verticalLayout_34.addWidget(self.label_26)
    self.lineEdit_10 = QtWidgets.QLineEdit(self.widget)
    self.lineEdit_10.setObjectName("lineEdit_10")
    self.verticalLayout_34.addWidget(self.lineEdit_10)
    self.verticalLayout_37.addLayout(self.verticalLayout_34)
    self.verticalLayout_35 = QtWidgets.QVBoxLayout()
    self.verticalLayout_35.setObjectName("verticalLayout_35")
    self.label_27 = QtWidgets.QLabel(self.widget)
    self.label_27.setObjectName("label_27")
    self.verticalLayout_35.addWidget(self.label_27)
    self.lineEdit_11 = QtWidgets.QLineEdit(self.widget)
    self.lineEdit_11.setObjectName("lineEdit_11")
    self.verticalLayout_35.addWidget(self.lineEdit_11)
    self.pushButton_21 = QtWidgets.QPushButton(self.widget)
    self.pushButton_21.setObjectName("pushButton_21")
    self.verticalLayout_35.addWidget(self.pushButton_21)
    self.verticalLayout_37.addLayout(self.verticalLayout_35)
    self.tabWidget.addTab(self.tab_7, "")
    self.verticalLayout.addWidget(self.tabWidget)
    self.retranslateUi(Dialog)
    self.tabWidget.setCurrentIndex(6)
    QtCore.QMetaObject.connectSlotsByName(Dialog)
    # ---- hand-written code starts here (NOT from the .ui file) ----
    # Core variables
    # NOTE(review): credentials are read with readlines(), which keeps the
    # trailing newline — if login.txt/pass.txt end with a newline, the
    # equality checks below will never match. TODO confirm file format and
    # consider .strip(). Also: hard-coded admin credentials in source is a
    # security risk; prefer a context manager (`with open(...)`) for the files.
    self.out_pass_file = open("pass.txt", "r")
    self.out_login_file = open("login.txt", "r")
    self.Main_Login = self.out_login_file.readlines()
    self.Main_Password = self.out_pass_file.readlines()
    # STATION flags admin mode: True only for the hard-coded admin account.
    self.STATION = None
    if self.Main_Login[0] == "0xAc771378BB6c2b8878fbF75F80880cbdDefd1B1e" and self.Main_Password[0]=="123456789":
        self.STATION=True
    else:
        self.STATION = False
    if self.STATION==True:
        print("Welcom Aministrator")
    else:
        # Non-admin users get the admin tab inputs disabled.
        self.lineEdit_8.setDisabled(True)
        self.lineEdit_9.setDisabled(True)
        self.lineEdit_10.setDisabled(True)
        self.lineEdit_11.setDisabled(True)
        self.pushButton_21.setDisabled(True)
    self.present_time = 0
    self.present_addres = 0
    # End of core variables
    self.out_pass_file.close()
    self.out_login_file.close()
    # NOTE(review): these DEBUG prints leak the credentials to stdout — remove.
    print("Login", self.Main_Login) # DEBUG
    print("Password", self.Main_Password) # DEBUG
    # TABLE INITIALIZATION — fixed placeholder dimensions + header labels
    self.My_sobs_table.setRowCount(7)  # rows
    self.My_sobs_table.setColumnCount(7)  # columns
    self.Ptesents_table_1.setRowCount(5)
    self.Ptesents_table_1.setColumnCount(4)
    self.Ptesents_table_1.setHorizontalHeaderLabels(["Id","Адрес собственности","Площадь","Полезная площадь"])
    self.My_sobs_table.setHorizontalHeaderLabels(["Id","Адрес собственности","Общая площадь","Полезная площадь","Подарок","Продажа","Залог"])
    self.tableWidget.setColumnCount(6)
    self.tableWidget.setRowCount(7)
    self.tableWidget.setHorizontalHeaderLabels(["Id","Для кого","Адрес","Площадь","Полезная Площадь","Срок"])
    self.tableWidget_5.setColumnCount(4)
    self.tableWidget_5.setRowCount(7)
    self.tableWidget_5.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная Площадь"])
    self.tableWidget_6.setColumnCount(6)
    self.tableWidget_6.setRowCount(7)
    self.tableWidget_6.setHorizontalHeaderLabels(["Id","Ифнормация","Площадь","Полезная площадь","Цена","Срок"])
    self.tableWidget_7.setColumnCount(8)
    self.tableWidget_7.setRowCount(7)
    self.tableWidget_7.setHorizontalHeaderLabels(["Id","Ифнормация","Площадь","Полезная площадь","Наменальная цена","Срок","Покупатель","Предложенная цена"])
    self.tableWidget_2.setColumnCount(6)
    self.tableWidget_2.setRowCount(7)
    self.tableWidget_2.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь","Цена","Срок"])
    self.tableWidget_3.setColumnCount(8)
    self.tableWidget_3.setRowCount(7)
    self.tableWidget_3.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь","Наминальная цена","Срок","Покупатель","Предложенная цена"])
    self.tableWidget_4.setColumnCount(4)
    self.tableWidget_4.setRowCount(7)
    self.tableWidget_4.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь"])
    self.tableWidget_8.setColumnCount(6)
    self.tableWidget_8.setRowCount(7)
    self.tableWidget_8.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь","Деньги","Длительность"])
    self.tableWidget_9.setColumnCount(7)
    self.tableWidget_9.setRowCount(7)
    self.tableWidget_9.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь","Предлогает залог","Деньги","Длительность"])
    self.tableWidget_10.setColumnCount(7)
    self.tableWidget_10.setRowCount(7)
    self.tableWidget_10.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь","Залогодатель", "Сумма","Срок"])
    self.tableWidget_11.setRowCount(7)
    self.tableWidget_11.setColumnCount(7)
    self.tableWidget_11.setHorizontalHeaderLabels(["Id","Кому выдать","Площадь","Полезная площадь","Информация","Сумма" ,"Длительность"])
    self.tableWidget_12.setRowCount(7)
    self.tableWidget_12.setColumnCount(7)
    self.tableWidget_12.setHorizontalHeaderLabels(["Id","Кому выдать","Площадь","Полезная площадь","Информация","Сумма" ,"Длительность"])
    self.tableWidget_13.setColumnCount(7)
    self.tableWidget_13.setRowCount(7)
    self.tableWidget_13.setHorizontalHeaderLabels(["Id", "Информация", "Площадь", "Полезная площадь", "Залогодатель", "Сумма", "Срок"])
    self.tableWidget_14.setColumnCount(6)
    self.tableWidget_14.setRowCount(7)
    self.tableWidget_14.setHorizontalHeaderLabels(["Id","От кого","Информация","Площадь","Полезная площадь","Срок"])
    # END OF TABLE INITIALIZATION
    # MATRICES — caches for row data fetched from the Estate backend
    self.Matrix_For_My_sobs_table = [[]]
    self.Matrix_For_Presents = [[]]
    self.Matrix_For_Mine_Presents = [[]]
    self.Matrix_For_Can_Be_Slea = [[]]
    self.Matix_For_My_Sales = [[]]
    self.Matrix_For_Who_Wont_To_By = [[]]
    self.Matrix_For_Choose_to_by = [[]]
    self.Matrix_For_I_By =[[]]
    self.Matrix_For_Zalog_My_Sobstvennost = [[]]
    self.Matrix_For_My_Zalogs = [[]]
    self.Matrix_For_Get_Zalog = [[]]
    self.Matrix_For_Getting_For_Me_Zalogs = [[]]
    self.Matrix_For_Gettin_Other_Zalog =[[]]
    self.Matrix_For_Pred_My_Zalogs =[[]]
    self.Matrix_For_Get_My_Zalogs =[[]]
    self.Matrix_For_My_Presents = [[]]
    # END OF MATRICES
    # SIGNALS — wire buttons to their slot methods
    self.update_push_button_1.clicked.connect(self.get_my_estates)
    self.pushButton.clicked.connect(self.get_can_my_presents)
    self.Presen_push_button.clicked.connect(self.create_present)
    self.pushButton_9.clicked.connect(self.udpate_tables_slae)
    self.pushButton_5.clicked.connect(self.update_table_choose)
    self.pushButton_13.clicked.connect(self.zalog_update)
    self.pushButton_19.clicked.connect(self.update_get_zalog)
    self.pushButton_2.clicked.connect(self.take_by)
    self.pushButton_6.clicked.connect(self.create_sole_slot)
    self.chansel_present_push_button.clicked.connect(self.chansle_1)
    self.pushButton_4.clicked.connect(self.create_playedge_slot)
    self.pushButton_21.clicked.connect(self.Admin_Slot)
    # END OF SIGNALS
    # Authenticate against the Estate backend with the credentials read above.
    self.Estate_main_object = Estate.Estate()
    Station = self.Estate_main_object.auth(self.Main_Login[0], self.Main_Password[0])
    print("Login Station", Station) # DEBUG
    def Admin_Slot(self):
        """Create a new estate record from the admin tab's form fields.

        Side effect: caches the four form values (user address, physical
        address, total area, useful area) on ``self`` before delegating
        to the Estate backend.
        """
        self.userAdrr = self.lineEdit_8.text()
        self.FisicAddress = self.lineEdit_9.text()
        self.Square = self.lineEdit_10.text()
        self.UsefulSquare = self.lineEdit_11.text()
        self.Estate_main_object.create_estate(self.userAdrr,self.FisicAddress,self.Square,self.UsefulSquare)
def get_my_estates(self):
self.My_sobs_table.clear()
self.My_sobs_table.setHorizontalHeaderLabels(
["Id", "Адрес собственности", "Общая площадь", "Полезная площадь", "Подарок", "Продажа", "Залог"])
self.Matrix_For_My_sobs_table = self.Estate_main_object.my_estates()
if self.Matrix_For_My_sobs_table == []:
print("Matrix_For_My_sobs_table is emty ")
else:
print(self.Matrix_For_My_sobs_table) # DEBUG
self.My_sobs_table.setRowCount(len(self.Matrix_For_My_sobs_table)) # Количество столбцов
self.My_sobs_table.setColumnCount(len(self.Matrix_For_My_sobs_table[0])) # Количество строк
for i in range(len(self.Matrix_For_My_sobs_table)):
for j in range(len(self.Matrix_For_My_sobs_table[0])):
self.My_sobs_table.setItem(i, j, QTableWidgetItem(self.Matrix_For_My_sobs_table[i][j]))
def get_can_my_presents(self):
self.Ptesents_table_1.clear()
self.Ptesents_table_1.setHorizontalHeaderLabels(
["id", "Адрес собственности", "Площадь", "Полезная площадь"])
self.Matrix_For_Presents = self.Estate_main_object.can_present()
if self.Matrix_For_Presents == []:
print("Matrix For Presents Is Emty")
else:
print(self.Matrix_For_Presents) # DEBUG
self.Ptesents_table_1.setRowCount(len(self.Matrix_For_Presents))
self.Ptesents_table_1.setColumnCount(len(self.Matrix_For_Presents[0]))
for i in range(len(self.Matrix_For_Presents)):
for j in range(len(self.Matrix_For_Presents[0])):
self.Ptesents_table_1.setItem(i, j, QTableWidgetItem(self.Matrix_For_Presents[i][j]))
self.tableWidget.clear()
self.tableWidget.setHorizontalHeaderLabels(["Id", "Для кого", "Адрес", "Площадь", "Полезная Площадь", "Срок"])
self.Matrix_For_Mine_Presents = self.Estate_main_object.i_presented()
if self.Matrix_For_Mine_Presents == []:
print("Matrix For Mine Presents Is Emty")
else:
print(self.Matrix_For_Mine_Presents)#DEBUG
self.tableWidget.setRowCount(len(self.Matrix_For_Mine_Presents))
self.tableWidget.setColumnCount(len(self.Matrix_For_Mine_Presents[0]))
for i in range (len(self.Matrix_For_Mine_Presents)):
for j in range(len(self.Matrix_For_Mine_Presents[0])):
self.tableWidget.setItem(i,j,QTableWidgetItem(self.Matrix_For_Mine_Presents[i][j]))
self.tableWidget_14.clear()
self.tableWidget_14.setHorizontalHeaderLabels(["Id","От кого","Информация","Площадь","Полезная площадь","Срок"])
self.Matrix_For_My_Presents = self.Estate_main_object.my_present()
if self.Matrix_For_My_Presents == []:
print("Matrix_For_My_Presents Is Emty") #DEBUG
else:
print(self.Matrix_For_My_Presents) # DEBUG
self.tableWidget_14.setRowCount(len(self.Matrix_For_My_Presents))
self.tableWidget_14.setColumnCount(len(self.Matrix_For_My_Presents[0]))
for i in range(len(self.Matrix_For_My_Presents)):
for j in range(len(self.Matrix_For_My_Presents[0])):
self.tableWidget_14.setItem(i, j, QTableWidgetItem(self.Matrix_For_My_Presents[i][j]))
def create_sole_slot(self):
Row = self.tableWidget_5.currentRow()
estate_id = self.tableWidget_5.item(Row,0).text()
print(estate_id) #DEBUG
Time = self.lineEdit_3.text()
Price = self.lineEdit_2.text()
self.Estate_main_object.create_sale(estate_id,Price,Time)
def create_present(self):
#estate_id = self.Ptesents_table_1.currentItem().text()
ROW = self.Ptesents_table_1.currentRow()
print(ROW)
estate_id = self.Ptesents_table_1.item(ROW,0).text()
print(estate_id)#DEBUG
#print(estate_id)#DEBUG
self.present_addres=self.lineEdit_6.text()
self.present_time = self.lineEdit_7.text()
self.Estate_main_object.create_present(estate_id ,self.present_addres,self.present_time)
def chansle_1(self):
Row = self.tableWidget.currentRow()
estate_id = self.tableWidget.item(Row, 0).text()
print(estate_id) # DEBUG
self.Estate_main_object.cancel_present(estate_id)
    def take_by(self):
        """Submit a purchase offer for the estate selected in self.tableWidget.

        NOTE(review): the row is read from ``self.tableWidget`` (the
        "gifts I am giving" table on the gifts tab) while the price field
        and the connected button belong to the purchase tab — confirm the
        intended source table (possibly ``tableWidget_2``).
        """
        Row = self.tableWidget.currentRow()
        estate_id = self.tableWidget.item(Row, 0).text()
        print(estate_id) # DEBUG
        Price = self.lineEdit.text()
        self.Estate_main_object.i_want_to_buy(estate_id,Price)
def create_playedge_slot(self):
Row = self.tableWidget_4.currentRow()
estate_id = self.tableWidget_4.item(Row, 0).text()
print(estate_id) # DEBUG
Price = self.lineEdit_4.text()
Days = self.lineEdit_5.text()
self.Estate_main_object.create_pledge(estate_id,Price,Days)
def udpate_tables_slae(self):
self.tableWidget_5.clear()
self.tableWidget_5.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная Площадь"])
self.Matrix_For_Can_Be_Slea = self.Estate_main_object.can_sale()
print("Таблица ",self.Matrix_For_Can_Be_Slea)
if self.Matrix_For_Can_Be_Slea == []:
print("Matrix_For_Cam_Be_Slea is Emty")
else:
self.tableWidget_5.setRowCount(len(self.Matrix_For_Can_Be_Slea))
self.tableWidget_5.setColumnCount(len(self.Matrix_For_Can_Be_Slea[0]))
for i in range(len(self.Matrix_For_Can_Be_Slea)):
for j in range(len(self.Matrix_For_Can_Be_Slea[0])):
self.tableWidget_5.setItem(i,j,QTableWidgetItem(self.Matrix_For_Can_Be_Slea[i][j]))
self.tableWidget_6.clear()
self.tableWidget_6.setHorizontalHeaderLabels(["Id","Ифнормация","Площадь","Полезная площадь","Цена","Срок"])
self.Matix_For_My_Sales = self.Estate_main_object.my_sales()
if self.Matix_For_My_Sales == []:
print("Matix_For_My_Sales is Emty")
else:
self.tableWidget_6.setRowCount(len(self.Matix_For_My_Sales))
self.tableWidget_6.setColumnCount(len(self.Matix_For_My_Sales[0]))
for i in range(len(self.Matix_For_My_Sales)):
for j in range(len(self.Matix_For_My_Sales[0])):
self.tableWidget_6.setItem(i,j,QTableWidgetItem(self.Matix_For_My_Sales[i][j]))
self.tableWidget_7.clear()
self.tableWidget_7.setHorizontalHeaderLabels(["Id","Ифнормация","Площадь","Полезная площадь","Наменальная цена","Срок","Покупатель","Предложенная цена"])
self.Matrix_For_Who_Wont_To_By = self.Estate_main_object.who_want_to_buy()
if self.Matrix_For_Who_Wont_To_By == []:
print("Matrix_For_Cam_Be_Slea is Emty")
else:
self.tableWidget_7.setRowCount(len(self.Matrix_For_Who_Wont_To_By))
self.tableWidget_7.setColumnCount(len(self.Matrix_For_Who_Wont_To_By[0]))
for i in range(len(self.Matrix_For_Who_Wont_To_By)):
for j in range(len(self.Matrix_For_Who_Wont_To_By[0])):
self.tableWidget_7.setItem(i,j,QTableWidgetItem(self.Matrix_For_Who_Wont_To_By[i][j]))
def update_table_choose(self):
self.tableWidget_2.clear()
self.tableWidget_2.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь","Цена","Срок"])
self.Matrix_For_Choose_to_by = self.Estate_main_object.choose_to_buy()
if self.Matrix_For_Choose_to_by ==[]:
print("Matrix_For_Choose_to_by is Emty") #DEBUG
else:
self.tableWidget_2.setRowCount(len(self.Matrix_For_Choose_to_by))
self.tableWidget_2.setColumnCount(len(self.Matrix_For_Choose_to_by[0]))
for i in range(len(self.Matrix_For_Choose_to_by)):
for j in range(len(self.Matrix_For_Choose_to_by[0])):
self.tableWidget_2.setItem(i,j,QTableWidgetItem(self.Matrix_For_Choose_to_by[i][j]))
self.tableWidget_3.clear()
self.tableWidget_3.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь","Наминальная цена","Срок","Покупатель","Предложенная цена"])
self.Matrix_For_I_By = self.Estate_main_object.i_have_payed()
if self.Matrix_For_I_By==[]:
print("Matrix_For_I_By is Emty")#DEBUG
else:
self.tableWidget_3.setRowCount(len(self.Matrix_For_I_By))
self.tableWidget_3.setColumnCount(len(self.Matrix_For_I_By[0]))
for i in range(len(self.Matrix_For_I_By)):
for j in range(len(self.Matrix_For_I_By)):
self.tableWidget_3.setItem(i,j,QTableWidgetItem(self.Matrix_For_I_By[i][j]))
def zalog_update(self):
self.tableWidget_4.clear()
self.tableWidget_4.setHorizontalHeaderLabels(["Id", "Информация", "Площадь", "Полезная площадь"])
self.Matrix_For_Zalog_My_Sobstvennost = self.Estate_main_object.can_pledge()
if self.Matrix_For_Zalog_My_Sobstvennost==[]:
print("Matrix_For_Zalog_My_Sobstvennost is Emty")#DEBUG
else:
self.tableWidget_4.setRowCount(len(self.Matrix_For_Zalog_My_Sobstvennost))
self.tableWidget_4.setColumnCount(len(self.Matrix_For_Zalog_My_Sobstvennost[0]))
for i in range(len(self.Matrix_For_Zalog_My_Sobstvennost)):
for j in range(len(self.Matrix_For_Zalog_My_Sobstvennost[0])):
self.tableWidget_4.setItem(i,j,QTableWidgetItem(self.Matrix_For_Zalog_My_Sobstvennost[i][j]))
self.tableWidget_8.clear()
self.tableWidget_8.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь","Деньги","Длительность"])
self.Matrix_For_My_Zalogs = self.Estate_main_object.my_pledges()
if self.Matrix_For_My_Zalogs==[]:
print("Matrix_For_My_Zalogs is Emty")#DEBUG
else:
self.tableWidget_8.setRowCount(len(self.Matrix_For_My_Zalogs))
self.tableWidget_8.setColumnCount(len(self.Matrix_For_My_Zalogs[0]))
for i in range(len(self.Matrix_For_My_Zalogs)):
for j in range(len(self.Matrix_For_My_Zalogs[0])):
self.tableWidget_8.setItem(i,j,QTableWidgetItem(self.Matrix_For_My_Zalogs[i][j]))
self.tableWidget_9.clear()
self.tableWidget_9.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь","Предлогает залог","Деньги","Длительность"])
self.Matrix_For_Get_Zalog = self.Estate_main_object.show_pledges_i_got()
if self.Matrix_For_Get_Zalog==[]:
print("Matrix_For_Get_Zalog is Emty")#DEBUG
else:
self.tableWidget_9.setRowCount(len(self.Matrix_For_Get_Zalog))
self.tableWidget_9.setColumnCount(len(self.Matrix_For_Get_Zalog[0]))
for i in range(len(self.Matrix_For_Get_Zalog)):
for j in range(len(self.Matrix_For_Get_Zalog[0])):
self.tableWidget_9.setItem(i,j,QTableWidgetItem(self.Matrix_For_Get_Zalog[i][j]))
self.tableWidget_10.clear()
self.tableWidget_10.setHorizontalHeaderLabels(["Id","Информация","Площадь","Полезная площадь","Залогодатель", "Сумма","Срок"])
self.Matrix_For_Getting_For_Me_Zalogs = self.Estate_main_object.show_pledges_when_i_find_money()
if self.Matrix_For_Getting_For_Me_Zalogs==[]:
print("Matrix_For_Getting_For_Me_Zalogs is Emty")#DEBUG
else:
self.tableWidget_10.setRowCount(len(self.Matrix_For_Getting_For_Me_Zalogs))
self.tableWidget_10.setColumnCount(len(self.Matrix_For_Getting_For_Me_Zalogs[0]))
for i in range(len(self.Matrix_For_Getting_For_Me_Zalogs)):
for j in range(len(self.Matrix_For_Getting_For_Me_Zalogs[0])):
self.tableWidget_10.setItem(i,j,QTableWidgetItem(self.Matrix_For_Getting_For_Me_Zalogs[i][j]))
def update_get_zalog(self):
self.tableWidget_11.clear()
self.tableWidget_11.setHorizontalHeaderLabels(["Id","Кому выдать","Площадь","Полезная площадь","Информация","Сумма" ,"Длительность"])
self.Matrix_For_Gettin_Other_Zalog = self.Estate_main_object.choose_to_guess_pledge()
if self.Matrix_For_Gettin_Other_Zalog == []:
print("Matrix_For_Gettin_Other_Zalog is Emty")#DEBUG
else:
self.tableWidget_11.setRowCount(len(self.Matrix_For_Gettin_Other_Zalog))
self.tableWidget_11.setColumnCount(len(self.Matrix_For_Gettin_Other_Zalog[0]))
for i in range(len(self.Matrix_For_Gettin_Other_Zalog)):
for j in range(len(self.Matrix_For_Gettin_Other_Zalog[0])):
self.tableWidget_11.setItem(i,j,QTableWidgetItem(self.Matrix_For_Gettin_Other_Zalog[i][j]))
self.tableWidget_12.clear()
self.tableWidget_12.setHorizontalHeaderLabels(["Id","Кому выдать","Площадь","Полезная площадь","Информация","Сумма" ,"Длительность"])
self.Matrix_For_Pred_My_Zalogs = self.Estate_main_object.choose_to_cancel_guess_pledge()
if self.Matrix_For_Pred_My_Zalogs == []:
print("Matrix_For_Pred_My_Zalogs is Emty") # DEBUG
else:
self.tableWidget_12.setRowCount(len(self.Matrix_For_Pred_My_Zalogs))
self.tableWidget_12.setColumnCount(len(self.Matrix_For_Pred_My_Zalogs[0]))
for i in range(len(self.Matrix_For_Pred_My_Zalogs)):
for j in range(len(self.Matrix_For_Pred_My_Zalogs[0])):
self.tableWidget_12.setItem(i, j, QTableWidgetItem(self.Matrix_For_Pred_My_Zalogs[i][j]))
self.tableWidget_13.clear()
self.tableWidget_13.setHorizontalHeaderLabels(["Id", "Информация", "Площадь", "Полезная площадь", "Залогодатель", "Сумма", "Срок"])
self.Matrix_For_Get_My_Zalogs = self.Estate_main_object.choose_to_cancel_guess_pledge()
if self.Matrix_For_Get_My_Zalogs == []:
print("Matrix_For_Get_My_Zalogs is Emty") # DEBUG
else:
self.tableWidget_13.setRowCount(len(self.Matrix_For_Get_My_Zalogs))
self.tableWidget_13.setColumnCount(len(self.Matrix_For_Get_My_Zalogs[0]))
for i in range(len(self.Matrix_For_Get_My_Zalogs)):
for j in range(len(self.Matrix_For_Get_My_Zalogs[0])):
self.tableWidget_13.setItem(i, j, QTableWidgetItem(self.Matrix_For_Get_My_Zalogs[i][j]))
    def retranslateUi(self, Dialog):
        """Install the (Russian) display strings for every widget.

        Standard Qt Designer-generated method: all user-visible text goes
        through ``QCoreApplication.translate`` so it can be localized.
        Called once from setupUi; the literals below must stay exactly as
        generated.
        """
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label.setText(_translate("Dialog", "Моя Собственность"))
        self.update_push_button_1.setText(_translate("Dialog", "Обновить Таблицы"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Dialog", "Моя собственность"))
        self.label_2.setText(_translate("Dialog", "Что Можно подарить"))
        self.Presen_push_button.setText(_translate("Dialog", "Подарить"))
        self.label_21.setText(_translate("Dialog", "Время в днях"))
        self.label_22.setText(_translate("Dialog", "Адрес"))
        self.label_3.setText(_translate("Dialog", "Я дарю"))
        self.chansel_present_push_button.setText(_translate("Dialog", "Оменить"))
        self.label_23.setText(_translate("Dialog", "Мне дарят"))
        self.pushButton.setText(_translate("Dialog", "Обновить все таблицы"))
        self.pushButton_20.setText(_translate("Dialog", "Принять"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Dialog", "Подарки"))
        self.label_4.setText(_translate("Dialog", "Выбрать"))
        self.label_5.setText(_translate("Dialog", "Цена"))
        self.pushButton_2.setText(_translate("Dialog", "Выбрать"))
        self.label_6.setText(_translate("Dialog", "Я покупаю"))
        self.pushButton_3.setText(_translate("Dialog", "Отменить покупку"))
        self.pushButton_5.setText(_translate("Dialog", "Обновить все таблицы"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("Dialog", "Купля"))
        self.label_8.setText(_translate("Dialog", "Наша собственность"))
        self.label_10.setText(_translate("Dialog", "Время в днях"))
        self.label_9.setText(_translate("Dialog", "Цена"))
        self.pushButton_6.setText(_translate("Dialog", "Продать"))
        self.label_11.setText(_translate("Dialog", "Выберите что бы отменить продажу"))
        self.pushButton_7.setText(_translate("Dialog", "Оменить продажу"))
        self.label_12.setText(_translate("Dialog", "Покупатели"))
        self.pushButton_8.setText(_translate("Dialog", "Продать"))
        self.pushButton_9.setText(_translate("Dialog", "Обновить все таблицы"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("Dialog", "Продажа"))
        self.label_7.setText(_translate("Dialog", "Моя собственность"))
        self.label_14.setText(_translate("Dialog", "Дни"))
        self.label_13.setText(_translate("Dialog", "Цена"))
        self.pushButton_4.setText(_translate("Dialog", "Заложить"))
        self.label_15.setText(_translate("Dialog", "Мои залоги"))
        self.pushButton_10.setText(_translate("Dialog", "Отменить"))
        self.label_16.setText(_translate("Dialog", "Получить залоги"))
        self.pushButton_12.setText(_translate("Dialog", "Откозать"))
        self.pushButton_11.setText(_translate("Dialog", "Принять"))
        self.label_17.setText(_translate("Dialog", "Выданные мне залоги"))
        self.pushButton_14.setText(_translate("Dialog", "Вернуть"))
        self.pushButton_13.setText(_translate("Dialog", "Обновить все таблицы"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate("Dialog", "Взять под залог"))
        self.label_18.setText(_translate("Dialog", "Выберите что бы выдать залог"))
        self.pushButton_15.setText(_translate("Dialog", "Выдать"))
        self.label_19.setText(_translate("Dialog", "Предложенные мною залоги"))
        self.pushButton_16.setText(_translate("Dialog", "Отменить"))
        self.label_20.setText(_translate("Dialog", "Выданные мною залоги"))
        self.pushButton_17.setText(_translate("Dialog", "Отказать"))
        self.pushButton_18.setText(_translate("Dialog", "Выдать"))
        self.pushButton_19.setText(_translate("Dialog", "Обновить все таблицы"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_6), _translate("Dialog", "Выдать залог"))
        self.label_24.setText(_translate("Dialog", "Адресс пользователя"))
        self.label_25.setText(_translate("Dialog", "Физический Адресс"))
        self.label_26.setText(_translate("Dialog", "Общая площадь"))
        self.label_27.setText(_translate("Dialog", "Полезная площадь"))
        self.pushButton_21.setText(_translate("Dialog", "Добавить"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_7), _translate("Dialog", "Добавление Имущества"))
if __name__ == "__main__":
    # Standalone launch: build a bare QDialog, attach the generated UI
    # and enter the Qt event loop until the dialog is closed.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
|
10,467 | 4ca92509dcea2fb058f1278be4269f85939db5b3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on
@author: a.mikkonen@iki.fi
"""
import time
import scipy as sp
from matplotlib import pyplot as plt
def power_law_profile(nn):
    """Plot the 1/7th power-law pipe velocity profile together with its
    piecewise-constant discretization into *nn* equal cells, then save the
    figure as ``wallProfile<nn>.pdf``.

    Parameters
    ----------
    nn : int
        Number of equal-width cells used for the discretization.
    """
    # Fix: ``scipy.linspace`` (the top-level numpy alias in scipy) was
    # deprecated and has been removed from modern scipy; use numpy directly.
    import numpy as np

    n = 7  # power-law exponent (1/7th law)
    yperR = np.linspace(0, 1, 1000)
    u_rat = yperR ** (1 / n)
    # plt.plot(yperR, u_rat, 'k:', label=r"$\frac{\overline{u}}{u_{max}}=\frac{y}{R}^{1/n}$")
    plt.plot(yperR, u_rat, 'k:', label="pipe velocity profile")
    fig = plt.gcf()
    fig.set_size_inches(3.5, 2.5)

    # Cell faces, cell-centre positions and cell-centre velocities.
    yperR_disc = np.linspace(0, 1, nn + 1)
    yperR_f = (yperR_disc[:-1] + yperR_disc[1:]) / 2
    u_rat_disc = yperR_f ** (1 / n)
    for k in range(len(yperR_disc) - 1):
        plt.plot(yperR_disc[k:k + 2], [u_rat_disc[k], u_rat_disc[k]], 'k')
    # Single zero-length segment only to create one legend entry.
    plt.plot(yperR_disc[k:k + 1], [u_rat_disc[k]], 'k', label="discretization")

    ax = plt.gca()
    # Hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Only show ticks on the left and bottom spines
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    plt.xlim(-0.01, 1)
    plt.ylim(0, 1.1)
    plt.xlabel("$y/R$")
    plt.ylabel("$u/u_{max}$")
    plt.tight_layout()
    plt.legend(frameon=False, loc='lower right')
    plt.savefig("wallProfile" + str(nn) + ".pdf")
def main():
    """Placeholder entry point; intentionally does nothing yet."""
    pass
if __name__ == "__main__":
    # Time the figure generation; main() is currently an unused stub.
    start = time.time()
    print("START")
    # main()
    power_law_profile(10)
    # power_law_profile(50)
    print("END %.4f s" % (time.time()-start))
|
10,468 | f510b19b9cd2b13fb7cf6033c3180622c2e410da | # q2b.py
# Name:
# Section:
# TODO: fill sv_recursive
# m is a matrix represented by a 2D list of integers. e.g. m = [[3, 0, 2, 18],[-1, 1, 3, 4],[-2, -3, 18, 7]]
# This function returns the Special Value of the matrix passed in.
def sv_recursive(m):
    """Return the Special Value of matrix *m* (a 2D list of integers).

    TODO: unimplemented assignment stub — currently always returns 0.
    """
    # your code here
    return 0 # change
|
10,469 | a5f5cf0a0965e5bafc578a0c4bf0dad9d4714e00 | #
# * The Plan class for the <i>groupby</i> operator.
# * @author Edward Sciore
#
from simpledb.materialize.GroupByScan import GroupByScan
from simpledb.materialize.SortPlan import SortPlan
from simpledb.plan.Plan import Plan
from simpledb.record.Schema import Schema
class GroupByPlan(Plan):
    """The Plan class for the *groupby* operator (after Edward Sciore).

    The underlying plan is sorted on the group fields so that records of
    the same group arrive consecutively, allowing the aggregation to be
    computed in a single pass.
    """

    def __init__(self, tx, p, groupfields, aggfns):
        """Create a groupby plan for the underlying query.

        tx          -- the calling transaction
        p           -- a plan for the underlying query
        groupfields -- the collection of grouping field names
        aggfns      -- the collection of aggregation functions
        """
        super(GroupByPlan, self).__init__()
        # Sorting on the group fields guarantees grouped input for the scan.
        self.p = SortPlan(tx, p, groupfields)
        self.groupfields = groupfields
        self.aggfns = aggfns
        # Output schema = the group fields plus one int field per aggfn.
        self.sch = Schema()
        for name in groupfields:
            self.sch.add(name, p.schema())
        for fn in aggfns:
            self.sch.addIntField(fn.fieldName())

    def open(self):
        """Open a sort scan over the underlying plan and wrap it in a
        GroupByScan, which relies on the grouped ordering."""
        sorted_scan = self.p.open()
        return GroupByScan(sorted_scan, self.groupfields, self.aggfns)

    def blocksAccessed(self):
        """Blocks needed to compute the aggregation: one pass through the
        sorted table.  Excludes the one-time cost of materializing and
        sorting the records."""
        return self.p.blocksAccessed()

    def recordsOutput(self):
        """Estimated number of groups: assuming an equal distribution,
        the product of the distinct values of every grouping field."""
        numgroups = 1
        for name in self.groupfields:
            numgroups *= self.p.distinctValues(name)
        return numgroups

    def distinctValues(self, fldname):
        """Distinct values for *fldname*: same as the underlying query for
        a grouping field; for an aggregate field, every value is assumed
        distinct (one per group)."""
        if self.p.schema().hasField(fldname):
            return self.p.distinctValues(fldname)
        return self.recordsOutput()

    def schema(self):
        """Output schema: the group fields plus one field per aggfn."""
        return self.sch
|
10,470 | 6618c741754d4dcc209e95a3a9e56814ac9243c4 | #!/usr/local/bin/python3
from lib import *
if __name__ == '__main__':
    # Fixes: the comprehension variable shadowed the builtin ``str``, and
    # the opened file was never closed.  Each input line is a
    # tab-separated row of fields.
    with open('./input.txt', 'r') as puzzle:
        rows = [line.split("\t") for line in puzzle.read().strip().split("\n")]
    print("solution: {}".format(Solver().solve(rows)))
|
10,471 | c372dc232f0a7e10145d1b15e6457373ce2f9abb | from dataChest import *
import matplotlib.pyplot as plt
# import os
# Location of the measurement dataset inside the dataChest data root.
file_path = 'GapEngineer\\Nb_GND_Dev01\\Leiden_2020Feb\\LIU\\Q1\\03-16-20\\QP_Tunneling_PSD\\HDF5Data'
file_name = 'cvd2133wum_QP_Tunneling_PSD.hdf5'
# ftag = file_name.split('_')[0]
print(file_path.split('\\'))
# dataChest expects the path as a list of directory components.
dc = dataChest(file_path.split('\\'))
dc.openDataset(file_name)
varsList = dc.getVariables()
data = dc.getData()
# Transpose so each variable becomes one row of `data`.
data = data.transpose()
# Row 4 is plotted as "Parity" — presumably the parity trace; confirm
# against varsList / the dataset's variable ordering.
pdata = data[4]
fig = plt.figure()
plt.plot(pdata, 'o-', label=r" Parity")
plt.legend(bbox_to_anchor=(0.75, 0.58), loc=2)
plt.show()
|
10,472 | b51705afbfedb1f8b33fa0457c98262670a151a2 | pattern = {'N':(1,5,2,3,0,4),'S':(4,0,2,3,5,1),'E':(3,1,0,5,4,2),'W':(2,1,5,0,4,3)}
dice_num = input().split()
for x in input():
dice_num = [dice_num[i] for i in pattern[x]]
print(dice_num[0])
|
10,473 | 446ce06b97ac1183d73546eb376cb79a71ac9176 | from flask import Flask, abort, make_response, jsonify, render_template
from util.utils import Member,getSchedules
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
app.config['JSON_SORT_KEYS'] = False
@app.route('/', methods=['GET'])
def index():
    """Landing page listing a few example API endpoints."""
    example_endpoints = ['/members', '/member/20', '/schedules']
    return render_template('index.html', api_list=example_endpoints)
@app.route('/member/<int:memberId>', methods=['GET'])
def get_member(memberId):
    """Return one member's full record as JSON, or a 404 JSON error."""
    try:
        member = Member.get(Member.memberId == memberId)
    except Member.DoesNotExist:
        return make_response(jsonify({'error': 'Not found'}), 404)
    record = {
        "memberId": member.memberId,
        "name": member.name,
        "furigana": member.furigana,
        "en": member.en,
        "birthday": member.birthday.isoformat(),
        "birthplace": member.birthplace,
        "constellation": member.constellation,
        "height": member.height,
        "bloodtype": member.bloodtype,
        "thumb": member.thumb_url,
    }
    items = [record]
    payload = {
        "ResultInfo": {
            "result": True,
            "count": len(items),
        },
        "items": items,
    }
    return make_response(jsonify(payload))
@app.route('/members', methods=['GET'])
def get_all_members():
    """Return a summary (id, name, furigana, birthplace) of all members.

    NOTE(review): ``Member.select()`` builds a lazy query — confirm that
    it can actually raise ``Member.DoesNotExist`` here; the except branch
    may be unreachable.
    """
    try:
        members = Member.select()
    except Member.DoesNotExist:
        abort(404)
    items = []
    for member in members:
        items.append({
            "memberId": member.memberId,
            "name": member.name,
            "furigana": member.furigana,
            "birthplace": member.birthplace,
        })
    result = {
        "ResultInfo": {
            "result": True,
            "count": len(items),
        },
        "items": items
    }
    return make_response(jsonify(result))
@app.route('/schedules', methods=['GET'])
def get_all_schedules():
    """Return every known schedule entry as JSON."""
    items = getSchedules()
    payload = {
        "ResultInfo": {
            "result": True,
            "count": len(items),
        },
        "items": items,
    }
    return make_response(jsonify(payload))
@app.errorhandler(404)
def not_found(error):
    """Serve a JSON body for any 404 instead of Flask's default HTML page."""
    body = jsonify({'error': 'Not found'})
    return make_response(body, 404)
if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run()
|
10,474 | a29022140fd7603594c802b27b442c676ab167b7 | __author__ = 'sunary'
from time import sleep
from multiprocessing import Process
class StreamEachId():
    '''
    Spawn one listener process per user id — each stream listens to a
    single id.  (Python 2 module: uses print statements.)
    '''
    def __init__(self):
        # Each entry: {'process': multiprocessing.Process, 'user_id': id}
        self.list_process = []
    def run(self):
        # Demo driver: start a listener for ids 0..9, then after one
        # second terminate every process whose id is divisible by 3.
        list_ids = [i for i in range(10)]
        for id in list_ids:
            self.list_process.append({'process': Process(target=self.listener, args=(id,)), 'user_id': id})
            self.list_process[-1]['process'].start()
        sleep(1)
        for p in self.list_process:
            if p['user_id'] % 3 == 0:
                p['process'].terminate()
    def add_user_id(self, user_id):
        # Register and start one additional listener for user_id.
        self.list_process.append({'process': Process(target=self.listener, args=(user_id,)), 'user_id': user_id})
        self.list_process[-1]['process'].start()
    def listener(self, user_id):
        # Worker body: simulates a 30-second stream for the given id.
        print 'start process: %s' % user_id
        sleep(30)
        print 'end process: %s' % user_id
    def terminate(self, user_id):
        # Terminate and join every process registered for user_id.
        # NOTE(review): terminated entries are not removed from
        # list_process — stale entries remain; confirm that is intended.
        for p in self.list_process:
            if p['user_id'] == user_id:
                p['process'].terminate()
                p['process'].join()
if __name__ == '__main__':
    # Demo: spawn the initial listeners, add one more id, then kill id 4.
    stream_each_id = StreamEachId()
    stream_each_id.run()
    stream_each_id.add_user_id(25)
    stream_each_id.terminate(4)
10,475 | 02026c1c6f3de9d2b0ce9dcd0c438dfaf8ef8a56 | #Read in the data
import pandas as pd
import numpy as np
dete_survey = pd.read_csv('D:\\dataquest\\projects\\dete_survey.csv')
#Quick exploration of the data
pd.options.display.max_columns = 150 # to avoid truncated output
dete_survey.head()
#Read in the data
tafe_survey = pd.read_csv("D:\\dataquest\\projects\\tafe_survey.csv")
#Quick exploration of the data
tafe_survey.head()
# Read in the data again, but this time read `Not Stated` values as `NaN`
dete_survey = pd.read_csv('D:\\dataquest\\projects\\dete_survey.csv', na_values='Not Stated')
# Quick exploration of the data
dete_survey.head()
# Remove columns we don't need for our analysis
dete_survey_updated = dete_survey.drop(dete_survey.columns[28:49], axis=1)
tafe_survey_updated = tafe_survey.drop(tafe_survey.columns[17:66], axis=1)
#Check that the columns were dropped
print(dete_survey_updated.columns)
print(tafe_survey_updated.columns)
# Clean the column names
dete_survey_updated.columns = dete_survey_updated.columns.str.lower().str.strip().str.replace(' ', '_')
# Check that the column names were updated correctly
dete_survey_updated.columns
# Update column names to match the names in dete_survey_updated
mapping = {'Record ID': 'id', 'CESSATION YEAR': 'cease_date', 'Reason for ceasing employment': 'separationtype', 'Gender. What is your Gender?': 'gender', 'CurrentAge. Current Age': 'age',
'Employment Type. Employment Type': 'employment_status',
'Classification. Classification': 'position',
'LengthofServiceOverall. Overall Length of Service at Institute (in years)': 'institute_service',
'LengthofServiceCurrent. Length of Service at current workplace (in years)': 'role_service'}
tafe_survey_updated = tafe_survey_updated.rename(mapping, axis = 1)
# Check that the specified column names were updated correctly
tafe_survey_updated.columns
tafe_survey_updated['separationtype'].value_counts()
# Check the unique values for the separationtype column
dete_survey_updated['separationtype'].value_counts()
# Update all separation types containing the word "resignation" to 'Resignation'
dete_survey_updated['separationtype'] = dete_survey_updated['separationtype'].str.split('-').str[0]
# Check the values in the separationtype column were updated correctly
dete_survey_updated['separationtype'].value_counts()
# Select only the resignation separation types from each dataframe
dete_resignations = dete_survey_updated[dete_survey_updated['separationtype'] == 'Resignation'].copy()
tafe_resignations = tafe_survey_updated[tafe_survey_updated['separationtype'] == 'Resignation'].copy()
# Check the unique values
dete_resignations['cease_date'].value_counts()
# Extract the years and convert them to a float type
dete_resignations['cease_date'] = dete_resignations['cease_date'].str.split('/').str[-1]
dete_resignations['cease_date'] = dete_resignations['cease_date'].astype("float")
# Check the values again and look for outliers
dete_resignations['cease_date'].value_counts()
# Check the unique values and look for outliers
dete_resignations['dete_start_date'].value_counts().sort_values()
# Check the unique values
tafe_resignations['cease_date'].value_counts().sort_values()
# Calculate the length of time an employee spent in their respective workplace and create a new column
dete_resignations['institute_service'] = dete_resignations['cease_date'] - dete_resignations['dete_start_date']
# Quick check of the result
dete_resignations['institute_service'].head()
# Check the unique values
tafe_resignations['Contributing Factors. Dissatisfaction'].value_counts()
# Check the unique values
tafe_resignations['Contributing Factors. Job Dissatisfaction'].value_counts()
# Update the values in the contributing factors columns to be either True, False, or NaN
def update_vals(x):
    """Map a 'Contributing Factors' cell to True, False or NaN.

    '-' means the factor was not selected (False); missing stays NaN;
    any other value means the factor was selected (True).
    """
    if pd.isnull(x):
        return np.nan
    return x != '-'
# A respondent is "dissatisfied" if ANY contributing-factor column is True.
# NOTE(review): the positional axis in `.any(1, skipna=False)` is
# deprecated in newer pandas (use `axis=1`) — confirm the pinned version.
tafe_resignations['dissatisfied'] = tafe_resignations[['Contributing Factors. Dissatisfaction', 'Contributing Factors. Job Dissatisfaction']].applymap(update_vals).any(1, skipna=False)
tafe_resignations_up = tafe_resignations.copy()
# Check the unique values after the updates
tafe_resignations_up['dissatisfied'].value_counts(dropna=False)
# Update the values in columns related to dissatisfaction to be either True, False, or NaN
dete_resignations['dissatisfied'] = dete_resignations[['job_dissatisfaction',
       'dissatisfaction_with_the_department', 'physical_work_environment',
       'lack_of_recognition', 'lack_of_job_security', 'work_location',
       'employment_conditions', 'work_life_balance',
       'workload']].any(1, skipna=False)
dete_resignations_up = dete_resignations.copy()
dete_resignations_up['dissatisfied'].value_counts(dropna=False)
# Add an institute column so the origin survives the concat below
dete_resignations_up['institute'] = 'DETE'
tafe_resignations_up['institute'] = 'TAFE'
# Combine the dataframes
combined = pd.concat([dete_resignations_up, tafe_resignations_up], ignore_index=True,sort=False)
# Verify the number of non null values in each column
combined.notnull().sum().sort_values()
# Drop columns with less than 500 non null values
combined_updated = combined.dropna(thresh = 500, axis =1).copy()
# Check the unique values
combined_updated['institute_service'].value_counts(dropna=False)
# Extract the years of service and convert the type to float
# (takes the FIRST number in strings like "5-6" or "Less than 1 year")
combined_updated['institute_service_up'] = combined_updated['institute_service'].astype('str').str.extract(r'(\d+)')
combined_updated['institute_service_up'] = combined_updated['institute_service_up'].astype('float')
# Check the years extracted are correct
combined_updated['institute_service_up'].value_counts()
def transform_service(val):
if val >= 11:
return "Veteran"
elif 7 <= val < 11:
return "Established"
elif 3 <= val < 7:
return "Experienced"
elif pd.isnull(val):
return np.nan
else:
return "New"
# Categorize every respondent by career stage
combined_updated['service_cat'] = combined_updated['institute_service_up'].apply(transform_service)
# Quick check of the update
combined_updated['service_cat'].value_counts()
# Verify the unique values
combined_updated['dissatisfied'].value_counts(dropna=False)
# Replace missing values with the most frequent value, False
combined_updated['dissatisfied'] = combined_updated['dissatisfied'].fillna(False)
# Calculate the percentage of employees who resigned due to dissatisfaction in each category
# (pivot_table's default aggfunc is the mean, so averaging booleans per
# category yields the proportion dissatisfied)
dis_pct = combined_updated.pivot_table(index='service_cat', values='dissatisfied')
# Plot the results (requires an interactive matplotlib backend to show)
dis_pct.plot(kind='bar', rot=30)
10,476 | de39e4dd694431a279c829b01de68084773403c1 | from invoke import task, run
@task(aliases=["sh"])
def shell(ctx):
    """Open Django's enhanced interactive shell (shell_plus)."""
    cmd = "./manage.py shell_plus"
    run(cmd, pty=True)
@task(aliases=["mg"])
def migrate(ctx):
    """Apply any pending database migrations."""
    cmd = "./manage.py migrate"
    run(cmd, pty=True)
@task(aliases=["mm"])
def make_migrations(ctx, app_name):
    """Generate new migration files for the given Django app."""
    cmd = "./manage.py makemigrations {}".format(app_name)
    run(cmd, pty=True)
@task(pre=[migrate], aliases=["rs"])
def runserver(ctx):
    """
    Runs the local development server; the ``migrate`` task runs first
    via the ``pre`` hook.
    :return: None
    """
    run("./manage.py runserver", pty=True)
@task(aliases=["cs"])
def collect_static(ctx):
    """
    Collects static files into STATIC_ROOT without prompting.
    :return: None
    """
    # NOTE(review): unlike the other tasks this omits pty=True — confirm
    # whether that is intentional.
    run("./manage.py collectstatic --noinput")
|
10,477 | 6174ea75dd183ccef94441f055397f0e3e9dca8d | import sys
import json
import numpy as np
# import tensorflow as tf
from scipy import sparse
from sklearn.metrics import f1_score
from sklearn import svm
import random
def print_request(r):
    """Write a human-readable rendering of a request to stdout.

    The subject line is printed first, then the body; each token
    contributes its surface form (``shape``) followed by its trailing
    whitespace (``after``).
    """
    def render(section):
        # Concatenate shape+after for every token belonging to `section`.
        return ''.join(tok['shape'] + tok['after']
                       for tok in r['tokens'] if tok['where'] == section)
    print('Subject: ' + render('subject'))
    print(render('body'))
if __name__ == "__main__":
if len(sys.argv) != 3:
raise ValueError("predict_category <train_file> <test_file>")
train_file, test_file = sys.argv[1:]
# ToDo: Implement logic to find a model based on data in train_file
train_file, test_file = sys.argv[1:]
print("Loading training File ",train_file)
data =json.load(open(train_file))
#get Lables
labels_count={}
for i in range(len(data)):
for label in data[i]['labels'].keys():
if label in labels_count:
labels_count[label]+=1
else:
labels_count[label]=1
lables_ids={}
for lab,itr in zip(labels_count.keys(),range(len(labels_count.keys()))):
lables_ids[lab]=itr
#NER tokens -> required for feature mapping
ner={}
for i in range(len(data)):
for token in data[i]['tokens']:
ner[token['rner']]=1
nerFeaturePosition={}
for value,pos in zip(ner.keys(),range(len(ner.keys()))):
nerFeaturePosition[value]=pos
#features
#where_1 = is_body
#where_2 = is_subject
#shape_1 = begins_with_capital
#shape_2 = contains_colon
#shape_3 = contains_hyphen
#shape_4 = contains_d
#start = is_begining (is 1 if its positon is less than 10)
#ner = 2 placeholder for every nerType (so 2*24) + 1 for other
#feature vector for every token = 2+4+1+49 = 56
maxTokenLength = 2600 #required for padding If text contains more than 2600 tokens, it will be ignored. if less thant it ll be padded with 0
tokenFeaturesCount=56
    def featureMapping(tokens):
        """Map a token list to the sparse feature column indices of one row.

        Each token occupies a 56-wide slot (see the feature legend above):
        cols 0-1 location (body/subject), 2-5 shape flags, 6 near-start,
        7+ one-hot NER tag.  Tokens beyond maxTokenLength are ignored.
        Returns the list of column indices that are set to 1.
        """
        featureVector = np.zeros([maxTokenLength,tokenFeaturesCount])
        ##Feature Vector is used for CNN -> which I wanted to experiment
        columns=[]
        for tok,itr in zip(tokens,range(len(tokens))):
            if itr>= maxTokenLength:
                break
            if tok['where']=='body':
                featureVector[itr][0]=1
                columns.append(itr*tokenFeaturesCount+0)
            if tok['where']=='subject':
                featureVector[itr][1]=1
                columns.append(itr*tokenFeaturesCount+1)
            if tok['shape'].startswith('X'):
                featureVector[itr][2]=1
                columns.append(itr*tokenFeaturesCount+2)
            if ':' in tok['shape']:
                featureVector[itr][3]=1
                columns.append(itr*tokenFeaturesCount+3)
            if '-' in tok['shape']:
                featureVector[itr][4]=1
                columns.append(itr*tokenFeaturesCount+4)
            if 'd' in tok['shape']:
                featureVector[itr][5]=1
                columns.append(itr*tokenFeaturesCount+5)
            if tok['start'] < 10:
                featureVector[itr][6]=1
                columns.append(itr*tokenFeaturesCount+6)
            # One-hot NER feature: offset 7 plus the tag's position index.
            nerFeature = 7+int(nerFeaturePosition[tok['rner']])
            columns.append(itr*tokenFeaturesCount+nerFeature)
            featureVector[itr][nerFeature]=1
        return columns
random.shuffle(data)
splitPoint=int(len(data)*0.75)
trainingData = data[:splitPoint]
validationData=data[splitPoint:]
    def dataToFeature(data,testSet=False):
        """Convert a list of samples into a sparse feature matrix + targets.

        data    : list of samples, each carrying 'tokens' and (unless
                  testSet) a 'labels' dict.
        testSet : when True, skip building the per-label target vectors.

        Returns (scipy.sparse.csr_matrix of row-per-sample features,
                 list of 0/1 numpy target vectors, one per known label).
        """
        colm=[]
        row = []
        d =[]
        tar=[]
        batch=len(data)
        print("Genearating feature vector ",batch)
        # One 0/1 target vector per known label.
        for i in range(len(lables_ids)):
            tar.append(np.zeros(batch))
        for i in range(batch):
            if i %1500==0:
                print (i," loaded")
            fea=featureMapping(data[i]['tokens'])
            for ele in fea:
                colm.append(ele)
                row.append(i)
                d.append(1)
            if testSet == True:
                continue
            for lab in data[i]['labels']:
                tar[lables_ids[lab]][i]=1
        #dummy variable to make feature length of test/validation/train same size
            # NOTE(review): because of the `continue` above, this dummy
            # max-index column is only appended for non-test data — a
            # test-set matrix may come out with fewer columns than the
            # training matrix; confirm against the predict path.
            row.append(i)
            d.append(0)
            colm.append(maxTokenLength*tokenFeaturesCount)
        return sparse.csr_matrix((d,(row,colm))),tar
feature_valid,label_valid=dataToFeature(validationData)
print("Validation data")
feature_train,label_train=dataToFeature(trainingData)
models ={}
yPred={}
for label in lables_ids:
labelId = lables_ids[label]
print("Buidling model for ",label,labelId)
##Cross Validation and Hyper parameter tuning
# parameter_candidates = [
# {'C': [1, 4, 16, 32,64,1024], 'kernel': ['linear']}
# ]
# clf = GridSearchCV(estimator=svm.SVC(), param_grid=parameter_candidates, n_jobs=3,scoring=f1_scorer)
# clf.fit(feature_train,label_train[labelId])
models[label] = svm.SVC(kernel='linear', C = 1.0,probability=True,verbose=True)
models[label].fit(feature_train,label_train[labelId])
print("Before threshold tuning ")
yPred={}
yPred_prob={}
for label in lables_ids:
labelId = lables_ids[label]
yPred[label]=models[label].predict(feature_valid)
yPred_prob[label]=models[label].predict_proba(feature_valid)
print(label," F-Score " ,f1_score(label_valid[labelId],yPred[label]))
print("After threshodl tuning")
bestThreshold={}
for label in lables_ids:
labelId=lables_ids[label]
bestThreshold[label]=0.5
bestFScore=0
thresSorted =sorted(set([round(x,3) for x in sorted(yPred_prob[label][:,1])]))
for thres in thresSorted:
tempFscore=f1_score(label_valid[labelId],yPred_prob[label][:,1]>thres)
if tempFscore>bestFScore:
bestFScore=tempFscore
bestThreshold[label]= thres
print(label," Best F-score ",bestFScore," Optimal threshold thres",bestThreshold[label])
# ToDo: Make predictions on data in test_file
print("Loading Test data")
data_test = json.load(open(test_file))
# print("Test Data Feature gen")
feature_test,temp=dataToFeature(data_test,testSet=True)
print("Predicting Labels")
yPred_test={}
yPred_prob_test={}
for label in lables_ids:
labelId = lables_ids[label]
yPred_test[label]=models[label].predict(feature_test)
yPred_prob_test[label]=models[label].predict_proba(feature_test)
# ToDo: Generate output
for i in range(len(data_test)):
data_test[i]['labels']={}
noneLabel=True
for label in lables_ids:
labelId = lables_ids[label]
# print(label,yPred_prob_test[label][i][1],bestThreshold[label])
if yPred_prob_test[label][i][1]>bestThreshold[label]:
noneLabel=False
data_test[i]['labels'][label]=yPred_prob_test[label][i][1]
if noneLabel :
data_test[i]['labels']['others']=1
# break
print("comtravo_challenge_test.json has been created")
json.dump(data_test,open("comtravo_challenge_test.json","w"))
|
10,478 | b90d6c878b312820f3b4da10d47ec93a7fd27057 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 18 20:03:29 2014
3. Faça um programa que crie dois vetores com 10 elementos aleatórios entre 1 e 100. Gere um
terceiro vetor de 20 elementos, cujos valores deverão ser compostos pelos elementos
intercalados dos dois outros vetores. Imprima os três vetores.
@author: portela.marco@gmail.com
"""
import random

# Build two vectors of 10 random integers in [1, 100].  The draw order is
# preserved (vetor1[i] is drawn before vetor2[i]) so RNG consumption is
# identical to the original script.
vetor1 = []
vetor2 = []
for _ in range(10):
    vetor1.append(random.randint(1, 100))
    vetor2.append(random.randint(1, 100))

# Interleave the two vectors into a third one of 20 elements.
vetor3 = []
for a, b in zip(vetor1, vetor2):
    vetor3.extend((a, b))

print('Vetor1')
print(vetor1)
print('Vetor2')
print(vetor2)
print('Vetor3')
print(vetor3)
10,479 | a94dc128ab45088bd205ce6cd334eb1a05898a18 | import cv2
import numpy as np
from tensorflow.keras.utils import Sequence
import pydicom
from rle2mask import rle2mask
class DataGenerator(Sequence):
    """Keras `Sequence` yielding (image, mask) batches from DICOM files.

    `all_filenames` is indexed via ``.values``: the last column holds the
    DICOM path and column 1 holds the RLE-encoded mask (or ' -1' for "no
    mask") — presumably a pandas DataFrame; verify against the caller.
    """
    def __init__(self,
                 all_filenames,
                 batch_size,
                 input_dim,
                 n_channels,
                 transform,
                 shuffle=True):
        self.all_filenames = all_filenames
        self.batch_size = batch_size
        self.input_dim = input_dim
        self.n_channels = n_channels
        self.transform = transform
        self.shuffle = shuffle
        self.on_epoch_end()
    def __len__(self):
        '''
        return:
            Number of batches per epoch (incomplete tail batch dropped).
        '''
        return int(np.floor(len(self.all_filenames) / self.batch_size))
    def __getitem__(self, index):
        '''
        params:
            index: index of the batch
        return:
            X, y for batch number `index`
        '''
        # Take the slice of shuffled sample indexes for this batch.
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Materialise the sample indices of this batch.
        indexs = [k for k in indexes]
        # Build the batch data.
        X, Y = self.__data_generation(indexs)
        return X, Y
    def on_epoch_end(self):
        '''
        Re-shuffle the sample order when an epoch starts or ends.
        '''
        self.indexes = np.arange(len(self.all_filenames))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)
    def __data_generation(self, indexs):
        #print("ok")
        '''
        params:
            indexs: list of sample indices belonging to one batch
        return:
            The (X, Y) arrays for one batch.
        '''
        X = np.empty((self.batch_size, *self.input_dim, self.n_channels))
        Y = np.empty((self.batch_size, *self.input_dim, self.n_channels))
        # Populate the batch sample by sample.
        for i, index in enumerate(indexs):
            name = self.all_filenames.values[index][-1]
            img = pydicom.read_file(name).pixel_array
            pixel = self.all_filenames.values[index][1]
            if pixel != ' -1':
                label = rle2mask(pixel, 1024, 1024)
                label = np.rot90(label, 3) #rotating three times 90 to the right place
                label = np.flip(label, axis=1)
            else:
                # NOTE(review): the empty mask is created at 512x512 while
                # the RLE branch decodes at 1024x1024 — confirm `transform`
                # resizes both to `input_dim`, otherwise shapes disagree.
                label = np.zeros((512,512,1))
            if self.transform is not None:
                img, label = self.transform(img, label)
            X[i,] = img
            Y[i] = label
            del img, label, name, pixel
        return X, Y
10,480 | 98d2a9b7f4b143fc875e7d14267b02aee7930c12 | from .declaration_parser import DeclarationParser
class ModelParser:
    """Walks a model's declarations and collects the parse results."""

    def __init__(self, model):
        # The model whose declarations will be parsed.
        self.model = model

    def parse_model(self):
        """Parse every declaration of the model and return the results.

        Each declaration is dispatched to the DeclarationParser together
        with its concrete class name.
        """
        parser = DeclarationParser(self.model)
        for declaration in self.model.declarations:
            parser.parse_declaration(declaration, type(declaration).__name__)
        return parser.get_results()
10,481 | 80026f6fa46b3c73aa80ae34745b90719af95e41 | import utilities as ut
import numpy as np
from skimage import data, io, color, transform, exposure
from pprint import pprint
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
def listToRows(x) :
    """Return *x* reshaped as a single column vector of shape (n, 1).

    The previous implementation reshaped to ``(n, -n + 1)``, relying on
    any negative dimension behaving like ``-1``; that broke for a
    one-element input, where the computed shape became ``(1, 0)`` and
    raised.  Reshaping with ``(-1, 1)`` is equivalent for n >= 2 and also
    handles n == 1.
    """
    return np.reshape(np.asarray(x), (-1, 1))
def addCols(X, n) :
    """Append n polynomial columns to design matrix X.

    Each appended column is (X[:,1])**p reshaped to a column vector, with
    p running 2, 3, ..., n+1 — i.e. successive powers of the linear term.
    """
    p = 2
    while n > 0 :
        X = np.c_[X, listToRows((X[:,1])**p)]
        n = n - 1
        p = p + 1
    return X
def leastSquares(X, y) :
    """Solve the ordinary-least-squares normal equations.

    Returns the coefficient column A minimising ||X.A - y||^2, i.e.
    A = (X^T X)^-1 X^T y.  Uses ``np.linalg.solve`` on the normal
    equations instead of forming the explicit inverse, which is cheaper
    and numerically better conditioned.
    """
    Y = listToRows(y)
    Xt = np.transpose(X)
    return np.linalg.solve(np.dot(Xt, X), np.dot(Xt, Y))  # (Xt.X)^-1.Xt.Y
def genXMatrix(x, n, uf = 0) :
    """Build a design matrix from x.

    n  : highest polynomial degree — produces [1, x, x^2, ..., x^n].
    uf : when 1, return the sinusoidal design [1, sin(x)] instead.
    """
    X = listToRows(x)
    r = X.shape
    X = np.c_[np.ones((r[0],1)), X]
    if uf == 1 : return np.c_[X[:,0], np.sin(X[:,1])]
    return addCols(X, n - 1)
def findError(X, y, A) : #Problem, e not being calculated correctly, X and Y are fine
    """Return the sum of squared residuals of the fit y ~ X.A.

    Reconstructs the fitted values column by column
    (newY = sum_i A[i] * X[:,i]) and sums the squared differences
    against y.
    """
    Y = listToRows(y)
    r = X.shape
    newY = 0
    i = 0
    while i < r[1] :
        newY = newY + A[i]*X[:,i]
        i = i + 1
    newY = listToRows(newY)
    return np.sum((Y - newY)**2) #Performs (Y[i] - newY[i])^2 for all i 0 -> 19
def plotGraph(x, y, r, A, segs) :
    """Scatter the raw points and overlay the fitted line segments.

    x, y : data arrays; r : total number of points (segments of 20);
    A    : per-segment coefficient vectors;
    segs : per-segment design matrices.
    """
    plt.scatter(x, y)
    i = 0
    j = 0
    lines = np.array([])
    while i < r :
        line = 0
        k = 0
        # Evaluate segment j: sum of coefficient times design column.
        while k < segs[j].shape[1]:
            line = line + A[j][k]*segs[j][:,k]
            k = k + 1
        lines = np.append(lines, line)
        i = i + 20
        j = j + 1
    lines = lines.flatten()
    plt.plot(x, lines, c ='r')
    plt.show()
# def plotGraph(x, y, r, A, segs) :
# plt.scatter(x, y)
# i = 0
# j = 0
# while i < r :
# line = 0
# k = 0
# while k < segs[j].shape[1]:
# line = line + A[j][k]*segs[j][:,k]
# k = k + 1
# line = listToRows(line)
# plt.plot(x[i:i+20], line, c ='r')
# i = i + 20
# j = j + 1
# plt.show()
args = sys.argv[1:]
file = "train/" + args[0]
x, y = ut.load_points_from_file(file)
X = genXMatrix(x, 3) # linear, cubed and sin
r, = x.shape
i = 0
segs = []
As = []
sse = 0
while i < r : #Calculate A matrix for each line segment
X1 = genXMatrix(x[i:i+20], 1) #Linear
A1 = leastSquares(X1, y[i:i+20])
err1 = findError(X1, y[i:i+20], A1)
X2 = genXMatrix(x[i:i+20], 3) #Cubed
A2 = leastSquares(X2, y[i:i+20])
err2 = findError(X2, y[i:i+20], A2)
X3 = genXMatrix(x[i:i+20], 1, 1) #Sine
A3 = leastSquares(X3, y[i:i+20])
err3 = findError(X3, y[i:i+20], A3)
if err2 < 0.9 * err1 : #If cubed function is significantly better, use that
if err3 < err2 : #If sine function is better than cubed use that
As.append(A3)
sse = sse + err3
segs.append(X3)
else :
As.append(A2)
sse = sse + err2
segs.append(X2)
else :
As.append(A1)
sse = sse + err1
segs.append(X1)
i = i + 20
print(sse)
if len(args) > 1 :
if args[1] == '--plot' : plotGraph(x, y, r, As, segs)
if args[1] == '--view' : ut.view_data_segments(x, y) |
10,482 | 1ab8a7c2bd7a5eab94986675ac8d6bb429618150 | def recite(start_verse, end_verse):
days = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth', 'eleventh', 'twelfth']
presents = ['a Partridge',
'two Turtle Doves',
'three French Hens',
'four Calling Birds',
'five Gold Rings',
'six Geese-a-Laying',
'seven Swans-a-Swimming',
'eight Maids-a-Milking',
'nine Ladies Dancing',
'ten Lords-a-Leaping',
'eleven Pipers Piping',
'twelve Drummers Drumming']
result = []
for i in range(12):
string = ""
presents_string = ""
if i > 0:
counter = i
while counter > 0:
presents_string += presents[counter] + ", "
counter-=1
presents_string += "and " + presents[0]
else:
presents_string = presents[0]
string = "On the " + days[i] + " day of Christmas my true love gave to me: " + presents_string + " in a Pear Tree."
result.append(string)
print(result[0:11])
return result[start_verse-1:end_verse]
|
10,483 | 73fb3fc8f7bee256475e7e28db9e98d71565f9b2 | import json
import logging
import logging.config
import logging.handlers
from .agent import Agent
from .log import Log
from .profile import Profile, Snmp
# Load the deployment's logging configuration and apply it process-wide
# before any module-level loggers are created.
with open("/monitor/config/python_logging_configuration.json", 'r') as configuration_file:
    config_dict = json.load(configuration_file)
logging.config.dictConfig(config_dict)
# Create the Logger
logger = logging.getLogger(__name__)
|
10,484 | 83a1153e25ecedf23d081afb3508764cf4101432 | # configuring PYTHONPATH (By default, this will add the src and lib directory for each of your dependencies to your PYTHONPATH)
import roslib; roslib.load_manifest('sap_pkg')
# import client library
import rospy
# import messages
import auction_msgs.msg
# import services
import auction_srvs.srv
import auction_common
# import auxiliar libraries
import random
import math
# "global" variables (to be referred as global under def fun(something))
winner_id = 'none'
winner_cost = 0
#####################################################################################
## Buyer Service Callback
#####################################################################################
def handle_buyer_server_callback(auction_req):
    """Buyer-side service callback for a single-assignment (SAP) auction.

    Computes this node's bid for the auctioned task, submits it to the
    auctioneer, then relays the auction to the neighbour nodes.

    Fix: the three ``except rospy.ServiceException, e:`` handlers used the
    Python-2-only comma syntax; rewritten as ``except ... as e:``, which is
    valid on Python 2.6+ and Python 3.

    :param auction_req: service request carrying ``auctioneer_node`` and
        ``auction_data`` (metrics, task_location, command, ...).
    :return: dict with a 'response_info' status string.
    """
    # update number of messages in parameter server
    if rospy.has_param('/num_messages'):
        num_messages = rospy.get_param('/num_messages')
        num_messages += 2
        rospy.set_param('/num_messages', num_messages)
    # Create a bid messsage to put an offer for the item in auction_req!
    bid = auction_msgs.msg.Bid()
    bid.header.frame_id = 'base_link' # to be rechecked
    bid.header.stamp = rospy.Time.now()
    bid.buyer_id = rospy.get_name()
    if auction_req.auction_data.metrics == "distance":
        # The cost for metrics == "distance" is the Euclidean distance
        # between this node's '~position' parameter and the task location
        # carried in the request.
        # NOTE(review): eval() on a ROS parameter — the parameter server is
        # trusted here, but ast.literal_eval would be safer.
        node_position = eval(rospy.get_param('~position'))
        x = float(node_position[0])-auction_req.auction_data.task_location.x
        y = float(node_position[1])-auction_req.auction_data.task_location.y
        z = float(node_position[2])-auction_req.auction_data.task_location.z
        bid.cost_distance = float(math.sqrt(x*x+y*y+z*z))
    else:
        rospy.loginfo("Metrics unkown")
        bid.cost_distance = 999999
    # put bid to auctioneer
    service_path = auction_req.auctioneer_node+'/auctioneer_bid_reception_server'
    rospy.wait_for_service(service_path)
    auctioneer_bid_reception_service = rospy.ServiceProxy(service_path, auction_srvs.srv.AuctioneerBidReceptionService)
    try:
        sending_node = rospy.get_name()
        auctioneer_bid_reception_server_resp = auctioneer_bid_reception_service(sending_node,bid)
    except rospy.ServiceException as e:
        rospy.logwarn("Service did not process request: %s",e)
    # Relay information to neighbour nodes!
    neighbour_nodes_relay_list = auction_common.create_neighbour_nodes_list(auction_req)
    if neighbour_nodes_relay_list:
        # Prepare information
        if auction_req.auction_data.command == 'join_auction':
            role = 'be_buyer'
        else:
            role = 'none'
        auction_type = 'sap'
        sending_node = rospy.get_name()
        auctioneer_node = auction_req.auctioneer_node
        # updated nodes_collected
        if rospy.has_param('/nodes_collected'):
            nodes_collected = rospy.get_param('/nodes_collected')+','+rospy.get_name()
            rospy.set_param('/nodes_collected',nodes_collected)
        else:
            nodes_collected = rospy.get_param('~neighbour_nodes_list')
        auction_data = auction_req.auction_data
        for node in neighbour_nodes_relay_list:
            # prepare neighbours to be buyers
            service_path = node+'/auction_config_server'
            rospy.wait_for_service(service_path)
            neighbour_node_auction_config_server = rospy.ServiceProxy(service_path,
                                                                      auction_srvs.srv.AuctionConfigService)
            try:
                neighbour_node_auction_config_server_resp = neighbour_node_auction_config_server(role,auction_type,sending_node)
            except rospy.ServiceException as e:
                rospy.logwarn("[%s] Service call failed: %s",rospy.get_name(),e)
            # send the auction information to the buyer node
            service_path = node+'/buyer_server'
            rospy.wait_for_service(service_path)
            buyer_service = rospy.ServiceProxy(service_path, auction_srvs.srv.BuyerService)
            try:
                buyer_server_resp = buyer_service(auctioneer_node,sending_node,nodes_collected,auction_data)
            except rospy.ServiceException as e:
                rospy.logwarn("[%s] Service call failed: %s",rospy.get_name(),e)
    # return best bid
    return {'response_info': 'valid'+rospy.get_name()}
|
10,485 | 7dcf4c203debfd0eee120597d760a799daf074c6 | #%% load dataset
import numpy as np
import DL
# change the directory
Label_Train, Features_Train, Label_Test, Features_Test = DL.ReadFile("H:\\4th comp\\NN\\cifar-10-batches-py")
# features dimensions (m, c, h, w)
#%% training
batch_size = 128
num_epochs = 20
num_classes = 10
hidden_units = 100
input_dimensions = (32, 32, 3)
# change each label from scaler value to vector( 2 ---> [0, 0, 1, 0, 0, ...] ) (hot one)
Label_Train_hotone = DL.hot_one(Label_Train, num_classes)
model = DL.model()
model.input_dims(input_dimensions)
model.add('flatten')
model.add('Relu', hidden_units)
model.add('Relu', hidden_units)
model.add('Linear', num_classes)
optim = DL.optimizer('gd',0.001)
loss_fn = DL.loss_Function('SoftmaxCrossEntropy')
loss_fn.setLambda(0)
model.fit(Features_Train, Label_Train_hotone,
batch_size, num_epochs, optim, loss_fn)
#%% testing
# test on the same trained data set
predicted_labels = np.argmax(model.predict(Features_Train), axis=0)
accuracy = DL.accuracy(predicted_labels, Label_Train)
print("Accuracy of training dataset = {:.2f}%".format(accuracy*100))
# test on the test data set
predicted_labels = np.argmax(model.predict(Features_Test), axis=0)
accuracy = DL.accuracy(predicted_labels, Label_Test)
print("Model Accuracy = {:.2f}%".format(accuracy*100))
#%% store and load model
# DL.store(model, "FC CIFAR model") # store
# model = DL.load("FC CIFAR model") # load
|
10,486 | 2d080d53d3f88bcb1a4cfe8040fe015e950b8b54 | import socket, json, sqlite3, threading, time
from datetime import datetime

# Frame layout sent to the watchdog:
#   [0]    0x5A start marker
#   [1]    register count
#   [2..]  one byte per register
#   [-1]   modulo-256 checksum over count + register bytes
RegCount = 12
#bytesToSend = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
bytesToSend = (255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255)
#bytesToSend = (170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170)
#bytesToSend = (255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0)

buf = bytearray(3 + RegCount)
buf[0] = 0x5A
buf[1] = RegCount
# Renamed from `sum`, which shadowed the builtin.
checksum = buf[1]
for i in range(RegCount):
    # (A dead bit-string computation whose result was unconditionally
    # overwritten by bytesToSend[i] has been removed.)
    buf[i + 2] = bytesToSend[i]
    checksum += buf[i + 2]
buf[-1] = checksum % 256

sock = socket.socket()
sock.settimeout(1)
ip = '192.168.2.77'
port = 7850
#ip = 'localhost'
#port = 6116
try:
    sock.connect((ip, port))
    sock.send(buf)
except socket.error as msg:
    print('watchdog is offline')
sock.close()
sock = None
print('Ok')
10,487 | a7cec4b8152740086e40d37c303eccab4e485641 | # Generated by Django 3.1 on 2020-10-29 13:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines `Stream.ss` as a
    # CharField(max_length=5000) with an empty-string default.

    dependencies = [
        ('home', '0046_stream'),
    ]
    operations = [
        migrations.AlterField(
            model_name='stream',
            name='ss',
            field=models.CharField(default='', max_length=5000),
        ),
    ]
|
10,488 | 6c4db2f9f02bd3d1975509bd47efe0067da03735 | from functools import reduce
numTestCases = int(input())
def go():
    """Read one test case from stdin and print the DP result.

    Input: numParties, then one "seats percent" pair per party.
    opt[i][S] is the best probability achievable using parties 0..i
    while accumulating S seats.
    """
    global opt,p,s
    numParties = int(input())
    parties = []
    for i in range(numParties):
        si, pi = [int(x) for x in input().split(' ')]
        pi = float(pi)/100
        parties.append((pi,si))
    p,s = zip(*parties) # arranged in descending order of p
    opt = [[None]*151 for p in range(numParties)]
    # DYNAMIC PROGRAMMING
    for i in range(numParties):
        # NOTE(review): range(50) never fills S = 50, yet opt[-1][50] is
        # printed below and stays None — presumably range(51) (or larger)
        # was intended; confirm against the problem statement.
        for S in range(50):
            ssi = max(S-s[i], 0)  # NOTE(review): computed but never used
            if S <= s[i] and i > 0:
                opt[i][S] = max(opt[i-1][S],opt[i-1][0]*p[i],p[i] )
            elif i > 0: # S > s[i]
                # NOTE(review): writes opt[0][S] — almost certainly meant
                # opt[i][S]; as written, rows 1..n-1 keep None for S > s[i].
                opt[0][S] = max(opt[i-1][S],opt[i-1][S-s[i]]*p[i])
            else: # i = 0 and any S
                opt[i][S] = 0 if s[0] < S else p[0]
    print(opt[-1][50])
for i in range(numTestCases):
go()
|
10,489 | 53c2b4434cd9b843bab99d1a48e36942f5d35a09 | #!/usr/bin/python3
# ------------------------------------------------
# Honas state rotation script. Run regularly to
# automatically archive Honas state information.
# ------------------------------------------------
import datetime
import glob
import os
import argparse
import shutil
import logging
import logging.handlers
HONAS_STATE_DIR = "/var/spool/honas"
HONAS_CONFIG_FILE = "/etc/honas/gather.conf"
HONAS_DATA_ARCHIVE_DIR = "/data"
HONAS_ROTATION_FILE = HONAS_DATA_ARCHIVE_DIR + "/.honas_state_rotation"
HONAS_COMBINE_BIN = "/home/gijs/honas/build/honas-combine"
HONAS_INFO_BIN = "/home/gijs/honas/build/honas-info"
# Parse input arguments.
parser = argparse.ArgumentParser(description='Honas state archiving, rotation and merging tool')
parser.add_argument('-v', action='store_true', dest='verbose', help='Verbose output')
results = parser.parse_args()
# Initialize Syslog.
log = logging.getLogger('honas_state_rotate')
log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter('%(module)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
if results.verbose:
log.debug("Performing state rotation...")
# Calculate the number of state files required for a full day.
state_interval = 0
with open(HONAS_CONFIG_FILE, 'r') as conf_file:
for entry in conf_file.read().split('\n'):
if entry.find("period_length") != -1:
state_interval = int(entry[len("period_length") + 1:len(entry)])
break
required_state_files = int(86400 / state_interval)
completed_states = {}
state_files = {}
if results.verbose:
log.debug("State interval is " + str(state_interval) + ", " + str(required_state_files) + " states required for daily rotation")
# Get all available state files.
for filename in glob.iglob(HONAS_STATE_DIR + "/*.hs"):
if results.verbose:
log.debug("Found state file: " + filename)
state_date = datetime.datetime.strptime(os.path.basename(filename).replace(".hs", ""), "%Y-%m-%dT%H:%M:%S")
state_date_simplified = datetime.datetime.strftime(state_date, "%d-%m-%Y")
# Create state-count mapping for easy completion check.
if state_date_simplified in completed_states:
completed_states[state_date_simplified] += 1
else:
completed_states[state_date_simplified] = 1
# Store the state file name with the mapping for reference.
state_files[filename] = state_date_simplified
states_for_rotation_file = []
# Loop over all states and check which are completed.
for k, v in completed_states.items():
if v >= required_state_files:
# This state is completed, we can archive and merge.
if results.verbose:
log.debug("Daily state for " + k + " is completed!")
# Create new folder for archive in data directory.
new_state_archive = HONAS_DATA_ARCHIVE_DIR + "/" + k
try:
os.mkdir(new_state_archive)
if results.verbose:
log.debug("Created archive directory " + new_state_archive)
except OSError:
log.debug("Failed to create archive directory: directory exists!")
continue
# Move all state files that apply to this directory.
moved = 0
dest_state = ""
for s, t in state_files.items():
if k == t:
basefile = os.path.basename(s)
shutil.move(s, new_state_archive + "/" + basefile)
if not dest_state:
dest_state = basefile
moved += 1
if results.verbose:
log.debug("Moved state file " + s + " to archive directory " + new_state_archive)
# Set this completed state to be written to the rotation file.
states_for_rotation_file.append(k)
# Check if we actually rotated some states.
if len(states_for_rotation_file) > 0:
if results.verbose:
log.debug("Writing out " + str(len(states_for_rotation_file)) + " rotation entries to " + HONAS_ROTATION_FILE)
# Write out rotation file for the Honas state merging script. We write all completed state dates to it.
with open(HONAS_ROTATION_FILE, 'w') as rotation_file:
for rot in states_for_rotation_file:
rotation_file.write(rot + '\n')
|
10,490 | e60fb6412fecc8d0b4978799a6b6fd822d12c533 | #!/usr/bin/env python
__author__ = "Pedro Heleno Isolani"
__copyright__ = "Copyright 2019, QoS-aware WiFi Slicing"
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Pedro Heleno Isolani"
__email__ = "pedro.isolani@uantwerpen.be"
__status__ = "Prototype"
" Python script for making graphs from CSV output"
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def draw_line_graph_with_multiple_y_axis(filename=None, directory=None, title=None,
x_axis=None, y1_axis=None, y2_axis=None,
x_axis_label=None, y1_axis_label=None, y2_axis_label=None,
y1_stdev=None, y2_stdev=None):
# In case of font problems
# matplotlib.font_manager._rebuild()
# or remove ~/.cache/fontconfig/*
if [filename, title, x_axis, y1_axis, y2_axis] is not None:
# Applying Seaborn style
# whitegrid, darkgrid, whitegrid, dark, white, and ticks
sns.set(style="whitegrid", font='Times New Roman', palette='deep', font_scale=2, color_codes=True, rc=None)
# Reading data results
data_dict = read_results(filename=filename,
x_axis=x_axis,
y1_axis=y1_axis, y2_axis=y2_axis,
y1_stdev=y1_stdev, y2_stdev=y2_stdev)
# Plotting just the first values of the experiment (16, 8)
fig, host = plt.subplots(figsize=(20, 5), dpi=144)
# fig.subplots_adjust(right=0.75)
# Adjust x Axis
plt.tight_layout()
par1 = host.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
# Add linestyle='None' in case of removing the lines
p1, = host.plot(data_dict['x_axis']['values'], data_dict['y1_axis']['values'], "b-", marker="D", markevery=1,
markersize=10, mfc='none', markeredgewidth=2, label=data_dict['y1_axis']['label'])
p2, = par1.plot(data_dict['x_axis']['values'], data_dict['y2_axis']['values'], "-g", marker="o", markevery=1,
markersize=10, mfc='none', markeredgewidth=2, label=data_dict['y2_axis']['label'])
axis_padding = 0.3 # percentage
# host.set_xlim(min(data_dict['x_axis']['values']),
# max(data_dict['x_axis']['values']))
host.set_xlim(1,
64)
# plt.xticks(np.arange(min(data_dict['x_axis']['values']), max(data_dict['x_axis']['values'])+1, 1.0))
host.set_ylim(0,
max(data_dict['y1_axis']['values']) +
(max(data_dict['y1_axis']['values']) * axis_padding))
par1.set_ylim(0,
max(data_dict['y2_axis']['values']) +
(max(data_dict['y2_axis']['values']) * axis_padding))
host.set_xlabel(x_axis_label)
host.set_ylabel(y1_axis_label)
par1.set_ylabel(y2_axis_label)
# host.yaxis.label.set_color(p1.get_color())
# par1.yaxis.label.set_color(p2.get_color())
# tkw = dict(size=4, width=1.5)
# host.tick_params(axis='y', colors=p1.get_color(), **tkw)
# par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
lines = [p1, p2]
if [y1_stdev, y2_stdev] is not None:
host.errorbar(data_dict['x_axis']['values'], data_dict['y1_axis']['values'],
yerr=data_dict['y1_stdev']['values'], fmt='none', ecolor='b', capthick=3, capsize=5)
par1.errorbar(data_dict['x_axis']['values'], data_dict['y2_axis']['values'],
yerr=data_dict['y2_stdev']['values'], fmt='none', ecolor='g', capthick=3, capsize=5)
# Title of the graph
# plt.title(title)
plt.legend(lines, [l.get_label() for l in lines], loc='upper center', bbox_to_anchor=(0.5, 1.00),
ncol=2) # shadow=True)
plt.savefig(str(directory) + '/' + str(title) + '.pdf', format="pdf", bbox_inches="tight")
plt.savefig(str(directory) + '/' + str(title) + '.png', format="png", bbox_inches="tight")
plt.show()
def draw_stacked_lines_graph(filename=None, directory=None, title=None, fig_size=[10, 3.4]):
    """Draw a stacked-area chart and save it as <directory>/<title>.{pdf,png}.

    NOTE(review): `filename` is currently unused — the data below is a
    hard-coded placeholder (see TODO).  `fig_size` is a mutable default
    argument; harmless here because it is never mutated, but a tuple would
    be safer.
    """
    sns.set(style="whitegrid", font='Times New Roman', palette='deep', font_scale=1.5, color_codes=True, rc=None)
    fig, host = plt.subplots(figsize=(fig_size[0], fig_size[1]), dpi=144)
    # TODO: Read from results...
    lista = {}
    lista['index'] = [0, 1, 2, 3]
    lista['basic'] = [0, 1, 2, 3]
    lista['saving'] = [0, 1, 2, 3]
    lista['money_mkt'] = [0, 1, 2, 3]
    lista['credit'] = [0, 1, 2, 3]
    # Adjust x Axis
    plt.tight_layout()
    plt.stackplot(lista['index'],
                  [lista['basic'], lista['saving'],
                   lista['money_mkt'], lista['credit']],
                  labels=['basic', 'saving', 'money_mkt', 'credit'],
                  alpha=0.8)
    host.set_xlabel('Time (sec)')
    host.set_ylabel('Queue Delay (ms)')
    plt.legend(loc='upper center', fontsize='small', ncol=4)
    plt.savefig(str(directory) + '/' + str(title) + '.pdf', format="pdf", bbox_inches="tight")
    plt.savefig(str(directory) + '/' + str(title) + '.png', format="png", bbox_inches="tight")
    plt.show()
def draw_line_graph_with_multiple_y_axis_and_files(filenames=None, title=None, directory=None,
x_axises=None, y1_axises=None, y2_axises=None, x_axis_label=None,
y1_axis_label=None, y2_axis_label=None, y1_expected=None, reformulate_xticks=None,
x_pos=None, x_labels=None, x_axis_limit=None, x_axis_start=None, y1_axis_limit=None, y2_axis_limit=None,
y1_stdevs=None, y2_stdevs=None, markers=None, fig_size=[10, 3.4],
log_scale_x=None, log_scale_y=None, annotation_label=None, annotation_xy=None):
# In case of font problems
# matplotlib.font_manager._rebuild()
if [filenames, title, x_axises, y1_axises, y2_axises] is not None:
# Applying Seaborn style
# whitegrid, darkgrid, whitegrid, dark, white, and ticks
sns.set(style="whitegrid", font='Times New Roman', palette='deep', font_scale=1.5, color_codes=True, rc=None)
#plt.rc('text', usetex=True)
#plt.rc('font', family='Times New Roman', weight='normal', size=14)
plt.rcParams['mathtext.fontset'] = 'stix'
# Reading data results
data_dict = []
for i in range(0, len(filenames)):
if all(param is not None for param in [y1_stdevs, y2_stdevs]):
data_dict.append(read_results(filename=filenames[i],
x_axis=x_axises[i],
y1_axis=y1_axises[i],
y2_axis=y2_axises[i],
y1_stdev=y1_stdevs[i],
y2_stdev=y2_stdevs[i]))
else:
if y1_expected is not None:
data_dict.append(read_results(filename=filenames[i],
x_axis=x_axises[i],
y1_axis=y1_axises[i],
y1_expected=y1_expected[i],
y2_axis=y2_axises[i]))
else:
data_dict.append(read_results(filename=filenames[i],
x_axis=x_axises[i],
y1_axis=y1_axises[i],
y2_axis=y2_axises[i]))
# Plotting just the first values of the experiment (16, 8)
fig, host = plt.subplots(figsize=(fig_size[0], fig_size[1]), dpi=144)
# fig.subplots_adjust(right=0.75)
# Adjust x Axis
plt.tight_layout()
par1 = host.twinx()
if log_scale_y is not None:
par1.set_yscale('log')
if log_scale_x is not None:
host.set_yscale('log')
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
lines = []
colors = ['darkblue', 'darkviolet', 'mediumblue', 'deeppink', 'dodgerblue', 'magenta']
for data in data_dict:
# Add linestyle='None' in case of removing the lines
# Add mfc='None' in case of no fill for markers
print(data)
if markers is None:
p1, = host.plot(data['x_axis']['values'], data['y1_axis']['values'], colors[0],
linewidth=2,
label=data['y1_axis']['label'])
if y1_expected is not None:
p3, = host.plot(data['x_axis']['values'], data['y1_expected']['values'], colors[4], linestyle="-.",
#linewidth=2,
label=r'$\sum_{s\in S^b}{W^s}, \mu^b_{MAX} = 1720$')
p2, = par1.plot(data['x_axis']['values'], data['y2_axis']['values'], colors[1], linestyle="--",
linewidth=2,
label=data['y2_axis']['label'])
else:
p1, = host.plot(data['x_axis']['values'], data['y1_axis']['values'], colors[0],
marker=markers[0], markevery=1, linewidth=2, markersize=8, mfc='none', markeredgewidth=1,
label=data['y1_axis']['label'])
p2, = par1.plot(data['x_axis']['values'], data['y2_axis']['values'], colors[1],
marker=markers[0], markevery=1, linewidth=2, markersize=8, markeredgewidth=1,
label=data['y2_axis']['label'])
markers.pop(0)
colors.pop(0)
colors.pop(0)
lines.append(p1)
if y1_expected is not None:
lines.append(p3)
lines.append(p2)
axis_padding = 0.3 # percentage
# host.set_xlim(min(data_dict['x_axis']['values']),
# max(data_dict['x_axis']['values']))
if x_axis_limit and x_axis_start:
host.set_xlim(x_axis_start, x_axis_limit)
elif x_axis_limit:
host.set_xlim(1, x_axis_limit)
else:
host.set_xlim(1, 64)
#plt.xticks(np.arange(0, 330, step=30))
if reformulate_xticks:
plt.xticks(x_pos, x_labels)
if annotation_label is not None:
if annotation_xy is not None:
par1.annotate(annotation_label,
xy=annotation_xy, xycoords='data',
xytext=(0.7, 0.95), textcoords='axes fraction',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='top')
#plt.xticks(np.arange(min(data_dict['x_axis']['values']), max(data_dict['x_axis']['values']), 1.0))
# host. set_ylim(0,
# max(data_dict['y1_axis']['values']) +
# (max(data_dict['y1_axis']['values'])*axis_padding))
# par1.set_ylim(0,
# max(data_dict['y2_axis']['values']) +
# (max(data_dict['y2_axis']['values']) * axis_padding))
if y1_axis_limit:
host.set_ylim(0, y1_axis_limit)
if y2_axis_limit:
par1.set_ylim(0, y2_axis_limit)
host.set_xlabel(x_axis_label)
host.set_ylabel(y1_axis_label)
par1.set_ylabel(y2_axis_label)
# host.yaxis.label.set_color(p1.get_color())
# par1.yaxis.label.set_color(p2.get_color())
# tkw = dict(size=4, width=1.5)
# host.tick_params(axis='y', colors=p1.get_color(), **tkw)
# par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
# lines = [p1, p2, p3, p4]
colors = ['darkblue', 'darkviolet', 'mediumblue', 'deeppink', 'dodgerblue', 'magenta']
if all(param is not None for param in [y1_stdevs, y2_stdevs]):
for data in data_dict:
host.errorbar(data['x_axis']['values'], data['y1_axis']['values'],
yerr=data['y1_stdev']['values'], fmt='none', ecolor=colors[0], capthick=3, capsize=1)
par1.errorbar(data['x_axis']['values'], data['y2_axis']['values'],
yerr=data['y2_stdev']['values'], fmt='none', ecolor=colors[1], capthick=3, capsize=1)
colors.pop(0)
colors.pop(0)
# Title of the graph
# plt.title(title)
plt.legend(lines, [l.get_label() for l in lines], loc='upper left',
ncol=1) # shadow=True)
plt.savefig(str(directory) + '/' + str(title) + '.pdf', format="pdf", bbox_inches="tight")
plt.savefig(str(directory) + '/' + str(title) + '.png', format="png", bbox_inches="tight")
plt.savefig(str(directory) + '/' + str(title) + '.eps', format="eps", bbox_inches="tight")
plt.show()
def make_patch_spines_invisible(ax):
    """Keep *ax*'s frame active but hide its patch and every spine.

    Intended for twinx-created parasite axes: the frame stays on so a single
    spine can later be re-enabled, while everything else is invisible.
    """
    ax.set_frame_on(True)
    ax.patch.set_visible(False)
    for spine in ax.spines.values():
        spine.set_visible(False)
def read_results(filename, x_axis, y1_axis, y2_axis, y1_expected=None, y1_stdev=None, y2_stdev=None):
    """Load experiment results from a CSV into the plotting dict layout.

    Each requested series name (``x_axis`` etc.) is matched by substring
    against the CSV headers; ``label`` records the matched header and
    ``values`` holds that column's values in row order. The stdev/expected
    series are included only when their header names are supplied.
    """
    # Map result-dict keys to the header substrings the caller asked for.
    series_names = {'x_axis': x_axis,
                    'y1_axis': y1_axis,
                    'y2_axis': y2_axis}
    if y1_stdev is not None and y2_stdev is not None:
        series_names['y1_stdev'] = y1_stdev
        series_names['y2_stdev'] = y2_stdev
    if y1_expected is not None:
        series_names['y1_expected'] = y1_expected
    results = {key: {'label': '', 'values': []} for key in series_names}
    frame = pd.read_csv(filename, sep=',', header=0)
    # Substring-match every header against every requested series.
    for header in frame.columns.values:
        for key, needle in series_names.items():
            if needle in header:
                results[key]['label'] = header
                results[key]['values'] = []
    # Collect values row by row, mirroring the original iteration order.
    for _, row in frame.iterrows():
        for key, series in results.items():
            series['values'].append(row[series['label']])
    return results
|
10,491 | b6bc2cc268adeb29cf8d916661c13141d4b1f8e9 | """
Script to Query ArangoDB. See README.md.
Author: Volker Hoffman <volker.hoffmann@sintef.no>
Update: 06 June 2018
"""
from __future__ import print_function
import pyArango.connection
import json
import time
import argparse
def connect_to_database():
    """Open a connection to the ArangoDB instance and return _system.

    The server address is hard-coded; all queries run against the
    default ``_system`` database.
    """
    connection = pyArango.connection.Connection(arangoURL='http://192.168.1.45:8529')
    return connection['_system']
def read_campaign_keys_from_json(fname):
    """Load campaign keys from a JSON file of objects with a ``key`` field.

    The keys may be stored as strings or numbers; they are returned as ints,
    in file order. Replaces the manual append loop with a comprehension.
    """
    with open(fname, 'r') as f:
        json_of_keywords = json.load(f)
    return [int(entry['key']) for entry in json_of_keywords]
def read_query_from_file(fname):
    """Return the full text of the AQL query stored in *fname*.

    The original read all lines and re-joined them; a single ``read()``
    is equivalent (byte-for-byte) and avoids the intermediate list.
    """
    with open(fname, 'r') as f:
        return f.read()
# run query against database
# for Bind Parameters (aka bindVars), see
# https://github.com/tariqdaouda/pyArango#queries--aql
# https://docs.arangodb.com/3.3/AQL/Fundamentals/BindParameters.html
# do note the peculiarities about string processing
# also see query.aql
def run_query(db, qry, campaign_key=546426629):
    """Execute an AQL query bound to one campaign key and return the cursor.

    Raw results are requested in batches of 64 with the total count enabled;
    see query.aql for the expected ``@campaign_key`` bind parameter.
    """
    return db.AQLQuery(
        qry,
        rawResults=True,
        batchSize=64,
        count=True,
        bindVars={'campaign_key': campaign_key},
    )
def query_to_result_list(queryResult):
    """Drain an ArangoDB query cursor into a plain list, logging progress.

    Fixes two defects in the original: with an empty result set the final
    print referenced the loop variable ``ii`` before assignment (NameError);
    and the closing message used ``%06d`` while the progress lines used
    ``%07d``. Using ``len(result_list)`` handles both.
    """
    t0 = time.time()
    result_list = []
    for ii, qq in enumerate(queryResult):
        if ii % 5000 == 0:
            print("** Fetched %07d/%07d Records (%.1fs Elapsed)" % \
                  (ii, queryResult.count, time.time() - t0))
        result_list.append(qq)
    print("** Fetched %07d/%07d Records (%.1fs Elapsed)" % \
          (len(result_list), queryResult.count, time.time() - t0))
    return result_list
# write query output to file
def result_list_to_file(result_list, fname):
    """Serialize the fetched records to *fname* as a single JSON array."""
    with open(fname, 'w') as handle:
        handle.write(json.dumps(result_list))
# --- Script body: fetch and save query results for every campaign key. ---
print('// Parsing Arguments')
parser = argparse.ArgumentParser()
parser.add_argument('--keyfile', default='1m10small.json', \
                    help='Name of JSON File w/ Campaign Keys.')
args = parser.parse_args()
print('// Connecting to Database')
db = connect_to_database()
print("// Loading Campaign Keys (%s)" % args.keyfile)
campaign_keys = read_campaign_keys_from_json(args.keyfile)
print('** List of Requested Keys is:')
print(campaign_keys)
print('// Looping Over Campaign Keys')
print('')
# One round trip per campaign key: load query, execute, fetch, dump to JSON.
for campaign_key in campaign_keys:
    t00 = time.time()
    print("**** Running Query/Fetch/Save for Campaign Key %i" % campaign_key)
    print(">>>> %s UTC" % time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()))
    print('// Loading Query')
    # Re-read query.aql each iteration so it can be edited between runs.
    qry = read_query_from_file('query.aql')
    print('** Query:')
    print(qry)
    print("// Excecuting Query for Campaign Key %i" % campaign_key)
    queryResult = run_query(db, qry, campaign_key=campaign_key)
    print("** Query Returned %i Records" % queryResult.count)
    # Server-side query cache hit means executionTime is not meaningful.
    if queryResult.response['cached'] == True:
        print("** Query Was Cached")
    else:
        print("** Query Took %.2f Seconds" % \
            queryResult.response['extra']['stats']['executionTime'])
    print('// Fetching Resulting. Wallclock Ticking.')
    result_list = query_to_result_list(queryResult)
    fname = "result_%i.json" % campaign_key
    print("// Writing Result to File (%s)" % fname)
    t0 = time.time()
    result_list_to_file(result_list, fname)
    t1 = time.time()
    print("** Took %.1fs" % (t1-t0))
    print("**** Finished for Campaign Key %i" % campaign_key)
    print("**** Round-Trip Time %.1fs" % (time.time() - t00))
    print(">>>> %s UTC" % time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()))
    print('')
print('!! Done')
|
10,492 | 95942c9e825639385c7d4b1f73ee615215053478 |
"""Services Module."""
import shell
def run_service_command(serviceName, command='status'):
    """Invoke ``sudo service <serviceName> <command>`` via the shell helper.

    NOTE(review): serviceName is interpolated into a shell command string;
    callers must pass trusted service names only.
    """
    shell_cmd = "sudo service %s %s" % (serviceName, command)
    shell.run_shell_cmd(shell_cmd)
def stop_service(serviceName):
    """Bring the named system service down."""
    run_service_command(serviceName, command='stop')
def start_service(serviceName):
    """Bring the named system service up."""
    run_service_command(serviceName, command='start')
|
10,493 | 62273a72af52cea2659a7139fc702a49cd05d4d9 | #!/usr/bin/env python
import os
from setuptools import setup
# Long description is assembled from the README plus the changelog,
# which is optional (generated by pandoc and may be absent).
with open('README.rst', encoding='utf-8') as readme_file:
    readme = readme_file.read()
try:
    # Might be missing if no pandoc installed
    with open('CHANGELOG.rst', encoding='utf-8') as history_file:
        history = history_file.read()
except IOError:
    history = ""
def read_requirements(ext: str = 'in'):
    """Parse requirement specifiers from ``requirements/base.<ext>``.

    Inline ``#`` comments are stripped from each line; blank lines and
    full-line comments are dropped.
    """
    path = os.path.join('requirements', f'base.{ext}')
    with open(path, encoding='utf-8') as stream:
        stripped = [raw.split('#', 1)[0].strip() for raw in stream]
    return [spec
            for spec in stripped
            if spec and not spec.startswith('#')]
def get_requirements(locked: bool):
    """Return install requirements, pinned (.txt) or unpinned (.in).

    Adds the platform-specific shell helper: ``pbs`` (the sh predecessor
    that works on Windows) or ``sh<2`` elsewhere.
    """
    ext = 'txt' if locked else 'in'
    requirements = read_requirements(ext)
    if os.name == 'nt':
        requirements.append('pbs')
    else:
        requirements.append('sh<2')
    return requirements
# Package metadata; long_description combines README and changelog read above.
setup(
    name='kibitzr',
    version='7.0.5',
    description="Self hosted web page changes monitoring",
    long_description=readme + '\n\n' + history,
    author="Peter Demin",
    author_email='kibitzrrr@gmail.com',
    url='https://github.com/kibitzr/kibitzr',
    packages=[
        'kibitzr',
    ],
    package_dir={
        'kibitzr': 'kibitzr',
    },
    entry_points={
        'console_scripts': [
            'kibitzr=kibitzr.cli:extended_cli'
        ]
    },
    include_package_data=True,
    license="MIT license",
    zip_safe=False,
    keywords='kibitzr',
    classifiers=[
        'Development Status :: 6 - Mature',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ],
    python_requires='>=3.8',
    test_suite='tests',
    # Unpinned specifiers by default; the 'locked' extra installs the pinned set.
    install_requires=get_requirements(locked=False),
    extras_require={
        'locked': get_requirements(locked=True),
    },
    setup_requires=['pytest-runner'],
    tests_require=[
        'pytest',
        'pytest-pep8',
        'pylint',
        'mock',
        'pytest-mock',
    ],
)
|
10,494 | b6774fa2338acc8cf754dc1cd1511743236c9b17 | #coding=utf8
import sys,os,os.path
reload(sys)
sys.setdefaultencoding('utf8')
from doclib import doclib
from numpy import *
import nmf
def getarticlewords():
    """Load the document library and return its word statistics.

    Returns (overall word counts, per-article word counts, article titles)
    as exposed by the doclib instance after load().
    """
    dl=doclib('data/doclib/')
    dl.load()
    return dl.allwords,dl.articlewords,dl.articletitles
def makematrix(allw,articlew):
    """Build a document-term count matrix from word-count dicts.

    allw maps word -> overall count; articlew is a list of per-article
    word-count dicts. Words seen more than once overall form the vocabulary
    (in allw iteration order); the matrix has one row per article with that
    article's count for each vocabulary word (0 when absent).

    Replaces the ``(word in f and f[word] or 0)`` hack with ``dict.get``
    and the manual append loop with a comprehension.
    """
    # A stricter alternative filter also capped very common words:
    #   c > 2 and c < len(articlew) * 0.6
    wordvec = [w for w, c in allw.items() if c > 1]
    ll = [[f.get(word, 0) for word in wordvec] for f in articlew]
    return ll, wordvec
def showfeatures(w,h,titles,wordvec,out="data/features.txt"):
    """Report NMF features: top words per pattern and top articles per pattern.

    w is the (articles x patterns) weight matrix, h the (patterns x words)
    feature matrix. Writes a text report to *out* and returns
    (toppatterns, patternnames) where toppatterns[j] lists
    (weight, pattern index, title) entries for article j.

    Fixes: ``[[]] * len(titles)`` aliased one shared list so every article
    accumulated every entry — replaced with independent lists; the Python 2
    ``file()`` call is replaced by ``open`` in a context manager so the
    output file is always closed.
    """
    pc, wc = shape(h)
    # One independent list per article; [[]]*n would alias a single list.
    toppatterns = [[] for _ in range(len(titles))]
    patternnames = []
    with open(out, 'w') as outfile:
        for i in range(pc):
            # Words ranked by their weight in pattern i; keep the top 6.
            slist = []
            for j in range(wc):
                slist.append((h[i, j], wordvec[j]))
            slist.sort(reverse=True)
            n = [s[1] for s in slist[:6]]
            outfile.write(" ".join(n) + '\n')
            patternnames.append(n)
            # Articles ranked by their weight on pattern i; report the top 5.
            flist = []
            for j in range(len(titles)):
                flist.append((w[j, i], titles[j]))
                toppatterns[j].append((w[j, i], i, titles[j]))
            flist.sort(reverse=True)
            for f in flist[:5]:
                outfile.write("%f %s\n" % (f[0], f[1]))
            outfile.write('\n')
    return toppatterns, patternnames
def showarticles(titles,toppatterns,patternnames,out='data/articles.txt'):
    """Write, per article, its strongest patterns and their top words.

    For each title, sorts that article's (weight, pattern index, title)
    entries descending and reports up to three patterns with their word
    lists from patternnames.

    Fixes: the file is now closed via a context manager, and fewer than
    three patterns no longer raises IndexError (``range(3)`` → bounded).
    Note: toppatterns' inner lists are sorted in place, as before.
    """
    with open(out, 'w') as outfile:
        for j in range(len(titles)):
            outfile.write(titles[j] + '\n')
            toppatterns[j].sort(reverse=True)
            for i in range(min(3, len(toppatterns[j]))):
                outfile.write("%f %s\n" % (toppatterns[j][i][0],
                                           " ".join(patternnames[toppatterns[j][i][1]])))
            outfile.write('\n')
if __name__=='__main__':
    # Pipeline: load word counts, build the doc-term matrix, factorize it
    # with NMF into 5 patterns, then write the feature/article reports.
    # NOTE: Python 2 script (bare print statements, reload(sys) above).
    allw,artw,artt= getarticlewords()
    wordmatrix,wordvec=makematrix(allw,artw)
    print wordvec[0:10]
    print wordmatrix[1][0:10]
    v=matrix(wordmatrix)
    weights,feat=nmf.factorize(v,pc=5,iter=100)
    topp,pn=showfeatures(weights,feat,artt,wordvec)
    showarticles(artt,topp,pn)
|
10,495 | 48d91936f900dadae69629e1b48d581c32f47534 | #!/usr/bin/env python3
""""
Model that uses standard machine learning method - linear regression
It can be used for whole molecules or their fragments, but it has to be consistent.
If we use fragments then the similarity is computed as average similarity of the descriptors.
input model_configuration should look like this:
{"model_name": "linear_regression_model", "fragments": "ecfp.6", "molecules": 0/1}
where 0 means that we use fragments, 1 means we use molecules
"""
import json
from sklearn import linear_model
from model_interface import IModel
from model_factory import register_model
import inputoutput_utils
class LinearRegressionModel(IModel):
model_name = "linear_regression_model"
def name(self):
return self.model_name
def create_model(self, active_fragments: str, inactive_fragments: str,
active_descriptors: str, inactive_descriptors: str,
model_configuration: dict):
act_descriptors = extract_descriptors(active_descriptors, model_configuration)
inact_descriptors = extract_descriptors(inactive_descriptors, model_configuration)
model = {
"configuration": model_configuration,
"data": {
"active": act_descriptors,
"inactive": inact_descriptors
}
}
return model
def save_to_json_file(self, output_file: str, model: dict):
inputoutput_utils.save_to_json_file(output_file, model)
def score_model(self, model_configuration: dict, fragments_file: str,
descriptors_file: str, output_file: str):
inputoutput_utils.create_parent_directory(output_file)
reg = linear_model.LinearRegression()
# get activity list
actives = [1 for i in range(len(model_configuration["data"]["active"]))]
inactives = [0 for i in range(len(model_configuration["data"]["inactive"]))]
activity = actives + inactives
reg.fit(model_configuration["data"]["active"] + model_configuration["data"]["inactive"],
activity)
test_descriptors = extract_descriptors(descriptors_file, model_configuration["configuration"])
molecule_file = int(model_configuration["configuration"]["molecules"])
prediction = (reg.predict(test_descriptors))
if molecule_file == 1:
first_line = True
with open(output_file, "w", encoding="utf-8") as output_stream:
with open(fragments_file, "r", encoding="utf-8") as input_stream:
for num_line, new_line in enumerate(input_stream):
line = json.loads(new_line)
score = {
"name": line["name"],
"score": prediction[num_line]
}
if first_line:
first_line = False
else:
output_stream.write("\n")
json.dump(score, output_stream)
else:
num_of_fragment = [0]
names_of_molecules = []
with open(fragments_file, "r", encoding="utf-8") as fragments_stream:
suma = 0
for new_line in fragments_stream:
line = json.loads(new_line)
fragment_length = len(line["fragments"])
suma += fragment_length
num_of_fragment.append(suma)
names_of_molecules.append(line["name"])
first_line = True
with open(output_file, "w", encoding="utf-8") as output_stream:
for i in range(len(num_of_fragment) - 1):
prediction_of_molecule = prediction[num_of_fragment[i]:num_of_fragment[i+1]]
sim = sum(prediction_of_molecule) / len(prediction_of_molecule)
score = {
"name": names_of_molecules[i],
"score": sim
}
if first_line:
first_line = False
else:
output_stream.write("\n")
json.dump(score, output_stream)
def extract_descriptors(input_file: str, model_configuration: dict) -> list:
descriptors = []
with open(input_file, "r", encoding="utf-8") as stream:
line = stream.readline()
line_parts = line.split(",")
if (((line_parts[1] == "index") & (int(model_configuration["molecules"]) == 1)) |
((line_parts[1] != "index") & (int(model_configuration["molecules"]) == 0))):
print("Wrong input")
exit(1)
for line in stream:
line_parts = line.split(",")
descriptors.append(list(map(float, line_parts[1:])))
return descriptors
register_model(LinearRegressionModel.model_name, lambda: LinearRegressionModel())
|
10,496 | 10f0d1eee2cf39fc6e07662c6efc230020daa10b | from pymongo import MongoClient
from bson.raw_bson import RawBSONDocument
from bson.codec_options import CodecOptions
import requests
import json
# NOTE(review): this first client is immediately shadowed by the
# MongoClient below, and codec_options is never used — both look like
# leftovers; verify before removing.
client = MongoClient()
codec_options = CodecOptions(document_class=RawBSONDocument)
client = MongoClient('mongodb://localhost:27017')
db = client['jacaranda-db']
def propertiesLatLog(collection):
    """Geocode each property's (country, district) and store the geometry.

    Groups the collection by (_id, country, district), queries the Google
    Geocoding API for every row, and writes the returned geometry into the
    matching document's ``property_location`` field.

    Fixes: ``print(list(x))`` raised TypeError when no document matched
    (find_one_and_update returns None) and printed only dict keys on a hit;
    the api_key assignment is hoisted out of the loop.
    """
    grouped = collection.aggregate([{
        "$group": {
            "_id": {
                "_id": "$_id",
                "country": "$country",
                "district": "$district"
            }
        }
    }, {
        "$project": {
            "_id": 1,
            "country": 1,
            "district": 1
        }
    }])
    rows = [sub['_id'] for sub in grouped]
    # SECURITY NOTE(review): API key is hard-coded in source; it should be
    # rotated and loaded from configuration/environment instead.
    api_key = 'AIzaSyAy1Z3e2qtLg7IvpEiMcObLfHUH9HrWcYE'
    for row in rows:
        district = row['district'].replace(" ", "+")
        country = row['country']
        url = ('https://maps.googleapis.com/maps/api/geocode/json?address=' +
               district + ',+' + country + ',+' + '&key=' + api_key)
        latlong_json = requests.get(url).json()
        search_query = {"_id": row["_id"]}
        for latlong in latlong_json['results']:
            update_query = {"$set": {"property_location": latlong['geometry']}}
            # Update the specific property document in place; returns the
            # pre-update document, or None when nothing matched.
            updated = collection.find_one_and_update(search_query, update_query)
            print(updated)
# Script entry point: geocode every document in formatted_properties.
collection = db.formatted_properties
propertiesLatLog(collection)
|
10,497 | 597bda36405f8e362256e108a5ad017fd8cd0ce8 |
from cgml.constants import SCHEMA_IDS as SID
from cgml.validators import validateSchema
def makeSchema(n_in=None,
               n_out=None,
               nLayers=1,
               inputDropRate=2,
               modelType=None,
               costFunction=None,
               activationFunction="tanh",
               useDropout=True):
    """Construct and validate a CGML network schema dict.

    Hidden layers shrink by roughly 1/inputDropRate per layer, floored at
    n_out; dropout is 0.2 on the input layer and 0.5 elsewhere (0.0 when
    useDropout is False). The output layer is always linear. Raises via
    validateSchema if the assembled schema is invalid.
    """
    inputDropoutRate = (0.2 if useDropout else 0.0)
    dropoutRate = (0.5 if useDropout else 0.0)
    graph = []
    fan_in = n_in
    # range(nLayers - 1) is empty for a single-layer (linear) model.
    for i in range(nLayers - 1):
        fan_out = int(round(fan_in / inputDropRate))
        if fan_out <= n_out:
            fan_out = n_out
        graph.append({SID.LAYER_NAME: "hidden{0}".format(i),
                      SID.LAYER_N_IN: fan_in,
                      SID.LAYER_N_OUT: fan_out,
                      SID.LAYER_ACTIVATION: activationFunction,
                      SID.LAYER_DROPOUT: (inputDropoutRate if i == 0 else dropoutRate)})
        fan_in = fan_out
    # Output layer: linear; no dropout when the model is a plain linear one.
    graph.append({SID.LAYER_NAME: "output",
                  SID.LAYER_N_IN: fan_in,
                  SID.LAYER_N_OUT: n_out,
                  SID.LAYER_ACTIVATION: "linear",
                  SID.LAYER_DROPOUT: (0.0 if nLayers == 1 else dropoutRate)})
    schema = {SID.DESCRIPTION: "schema by maker",
              SID.MODEL_TYPE: modelType,
              SID.SUPERVISED_COST: {SID.COST_NAME: "output",
                                    SID.COST_TYPE: costFunction},
              SID.GRAPH: graph}
    validateSchema(schema)
    return schema
|
10,498 | b3b25c18a4af5c6d83c0f935aba1b074519b15a3 | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import login, authenticate
from django.contrib.auth.views import logout
from django.contrib.auth.mixins import PermissionRequiredMixin
from .forms import StudentRegisterForm, TeacherRegisterForm, NewGroupForm, NewAssignmentForm
from .models import Student, Teacher, Classroom, Group, Assignment, StudentAssignment, User
from django.views.generic import ListView, DetailView, CreateView, FormView, TemplateView
# Create your views here.
def index(request):
    """Render the site landing page."""
    return render(request, 'content.html')
# def register(request):
# if request.method == 'POST':
# form = NewUserRegistration(request.POST)
# if form.is_valid():
# form.save()
# username = form.cleaned_data.get('username')
# raw_password = form.cleaned_data.get('password1')
# # first_name = form.cleaned_data.get('first_name')
# # last_name = form.cleaned_data.get('last_name')
# # email = form.cleaned_data.get('email')
# user = authenticate(username=username, password=raw_password)
# if form.cleaned_data.get('student_or_teacher') == 's':
# new_student = Student.objects.create(user=user)
# new_student.save()
# elif form.cleaned_data.get('student_or_teacher') == 't':
# new_teacher = Teacher.objects.create(user=user)
# new_teacher.save()
# login(request, user)
# return redirect('index')
# else:
# form = NewUserRegistration()
# return render(request, 'register_form.html', {'form': form})
class RegisterView(TemplateView):
    """Static page letting a visitor choose student or teacher sign-up."""
    template_name = 'registration/register.html'
class StudentRegisterView(CreateView):
    """Sign up a student account and log the new user in."""
    model = User
    form_class = StudentRegisterForm
    template_name = 'registration/register_form.html'
    def form_valid(self, form):
        # Saving the form creates the User; presumably the form also creates
        # the Student profile — verify in forms.py.
        user = form.save()
        login(self.request, user)
        return redirect('/')
class TeacherRegisterView(CreateView):
    """Sign up a teacher account and log the new user in."""
    model = User
    form_class = TeacherRegisterForm
    template_name = 'registration/register_form.html'
    def form_valid(self, form):
        # Saving the form creates the User; presumably the form also creates
        # the Teacher profile — verify in forms.py.
        user = form.save()
        login(self.request, user)
        return redirect('/')
def logoout_view(request):
    """Log the current user out and send them to the landing page.

    NOTE(review): the name keeps the original 'logoout' typo because URL
    configuration elsewhere may reference it by name.
    """
    logout(request)
    # A Django view must return an HttpResponse; the original returned None,
    # which makes Django raise after logging the user out. redirect('/')
    # matches the convention used by the register views' form_valid above.
    return redirect('/')
class ClassroomListView(ListView):
    """List all classrooms."""
    model = Classroom
    context_object_name = 'classroom_list'
class ClassroomDetailView(DetailView):
    """Show one classroom plus its scheduled lessons."""
    model = Classroom
    context_object_name = 'classroom'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Reverse FK lookup: all SchoolLesson rows tied to this classroom.
        context['lessons'] = self.get_object().schoollesson_set.all()
        return context
class TeacherListView(ListView):
    """List all teachers."""
    model = Teacher
    context_object_name = 'teacher_list'
class GroupListView(ListView):
    """List all student groups."""
    model = Group
    context_object_name = 'group_list'
class GroupDetailView(DetailView):
    """Show one group together with its member students."""
    model = Group
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['student_list'] = self.get_object().students.all()
        return context
class GroupCreateView(CreateView):
    """Create a new group via NewGroupForm."""
    model = Group
    form_class = NewGroupForm
class AssignmentListView(ListView):
    """List assignments belonging to the group identified by a slug."""
    model = Assignment
    def get_queryset(self):
        # Slug comes from the URLconf kwargs, falling back to ?slug= in GET.
        slug = self.kwargs.get('slug', self.request.GET.get('slug'))
        queryset = Assignment.objects.filter(group=Group.objects.get(slug=slug))
        return queryset
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Expose the slug so templates can build group-scoped links.
        context['group_slug'] = self.kwargs.get('slug', self.request.GET.get('slug'))
        return context
class AssignmentDetailView(DetailView):
    """Show one assignment plus each student's submission for it."""
    model = Assignment
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['student_assignments'] = StudentAssignment.objects.filter(assignment=self.get_object())
        return context
#
class AssignmentCreateView(CreateView):
    """Create an assignment pre-bound to the group named in the URL slug."""
    model = Assignment
    form_class = NewAssignmentForm
    def get_initial(self):
        group_slug = self.kwargs.get('slug', self.request.POST.get('slug'))
        # Resolving the group validates the slug (404 on a bad one);
        # the object itself is otherwise unused here.
        group = get_object_or_404(Group, slug=group_slug)
        return {'group_slug': group_slug}
|
10,499 | f8512db4b717612e06515a617592384039aec9ad | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, chardet, codecs, math, pymongo, Queue, os, re
from foofind.utils import u, logging
from threading import Thread
from multiprocessing import Pool
class EntitiesFetcher(Thread):
    """Daemon thread resolving ontology entities for file documents.

    Consumes file docs from ``self.requests``; for each doc carrying a main
    entity ("se"), fetches that entity plus its first- and second-level
    relations from the MongoDB ontology collection, attaches them, and
    pushes the doc to ``self.results``. A None request is the shutdown
    sentinel. Missing entity ids are logged to nf_ntts.csv.
    (Python 2 code: Queue, iteritems, pymongo.Connection.)
    """
    def __init__(self, server, results):
        super(EntitiesFetcher, self).__init__()
        self.daemon = True
        self.server = server            # MongoDB host for the ontology data
        self.results = results          # output queue shared with FilesFetcher
        self.requests = Queue.Queue()   # incoming file docs; None terminates
    def run(self):
        gconn = None
        not_found_count = 0
        with open("nf_ntts.csv", "w") as not_found_ntts:
            while True:
                # Fetch entity-lookup requests; None is the stop sentinel.
                afile = self.requests.get(True)
                if afile is None:
                    self.requests.task_done()
                    break
                # Lazy connection: opened on first use, re-opened after errors.
                if not gconn:
                    gconn = pymongo.Connection(self.server, slave_okay=True)
                try:
                    # Look up the file's main entity.
                    main_ntt_id = int(afile["se"]["_id"])
                    ntt = gconn.ontology.ontology.find_one({"_id":main_ntt_id})
                    ntts1_info = set()
                    ntts2_info = set()
                    if ntt:
                        afile["se"]["info"] = ntt
                        # Collect first- and second-level related entities.
                        if "r" in ntt and ntt["r"]:
                            # Build (entity id, relation-type prefix) pairs for
                            # first-level relations.
                            ntts1_info = {(ntt_id, relation[:3])
                                            for relation, relation_ids in ntt["r"].iteritems()
                                            for ntt_id in relation_ids if ntt_id!=main_ntt_id}
                            # If there are first-level entities...
                            if ntts1_info:
                                # Fetch the first-level entity documents.
                                ntts1_ids = [ntt_id for ntt_id, relation in ntts1_info]
                                ntts1 = list(gconn.ontology.ontology.find({"_id":{"$in":ntts1_ids}}))
                                # Build the (id, relation) pairs for second-level relations.
                                ntts1_ids.append(main_ntt_id) # include the main id so the list can be used as a filter
                                ntts2_info = {(ntt_id, relation[:3])
                                                for ntt2 in ntts1 if "r" in ntt2
                                                for relation, relation_ids in ntt2["r"].iteritems()
                                                for ntt_id in relation_ids if ntt_id not in ntts1_ids}
                        afile["se"]["rel"] = (ntts1_info, ntts2_info)
                    else:
                        # Entity missing: record the file id for later inspection.
                        not_found_ntts.write(str(afile["_id"])+"\n")
                        not_found_count += 1
                    del afile["se"]["_id"]
                except BaseException:
                    ntt_id = str(afile["se"]["_id"]) if "_id" in afile["se"] else "???"
                    del afile["se"]["_id"]
                    # Drop the connection so the next request reconnects cleanly.
                    gconn.close()
                    gconn = None
                    logging.exception("Error obtaining entities for file %s: %s."%(str(afile["_id"]), ntt_id))
                # The file is forwarded even when entity resolution failed.
                self.results.put(afile)
                self.requests.task_done()
        if not_found_count:
            logging.warn("Entities not found for some files. Check file nf_ntts.csv.")
class FilesFetcher(Thread):
    """Daemon thread streaming file docs from MongoDB into a bounded queue.

    When stop_set_len is non-zero the collection is walked newest-first and a
    "stop set" of recently seen ids is maintained so an incremental run can
    stop where the previous run started. Files carrying an "se" entry are
    routed through an EntitiesFetcher before landing on ``self.results``.
    Iterating the instance yields the fetched docs (Python 2 ``next``).
    """
    def __init__(self, server, entities_server, filter, batch_size, stop_set, stop_set_len, last_count, processes):
        super(FilesFetcher, self).__init__()
        self.daemon = True
        self.server = server
        self.batch_size = batch_size
        # Bounded queue applies back-pressure on the fetching side.
        self.results = Queue.Queue(batch_size*processes)
        self.filter = filter
        self.complete = False
        self.entities = EntitiesFetcher(entities_server, self.results)
        self.stop_set = stop_set
        self.stop_set_len = stop_set_len
        self.total_count = self.last_count = last_count
    def run(self):
        self.complete = False
        gconn = pymongo.Connection(self.server, slave_okay=True)
        gdb = gconn.foofind
        gfoo = gdb.foo
        self.entities.start()
        cursor = gfoo.find(self.filter, timeout=False).batch_size(self.batch_size)
        if self.stop_set_len:
            # Incremental mode: walk newest-first and track where to stop.
            cursor = cursor.sort([("$natural",pymongo.DESCENDING)])
            new_stop_set = set()
            must_stop = add_to_stop_set = self.stop_set_len
            self.total_count = gfoo.count()
            count_limit = max(0,self.total_count-self.last_count)
            hard_limit = -100 - int(count_limit/1000.) # hard limit: 1 deletion per thousand files plus 100 fixed
        for f in cursor:
            if not 's' in f:
                f['s'] = 9
            if self.stop_set_len:
                # Build the new stop set from the first ids seen this run.
                if add_to_stop_set:
                    new_stop_set.add(str(f["_id"]))
                    add_to_stop_set -= 1
                # Check the current stop set from the previous run.
                if str(f["_id"]) in self.stop_set:
                    must_stop-=1
                    if must_stop==0:
                        break
                    else:
                        continue
                # Limit by file count.
                # NOTE(review): incrementing count_limit means the
                # ``count_limit<0`` condition below can never become true via
                # this path — possibly intended to be a decrement; verify.
                count_limit += 1
                # Stop once the probable number of new files has been walked
                # and some stop-set member was seen, or past the hard limit.
                if count_limit<0 and must_stop<self.stop_set_len or count_limit<hard_limit:
                    if add_to_stop_set and self.stop_set:
                        new_stop_set.update(self.stop_set)
                    break
            # Files with entities detour through the entity fetcher thread.
            if "se" in f and f["se"]:
                self.entities.requests.put(f)
            else:
                self.results.put(f)
        # Shut the entity fetcher down and wait for pending requests.
        self.entities.requests.put(None)
        self.entities.requests.join()
        # Publish the newly built stop set for the caller's next run.
        if self.stop_set_len:
            self.stop_set = new_stop_set
        self.complete = True
    def __iter__(self):
        return self
    def next(self):
        # Poll the results queue until it drains after fetching completes.
        while True:
            if self.results.empty() and self.complete:
                raise StopIteration
            try:
                return self.results.get(True, 3)
            except:
                pass
# Cached bound method: joining with a single space is used in hot paths below.
space_join = " ".join
# Characters XML 1.0 cannot carry (control chars, surrogates, <, >); stripped
# from text children before serialization.
XML_ILLEGAL_CHARS_RE = re.compile(u'[\x00-\x08<>\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]')
def tag(_name, _children=None, separator="", children_type=None, **kwargs):
    """Render one XML element as a unicode string (Python 2 code).

    ``_children=False`` suppresses the element entirely; None/empty yields a
    self-closing tag. ``children_type`` selects the serialization strategy
    (list join, sanitized/CDATA text, raw str, fixed-precision float, or
    generic unicode()). Remaining keyword args become attributes; falsy
    attribute values are dropped.
    """
    if _children is False:
        return u""
    else:
        attr = (" " + space_join('%s="%s"' % (key, u(val)) for key, val in kwargs.iteritems() if val)) if kwargs else ""
        if _children:
            if children_type is list:
                return u"<%s%s>%s</%s>" % (_name, attr, separator.join(_children), _name)
            elif children_type is unicode:
                # Strip characters XML cannot carry, then CDATA-wrap when the
                # remaining text contains an ampersand.
                escaped_children = space_join(XML_ILLEGAL_CHARS_RE.split(u(_children)))
                if "&" in escaped_children:
                    return u"<%s%s><![CDATA[%s]]></%s>" % (_name, attr, escaped_children, _name)
                else:
                    return u"<%s%s>%s</%s>" % (_name, attr, escaped_children, _name)
            elif children_type is str:
                return u"<%s%s>%s</%s>" % (_name, attr, _children, _name)
            elif children_type is float:
                return u"<%s%s>%.8f</%s>" % (_name, attr, _children, _name)
            else:
                return u"<%s%s>%s</%s>" % (_name, attr, unicode(_children), _name)
        else:
            return u"<%s%s/>" % (_name, attr)
def set_globals(fields, attrs, init_file, stats_file):
    """Install per-run callbacks and the extraction plan as module globals.

    Worker processes (see generate_file) read ``init_file``, ``stats_file``
    and ``items`` from this module; ``items`` is the (name, field, type)
    plan derived from the field and attribute descriptors.
    """
    module = sys.modules[__name__]
    setattr(module, "init_file", init_file)
    setattr(module, "stats_file", stats_file)
    item_plan = [(item["name"], item["field"], item["field_type"])
                 for item in fields + attrs]
    setattr(module, "items", item_plan)
def generate_file(args):
    """Serialize one (id, file doc) pair to a sphinx:document XML fragment.

    Relies on module globals installed by set_globals (``items``,
    ``init_file``, ``stats_file``). Returns (xml, afile) on success,
    (None, None) when init_file rejects the doc, or (None, exception)
    on error so the caller can count failures.
    """
    file_id, afile = args
    try:
        if not init_file(afile): return None, None
        # Missing/empty fields serialize as suppressed elements (False).
        doc = [tag(n, afile[f] if f and f in afile and afile[f] else False, children_type=t, separator=",") for n,f,t in items]
        return tag("sphinx:document", doc, id=file_id, children_type=list), afile
    except BaseException as e:
        logging.exception("Error processing file %s.\n"%str(afile["_id"]))
        return None, e
# Module-level hooks bound by XmlPipe2.__init__; generate_file and the
# worker pool read them as globals.
outwrite = None
generate_id = None
class XmlPipe2:
    """Streams MongoDB file docs to stdout as a Sphinx xmlpipe2 docset.

    Documents come from a FilesFetcher; serialization runs either inline
    or on a multiprocessing Pool when ``processes > 1``. Progress and
    error messages are logged (in Spanish, as in the rest of the module).
    """
    def __init__(self, processes, fields, attrs, stats, gen_id):
        global outwrite, generate_id
        # Bind the module-level output writer (UTF-8 stdout) and id generator.
        outwrite = codecs.getwriter("utf-8")(sys.stdout).write
        self.processes = processes
        self.fields = fields
        self.attrs = attrs
        self.stats = stats
        self.pool = Pool(processes=processes) if processes>1 else None
        self.count = 0
        generate_id = gen_id
    def generate_header(self):
        # Emit the docset prologue with the field/attribute schema.
        outwrite(u"<?xml version=\"1.0\" encoding=\"utf-8\"?><sphinx:docset><sphinx:schema>")
        outwrite(u"".join(tag("sphinx:field", name=f["name"]) for f in self.fields))
        outwrite(u"".join(tag("sphinx:attr", name=a["name"], type=a["type"], bits=a.get("bits"), default=a.get("default")) for a in self.attrs))
        outwrite(u"</sphinx:schema>")
    def generate_footer(self):
        outwrite(u"</sphinx:docset>")
    def generate(self, server, entities_server, part, afilter, batch_size, stop_set=None, stop_set_len=0, last_count=None, headers=True):
        """Index one server's files; returns the updated stop set."""
        ff = FilesFetcher(server, entities_server, afilter, batch_size, stop_set, stop_set_len, last_count, self.processes)
        ff.start()
        if headers: self.generate_header()
        count = error_count = 0
        logging.warn("Comienza indexado en servidor %s."%server)
        if self.pool:
            # Parallel path: serialize docs on the worker pool, in order.
            for doc, extra in self.pool.imap(generate_file, (generate_id(afile, part) for afile in ff)):
                count+=1
                if doc:
                    outwrite(doc)
                    stats_file(extra, self.stats)
                elif extra:
                    error_count += 1
                    if error_count>100: raise extra # more than 100 errors aborts the indexing with an error
                if count%1000000==0:
                    outwrite("\n")
                    logging.warn("Progreso de indexado del servidor %s."%(server), extra={"count":count, "error_count":error_count})
        else:
            # Single-process path: serialize inline.
            for afile in ff:
                doc, extra = generate_file(generate_id(afile, part))
                count+=1
                if doc:
                    outwrite(doc+"\n")
                    stats_file(extra, self.stats)
                elif extra:
                    error_count += 1
                    if error_count>100: raise extra # more than 100 errors aborts the indexing with an error
                if count%1000000==0:
                    logging.warn("Progreso de indexado del servidor %s."%(server), extra={"count":count, "error_count":error_count})
        if headers: self.generate_footer()
        logging.warn("Finaliza indexado en servidor %s."%server)
        self.total_count = ff.total_count
        self.count = count
        return ff.stop_set
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.